content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Exploratory analysis of the UCI "adult" income data set: proportion of
# >50K earners by workclass and by education level, plus a histogram of
# weekly working hours.
library(dplyr)
library(stats)
library(sqldf)
library(ggplot2)
library(reshape2)
library(gridExtra)

# NOTE(review): hard-coded absolute Windows path -- consider a relative path.
df <- read.csv("C:/Users/Mooda Meghana/Downloads/adult.csv")
summary(df)

# Recode the response: 1 = income above 50K, 0 = otherwise.
df$income <- ifelse(df$income == '>50K', 1, 0)
# '?' marks a missing workclass in this data set; label it explicitly.
df$workclass <- ifelse(df$workclass == '?', 'Unknown', as.character(df$workclass))

# Row count and number of above-50K earners per workclass.
Work_class <- sqldf('SELECT workclass, count(workclass) as Count
,sum(income) as Above from df group by workclass')
# Renamed from `table` to avoid shadowing base::table().
prop_table <- data.frame(Class = Work_class$workclass,
                         Proportion = Work_class$Above / Work_class$Count)
Work_class$Below <- Work_class$Count - Work_class$Above
# Keep workclass/Above/Below by name instead of magic column indices.
Work_class <- Work_class[, c("workclass", "Above", "Below")]
Workclass <- melt(Work_class, id.vars = 'workclass')
gg <- ggplot(Workclass, aes(x = workclass, y = value, fill = variable)) +
  geom_bar(stat = 'identity', position = 'stack') +
  theme_bw() +
  scale_fill_manual(values = c('red', 'green')) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  ggtitle('Proportions of above-paid within different classes')
tbl <- tableGrob(t(prop_table), rows = NULL)
grid.arrange(tbl, gg,
             nrow = 2,
             as.table = TRUE,
             heights = c(1, 4))

# Same breakdown by education level.
education <- sqldf('SELECT education, count(education) as Count
,sum(income) as Above from df group by education')
education$Below <- education$Count - education$Above
prop_table <- data.frame(Class = education$education,
                         Proportion = education$Above / education$Count)
education <- education[, c("education", "Above", "Below")]
edu <- melt(education, id.vars = 'education')
gg <- ggplot(edu, aes(x = education, y = value, fill = variable)) +
  geom_bar(stat = 'identity', position = 'stack') +
  theme_bw() +
  scale_fill_manual(values = c('red', 'green')) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  ggtitle('Proportions of above-paid within different education level')
tbl <- tableGrob(t(prop_table), rows = NULL)
grid.arrange(tbl, gg,
             nrow = 2,
             as.table = TRUE,
             heights = c(1, 4))

# Column 13 of the adult data holds hours-per-week; give it a short name.
colnames(df)[13] <- 'Hours'
gg <- qplot(Hours, data = df, geom = "histogram") +
  theme_bw() +
  ggtitle('Histogram of Working Hours')
gg
|
/predicting income/untitled.R
|
no_license
|
moodameghana/Predicting_Income
|
R
| false
| false
| 1,971
|
r
|
# Exploratory analysis of the UCI "adult" income data set: proportion of
# >50K earners by workclass and by education level, plus a histogram of
# weekly working hours.
library(dplyr)
library(stats)
library(sqldf)
library(ggplot2)
library(reshape2)
library(gridExtra)
# NOTE(review): hard-coded absolute Windows path.
df<-read.csv("C:/Users/Mooda Meghana/Downloads/adult.csv")
summary(df)
# Recode the response: 1 = income above 50K, 0 = otherwise.
df$income<-ifelse(df$income=='>50K',1,0)
# '?' marks a missing workclass; label it explicitly.
df$workclass<-ifelse(df$workclass=='?','Unknown',as.character(df$workclass))
# Row count and number of above-50K earners per workclass.
Work_class<-sqldf('SELECT workclass, count(workclass) as Count
,sum(income) as Above from df group by workclass')
# NOTE(review): `table` shadows base::table() -- rename if this script grows.
table<-data.frame(Class=Work_class$workclass, Proportion=Work_class$Above/Work_class$Count)
Work_class$Below<-Work_class$Count-Work_class$Above
# Keep workclass (1), Above (3), Below (4); drop Count.
Work_class<-Work_class[,c(1,3,4)]
Workclass<-melt(Work_class,id.vars = 'workclass')
# Stacked bars of Above/Below counts per workclass.
gg<-ggplot(Workclass,aes(x=workclass,y=value,fill=variable))+geom_bar(stat = 'identity',position = 'stack')+theme_bw()+scale_fill_manual(values = c('red','green'))+theme(axis.text.x = element_text(angle = 45, hjust = 1))+ggtitle('Proportions of above-paid within different classes')
tbl <- tableGrob(t(table), rows=NULL)
# Proportion table on top, bar chart below.
grid.arrange(tbl, gg,
nrow=2,
as.table=TRUE,
heights=c(1,4))
# Same breakdown by education level.
education<-sqldf('SELECT education, count(education) as Count
,sum(income) as Above from df group by education')
education$Below<-education$Count-education$Above
table<-data.frame(Class=education$education, Proportion=education$Above/education$Count)
education<-education[,c(1,3,4)]
edu<-melt(education,id.vars = 'education')
gg<-ggplot(edu,aes(x=education,y=value,fill=variable))+geom_bar(stat = 'identity',position = 'stack')+theme_bw()+scale_fill_manual(values = c('red','green'))+theme(axis.text.x = element_text(angle = 45, hjust = 1))+ggtitle('Proportions of above-paid within different education level')
tbl <- tableGrob(t(table), rows=NULL)
grid.arrange(tbl, gg,
nrow=2,
as.table=TRUE,
heights=c(1,4))
# Column 13 holds hours-per-week; give it a short name for plotting.
colnames(df)[13]<-'Hours'
gg<-qplot(Hours, data=df, geom="histogram")+theme_bw()+ggtitle('Histogram of Working Hours')
gg
|
# Plot the funds whose currency-exposure range over the last six months
# exceeds a threshold, one facet (row) per fund.
library(ggplot2)
library(zoo)
library(scales)
# NOTE(review): clearing the workspace inside a script is an anti-pattern;
# kept to preserve behavior.
rm(list = ls())
load("DataSet.Rda")  # presumably provides RawData -- confirm against the .Rda
RawData$Date <- as.Date(RawData$Date)
LastDate <- max(RawData$Date, na.rm=TRUE)
# Keep the last ~6 months of rows with a non-missing exposure:
# as.yearmon(LastDate) - 0.5 is six months earlier; frac = 1 -> end of month.
Last6mRaw <- RawData[RawData$Date >
as.Date(as.yearmon(LastDate) - 0.5, frac = 1) &
(!is.na(RawData$CCYExposure)),]
# Re-factor to drop levels no longer present after subsetting.
Last6mRaw$FundCode <- factor(Last6mRaw$FundCode)
FundsNo <- levels(Last6mRaw$FundCode)
# Distinct funds per date, and distinct dates per fund (coverage checks).
FundCounts <- as.data.frame(sapply(split(Last6mRaw$FundCode, Last6mRaw$Date),
function(x) length(unique(x))))
FundDatesCounts <- as.data.frame(sapply(split(Last6mRaw$Date, Last6mRaw$FundCode),
function(x) length(unique(x))))
# Min/max exposure per fund, one row per fund code.
Ranges <- as.data.frame(t(sapply(split(Last6mRaw$CCYExposure, Last6mRaw$FundCode),
range)))
colnames(Ranges) <- c("Min", "Max")
Ranges$Range <- Ranges$Max-Ranges$Min
# Row indices of fund codes starting with "FOUND" (excluded from the plot).
FOUNDsub <- which(rownames(Ranges) %in%
subset(rownames(Ranges),
subset = grepl(glob2rx("FOUND*"),
rownames(Ranges))))
threshold <- 0.1
# Funds whose exposure range exceeds the threshold, FOUND* codes excluded,
# sorted by decreasing range.
FundsOverThresh <- Ranges[Ranges$Range > threshold &
!rownames(Ranges) %in% rownames(Ranges)[FOUNDsub],]
FundsOverThresh <- FundsOverThresh[order(-FundsOverThresh$Range),]
PltsData <- Last6mRaw[Last6mRaw$FundCode %in% rownames(FundsOverThresh),]
PltsData$FundCode <- factor(PltsData$FundCode)
levels(PltsData$FundCode)
# One exposure line per fund, faceted vertically, y axis as percentages.
ggplot()+
geom_line(data=PltsData,
aes(y=CCYExposure, x = Date, group=FundCode),
stat= "identity") +
facet_grid(FundCode ~ .) +
scale_y_continuous(labels =percent)
|
/PlotHiRange.R
|
no_license
|
mcastagnaa/CCYexp
|
R
| false
| false
| 1,700
|
r
|
# Plot the funds whose currency-exposure range over the last six months
# exceeds a threshold, one facet (row) per fund. (Duplicate of the copy above.)
library(ggplot2)
library(zoo)
library(scales)
# NOTE(review): rm(list = ls()) in a script is an anti-pattern; kept as-is.
rm(list = ls())
load("DataSet.Rda")  # presumably provides RawData -- confirm
RawData$Date <- as.Date(RawData$Date)
LastDate <- max(RawData$Date, na.rm=TRUE)
# Last ~6 months of rows with non-missing exposure (frac = 1 -> month end).
Last6mRaw <- RawData[RawData$Date >
as.Date(as.yearmon(LastDate) - 0.5, frac = 1) &
(!is.na(RawData$CCYExposure)),]
Last6mRaw$FundCode <- factor(Last6mRaw$FundCode)
FundsNo <- levels(Last6mRaw$FundCode)
# Coverage checks: funds per date and dates per fund.
FundCounts <- as.data.frame(sapply(split(Last6mRaw$FundCode, Last6mRaw$Date),
function(x) length(unique(x))))
FundDatesCounts <- as.data.frame(sapply(split(Last6mRaw$Date, Last6mRaw$FundCode),
function(x) length(unique(x))))
# Min/max exposure per fund.
Ranges <- as.data.frame(t(sapply(split(Last6mRaw$CCYExposure, Last6mRaw$FundCode),
range)))
colnames(Ranges) <- c("Min", "Max")
Ranges$Range <- Ranges$Max-Ranges$Min
# Fund codes starting with "FOUND" are excluded below.
FOUNDsub <- which(rownames(Ranges) %in%
subset(rownames(Ranges),
subset = grepl(glob2rx("FOUND*"),
rownames(Ranges))))
threshold <- 0.1
FundsOverThresh <- Ranges[Ranges$Range > threshold &
!rownames(Ranges) %in% rownames(Ranges)[FOUNDsub],]
FundsOverThresh <- FundsOverThresh[order(-FundsOverThresh$Range),]
PltsData <- Last6mRaw[Last6mRaw$FundCode %in% rownames(FundsOverThresh),]
PltsData$FundCode <- factor(PltsData$FundCode)
levels(PltsData$FundCode)
# One exposure line per fund, faceted vertically, y axis as percentages.
ggplot()+
geom_line(data=PltsData,
aes(y=CCYExposure, x = Date, group=FundCode),
stat= "identity") +
facet_grid(FundCode ~ .) +
scale_y_continuous(labels =percent)
|
library(IONiseR)
library(magrittr)
library(dplyr)
library(ShortRead)
library(rlist)
library(mclust)
library(factoextra)
|
/backend/src/main/resources/r_scripts/loadLibraries.R
|
permissive
|
ambro01/NanoporeQC
|
R
| false
| false
| 119
|
r
|
library(IONiseR)
library(magrittr)
library(dplyr)
library(ShortRead)
library(rlist)
library(mclust)
library(factoextra)
|
#' @rdname proportion.test.onesample.approximate.simple
# One-sample approximate (normal) proportion test on raw data.
# Drops NAs from x, counts values equal to success.value, and delegates
# the actual test to proportion.test.onesample.approximate.simple().
proportion.test.onesample.approximate <- function(
  x
  ,success.value = 1 # Can be anything that compares with x with ==
  ,null.hypothesis.proportion = .5
  ,alternative = c("two.sided", "less", "greater")
  ,conf.level = .95
  ,continuity.correction = TRUE  # was T; TRUE cannot be reassigned
) {
  validate.htest.alternative(alternative = alternative)
  x <- na.omit(x)
  # sum() of a logical vector counts the TRUEs (x is NA-free here).
  count.success <- sum(x == success.value)
  sample.size <- length(x)
  # NOTE(review): continuity.correction is accepted but never forwarded to
  # the .simple worker -- confirm whether it should be passed through.
  proportion.test.onesample.approximate.simple(
    sample.proportion = count.success / sample.size
    ,sample.size = sample.size
    ,null.hypothesis.proportion = null.hypothesis.proportion
    ,alternative = alternative
    ,conf.level = conf.level
  )
}
|
/R/proportion.test.onesample.approximate.R
|
permissive
|
burrm/lolcat
|
R
| false
| false
| 732
|
r
|
#' @rdname proportion.test.onesample.approximate.simple
# One-sample approximate (normal) proportion test on raw data: drops NAs,
# counts values equal to success.value, and delegates the test to
# proportion.test.onesample.approximate.simple().
proportion.test.onesample.approximate <- function(
x
,success.value = 1 #Can be anything that compares with x with ==
,null.hypothesis.proportion = .5
,alternative = c("two.sided", "less", "greater")
,conf.level = .95
,continuity.correction = T  # NOTE(review): prefer TRUE over T (T is reassignable)
) {
validate.htest.alternative(alternative = alternative)
x <- na.omit(x)
count.success <- length(which(x == success.value))
sample.size <- length(x)
# NOTE(review): continuity.correction is accepted but never forwarded to the
# .simple worker below -- confirm whether it should be passed through.
proportion.test.onesample.approximate.simple(
sample.proportion = count.success / sample.size
,sample.size = sample.size
,null.hypothesis.proportion = null.hypothesis.proportion
,alternative = alternative
,conf.level = conf.level
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pecan-functions.R
\name{arrhenius.scaling}
\alias{arrhenius.scaling}
\title{Scale temperature dependent trait from measurement temperature to reference temperature}
\usage{
arrhenius.scaling(observed.value, old.temp, new.temp = 25)
}
\arguments{
\item{observed.value}{observed value of temperature dependent trait, e.g. Vcmax, root respiration rate}
\item{old.temp}{temperature at which measurement was taken or previously scaled to}
\item{new.temp}{temperature to be scaled to, default = 25 C}
}
\value{
numeric value at reference temperature
}
\description{
Scale temperature dependent trait from measurement temperature to reference temperature
}
|
/man/arrhenius.scaling.Rd
|
permissive
|
ashiklom/fortebaseline
|
R
| false
| true
| 730
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pecan-functions.R
\name{arrhenius.scaling}
\alias{arrhenius.scaling}
\title{Scale temperature dependent trait from measurement temperature to reference temperature}
\usage{
arrhenius.scaling(observed.value, old.temp, new.temp = 25)
}
\arguments{
\item{observed.value}{observed value of temperature dependent trait, e.g. Vcmax, root respiration rate}
\item{old.temp}{temperature at which measurement was taken or previously scaled to}
\item{new.temp}{temperature to be scaled to, default = 25 C}
}
\value{
numeric value at reference temperature
}
\description{
Scale temperature dependent trait from measurement temperature to reference temperature
}
|
# Extreme-value analysis of daily maximum wind gusts (DWD station 10091):
# decluster the series, select a threshold, fit a generalised Pareto
# distribution (GPD), and report return periods for several gust speeds.
require(POT) # performs statistical analyses of peaks over a threshold (POT)
weather.df <- read.csv("/home/christopher/git/AnalysisOfSpatioTemporalData/data/data/environment/weather-dwd-2667/weather-dwd-10091.csv")
colnames(weather.df) <- c("datum", "qual", "bedeckung", "relfeuchte", "dampfdruck", "ltemp", "ldruck", "wges",
                          "temp_boden", "ltemp_min", "ltemp_max", "wind_max", "ndschlag_typ", "ndschlag_hoehe", "sonne",
                          "schnee")
weather.df$datum <- as.Date(as.character(weather.df$datum), format="%Y%m%d")
# Eliminate empty values: the data encode missing observations as -999.
# All measurement columns (everything except date and quality flag) get NA.
measurement.cols <- setdiff(colnames(weather.df), c("datum", "qual"))
for (col in measurement.cols) {
  weather.df[[col]][weather.df[[col]] == -999.0] <- NA
}
# create a data frame that can be handled by the "clust" function
wind.df.clust <- data.frame(time = weather.df$datum, obs = weather.df$wind_max)
# Threshold selection
# As this is a time series, independent events above a threshold must be selected. First, a relatively
# low threshold is passed to consider more events. Some of them are not extreme but regular events. This is
# necessary to select a reasonable threshold for the asymptotic approximation by a GPD.
# tim.cond - A preliminary study (Wernli et al., 2002) showed that two maximum wind events
# can be considered independent if they do not lie within a 3 day window.
# wind.df.max only retrieves independent values above threshold "u" by invoking clust() function
wind.df.clust.max.5 <- clust(na.omit(wind.df.clust), u=5, tim.cond = 3/365, clust.max = TRUE, plot = FALSE)
# Four diagnostic plots; the red line marks the candidate threshold of 20 m/s.
par(mfrow = c(2, 2))
mrlplot(wind.df.clust.max.5[, "obs"])
abline(v = 20, col = "red")
diplot(wind.df.clust.max.5)
abline(v = 20, col = "red")
tcplot(wind.df.clust.max.5[, "obs"], which = 1)
abline(v = 20, col = "red")
tcplot(wind.df.clust.max.5[, "obs"], which = 2)
abline(v = 20, col = "red")
# Re-decluster with the chosen high threshold (u = 25).
wind.df.clust.max.15 <- clust(na.omit(wind.df.clust), u=25, tim.cond = 3/365, clust.max = TRUE, plot = TRUE)
# We can now define the mean number of events per year "npy".
# BUG FIX: the year length was mistyped as 356; use 365, consistent with the
# 3/365 declustering window above.
npy <- length(wind.df.clust.max.15[, "obs"])/(diff((range(as.numeric(wind.df.clust[, "time"]), na.rm = TRUE)))/365)
mle <- fitgpd(wind.df.clust.max.15[, "obs"], thresh = 25)
par(mfrow = c(2, 2))
plot(mle, npy = npy, which=4)
loc <- 25
# Return the estimated return period for wind gusts up to 25 m/s
prob <- pgpd(25, loc = loc, scale = mle$param["scale"], shape=mle$param["shape"])
prob2rp(prob, npy = npy)
# Return the estimated return period for wind gusts up to 30 m/s
prob <- pgpd(30, loc = loc, scale = mle$param["scale"], shape=mle$param["shape"])
prob2rp(prob, npy = npy)
# Return the estimated return period for wind gusts up to 35 m/s
prob <- pgpd(35, loc = loc, scale = mle$param["scale"], shape=mle$param["shape"])
prob2rp(prob, npy = npy)
# Return the estimated return period for the maximum observed wind gusts
prob <- pgpd(max(wind.df.clust.max.15[, "obs"]), loc = loc, scale = mle$param["scale"], shape=mle$param["shape"])
prob2rp(prob, npy = npy)
|
/Wather_Station_Analysis/windmax_10091.R
|
no_license
|
ChristopherStephan/AnalysisOfSpatioTemporalData
|
R
| false
| false
| 3,676
|
r
|
# Extreme-value analysis of daily maximum wind gusts (DWD station 10091):
# decluster the series, select a threshold, fit a GPD and report return
# periods for several gust speeds. (Duplicate of the copy above.)
require(POT) # perform stastical analyses of peaks over a threshold (POT)
weather.df <- read.csv("/home/christopher/git/AnalysisOfSpatioTemporalData/data/data/environment/weather-dwd-2667/weather-dwd-10091.csv")
colnames(weather.df) <- c("datum", "qual", "bedeckung", "relfeuchte", "dampfdruck", "ltemp", "ldruck", "wges",
"temp_boden", "ltemp_min", "ltemp_max", "wind_max", "ndschlag_typ", "ndschlag_hoehe", "sonne",
"schnee")
weather.df$datum <- as.Date(as.character(weather.df$datum), format="%Y%m%d")
# eleminate empty values (-999 is the missing-value sentinel in this data)
weather.df$bedeckung[weather.df$bedeckung == -999.0] <- NA
weather.df$relfeuchte[weather.df$relfeuchte == -999.0] <- NA
weather.df$dampfdruck[weather.df$dampfdruck == -999.0] <- NA
weather.df$ltemp[weather.df$ltemp == -999.0] <- NA
weather.df$ldruck[weather.df$ldruck == -999.0] <- NA
weather.df$wges[weather.df$wges == -999.0] <- NA
weather.df$temp_boden[weather.df$temp_boden == -999.0] <- NA
weather.df$ltemp_min[weather.df$ltemp_min == -999.0] <- NA
weather.df$ltemp_max[weather.df$ltemp_max == -999.0] <- NA
weather.df$wind_max[weather.df$wind_max == -999.0] <- NA
weather.df$ndschlag_typ[weather.df$ndschlag_typ == -999.0] <- NA
weather.df$ndschlag_hoehe[weather.df$ndschlag_hoehe == -999.0] <- NA
weather.df$sonne[weather.df$sonne == -999.0] <- NA
weather.df$schnee[weather.df$schnee == -999.0] <- NA
# create a data frame that can be handeled by the "clust" function
wind.df.clust <- data.frame(time = weather.df$datum, obs = weather.df$wind_max)
# Threshold selection
# As this is a time series, independent events above a threshold must be selected. First, a relatively
# low threshold is passed to consider more events. Some of them are not extreme but regular events. This is
# necessary to select a reasonable threshold for the asymptotic approximation by a GPD.
# tim.cond - A preliminary study (Wernli et al., 2002) showed that two maximum wind events
# can be considered independent if they do not lie within a 3 day window.
# wind.df.max only retrieves independent values above threshold "u" by invoking clust() function
wind.df.clust.max.5 <- clust(na.omit(wind.df.clust), u=5, tim.cond = 3/365, clust.max = TRUE, plot = FALSE)
# Diagnostic plots; the red line marks the candidate threshold of 20 m/s.
par(mfrow = c(2, 2))
mrlplot(wind.df.clust.max.5[, "obs"])
abline(v = 20, col = "red")
diplot(wind.df.clust.max.5)
abline(v = 20, col = "red")
tcplot(wind.df.clust.max.5[, "obs"], which = 1)
abline(v = 20, col = "red")
tcplot(wind.df.clust.max.5[, "obs"], which = 2)
abline(v = 20, col = "red")
wind.df.clust.max.15 <- clust(na.omit(wind.df.clust), u=25, tim.cond = 3/365, clust.max = TRUE, plot = TRUE)
# We can now define the mean number of events per year “npy”
# NOTE(review): 356 below looks like a typo for 365 (the declustering window
# above uses 3/365) -- confirm and fix.
npy <- length(wind.df.clust.max.15[, "obs"])/(diff((range(as.numeric(wind.df.clust[, "time"]), na.rm = TRUE)))/356)
mle <- fitgpd(wind.df.clust.max.15[, "obs"], thresh = 25)
par(mfrow = c(2, 2))
plot(mle, npy = npy, which=4)
loc = 25
# Return the estimated return period for wind gusts up to 25 m/s
prob <- pgpd(25, loc = loc, scale = mle$param["scale"], shape=mle$param["shape"])
prob2rp(prob, npy = npy)
# Return the estimated return period for wind gusts up to 30 m/s
prob <- pgpd(30, loc = loc, scale = mle$param["scale"], shape=mle$param["shape"])
prob2rp(prob, npy = npy)
# Return the estimated return period for wind gusts up to 35 m/s
prob <- pgpd(35, loc = loc, scale = mle$param["scale"], shape=mle$param["shape"])
prob2rp(prob, npy = npy)
# Return the estimated return period for the maximum observed wind gusts
prob <- pgpd(max(wind.df.clust.max.15[, "obs"]), loc = loc, scale = mle$param["scale"], shape=mle$param["shape"])
prob2rp(prob, npy = npy)
|
# Fit four WHAM (Woods Hole Assessment Model) configurations to the ICES
# Pollock data set, compare them, and write outputs for the best model.
write.dir = "./"
user.wd <- "../" #"~/work/ICES/MGWG/SS_vs_SCAA/R/ccgomyt/"
user.od <- write.dir
model.id <- "Pollock"
ices.id = "POLLOCK_"
Fbar.ages = 5:7
library(TMB)
library(wham)
library(dplyr)
library(tidyr)
# Helper scripts; presumably these define ICES2ASAP, make_m1..make_m4,
# wham_output_sdrep, wham_predict_index and wham_write_readme -- confirm.
source("../../helper_code/convert_ICES_to_ASAP.r")
source("../../helper_code/wham_tab1.r")
source("../../helper_code/wham_tab2.r")
source("../../helper_code/wham_predict_index.r")
source("../../helper_code/wham_write_readme.r")
source("../../helper_code/wham_make_model_input.r")
source("../../helper_code/wham_output_sdrep.r")
# convert Lowestoft input files to vanilla ASAP
ICES2ASAP(user.wd, user.od, model.id = model.id, ices.id= ices.id)
asap3 = read_asap3_dat(paste0("ASAP_", model.id,".dat"))
# The intermediate ASAP file is only needed to build the WHAM input.
file.remove(paste0("ASAP_", model.id,".dat"))
x = prepare_wham_input(asap3, model_name = model.id)
x$data$Fbar_ages = Fbar.ages
# Selectivity blocks 2:3 are age-specific; the others are not.
age.specific = 2:3
not.age.specific = (1:x$data$n_selblocks)[-age.specific]
x = set_age_sel0(x, age.specific)
# Parameters set to Inf here are mapped to NA below, i.e. excluded from
# estimation.
x$par$logit_selpars[not.age.specific,c(1:x$data$n_ages,x$data$n_ages + 3:6)] = Inf
x$par$logit_selpars[3,1:9] = Inf
x$par$logit_selpars[2,9] = Inf
# Rebuild the map: NA = not estimated, remaining entries get sequential ids.
x$map$logit_selpars = matrix(x$map$logit_selpars, x$data$n_selblocks, x$data$n_ages + 6)
x$map$logit_selpars[is.infinite(x$par$logit_selpars)] = NA
x$map$logit_selpars[!is.infinite(x$par$logit_selpars)] = 1:sum(!is.infinite(x$par$logit_selpars))
x$map$logit_selpars = factor(x$map$logit_selpars)
base = x
#SCAA, but with random effects for recruitment and index observation error variances fixed
m1 <- fit_wham(make_m1())
#Like m1, but change age comp likelihoods to logistic normal
m2 <- fit_wham(make_m2())
#full state-space model, abundance is the state vector
m3 <- fit_wham(make_m3())
#Like m3, but change age comp likelihoods to logistic normal
m4 <- fit_wham(make_m4())
# Compare fits without re-sorting the model order.
res <- compare_wham_models(list(m1=m1, m2=m2, m3=m3, m4=m4), fname="model_compare", sort = FALSE)
save(m1,m2,m3,m4, file = "fits.RData")
#write out standard errors
wham_output_sdrep()
#3-year projection for best model
wham_predict_index()
#Describe what we did for model 4
best = "m4"
wham_write_readme()
|
/state-space/Pollock/WHAM/fit_wham_models.r
|
no_license
|
ices-eg/wg_MGWG
|
R
| false
| false
| 2,089
|
r
|
# Fit four WHAM configurations to the ICES Pollock data set, compare them,
# and write outputs for the best model. (Duplicate of the copy above.)
write.dir = "./"
user.wd <- "../" #"~/work/ICES/MGWG/SS_vs_SCAA/R/ccgomyt/"
user.od <- write.dir
model.id <- "Pollock"
ices.id = "POLLOCK_"
Fbar.ages = 5:7
library(TMB)
library(wham)
library(dplyr)
library(tidyr)
# Helper scripts; presumably define ICES2ASAP, make_m1..make_m4, etc. -- confirm.
source("../../helper_code/convert_ICES_to_ASAP.r")
source("../../helper_code/wham_tab1.r")
source("../../helper_code/wham_tab2.r")
source("../../helper_code/wham_predict_index.r")
source("../../helper_code/wham_write_readme.r")
source("../../helper_code/wham_make_model_input.r")
source("../../helper_code/wham_output_sdrep.r")
# convert Lowestoft input files to vanilla ASAP
ICES2ASAP(user.wd, user.od, model.id = model.id, ices.id= ices.id)
asap3 = read_asap3_dat(paste0("ASAP_", model.id,".dat"))
file.remove(paste0("ASAP_", model.id,".dat"))
x = prepare_wham_input(asap3, model_name = model.id)
x$data$Fbar_ages = Fbar.ages
# Selectivity blocks 2:3 are age-specific; the others are not.
age.specific = 2:3
not.age.specific = (1:x$data$n_selblocks)[-age.specific]
x = set_age_sel0(x, age.specific)
# Parameters set to Inf are mapped to NA below, i.e. excluded from estimation.
x$par$logit_selpars[not.age.specific,c(1:x$data$n_ages,x$data$n_ages + 3:6)] = Inf
x$par$logit_selpars[3,1:9] = Inf
x$par$logit_selpars[2,9] = Inf
x$map$logit_selpars = matrix(x$map$logit_selpars, x$data$n_selblocks, x$data$n_ages + 6)
x$map$logit_selpars[is.infinite(x$par$logit_selpars)] = NA
x$map$logit_selpars[!is.infinite(x$par$logit_selpars)] = 1:sum(!is.infinite(x$par$logit_selpars))
x$map$logit_selpars = factor(x$map$logit_selpars)
base = x
#SCAA, but with random effects for recruitment and index observation error variances fixed
m1 <- fit_wham(make_m1())
#Like m1, but change age comp likelihoods to logistic normal
m2 <- fit_wham(make_m2())
#full state-space model, abundance is the state vector
m3 <- fit_wham(make_m3())
#Like m3, but change age comp likelihoods to logistic normal
m4 <- fit_wham(make_m4())
res <- compare_wham_models(list(m1=m1, m2=m2, m3=m3, m4=m4), fname="model_compare", sort = FALSE)
save(m1,m2,m3,m4, file = "fits.RData")
#write out standard errors
wham_output_sdrep()
#3-year projection for best model
wham_predict_index()
#Describe what we did for model 4
best = "m4"
wham_write_readme()
|
# Map new mobile phone model names onto OLX ad titles and write Mapping.csv
# with one row per matched title (title, old price, brand).

# Look up the brand for an exact ad title (first match; NA if absent).
# Defined before first use -- in the original script these helpers were
# defined after the loop that calls them, which fails in a top-down run.
get_brand <- function(data, title) {
  data$Brand[match(title, data$Product_Title)]
}

# Look up the price for an exact ad title (first match; NA if absent).
get_price <- function(data, title) {
  data$Price[match(title, data$Product_Title)]
}

olx_data <- read.csv(file = "Detail_info_Add.csv", header = TRUE, sep = ",")
new_mobile_data <- read.csv(file = "new_mobile_phones.csv", header = TRUE, sep = ",")

# Collect ad titles matching ANY word of ANY new phone name.
# BUG FIX: the original overwrote `results` on every iteration, so only the
# matches for the last word of the last phone name survived.
results <- character(0)
for (i in seq_along(new_mobile_data$Name)) {
  patterns <- strsplit(as.character(new_mobile_data$Name[i]), " ")[[1]]
  for (pattern in patterns) {
    results <- c(results,
                 grep(pattern, olx_data$Product_Title,
                      value = TRUE, ignore.case = TRUE))
  }
}
results <- unique(results)

# Write one row per matched title; the header goes out with the first row only.
for (i in seq_along(results)) {
  dfrm <- data.frame(Title = results[i],
                     Old_Price = trimws(get_price(olx_data, results[i])),
                     Brand = trimws(get_brand(olx_data, results[i])))
  write.table(dfrm, file = "Mapping.csv", sep = ",",
              append = (i > 1), col.names = (i == 1), row.names = FALSE)
}
|
/Assignment # 2/map.R
|
no_license
|
Waleedhafiz/Project-and-All-Assignments
|
R
| false
| false
| 1,239
|
r
|
# Map new mobile phone model names onto OLX ad titles and write Mapping.csv
# with one row per matched title. (Duplicate of the copy above.)
olx_data <- read.csv(file = "Detail_info_Add.csv", header = TRUE, sep = ",")
new_mobile_data <- read.csv(file = "new_mobile_phones.csv", header = TRUE, sep = ",")
# NOTE(review): `results` is overwritten on every iteration, so only matches
# for the last word of the last phone name survive -- likely should accumulate.
for (i in 1: length(new_mobile_data$Name)) {
patterns <- strsplit(as.character(new_mobile_data$Name[i]), " ")[[1]]
for (j in 1: length(patterns)) {
results <- grep(patterns[j], olx_data$Product_Title,
value = TRUE, ignore.case = TRUE)
}
}
results <- unique(results)
# Write one row per matched title; header only on the first row.
# NOTE(review): get_price/get_brand are called here but defined below --
# this fails when the script is executed top-down; move the definitions up.
for (i in 1: length(results)) {
title <- results[i]
price <- trimws(get_price(olx_data, results[i]))
brand <- trimws(get_brand(olx_data, results[i]))
dfrm <- data.frame(Title = title, Old_Price = price, Brand = brand)
if (i == 1) {
write.table(dfrm, file = "Mapping.csv", sep = ",", row.names = FALSE)
} else { write.table(dfrm, file = "Mapping.csv", sep = ",", append = TRUE,
col.names = FALSE, row.names = FALSE)
}
}
# Return the Brand of the first row whose Product_Title equals `title`
# (invisible NULL when there is no match).
get_brand <- function(data, title) {
for (i in 1: length(data$Product_Title)) {
if (data$Product_Title[i] == title)
return (data$Brand[i])
}
}
# Return the Price of the first row whose Product_Title equals `title`
# (invisible NULL when there is no match).
get_price <- function(data, title){
for (i in 1: length(data$Product_Title)) {
if (data$Product_Title[i] == title)
return (data$Price[i])
}
}
|
# Random-forest model of the non-validation (fare evasion) rate on Lyon
# transit data, with calendar and weather covariates.
lyon_validation_t1_rf <- read.table('Lyon_validations_cal_meteo_t1.csv', sep=';', header=TRUE)
# Encode categorical calendar/stop covariates as factors.
lyon_validation_t1_rf$JVScolaire <- factor(lyon_validation_t1_rf$JVScolaire)
lyon_validation_t1_rf$JFerie <- factor(lyon_validation_t1_rf$JFerie)
lyon_validation_t1_rf$JGreve <- factor(lyon_validation_t1_rf$JGreve)
lyon_validation_t1_rf$arret <- factor(lyon_validation_t1_rf$arret)
lyon_validation_t1_rf$SEMESTRE <- factor(lyon_validation_t1_rf$SEMESTRE)
lyon_validation_t1_rf$TRIMESTRE <- factor(lyon_validation_t1_rf$TRIMESTRE)
# controle: TRUE when at least one ticket inspection took place.
lyon_validation_t1_rf$controle <- factor(lyon_validation_t1_rf$nombre_de_controles>0)
# Day-of-month taken from the first two characters of the date string.
lyon_validation_t1_rf$jour_num <- factor(substr(lyon_validation_t1_rf$date,1,2))
# Negative rates are invalid; mark NA then drop those rows.
# NOTE(review): this subset-assignment errors if no row is negative -- confirm.
lyon_validation_t1_rf[lyon_validation_t1_rf$taux_de_nonvalidation<0,]$taux_de_nonvalidation= NA
lyon_validation_t1_rf <-lyon_validation_t1_rf[!is.na(lyon_validation_t1_rf$taux_de_nonvalidation),]
# Relabel weather levels; assumes the factor has exactly these 7 levels in
# this order -- confirm against the CSV.
levels(lyon_validation_t1_rf$meteo) = c("non_renseigne", "brouillard", "non_renseigne", "nuageux", "pluie", "pluie_legere", "temps_degrade")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# RF
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
require('randomForest')
# Shuffle rows, then 70/30 train/test split.
N <- dim(lyon_validation_t1_rf)[1]
train_size <- ceiling(N*.7)
lyon_validation_t1_rf <- lyon_validation_t1_rf[sample(N), ]
df_train <- lyon_validation_t1_rf[1:train_size, ]
df_test <- lyon_validation_t1_rf[(train_size+1):N, ]
# Regression forest on calendar + weather covariates.
rf_mod1 <- randomForest(taux_de_nonvalidation ~ 1 + periode + arret + controle + SEMESTRE
+ TRIMESTRE + LIBELLE_MOIS + LIBELLE_JOUR + jour_num + JVScolaire + JFerie + JGreve + meteo,
importance=TRUE,
data=df_train, ntree=100)
# for filtering nights
night <- c("00:00 - 00:29", "00:30 - 00:59", "01:00 - 01:29", "01:30 - 01:59",
"02:00 - 02:29", "02:30 - 02:59", "03:00 - 03:29","03:30 - 03:59",
"04:00 - 04:29", "04:30 - 04:59")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
/R/RandomForest.R
|
no_license
|
ndjido/Data-Analysis-code-example
|
R
| false
| false
| 1,953
|
r
|
# Random-forest model of the non-validation rate on Lyon transit data,
# with calendar and weather covariates. (Duplicate of the copy above.)
lyon_validation_t1_rf <- read.table('Lyon_validations_cal_meteo_t1.csv', sep=';', header=TRUE)
# Encode categorical calendar/stop covariates as factors.
lyon_validation_t1_rf$JVScolaire <- factor(lyon_validation_t1_rf$JVScolaire)
lyon_validation_t1_rf$JFerie <- factor(lyon_validation_t1_rf$JFerie)
lyon_validation_t1_rf$JGreve <- factor(lyon_validation_t1_rf$JGreve)
lyon_validation_t1_rf$arret <- factor(lyon_validation_t1_rf$arret)
lyon_validation_t1_rf$SEMESTRE <- factor(lyon_validation_t1_rf$SEMESTRE)
lyon_validation_t1_rf$TRIMESTRE <- factor(lyon_validation_t1_rf$TRIMESTRE)
# controle: TRUE when at least one ticket inspection took place.
lyon_validation_t1_rf$controle <- factor(lyon_validation_t1_rf$nombre_de_controles>0)
lyon_validation_t1_rf$jour_num <- factor(substr(lyon_validation_t1_rf$date,1,2))
# Negative rates are invalid; mark NA then drop.
# NOTE(review): errors if no row is negative -- confirm.
lyon_validation_t1_rf[lyon_validation_t1_rf$taux_de_nonvalidation<0,]$taux_de_nonvalidation= NA
lyon_validation_t1_rf <-lyon_validation_t1_rf[!is.na(lyon_validation_t1_rf$taux_de_nonvalidation),]
# Relabel weather levels; assumes exactly these 7 levels in this order.
levels(lyon_validation_t1_rf$meteo) = c("non_renseigne", "brouillard", "non_renseigne", "nuageux", "pluie", "pluie_legere", "temps_degrade")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# RF
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
require('randomForest')
# Shuffle rows, then 70/30 train/test split.
N <- dim(lyon_validation_t1_rf)[1]
train_size <- ceiling(N*.7)
lyon_validation_t1_rf <- lyon_validation_t1_rf[sample(N), ]
df_train <- lyon_validation_t1_rf[1:train_size, ]
df_test <- lyon_validation_t1_rf[(train_size+1):N, ]
# Regression forest on calendar + weather covariates.
rf_mod1 <- randomForest(taux_de_nonvalidation ~ 1 + periode + arret + controle + SEMESTRE
+ TRIMESTRE + LIBELLE_MOIS + LIBELLE_JOUR + jour_num + JVScolaire + JFerie + JGreve + meteo,
importance=TRUE,
data=df_train, ntree=100)
# for filtering nights
night <- c("00:00 - 00:29", "00:30 - 00:59", "01:00 - 01:29", "01:30 - 01:59",
"02:00 - 02:29", "02:30 - 02:59", "03:00 - 03:29","03:30 - 03:59",
"04:00 - 04:29", "04:30 - 04:59")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
# Coursera "R Programming" quiz 3: equivalent ways of computing grouped
# summary statistics on the built-in iris and mtcars data sets.
library(datasets)

data("iris")
str(iris)
# Mean sepal length of the virginica species, two equivalent ways.
mean(iris$Sepal.Length[iris$Species == 'virginica'])
with(iris, mean(Sepal.Length[Species == 'virginica']))

data("mtcars")
str(mtcars)
# Mean mpg per cylinder count, three equivalent ways.
sapply(split(mtcars$mpg, mtcars$cyl), mean)
with(mtcars, tapply(mpg, cyl, mean))
tapply(mtcars$mpg, mtcars$cyl, mean)
# Absolute difference in mean horsepower between 4- and 8-cylinder cars.
abs(mean(mtcars$hp[mtcars$cyl == 4]) - mean(mtcars$hp[mtcars$cyl == 8]))
with(mtcars, abs(mean(hp[cyl == 4]) - mean(hp[cyl == 8])))
|
/rprog-032/quiz3.R
|
no_license
|
wangjiezhe/cousera
|
R
| false
| false
| 431
|
r
|
# Coursera "R Programming" quiz 3: grouped summary statistics on the
# built-in iris and mtcars data sets. (Duplicate of the copy above.)
library(datasets)
data("iris")
str(iris)
# Mean sepal length of the virginica species, two equivalent ways.
mean(iris[iris$Species == 'virginica',]$Sepal.Length)
with(iris, mean(Sepal.Length[Species == 'virginica']))
data("mtcars")
str(mtcars)
# Mean mpg per cylinder count, three equivalent ways.
sapply(split(mtcars$mpg, mtcars$cyl), mean)
with(mtcars, tapply(mpg, cyl, mean))
tapply(mtcars$mpg, mtcars$cyl, mean)
# Absolute difference in mean horsepower between 4- and 8-cylinder cars.
abs(mean(mtcars[mtcars$cyl == 4,]$hp) - mean(mtcars[mtcars$cyl == 8,]$hp))
with(mtcars, abs(mean(hp[cyl == 4]) - mean(hp[cyl == 8])))
|
#' \code{Ri2b2casecontrol} package
#'
#' i2b2 R package to run case-control analysis
#'
#' See the README on
#' \href{https://gitlab.partners.org/vc070/Ri2b2casecontrol}{GitHub}
#'
#' @docType package
#' @name Ri2b2casecontrol
#' @importFrom dplyr %>%
NULL
## quiets concerns of R CMD check re: the .'s that appear in pipelines
# Declaring these names as globals suppresses the "no visible binding for
# global variable" NOTEs that R CMD check emits for non-standard-evaluation
# (dplyr) column references. The version guard skips this on pre-2.15.1 R,
# where utils::globalVariables does not exist.
if(getRversion() >= "2.15.1") utils::globalVariables(
c(".",
"RACE_CD", "SEX_CD", "age_at_index", "birth_date", "cohort", "concept_cd", "concept_date",
"concept_order", "drug_date", "index_date", "index_year", "k2k_control",
"match_strata", "matched","min_drug_order","patient_num","riskWindow_end",
"riskWindow_start","visit_date","visit_year","visit_year.x", "visit_year.y",
"visits_in_window")
)
|
/R/Ri2b2casecontrol.R
|
no_license
|
vcastro/Ri2b2casecontrol
|
R
| false
| false
| 741
|
r
|
#' \code{Ri2b2casecontrol} package
#'
#' i2b2 R package to run case-control analysis
#'
#' See the README on
#' \href{https://gitlab.partners.org/vc070/Ri2b2casecontrol}{GitHub}
#'
#' @docType package
#' @name Ri2b2casecontrol
#' @importFrom dplyr %>%
NULL
## quiets concerns of R CMD check re: the .'s that appear in pipelines
# Declaring these names as globals suppresses the "no visible binding"
# NOTEs from R CMD check for NSE (dplyr) column references.
if(getRversion() >= "2.15.1") utils::globalVariables(
c(".",
"RACE_CD", "SEX_CD", "age_at_index", "birth_date", "cohort", "concept_cd", "concept_date",
"concept_order", "drug_date", "index_date", "index_year", "k2k_control",
"match_strata", "matched","min_drug_order","patient_num","riskWindow_end",
"riskWindow_start","visit_date","visit_year","visit_year.x", "visit_year.y",
"visits_in_window")
)
|
# Generated by Target Markdown in targets 0.4.2.9000: do not edit by hand
library(targets)
# Source every global-definition script produced by Target Markdown.
lapply(
list.files(
"_targets_r/globals",
pattern = "\\.R$",
full.names = TRUE
),
source
)
# Source every target-definition script; $value extracts the target object
# returned by source() so the pipeline sees the target definitions.
lapply(
list.files(
"_targets_r/targets",
pattern = "\\.R$",
full.names = TRUE
),
function(x) source(x)$value
)
|
/LDM/_targets.R
|
no_license
|
brownag/sANDREWbox
|
R
| false
| false
| 330
|
r
|
# Generated by Target Markdown in targets 0.4.2.9000: do not edit by hand
library(targets)
# Source every global-definition script produced by Target Markdown.
lapply(
list.files(
"_targets_r/globals",
pattern = "\\.R$",
full.names = TRUE
),
source
)
# Source every target-definition script; $value extracts the object each
# sourced file returns.
lapply(
list.files(
"_targets_r/targets",
pattern = "\\.R$",
full.names = TRUE
),
function(x) source(x)$value
)
|
# Exercise 6.30: 82 fuel octane ratings, shown as a frequency histogram and
# a density histogram stacked vertically.
fuelOctaneRatings <- c(
  88.5, 94.7, 84.3, 90.1, 89.0, 89.8, 91.6, 90.3, 90.0, 91.5,
  89.9, 98.8, 88.3, 90.4, 91.2, 90.6, 92.2, 87.7, 91.1, 86.7,
  93.4, 96.1, 89.6, 90.4, 91.6, 90.7, 88.6, 88.3, 94.2, 85.3,
  90.1, 89.3, 91.1, 92.2, 83.4, 91.0, 88.2, 88.5, 93.3, 87.4,
  91.1, 90.5, 100.3, 87.6, 92.7, 87.9, 93.0, 94.4, 90.4, 91.2,
  86.7, 94.2, 90.8, 90.1, 91.8, 88.4, 92.6, 93.7, 96.5, 84.3,
  93.2, 88.6, 88.7, 92.7, 89.3, 91.0, 87.5, 87.8, 88.3, 89.2,
  92.3, 88.9, 89.8, 92.7, 93.3, 86.7, 91.0, 90.9, 89.9, 91.8,
  89.7, 92.2
)

# Two panels, one above the other.
par(mfrow = c(2, 1))
# Counts per bin. Note that hist() treats `breaks` as a suggestion: it picks
# a "pretty" number of bins close to the request. Pass a vector of cut
# points (breaks = c(v1, v2, ...)) for exact control.
hist(fuelOctaneRatings, breaks = 8, freq = TRUE, main = "Frequency distribution")
# Same data on a density scale.
hist(fuelOctaneRatings, breaks = 8, freq = FALSE, main = "Histogram")
|
/Chapter 6/Exercise 6.30.r
|
no_license
|
kmahoski/Statistical-Data-Analysis-in-R
|
R
| false
| false
| 981
|
r
|
# Exercise 6.30: 82 fuel octane ratings, frequency and density histograms.
# (Duplicate of the copy above.)
fuelOctaneRatings <- c(
88.5, 94.7, 84.3, 90.1, 89.0, 89.8, 91.6, 90.3,
90.0, 91.5, 89.9, 98.8, 88.3, 90.4, 91.2, 90.6,
92.2, 87.7, 91.1, 86.7, 93.4, 96.1, 89.6, 90.4,
91.6, 90.7, 88.6, 88.3, 94.2, 85.3, 90.1, 89.3,
91.1, 92.2, 83.4, 91.0, 88.2, 88.5, 93.3, 87.4,
91.1, 90.5, 100.3, 87.6, 92.7, 87.9, 93.0, 94.4,
90.4, 91.2, 86.7, 94.2, 90.8, 90.1, 91.8, 88.4,
92.6, 93.7, 96.5, 84.3, 93.2, 88.6, 88.7, 92.7,
89.3, 91.0, 87.5, 87.8, 88.3, 89.2, 92.3, 88.9,
89.8, 92.7, 93.3, 86.7, 91.0, 90.9, 89.9, 91.8,
89.7, 92.2)
# Two panels, one above the other.
par(mfrow = c(2, 1))
hist(fuelOctaneRatings, breaks = 8, freq = TRUE, main = "Frequency distribution")
# The already given function "hist" is not breaking up the data in the same number that
# is given by the argument "breaks", but it finds the closest appropriate value.
# "breaks = c(<value1>, <value2>, ...)" as a parametar for setting breakpoints manually
# for better control.
hist(fuelOctaneRatings, breaks = 8, freq = FALSE, main = "Histogram")
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 28078
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 28078
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#130.A#48.c#.w#9.s#50.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9537
c no.of clauses 28078
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 28078
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#130.A#48.c#.w#9.s#50.asp.qdimacs 9537 28078 E1 [] 0 130 9407 28078 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#130.A#48.c#.w#9.s#50.asp/ctrl.e#1.a#3.E#130.A#48.c#.w#9.s#50.asp.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 732
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 28078
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 28078
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#130.A#48.c#.w#9.s#50.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9537
c no.of clauses 28078
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 28078
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#130.A#48.c#.w#9.s#50.asp.qdimacs 9537 28078 E1 [] 0 130 9407 28078 NONE
|
# Copyright {2015} Yuxiang Tan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#subtract_psl.R
#Modify the psl into its subtract
#First separate the psl into 5' aligned and 3' aligned group, and throw the rest away.
#modify the each group together.
#check arguments
# Parse "name=value" command-line arguments into global variables.
# A variable whose name ends in "I" is coerced to integer, one ending in
# "N" to numeric; a bare argument (no "=") is assigned the value TRUE.
for (e in commandArgs()) {
ta = strsplit(e,"=",fixed=TRUE)
if(! is.na(ta[[1]][2])) {
temp = ta[[1]][2]
# The last character of the argument name selects the coercion.
if(substr(ta[[1]][1],nchar(ta[[1]][1]),nchar(ta[[1]][1])) == "I") {
temp = as.integer(temp)
}
if(substr(ta[[1]][1],nchar(ta[[1]][1]),nchar(ta[[1]][1])) == "N") {
temp = as.numeric(temp)
}
assign(ta[[1]][1],temp)
} else {
assign(ta[[1]][1],TRUE)
}
}
# Check that the mandatory file.in argument was supplied.
if (!exists("file.in")) {
stop("\n\nWarning: file.in is not exist in subtract_psl.R, please check the path, exit. \n\n")
}
# Read the tab-separated psl file; columns 12 and 13 are used below as the
# alignment start/end positions on the read.
ori_psl<-read.csv(file=file.in,sep = "\t",quote="",header=FALSE)
# 5' group: alignments whose start (col 12) is within 5 bp of the read's 5' end.
ori_psl_5pi_1<- as.matrix(ori_psl[which(as.numeric(ori_psl[,12])<=5 ),])
# Subsetting a single row collapses to a vector and as.matrix() then yields a
# one-column matrix, so transpose it back into a one-row matrix.
# NOTE(review): the outer check tests the column count (dim[2]), presumably as
# a guard against an empty result -- confirm dim[1] was not intended.
if (dim(ori_psl_5pi_1)[2]>0){
if (dim(ori_psl_5pi_1)[2]<=1){
ori_psl_5pi_1<- t(ori_psl_5pi_1)
}
}
# Keep only 5' alignments that end more than 5 bp before the read's 3' end.
ori_psl_5pi<- as.matrix(ori_psl_5pi_1[which(as.numeric(ori_psl_5pi_1[,13])<(as.numeric(read_length)-5)),])
if (dim(ori_psl_5pi)[2]>0){
if (dim(ori_psl_5pi)[2]<=1){
ori_psl_5pi<- t(ori_psl_5pi)
}
}
#ori_psl_5pi[,12]<-as.numeric(ori_psl_5pi[,13])+1
# The reason not to add 1 is that this file generates a bed for fastaFromBed,
# which treats 0 as the original (0-based) start point.
# Replace the interval with its complement: from the old end to the read end.
ori_psl_5pi[,12]<-as.numeric(ori_psl_5pi[,13])
ori_psl_5pi[,13]<-read_length
# 3' group: alignments whose end (col 13) is within 5 bp of the read's 3' end.
ori_psl_3pi_1<- as.matrix(ori_psl[which(as.numeric(ori_psl[,13])>=(as.numeric(read_length)-5)),])
if (dim(ori_psl_3pi_1)[2]>0){
if (dim(ori_psl_3pi_1)[2]<=1){
ori_psl_3pi_1<- t(ori_psl_3pi_1)
}
}
# Keep only 3' alignments that start more than 5 bp after the read's 5' end.
ori_psl_3pi<-as.matrix(ori_psl_3pi_1[which(as.numeric(ori_psl_3pi_1[,12])>5),])
if (dim(ori_psl_3pi)[2]>0){
if (dim(ori_psl_3pi)[2]<=1){
ori_psl_3pi<- t(ori_psl_3pi)
}
#ori_psl_3pi[,13]<-as.numeric(ori_psl_3pi[,12])-1
# This is for blat v. 34x13.
# Replace the interval with its complement: from the read start to the old start.
ori_psl_3pi[,13]<-as.numeric(ori_psl_3pi[,12])
# If for the online blat version, it should minus 1, because it will give the
# true correct start point.
ori_psl_3pi[,12]<-0
}
# Combine both groups and emit a 4-column bed-like table:
# name (col 10), start, end, name again.
ori_psl_out<-rbind(ori_psl_5pi,ori_psl_3pi)
ori_bed_out<-as.matrix(ori_psl_out[,c(10,12,13,10)])
if (dim(ori_bed_out)[2]==1){
ori_bed_out<- t(ori_bed_out)
}
# NOTE(review): row.name/col.name rely on R's partial argument matching for
# write.table's row.names/col.names -- works, but should be spelled out.
write.table(ori_bed_out,file=`file.out`,quote=FALSE,row.name=FALSE,col.name=FALSE,sep = "\t")
|
/QueryFuse_v1/subtract_psl.R
|
permissive
|
yuxiangtan/QueryFuse
|
R
| false
| false
| 3,078
|
r
|
# Copyright {2015} Yuxiang Tan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#subtract_psl.R
#Modify the psl into its subtract
#First separate the psl into 5' aligned and 3' aligned group, and throw the rest away.
#modify the each group together.
#check arguments
# Parse "name=value" command-line arguments into global variables.
# A variable whose name ends in "I" is coerced to integer, one ending in
# "N" to numeric; a bare argument (no "=") is assigned the value TRUE.
for (e in commandArgs()) {
ta = strsplit(e,"=",fixed=TRUE)
if(! is.na(ta[[1]][2])) {
temp = ta[[1]][2]
# The last character of the argument name selects the coercion.
if(substr(ta[[1]][1],nchar(ta[[1]][1]),nchar(ta[[1]][1])) == "I") {
temp = as.integer(temp)
}
if(substr(ta[[1]][1],nchar(ta[[1]][1]),nchar(ta[[1]][1])) == "N") {
temp = as.numeric(temp)
}
assign(ta[[1]][1],temp)
} else {
assign(ta[[1]][1],TRUE)
}
}
# Check that the mandatory file.in argument was supplied.
if (!exists("file.in")) {
stop("\n\nWarning: file.in is not exist in subtract_psl.R, please check the path, exit. \n\n")
}
# Read the tab-separated psl file; columns 12 and 13 are used below as the
# alignment start/end positions on the read.
ori_psl<-read.csv(file=file.in,sep = "\t",quote="",header=FALSE)
# 5' group: alignments whose start (col 12) is within 5 bp of the read's 5' end.
ori_psl_5pi_1<- as.matrix(ori_psl[which(as.numeric(ori_psl[,12])<=5 ),])
# Subsetting a single row collapses to a vector and as.matrix() then yields a
# one-column matrix, so transpose it back into a one-row matrix.
# NOTE(review): the outer check tests the column count (dim[2]), presumably as
# a guard against an empty result -- confirm dim[1] was not intended.
if (dim(ori_psl_5pi_1)[2]>0){
if (dim(ori_psl_5pi_1)[2]<=1){
ori_psl_5pi_1<- t(ori_psl_5pi_1)
}
}
# Keep only 5' alignments that end more than 5 bp before the read's 3' end.
ori_psl_5pi<- as.matrix(ori_psl_5pi_1[which(as.numeric(ori_psl_5pi_1[,13])<(as.numeric(read_length)-5)),])
if (dim(ori_psl_5pi)[2]>0){
if (dim(ori_psl_5pi)[2]<=1){
ori_psl_5pi<- t(ori_psl_5pi)
}
}
#ori_psl_5pi[,12]<-as.numeric(ori_psl_5pi[,13])+1
# The reason not to add 1 is that this file generates a bed for fastaFromBed,
# which treats 0 as the original (0-based) start point.
# Replace the interval with its complement: from the old end to the read end.
ori_psl_5pi[,12]<-as.numeric(ori_psl_5pi[,13])
ori_psl_5pi[,13]<-read_length
# 3' group: alignments whose end (col 13) is within 5 bp of the read's 3' end.
ori_psl_3pi_1<- as.matrix(ori_psl[which(as.numeric(ori_psl[,13])>=(as.numeric(read_length)-5)),])
if (dim(ori_psl_3pi_1)[2]>0){
if (dim(ori_psl_3pi_1)[2]<=1){
ori_psl_3pi_1<- t(ori_psl_3pi_1)
}
}
# Keep only 3' alignments that start more than 5 bp after the read's 5' end.
ori_psl_3pi<-as.matrix(ori_psl_3pi_1[which(as.numeric(ori_psl_3pi_1[,12])>5),])
if (dim(ori_psl_3pi)[2]>0){
if (dim(ori_psl_3pi)[2]<=1){
ori_psl_3pi<- t(ori_psl_3pi)
}
#ori_psl_3pi[,13]<-as.numeric(ori_psl_3pi[,12])-1
# This is for blat v. 34x13.
# Replace the interval with its complement: from the read start to the old start.
ori_psl_3pi[,13]<-as.numeric(ori_psl_3pi[,12])
# If for the online blat version, it should minus 1, because it will give the
# true correct start point.
ori_psl_3pi[,12]<-0
}
# Combine both groups and emit a 4-column bed-like table:
# name (col 10), start, end, name again.
ori_psl_out<-rbind(ori_psl_5pi,ori_psl_3pi)
ori_bed_out<-as.matrix(ori_psl_out[,c(10,12,13,10)])
if (dim(ori_bed_out)[2]==1){
ori_bed_out<- t(ori_bed_out)
}
# NOTE(review): row.name/col.name rely on R's partial argument matching for
# write.table's row.names/col.names -- works, but should be spelled out.
write.table(ori_bed_out,file=`file.out`,quote=FALSE,row.name=FALSE,col.name=FALSE,sep = "\t")
|
\name{synPrintEntity}
\alias{synPrintEntity}
\docType{methods}
\title{
synPrintEntity
}
\description{
Pretty prints an Entity.
}
\usage{
synPrintEntity(entity, ensure_ascii=TRUE)
}
\arguments{
\item{entity}{ The entity to be printed.\cr
}
\item{ensure_ascii}{ If TRUE, escapes all non-ASCII characters}
}
\examples{
\dontrun{
synPrintEntity("syn123")
}
}
|
/man/synPrintEntity.Rd
|
permissive
|
Sage-Bionetworks/synapser
|
R
| false
| false
| 359
|
rd
|
\name{synPrintEntity}
\alias{synPrintEntity}
\docType{methods}
\title{
synPrintEntity
}
\description{
Pretty prints an Entity.
}
\usage{
synPrintEntity(entity, ensure_ascii=TRUE)
}
\arguments{
\item{entity}{ The entity to be printed.\cr
}
\item{ensure_ascii}{ If TRUE, escapes all non-ASCII characters}
}
\examples{
\dontrun{
synPrintEntity("syn123")
}
}
|
library(tidyverse)
# Load the exercise datasets (relative paths; run from the project root).
bookings <- read_csv("datasets/bookings.csv")
properties <- read_csv("datasets/properties.csv")
# Your turn 1 ----
# Create a new tibble `x` that
#
# - only contains the columns `room_nights` and `review_score`, and
# - only contains the bookings with a price per night of less than 80.
#
# 1. _...using base `R` functions only._
# 2. _...using the `dplyr` functions `select()` and `filter()`._
# base R
# tidyverse
# Your turn 2 ----
# Use `%>%` to write a pipeline which extracts all bookings with Friday as check-in
# day while only returning the columns `property_id` and `status`.
# Your turn 3 ----
# Write a pipeline which calculates the mean-centered `price_per_night` as new column
# names `centered_ppn`. The returned data frame should only contain this new column.
#
# _You need `mutate()` and `select()`._
# Your turn 4 ----
# Obtain a one-row summary tibble of the bookings data containing the following statistics:
#
# - The number of rows (Hint: use the function `n()`)
# - The number of `"stayed"` bookings
# - The mean of the _total price_ (Hint: you have to compute the total price first)
#
# The output tibble should contain three columns and just one row.
# Your turn 5 ----
# Write a pipeline that yields a tibble containing only the most expensive booking
# of each property. Return only the property id and the price of the most expensive booking.
#
# You should get as many rows as there are unique properties:
#
# ```{r}
# n_distinct(bookings$property_id) # convenience function for `length(unique(bookings$property_id))`
# ```
# 1. Calculate `price_total` using `mutate()`.
# 2. Group by `property_id` using `group_by()`.
# 3. Filter rows where the total price is equal to the maximum of the total price
# (for that property using `filter()`).
# 4. Select the columns `property_id` and `price_total` using `select()`.
# Your turn 6 ----
# Calculate the number of bookings per city.
#
# Hint: Join the tibbles `bookings` and `properties` and _count_ the number of rows per city.
# Your turn 7 ----
# ### Do properties appeal to business travellers and tourists alike?
#
# - Convert the column `for_business` to a factor with the levels `"business"` and `"tourist"`.
# - For each property, calculate the average review score given by business travelers and tourists.
# - Then, calculate the average review score difference between business travelers and tourists.
# Your turn 8 ----
# Actually, we don't need to store the property facilities as tibble, since the column `facility_nr` is redundant.
#
# We change the `facilities` column to a list column, such that each element is a character vector of property facilities.
#
# Turn the comma-separated facilities string into a list column: each element
# becomes a character vector of that property's facilities.
properties_l <- properties %>%
mutate(facilities = strsplit(facilities, ","))
# head(properties_l, 3)
#
# Example:
# properties_l$facilities[[1]]
#
# Add a column `n_features` to `properties` that contains the number of facilities.
#
# Hint: Use the `map_*()` function that returns an integer vector.
# Your turn 9 ----
### _Which factors contribute to the average property review score?_
#
# For **each city**, fit a **linear regression** model<sup>1</sup> to predict a property's **average review score** based on average price per night, number of bookings (stayed or cancelled), and property type. Compare the quality of the three models using $R^2$.
#
# Strategy:
#
# 1. Compute the summary statistics `avg_review` (average review score),
# `n_bookings` (number of bookings) and `avg_price` (average price per night)
# for each property and city.
# 2. For each city, fit a linear regression model (?`lm`) using the formula
# `avg_review ~ property_type + n_bookings + avg_price`. Save these models as new
# column `fit`.
# 3. Extract the $R^2$ value (`r.squared`) from `fit` using the appropriate `map_*` function.
|
/exercises/ex-tidyverse.R
|
no_license
|
stfnrpplngr/DataSciR20
|
R
| false
| false
| 3,969
|
r
|
library(tidyverse)
# Load the exercise datasets (relative paths; run from the project root).
bookings <- read_csv("datasets/bookings.csv")
properties <- read_csv("datasets/properties.csv")
# Your turn 1 ----
# Create a new tibble `x` that
#
# - only contains the columns `room_nights` and `review_score`, and
# - only contains the bookings with a price per night of less than 80.
#
# 1. _...using base `R` functions only._
# 2. _...using the `dplyr` functions `select()` and `filter()`._
# base R
# tidyverse
# Your turn 2 ----
# Use `%>%` to write a pipeline which extracts all bookings with Friday as check-in
# day while only returning the columns `property_id` and `status`.
# Your turn 3 ----
# Write a pipeline which calculates the mean-centered `price_per_night` as new column
# names `centered_ppn`. The returned data frame should only contain this new column.
#
# _You need `mutate()` and `select()`._
# Your turn 4 ----
# Obtain a one-row summary tibble of the bookings data containing the following statistics:
#
# - The number of rows (Hint: use the function `n()`)
# - The number of `"stayed"` bookings
# - The mean of the _total price_ (Hint: you have to compute the total price first)
#
# The output tibble should contain three columns and just one row.
# Your turn 5 ----
# Write a pipeline that yields a tibble containing only the most expensive booking
# of each property. Return only the property id and the price of the most expensive booking.
#
# You should get as many rows as there are unique properties:
#
# ```{r}
# n_distinct(bookings$property_id) # convenience function for `length(unique(bookings$property_id))`
# ```
# 1. Calculate `price_total` using `mutate()`.
# 2. Group by `property_id` using `group_by()`.
# 3. Filter rows where the total price is equal to the maximum of the total price
# (for that property using `filter()`).
# 4. Select the columns `property_id` and `price_total` using `select()`.
# Your turn 6 ----
# Calculate the number of bookings per city.
#
# Hint: Join the tibbles `bookings` and `properties` and _count_ the number of rows per city.
# Your turn 7 ----
# ### Do properties appeal to business travellers and tourists alike?
#
# - Convert the column `for_business` to a factor with the levels `"business"` and `"tourist"`.
# - For each property, calculate the average review score given by business travelers and tourists.
# - Then, calculate the average review score difference between business travelers and tourists.
# Your turn 8 ----
# Actually, we don't need to store the property facilities as tibble, since the column `facility_nr` is redundant.
#
# We change the `facilities` column to a list column, such that each element is a character vector of property facilities.
#
# Turn the comma-separated facilities string into a list column: each element
# becomes a character vector of that property's facilities.
properties_l <- properties %>%
mutate(facilities = strsplit(facilities, ","))
# head(properties_l, 3)
#
# Example:
# properties_l$facilities[[1]]
#
# Add a column `n_features` to `properties` that contains the number of facilities.
#
# Hint: Use the `map_*()` function that returns an integer vector.
# Your turn 9 ----
### _Which factors contribute to the average property review score?_
#
# For **each city**, fit a **linear regression** model<sup>1</sup> to predict a property's **average review score** based on average price per night, number of bookings (stayed or cancelled), and property type. Compare the quality of the three models using $R^2$.
#
# Strategy:
#
# 1. Compute the summary statistics `avg_review` (average review score),
# `n_bookings` (number of bookings) and `avg_price` (average price per night)
# for each property and city.
# 2. For each city, fit a linear regression model (?`lm`) using the formula
# `avg_review ~ property_type + n_bookings + avg_price`. Save these models as new
# column `fit`.
# 3. Extract the $R^2$ value (`r.squared`) from `fit` using the appropriate `map_*` function.
|
#' Process data for fitting distance sampling detection function
#'
#' Sets up dataframe and does some basic error checking. Adds needed fields to
#' dataframe and to \code{meta.data}.
#'
#' The function does a number of error checking tasks, creating fields and
#' adding to \code{meta.data} including:
#'
#' 1) If \code{check=TRUE}, check to make sure the record structure is okay for
#' mrds data. The number of primary records (observer=1) must equal the number
#' of secondary records (observer=2). Also, a field in the dataframe is created
#' \code{timesseen} which counts the number of times an object was detected
#' 0,1,2; if \code{timesseen=0} then the record is tossed from the analysis.
#' Also if there are differences in the data (distance, size, covariates) for
#' observer 1 and 2 a warning is issued that the analysis may fail. The code
#' assumes these values are the same for both observers.
#'
#' 2) Based on the presence of fields \code{distbegin} and \code{distend}, a
#' determination is made of whether the data analysis should be based on binned
#' distances and a field \code{binned} is created, which is \code{TRUE} if the
#' distance for the observation is binned. By assigning for each observation
#' this allows an analysis of a mixture of binned and unbinned distances.
#'
#' 3) Data are restricted such that distances are not greater than \code{width}
#' and not less than \code{left} if those values are specified in
#' \code{meta.data}. If they are not specified then \code{left} defaults to 0
#' and \code{width} defaults to the largest distance measurement.
#'
#' 4) Determine if an integration range (\code{int.begin} and \code{int.end})
#' has been specified for the observations. If it has, add the structure to
#' \code{meta.data}. The integration range is typically used for aerial
#' surveys in which the altitude varies such that the strip width (left to
#' width) changes with a change in altitude.
#'
#' 5) Fields defined as factors are cleaned up such that any unused levels are
#' eliminated.
#'
#' 6) If the restrictions placed on the data eliminate all of the data, the
#' function stops with an error message
#'
#' @param data dataframe object
#' @param meta.data meta.data options; see \code{\link{ddf}} for a description
#' @param check if \code{TRUE} check data for errors in the mrds structure; for
#' \code{method="ds" check=FALSE}
#' @return \item{xmat}{processed \code{data.frame} with added fields}
#' \item{meta.data}{meta.data list}
#' @author Jeff Laake
#' @keywords utility
process.data <- function(data,meta.data=list(),check=TRUE){
set.default.width=function(data,meta.data){
# set.default.width - sets default transect width when none was specified
# Arguments:
# data - dataframe
# meta.data - meta.data list
# Values: width of transect
if(meta.data$binned){
width <- max(c(data$distend,data$distance),na.rm=TRUE)
}else{
width <- max(data$distance)
}
return(width)
}
# assign dataframe to data
# Check to make sure the record structure is ok. Number of primary
# records = number of secondary
if(check){
if(length(data$detected[data$observer==1]) !=
length(data$detected[data$observer==2])){
stop("number of records for primary observer not equal to number for secondary observer")
}
}
# Create field which counts the number of times an object was detected 0,1,2
# (observer 1 and observer 2 rows for the same object get the same count).
if(check){
timesdetected <- data$detected[data$observer==1] +
data$detected[data$observer==2]
data$timesdetected <- rep(0,dim(data)[1])
data$timesdetected[data$observer==1] <- timesdetected
data$timesdetected[data$observer==2] <- timesdetected
# If any 00 (not detected by either observer), stop and issue error message
if(any(data$timesdetected==0)){
stop("following objects were never detected:",
paste(data$object[data$observer==1&data$timesdetected==0],
collapse=","),"\n")
}
}
# also check for mrds that the data fields have the same value for both
# observers for example same distance, size etc. This is only a warning as
#some fields may be validly different
# if(any(apply(data[data$observer==1,names(data)!="observer"&names(data)!="detected"],1,paste,collapse="")!=
# apply(data[data$observer==2,names(data)!="observer"&names(data)!="detected"],1,paste,collapse="")))
# warning("If analysis fails it may be due to difference in data between observer 1 and 2;\n fields such as distance, size and covariates should be the same")
# Determine if data are binned by presence of distbegin and distend fields
if(is.null(data$distend)|is.null(data$distbegin)){
binned <- FALSE
}else{
# NOTE(review): all(is.null(x)) is equivalent to is.null(x) for a vector;
# possibly all(is.na(...)) was intended here -- confirm against upstream mrds.
if(all(is.null(data$distend))|all(is.null(data$distbegin))){
binned <- FALSE
}else{
if(any(is.null(data$distend) & !is.null(data$distbegin)) |
any(is.null(data$distbegin)&!is.null(data$distend))){
stop("mismatched distance intervals - one or more endpoints are missing")
}else{
binned <- TRUE
}
}
}
# Reconcile the user's meta.data$binned setting with what the data contain.
if(meta.data$binned & !binned){
stop("binned set to TRUE in meta.data but distbegin and distend fields are missing")
}
if(!meta.data$binned & binned){
warning("data contain distbegin and distend fields but binned=FALSE. Analyzing as not binned",immediate.=TRUE)
binned <- FALSE
}
meta.data$binned <- binned
if(meta.data$binned & is.null(meta.data$breaks)){
stop("breaks must be set in meta.data for binned data")
}
# Fill in distance field for binned observations (interval midpoint) and
# create logical variable marking which rows are binned.
data$binned <- rep(FALSE,dim(data)[1])
if(binned){
meta.data$binned <- TRUE
data$distance[!is.na(data$distend)]<-(data$distbegin[!is.na(data$distend)]+
data$distend[!is.na(data$distend)])/2
data$binned[!is.na(data$distbegin)] <- TRUE
}
# Restrict data to width interval
# If no width set, use largest measured distance as width
# NOTE(review): assumes meta.data carries width/left/int.range entries
# (populated by the caller, e.g. ddf) -- is.na(NULL) would error here.
if(is.na(meta.data$width)){
width <- set.default.width(data,meta.data)
meta.data$width <- width
xmat <- data
warning("no truncation distance specified; using largest observed distance",immediate.=TRUE)
}else{
# change: jll 2 June 05; ref to width changed to meta.data$width
# This piece of code makes sure that the set width is as large as the
# largest bin end point for binned data.
if(meta.data$binned){
if(any(data$binned & data$distend > meta.data$width)){
stop("width must exceed largest interval end point")
}else{
xmat <- data[data$binned |
(!data$binned&data$distance<=meta.data$width),]
}
}else{
xmat <- data[data$distance <= meta.data$width,]
}
}
# Determine if integration range has been specified
if(is.null(xmat$int.begin)|is.null(xmat$int.end)){
if(any(is.na(meta.data$int.range))){
meta.data$int.range <- c(meta.data$left,meta.data$width)
}
}else{
meta.data$int.range <- rbind(c(meta.data$left,meta.data$width),
cbind(xmat$int.begin,xmat$int.end))
}
# If left >0 perform left truncation by restricting values
if(meta.data$left >0){
if(binned){
if(any(data$binned&data$distbegin < meta.data$left)){
stop("left truncation must be smaller than the smallest interval begin point")
}else{
# NOTE(review): this subsets from 'data', not 'xmat', so for binned data
# the width truncation applied above appears to be discarded -- confirm
# against upstream mrds before changing.
xmat <- data[data$binned|(!data$binned&data$distance>=meta.data$left),]
}
}else{
xmat <- xmat[xmat$distance>=meta.data$left,]
}
}
# Clean up factor levels (re-factor drops levels unused after truncation)
b <- dim(xmat)[2]
for(i in 1:b){
if(is.factor(xmat[,i])){
xmat[,i] <- factor (xmat[,i])
}
}
# If the exclusion eliminated all of the data, stop with error message
if(dim(xmat)[1]==0){
stop("no data to analyze")
}
return(list(xmat=xmat,meta.data=meta.data))
}
|
/mrds/R/process.data.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 8,103
|
r
|
#' Process data for fitting distance sampling detection function
#'
#' Sets up dataframe and does some basic error checking. Adds needed fields to
#' dataframe and to \code{meta.data}.
#'
#' The function does a number of error checking tasks, creating fields and
#' adding to \code{meta.data} including:
#'
#' 1) If \code{check=TRUE}, check to make sure the record structure is okay for
#' mrds data. The number of primary records (observer=1) must equal the number
#' of secondary records (observer=2). Also, a field in the dataframe is created
#' \code{timesseen} which counts the number of times an object was detected
#' 0,1,2; if \code{timesseen=0} then the record is tossed from the analysis.
#' Also if there are differences in the data (distance, size, covariates) for
#' observer 1 and 2 a warning is issued that the analysis may fail. The code
#' assumes these values are the same for both observers.
#'
#' 2) Based on the presence of fields \code{distbegin} and \code{distend}, a
#' determination is made of whether the data analysis should be based on binned
#' distances and a field \code{binned} is created, which is \code{TRUE} if the
#' distance for the observation is binned. By assigning for each observation
#' this allows an analysis of a mixture of binned and unbinned distances.
#'
#' 3) Data are restricted such that distances are not greater than \code{width}
#' and not less than \code{left} if those values are specified in
#' \code{meta.data}. If they are not specified then \code{left} defaults to 0
#' and \code{width} defaults to the largest distance measurement.
#'
#' 4) Determine if an integration range (\code{int.begin} and \code{int.end})
#' has been specified for the observations. If it has, add the structure to
#' \code{meta.data}. The integration range is typically used for aerial
#' surveys in which the altitude varies such that the strip width (left to
#' width) changes with a change in altitude.
#'
#' 5) Fields defined as factors are cleaned up such that any unused levels are
#' eliminated.
#'
#' 6) If the restrictions placed on the data eliminate all of the data, the
#' function stops with an error message
#'
#' @param data dataframe object
#' @param meta.data meta.data options; see \code{\link{ddf}} for a description
#' @param check if \code{TRUE} check data for errors in the mrds structure; for
#' \code{method="ds" check=FALSE}
#' @return \item{xmat}{processed \code{data.frame} with added fields}
#' \item{meta.data}{meta.data list}
#' @author Jeff Laake
#' @keywords utility
process.data <- function(data,meta.data=list(),check=TRUE){
set.default.width=function(data,meta.data){
# set.default.width - sets default transect width when none was specified
# Arguments:
# data - dataframe
# meta.data - meta.data list
# Values: width of transect
if(meta.data$binned){
width <- max(c(data$distend,data$distance),na.rm=TRUE)
}else{
width <- max(data$distance)
}
return(width)
}
# assign dataframe to data
# Check to make sure the record structure is ok. Number of primary
# records = number of secondary
if(check){
if(length(data$detected[data$observer==1]) !=
length(data$detected[data$observer==2])){
stop("number of records for primary observer not equal to number for secondary observer")
}
}
# Create field which counts the number of times an object was detected 0,1,2
# (observer 1 and observer 2 rows for the same object get the same count).
if(check){
timesdetected <- data$detected[data$observer==1] +
data$detected[data$observer==2]
data$timesdetected <- rep(0,dim(data)[1])
data$timesdetected[data$observer==1] <- timesdetected
data$timesdetected[data$observer==2] <- timesdetected
# If any 00 (not detected by either observer), stop and issue error message
if(any(data$timesdetected==0)){
stop("following objects were never detected:",
paste(data$object[data$observer==1&data$timesdetected==0],
collapse=","),"\n")
}
}
# also check for mrds that the data fields have the same value for both
# observers for example same distance, size etc. This is only a warning as
#some fields may be validly different
# if(any(apply(data[data$observer==1,names(data)!="observer"&names(data)!="detected"],1,paste,collapse="")!=
# apply(data[data$observer==2,names(data)!="observer"&names(data)!="detected"],1,paste,collapse="")))
# warning("If analysis fails it may be due to difference in data between observer 1 and 2;\n fields such as distance, size and covariates should be the same")
# Determine if data are binned by presence of distbegin and distend fields
if(is.null(data$distend)|is.null(data$distbegin)){
binned <- FALSE
}else{
# NOTE(review): all(is.null(x)) is equivalent to is.null(x) for a vector;
# possibly all(is.na(...)) was intended here -- confirm against upstream mrds.
if(all(is.null(data$distend))|all(is.null(data$distbegin))){
binned <- FALSE
}else{
if(any(is.null(data$distend) & !is.null(data$distbegin)) |
any(is.null(data$distbegin)&!is.null(data$distend))){
stop("mismatched distance intervals - one or more endpoints are missing")
}else{
binned <- TRUE
}
}
}
# Reconcile the user's meta.data$binned setting with what the data contain.
if(meta.data$binned & !binned){
stop("binned set to TRUE in meta.data but distbegin and distend fields are missing")
}
if(!meta.data$binned & binned){
warning("data contain distbegin and distend fields but binned=FALSE. Analyzing as not binned",immediate.=TRUE)
binned <- FALSE
}
meta.data$binned <- binned
if(meta.data$binned & is.null(meta.data$breaks)){
stop("breaks must be set in meta.data for binned data")
}
# Fill in distance field for binned observations (interval midpoint) and
# create logical variable marking which rows are binned.
data$binned <- rep(FALSE,dim(data)[1])
if(binned){
meta.data$binned <- TRUE
data$distance[!is.na(data$distend)]<-(data$distbegin[!is.na(data$distend)]+
data$distend[!is.na(data$distend)])/2
data$binned[!is.na(data$distbegin)] <- TRUE
}
# Restrict data to width interval
# If no width set, use largest measured distance as width
# NOTE(review): assumes meta.data carries width/left/int.range entries
# (populated by the caller, e.g. ddf) -- is.na(NULL) would error here.
if(is.na(meta.data$width)){
width <- set.default.width(data,meta.data)
meta.data$width <- width
xmat <- data
warning("no truncation distance specified; using largest observed distance",immediate.=TRUE)
}else{
# change: jll 2 June 05; ref to width changed to meta.data$width
# This piece of code makes sure that the set width is as large as the
# largest bin end point for binned data.
if(meta.data$binned){
if(any(data$binned & data$distend > meta.data$width)){
stop("width must exceed largest interval end point")
}else{
xmat <- data[data$binned |
(!data$binned&data$distance<=meta.data$width),]
}
}else{
xmat <- data[data$distance <= meta.data$width,]
}
}
# Determine if integration range has been specified
if(is.null(xmat$int.begin)|is.null(xmat$int.end)){
if(any(is.na(meta.data$int.range))){
meta.data$int.range <- c(meta.data$left,meta.data$width)
}
}else{
meta.data$int.range <- rbind(c(meta.data$left,meta.data$width),
cbind(xmat$int.begin,xmat$int.end))
}
# If left >0 perform left truncation by restricting values
if(meta.data$left >0){
if(binned){
if(any(data$binned&data$distbegin < meta.data$left)){
stop("left truncation must be smaller than the smallest interval begin point")
}else{
# NOTE(review): this subsets from 'data', not 'xmat', so for binned data
# the width truncation applied above appears to be discarded -- confirm
# against upstream mrds before changing.
xmat <- data[data$binned|(!data$binned&data$distance>=meta.data$left),]
}
}else{
xmat <- xmat[xmat$distance>=meta.data$left,]
}
}
# Clean up factor levels (re-factor drops levels unused after truncation)
b <- dim(xmat)[2]
for(i in 1:b){
if(is.factor(xmat[,i])){
xmat[,i] <- factor (xmat[,i])
}
}
# If the exclusion eliminated all of the data, stop with error message
if(dim(xmat)[1]==0){
stop("no data to analyze")
}
return(list(xmat=xmat,meta.data=meta.data))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{print.sam}
\alias{print.sam}
\title{Print sam object}
\usage{
\method{print}{sam}(x, ...)
}
\arguments{
\item{x}{the fitted object as returned from the \code{\link{sam.fit}} function}
\item{...}{extra arguments}
}
\description{
Print sam object
}
\details{
prints the log-likelihood and the main convergence criteria
}
|
/stockassessment/man/print.sam.Rd
|
no_license
|
fishfollower/SAM
|
R
| false
| true
| 414
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{print.sam}
\alias{print.sam}
\title{Print sam object}
\usage{
\method{print}{sam}(x, ...)
}
\arguments{
\item{x}{the fitted object as returned from the \code{\link{sam.fit}} function}
\item{...}{extra arguments}
}
\description{
Print sam object
}
\details{
prints the log-likelihood and the main convergence criteria
}
|
#' Available models in autoML
#'
#' Returns a list of models to select from when using autoML
#'
#' @return A named list with character vectors of learner names, one each for
#'   classification (\code{classifiers}), regression (\code{regressors}) and
#'   clustering (\code{cluster}) tasks.
#' @export
#'
#' @examples
#' availableLearners()
#' @author
#' Xander Horn
availableLearners <- function(){
  # Fail fast with a clear message instead of library()'s opaque error when
  # mlr is not installed; mlr must then be attached because the task/learner
  # helpers rely on its functions being on the search path.
  if (!requireNamespace("mlr", quietly = TRUE)) {
    stop("Package 'mlr' is required for availableLearners().", call. = FALSE)
  }
  library(mlr)
  # Build one throw-away task per problem type using iris, purely to query
  # which learners are available for that task type.
  tempClassifTask <- generateTask(x = iris, y = "Species", problemType = "multi")
  tempRegrTask <- generateTask(x = iris[,-5], y = "Sepal.Length", problemType = "regression")
  tempClustTask <- generateTask(x = iris[,-5], y = NULL)
  # suppressWarnings: learner construction can warn about unavailable
  # suggested packages; only the learner names matter here.
  classifLearners <- suppressWarnings(generateLearners(task = tempClassifTask))
  regrLearners <- suppressWarnings(generateLearners(task = tempRegrTask))
  clusterLearners <- suppressWarnings(generateLearners(task = tempClustTask))
  learners <- list(classifiers = names(classifLearners),
                   regressors = names(regrLearners),
                   cluster = names(clusterLearners))
  return(learners)
}
|
/R/availableLearners.R
|
permissive
|
hansvomkreuz/autoML
|
R
| false
| false
| 943
|
r
|
#' Available models in autoML
#'
#' Returns a list of models to select from when using autoML
#'
#' @return A named list with character vectors of learner names, one each for
#'   classification (\code{classifiers}), regression (\code{regressors}) and
#'   clustering (\code{cluster}) tasks.
#' @export
#'
#' @examples
#' availableLearners()
#' @author
#' Xander Horn
availableLearners <- function(){
  # Fail fast with a clear message instead of library()'s opaque error when
  # mlr is not installed; mlr must then be attached because the task/learner
  # helpers rely on its functions being on the search path.
  if (!requireNamespace("mlr", quietly = TRUE)) {
    stop("Package 'mlr' is required for availableLearners().", call. = FALSE)
  }
  library(mlr)
  # Build one throw-away task per problem type using iris, purely to query
  # which learners are available for that task type.
  tempClassifTask <- generateTask(x = iris, y = "Species", problemType = "multi")
  tempRegrTask <- generateTask(x = iris[,-5], y = "Sepal.Length", problemType = "regression")
  tempClustTask <- generateTask(x = iris[,-5], y = NULL)
  # suppressWarnings: learner construction can warn about unavailable
  # suggested packages; only the learner names matter here.
  classifLearners <- suppressWarnings(generateLearners(task = tempClassifTask))
  regrLearners <- suppressWarnings(generateLearners(task = tempRegrTask))
  clusterLearners <- suppressWarnings(generateLearners(task = tempClustTask))
  learners <- list(classifiers = names(classifLearners),
                   regressors = names(regrLearners),
                   cluster = names(clusterLearners))
  return(learners)
}
|
#' ---
#' title: HW 1
#' author: Aidan Dunleavy
#' date: 01/20/2021
#' ---
#' PROBLEM 1
# Read the experiment data from GitHub; the treatment level becomes an
# ordered factor with the four levels 4 < 4.75 < 5.5 < 6.
data = read.csv("https://raw.githubusercontent.com/AidanDunleavy/Fall-2020-STAT-40001/master/paper.csv")
data$Group <- ordered(data$Group,
levels = c("4", "4.75", "5.5", "6"))
names(data) <- c("time", "Group")
levels(data$Group)
library("ggpubr")
plot.new()
# Boxplot of time by treatment group (ggpubr version).
ggboxplot(data, x = "Group", y = "time",
color = "Group", palette = c("#00AFBB", "#E7B800", "#FC4E07", "#00bb57"),
order = c("4", "4.75", "5.5", "6"),
ylab = "time", xlab = "Treatment")
plot.new()
# Same comparison as a base-R boxplot.
boxplot(time ~ Group, data = data,
xlab = "Treatment", ylab = "time",
frame = FALSE, col = c("#00AFBB", "#E7B800", "#FC4E07", "#00bb57"))
# One-way ANOVA of time on treatment group.
res.aov <- aov(time ~ Group, data = data)
summary(res.aov)
plot.new()
# Standard ANOVA diagnostic plots (residuals, QQ, scale-location, leverage)
# in a 2x2 grid; reset the layout afterwards.
par(mfrow=c(2,2))
plot(res.aov)
par(mfrow = c(1,1))
data$Group
#plot.new()
#plot(c("4", "4.75", "5.5", "6"), by(data$time, data$Group, mean),
# col = c("#00AFBB", "#E7B800", "#FC4E07", "#00bb57"))
#plot(data$Group, data$time)
# Linear model with the ordered factor: coefficients are polynomial
# contrasts (.L, .Q, .C) across the four levels.
model = lm(time ~ Group, data = data)
summary(model)
|
/main/works/hw1.R
|
no_license
|
AidanDunleavy/Spring2021-Kuhn
|
R
| false
| false
| 1,151
|
r
|
#' ---
#' title: HW 1
#' author: Aidan Dunleavy
#' date: 01/20/2021
#' ---
#' PROBLEM 1
# One-way ANOVA of reaction `time` across four treatment groups.
# The script fetches data over the network and draws plots, so the
# statements are order-dependent side effects.
data = read.csv("https://raw.githubusercontent.com/AidanDunleavy/Fall-2020-STAT-40001/master/paper.csv")
# Treat group labels as an ordered factor so plots and contrasts respect
# the 4 < 4.75 < 5.5 < 6 ordering.
data$Group <- ordered(data$Group,
                      levels = c("4", "4.75", "5.5", "6"))
names(data) <- c("time", "Group")
levels(data$Group)
library("ggpubr")
plot.new()
# ggpubr boxplot of time by treatment, one color per group.
ggboxplot(data, x = "Group", y = "time",
          color = "Group", palette = c("#00AFBB", "#E7B800", "#FC4E07", "#00bb57"),
          order = c("4", "4.75", "5.5", "6"),
          ylab = "time", xlab = "Treatment")
plot.new()
# Base-graphics version of the same boxplot.
boxplot(time ~ Group, data = data,
        xlab = "Treatment", ylab = "time",
        frame = FALSE, col = c("#00AFBB", "#E7B800", "#FC4E07", "#00bb57"))
# Fit and summarize the one-way ANOVA model.
res.aov <- aov(time ~ Group, data = data)
summary(res.aov)
plot.new()
# Standard 2x2 diagnostic plots (residuals vs fitted, QQ, scale-location, leverage).
par(mfrow=c(2,2))
plot(res.aov)
par(mfrow = c(1,1))
data$Group
#plot.new()
#plot(c("4", "4.75", "5.5", "6"), by(data$time, data$Group, mean),
#     col = c("#00AFBB", "#E7B800", "#FC4E07", "#00bb57"))
#plot(data$Group, data$time)
# Equivalent linear-model fit; Group is ordered, so the coefficients are
# polynomial contrasts (linear, quadratic, cubic).
model = lm(time ~ Group, data = data)
summary(model)
|
# Commented solutions for INBO Coding Club challenges (2019-10-24):
# string manipulation on a species dataframe with stringr/dplyr.
library(tidyverse)
library(here)
## Challenge 1
species_df <- read_csv(here("data", "20191024_species.csv"), na = "")
### Set columns `species_id` and `taxa` lowercase
species_df$species_id <- str_to_lower(species_df$species_id)
species_df$taxa <- str_to_lower(species_df$taxa)
# or using tidyverse functions (dplyr)
species_df <-
  species_df %>%
  mutate(species_id = str_to_lower(species_id),
         taxa = str_to_lower(taxa))
### sort vector of species_id label alphabetically
species_id_label <- str_sort(species_df$species_id)
## beware!! str_sort within mutate will sort a specific column without sorting
## the other columns in the dataframe (rows would no longer line up!)
species_df %>%
  mutate(species_id = str_sort(species_id))
# to sort a dataframe by a column use arrange instead
species_df %>%
  arrange(species_id)
### extract species_id labels longer than 2 letters
species_id_long <- species_df$species_id[str_length(species_df$species_id) > 2]
### tidyverse version
species_id_long <-
  species_df %>%
  filter(str_length(species_id) > 2) %>%
  pull(species_id)
## CHALLENGE 2
### genus + species
# tidyverse style
df_complete <-
  species_df %>%
  mutate(canonicalName = str_c(genus, species, sep = " "))
# or in old style R
df_complete <- species_df
df_complete[["canonicalName"]] <- paste(species_df$genus, species_df$species, sep = " ")
# Remove census related label from taxa column
species_df_taxa_clean <-
  species_df %>%
  # str_remove or str_remove_all, in this case no differences
  mutate(taxa = str_remove(taxa, "-not censused"))
## INTERMEZZO
example_string <- "I. love. the. 2019(!!) INBO. Coding. Club! Session. of. 24/10/2019...."
# How to detect/remove/extract:
# - any digit? (maybe first show all three function (str_detect, ...), but
# then choose str_remove as it is useful for
# challenge 3, for everyday use, and you see the original string changing.
# NOTE: a POSIX class needs BOTH colons: "[:digit:]". The previous
# "[:digit]" is just the character set {':', 'd', 'i', 'g', 't'} and
# silently matches the wrong characters.
str_detect(example_string, pattern = "[:digit:]") # or "[\\d]"
str_remove(example_string, pattern = "[:digit:]") # or "[\\d]"
str_remove_all(example_string, pattern = "[:digit:]") # or "[\\d]"
str_extract(example_string, pattern = "[:digit:]") # or "[\\d]"
str_extract_all(example_string, pattern = "[:digit:]") # or "[\\d]"
# any "." (nice to show the need of \\ before the "." as "." is a special
# character. See in cheatsheet: every character except a new line) SHOW THE DIFFERENCE.
str_remove_all(example_string, pattern = ".") # Oh no, everything is gone
str_remove_all(example_string, pattern = "\\.") # this is correct
# the last .
str_remove_all(example_string, pattern = "\\.$") # show anchor "$"
# All . at the end
str_remove_all(example_string, pattern = "\\.+$") # show quantifier "+"
# - any extra "." , i.e. any . preceded by .
str_remove_all(example_string, pattern = "(?<=\\.)\\.") # show look around (?<=)
## Challenge 3
# Clean the messy authorship column step by step; each mutate overwrites
# authorship_clean with one more artifact removed.
species_df_clean <-
  species_df %>%
  # remove tabs
  mutate(authorship_clean = str_remove_all(authorship,
                                           pattern = "\\t")) %>%
  # remove vertical pipes
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "\\|")) %>%
  # remove NAs (note this might unwantedly remove "NA" within an author name,
  # although it is very unlikely that a capitalized NA is part of a name)
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "NA")) %>%
  # remove spaces at the end
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "[:space:]+$")) %>%
  # remove punctuation at the end
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "[:punct:]$"))
View(species_df_clean)
# a somewhat safer alternative for NA removal:
species_df_clean2 <-
  species_df %>%
  # remove tabs
  mutate(authorship_clean = str_remove_all(authorship,
                                           pattern = "\\t")) %>%
  # remove only NAs that are preceded by | or followed by |
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "((?<=\\|)NA)|(NA(?=\\|))")) %>%
  # remove vertical pipes
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "\\|")) %>%
  # remove spaces at the end
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "[:space:]+$")) %>%
  # remove punctuation at the end
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "[:punct:]$"))
all.equal(species_df_clean, species_df_clean2)
## Bonus challenge
### bird observation (easy)
bird_obs <- read_csv(here("data", "20191024_bird_observations.csv"), na = "")
# Build "Gemeente,Toponym:ToponymDetail". str_c propagates NA (see the
# harder variant below for NA-safe handling).
bird_obs[["observation_location"]] <- str_c(
  str_c(bird_obs$PlaatsGemeente, bird_obs$PlaatsToponym,
        sep = ","),
  bird_obs$PlaatsToponymDetail,
  sep = ":"
)
# tidyverse style
bird_obs <-
  bird_obs %>%
  mutate(observation_location =
           str_c(PlaatsGemeente,
                 PlaatsToponym,
                 sep = ","),
         observation_location =
           str_c(
             observation_location,
             PlaatsToponymDetail,
             sep = ":"
           )
  )
### bird observation (slightly more difficult)
# With NAs present str_c would make the whole result NA, so fall back to
# the shorter location string when a component is missing.
bird_obs2 <- read_csv(here("data", "20191024_bird_observations_with_na.csv"),
                      na = "")
bird_obs2 <-
  bird_obs2 %>%
  mutate(observation_location = if_else(!is.na(PlaatsToponym),
                                        str_c(PlaatsGemeente, PlaatsToponym,
                                              sep = ","),
                                        PlaatsGemeente)) %>%
  mutate(observation_location = if_else(!is.na(PlaatsToponymDetail),
                                        str_c(observation_location,
                                              PlaatsToponymDetail,
                                              sep = ":"),
                                        observation_location))
View(bird_obs2)
|
/src/20191024_challenges_commented_solution.R
|
permissive
|
Yasmine-Verzelen/coding-club
|
R
| false
| false
| 6,173
|
r
|
# Commented solutions for INBO Coding Club challenges (2019-10-24):
# string manipulation on a species dataframe with stringr/dplyr.
library(tidyverse)
library(here)
## Challenge 1
species_df <- read_csv(here("data", "20191024_species.csv"), na = "")
### Set columns `species_id` and `taxa` lowercase
species_df$species_id <- str_to_lower(species_df$species_id)
species_df$taxa <- str_to_lower(species_df$taxa)
# or using tidyverse functions (dplyr)
species_df <-
  species_df %>%
  mutate(species_id = str_to_lower(species_id),
         taxa = str_to_lower(taxa))
### sort vector of species_id label alphabetically
species_id_label <- str_sort(species_df$species_id)
## beware!! str_sort within mutate will sort a specific column without sorting
## the other columns in the dataframe (rows would no longer line up!)
species_df %>%
  mutate(species_id = str_sort(species_id))
# to sort a dataframe by a column use arrange instead
species_df %>%
  arrange(species_id)
### extract species_id labels longer than 2 letters
species_id_long <- species_df$species_id[str_length(species_df$species_id) > 2]
### tidyverse version
species_id_long <-
  species_df %>%
  filter(str_length(species_id) > 2) %>%
  pull(species_id)
## CHALLENGE 2
### genus + species
# tidyverse style
df_complete <-
  species_df %>%
  mutate(canonicalName = str_c(genus, species, sep = " "))
# or in old style R
df_complete <- species_df
df_complete[["canonicalName"]] <- paste(species_df$genus, species_df$species, sep = " ")
# Remove census related label from taxa column
species_df_taxa_clean <-
  species_df %>%
  # str_remove or str_remove_all, in this case no differences
  mutate(taxa = str_remove(taxa, "-not censused"))
## INTERMEZZO
example_string <- "I. love. the. 2019(!!) INBO. Coding. Club! Session. of. 24/10/2019...."
# How to detect/remove/extract:
# - any digit? (maybe first show all three function (str_detect, ...), but
# then choose str_remove as it is useful for
# challenge 3, for everyday use, and you see the original string changing.
# NOTE: a POSIX class needs BOTH colons: "[:digit:]". The previous
# "[:digit]" is just the character set {':', 'd', 'i', 'g', 't'} and
# silently matches the wrong characters.
str_detect(example_string, pattern = "[:digit:]") # or "[\\d]"
str_remove(example_string, pattern = "[:digit:]") # or "[\\d]"
str_remove_all(example_string, pattern = "[:digit:]") # or "[\\d]"
str_extract(example_string, pattern = "[:digit:]") # or "[\\d]"
str_extract_all(example_string, pattern = "[:digit:]") # or "[\\d]"
# any "." (nice to show the need of \\ before the "." as "." is a special
# character. See in cheatsheet: every character except a new line) SHOW THE DIFFERENCE.
str_remove_all(example_string, pattern = ".") # Oh no, everything is gone
str_remove_all(example_string, pattern = "\\.") # this is correct
# the last .
str_remove_all(example_string, pattern = "\\.$") # show anchor "$"
# All . at the end
str_remove_all(example_string, pattern = "\\.+$") # show quantifier "+"
# - any extra "." , i.e. any . preceded by .
str_remove_all(example_string, pattern = "(?<=\\.)\\.") # show look around (?<=)
## Challenge 3
# Clean the messy authorship column step by step; each mutate overwrites
# authorship_clean with one more artifact removed.
species_df_clean <-
  species_df %>%
  # remove tabs
  mutate(authorship_clean = str_remove_all(authorship,
                                           pattern = "\\t")) %>%
  # remove vertical pipes
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "\\|")) %>%
  # remove NAs (note this might unwantedly remove "NA" within an author name,
  # although it is very unlikely that a capitalized NA is part of a name)
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "NA")) %>%
  # remove spaces at the end
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "[:space:]+$")) %>%
  # remove punctuation at the end
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "[:punct:]$"))
View(species_df_clean)
# a somewhat safer alternative for NA removal:
species_df_clean2 <-
  species_df %>%
  # remove tabs
  mutate(authorship_clean = str_remove_all(authorship,
                                           pattern = "\\t")) %>%
  # remove only NAs that are preceded by | or followed by |
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "((?<=\\|)NA)|(NA(?=\\|))")) %>%
  # remove vertical pipes
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "\\|")) %>%
  # remove spaces at the end
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "[:space:]+$")) %>%
  # remove punctuation at the end
  mutate(authorship_clean = str_remove_all(authorship_clean,
                                           pattern = "[:punct:]$"))
all.equal(species_df_clean, species_df_clean2)
## Bonus challenge
### bird observation (easy)
bird_obs <- read_csv(here("data", "20191024_bird_observations.csv"), na = "")
# Build "Gemeente,Toponym:ToponymDetail". str_c propagates NA (see the
# harder variant below for NA-safe handling).
bird_obs[["observation_location"]] <- str_c(
  str_c(bird_obs$PlaatsGemeente, bird_obs$PlaatsToponym,
        sep = ","),
  bird_obs$PlaatsToponymDetail,
  sep = ":"
)
# tidyverse style
bird_obs <-
  bird_obs %>%
  mutate(observation_location =
           str_c(PlaatsGemeente,
                 PlaatsToponym,
                 sep = ","),
         observation_location =
           str_c(
             observation_location,
             PlaatsToponymDetail,
             sep = ":"
           )
  )
### bird observation (slightly more difficult)
# With NAs present str_c would make the whole result NA, so fall back to
# the shorter location string when a component is missing.
bird_obs2 <- read_csv(here("data", "20191024_bird_observations_with_na.csv"),
                      na = "")
bird_obs2 <-
  bird_obs2 %>%
  mutate(observation_location = if_else(!is.na(PlaatsToponym),
                                        str_c(PlaatsGemeente, PlaatsToponym,
                                              sep = ","),
                                        PlaatsGemeente)) %>%
  mutate(observation_location = if_else(!is.na(PlaatsToponymDetail),
                                        str_c(observation_location,
                                              PlaatsToponymDetail,
                                              sep = ":"),
                                        observation_location))
View(bird_obs2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ELMER.data.R
\docType{data}
\name{Human_genes__GRCh37_p13}
\alias{Human_genes__GRCh37_p13}
\title{A matrix containing ENSEMBL hg19 gene metadata accessed using biomart
This data is used if ensembl cannot be reached}
\format{A matrix with metadata for 60482 genes}
\usage{
Human_genes__GRCh37_p13
}
\description{
A matrix containing ENSEMBL hg19 gene metadata accessed using biomart
This data is used if ensembl cannot be reached
}
\keyword{internal}
|
/man/Human_genes__GRCh37_p13.Rd
|
no_license
|
tiagochst/ELMER.data
|
R
| false
| true
| 528
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ELMER.data.R
\docType{data}
\name{Human_genes__GRCh37_p13}
\alias{Human_genes__GRCh37_p13}
\title{A matrix containing ENSEMBL hg19 gene metadata accessed using biomart
This data is used if ensembl cannot be reached}
\format{A matrix with metadata for 60482 genes}
\usage{
Human_genes__GRCh37_p13
}
\description{
A matrix containing ENSEMBL hg19 gene metadata accessed using biomart
This data is used if ensembl cannot be reached
}
\keyword{internal}
|
## Creates a special "matrix" wrapper that can cache its inverse.
## The return value is a list of four accessor closures that share one
## enclosing environment:
##   setM    - replace the stored matrix (and drop any cached inverse)
##   getM    - return the stored matrix
##   setInvM - store a computed inverse
##   getInvM - return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  setM <- function(y) {
    # A new matrix invalidates the previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  getM <- function() {
    x
  }
  setInvM <- function(inverse) {
    cached_inverse <<- inverse
  }
  getInvM <- function() {
    cached_inverse
  }
  list(setM = setM, getM = getM, setInvM = setInvM, getInvM = getInvM)
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix unchanged),
## the cached value is returned instead of being recomputed; extra
## arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInvM()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result.
    fresh <- solve(x$getM(), ...)
    x$setInvM(fresh)
    return(fresh)
  }
  message("get the inverse of the matrix")
  cached
}
|
/cachematrix.R
|
no_license
|
BichTran91/datasciencecoursera
|
R
| false
| false
| 1,334
|
r
|
## Creates a special "matrix" wrapper that can cache its inverse.
## The return value is a list of four accessor closures that share one
## enclosing environment:
##   setM    - replace the stored matrix (and drop any cached inverse)
##   getM    - return the stored matrix
##   setInvM - store a computed inverse
##   getInvM - return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  setM <- function(y) {
    # A new matrix invalidates the previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  getM <- function() {
    x
  }
  setInvM <- function(inverse) {
    cached_inverse <<- inverse
  }
  getInvM <- function() {
    cached_inverse
  }
  list(setM = setM, getM = getM, setInvM = setInvM, getInvM = getInvM)
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix unchanged),
## the cached value is returned instead of being recomputed; extra
## arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInvM()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result.
    fresh <- solve(x$getM(), ...)
    x$setInvM(fresh)
    return(fresh)
  }
  message("get the inverse of the matrix")
  cached
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/07_calc_myreg_mreg_linear_yreg_logistic.R
\name{calc_myreg_mreg_linear_yreg_logistic}
\alias{calc_myreg_mreg_linear_yreg_logistic}
\title{Create calculators for effects and se (mreg linear / yreg logistic)}
\usage{
calc_myreg_mreg_linear_yreg_logistic(
mreg,
mreg_fit,
yreg,
yreg_fit,
avar,
mvar,
cvar,
interaction
)
}
\arguments{
\item{mreg}{A character vector of length 1. Mediator regression type: \code{"linear"} or \code{"logistic"}.}
\item{mreg_fit}{Model fit from \code{\link{fit_mreg}}}
\item{yreg}{A character vector of length 1. Outcome regression type: \code{"linear"}, \code{"logistic"}, \code{"loglinear"}, \code{"poisson"}, \code{"negbin"}, \code{"survCox"}, \code{"survAFT_exp"}, or \code{"survAFT_weibull"}.}
\item{yreg_fit}{Model fit from \code{\link{fit_yreg}}}
\item{avar}{A character vector of length 1. Treatment variable name.}
\item{mvar}{A character vector of length 1. Mediator variable name.}
\item{cvar}{A character vector of length > 0. Covariate names. Use \code{NULL} if there is no covariate. However, this is a highly suspicious situation. Even if \code{avar} is randomized, \code{mvar} is not. Thus, there should usually be some confounder(s) to account for the common cause structure (confounding) between \code{mvar} and \code{yvar}.}
\item{interaction}{A logical vector of length 1. Default to TRUE. Whether to include a mediator-treatment interaction term in the outcome regression model.}
}
\value{
A list containing a function for effect estimates and a function for corresponding standard errors.
}
\description{
Construct functions for the conditional effect estimates and their standard errors in the mreg linear / yreg logistic setting. Internally, this function deconstructs model objects and feeds parameter estimates to the internal worker functions \code{calc_myreg_mreg_linear_yreg_logistic_est} and \code{calc_myreg_mreg_linear_yreg_logistic_se}.
}
|
/man/calc_myreg_mreg_linear_yreg_logistic.Rd
|
no_license
|
linzn1008/regmedint
|
R
| false
| true
| 1,998
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/07_calc_myreg_mreg_linear_yreg_logistic.R
\name{calc_myreg_mreg_linear_yreg_logistic}
\alias{calc_myreg_mreg_linear_yreg_logistic}
\title{Create calculators for effects and se (mreg linear / yreg logistic)}
\usage{
calc_myreg_mreg_linear_yreg_logistic(
mreg,
mreg_fit,
yreg,
yreg_fit,
avar,
mvar,
cvar,
interaction
)
}
\arguments{
\item{mreg}{A character vector of length 1. Mediator regression type: \code{"linear"} or \code{"logistic"}.}
\item{mreg_fit}{Model fit from \code{\link{fit_mreg}}}
\item{yreg}{A character vector of length 1. Outcome regression type: \code{"linear"}, \code{"logistic"}, \code{"loglinear"}, \code{"poisson"}, \code{"negbin"}, \code{"survCox"}, \code{"survAFT_exp"}, or \code{"survAFT_weibull"}.}
\item{yreg_fit}{Model fit from \code{\link{fit_yreg}}}
\item{avar}{A character vector of length 1. Treatment variable name.}
\item{mvar}{A character vector of length 1. Mediator variable name.}
\item{cvar}{A character vector of length > 0. Covariate names. Use \code{NULL} if there is no covariate. However, this is a highly suspicious situation. Even if \code{avar} is randomized, \code{mvar} is not. Thus, there should usually be some confounder(s) to account for the common cause structure (confounding) between \code{mvar} and \code{yvar}.}
\item{interaction}{A logical vector of length 1. Default to TRUE. Whether to include a mediator-treatment interaction term in the outcome regression model.}
}
\value{
A list containing a function for effect estimates and a function for corresponding standard errors.
}
\description{
Construct functions for the conditional effect estimates and their standard errors in the mreg linear / yreg logistic setting. Internally, this function deconstructs model objects and feeds parameter estimates to the internal worker functions \code{calc_myreg_mreg_linear_yreg_logistic_est} and \code{calc_myreg_mreg_linear_yreg_logistic_se}.
}
|
#' Kaplan-Meier Estimates
#'
#' For two strata, estimates the standard error of the difference in two
#' Kaplan-Meier estimates at each value of times, and plots half-width
#' of confidence level for the difference, centered at the midpoint
#' of the survival estimates.
#'
#' The per-stratum standard errors are combined as \code{sqrt(se1^2 + se2^2)}
#' (independent strata) with a 1.96 multiplier. One vertical gray segment is
#' drawn per time point on the current plot, so a plot (e.g. from
#' \code{survplot}) must already be open.
#'
#' @param fit survfit object. See \code{\link[rms]{survfit.formula}}.
#' @param times numeric vector. Time value for each record.
#' @param fun function. Transformation applied to the plotted estimates.
#' @param offset numeric. Offset value to apply to \sQuote{x} coordinate points.
#' @param lwd numeric. The line width, passed to \code{lines}.
#' @param lty numeric. The line type, passed to \code{lines}.
#' @export
#' @examples
#' set.seed(20)
#' time <- rep(365, 50)
#' event <- rbinom(50, 1, 1/3)
#' time[event == 1] <- sample(365, sum(event == 1), replace=TRUE)
#' trt <- sample(1:2, 50, replace=TRUE)
#' require('rms')
#' fit <- survfit.formula(Surv(time, event) ~ trt)
#' survplot.survfit(fit)
#' plotKmHalfCL(fit, time)
plotKmHalfCL <- function(fit, times, fun=function(x) x,
                         offset=0, lwd=0.5, lty=1) {
  s <- summary(fit, times=times)
  st <- s$strata
  lev <- levels(st)
  if(length(lev) != 2) {
    stop('only handles 2 strata')
  }
  ## Split survival estimates and standard errors by stratum.
  s1 <- s$surv[st==lev[1]]
  s2 <- s$surv[st==lev[2]]
  se1 <- s$std.err[st==lev[1]]
  se2 <- s$std.err[st==lev[2]]
  ## SE of the difference, assuming independent strata.
  se.diff <- sqrt(se1^2 + se2^2)
  clhalf <- 1.96*se.diff
  midpt <- (s1 + s2)/2
  ## seq_along() is safe for empty 'times'; 1:length(times) would have
  ## produced the bogus sequence c(1, 0) and indexed out of bounds.
  for(i in seq_along(times)) {
    lines(offset+c(times[i],times[i]),
          fun(c(midpt[i]-clhalf[i]/2, midpt[i]+clhalf[i]/2)),
          lwd=lwd, lty=lty, col=gray(0.7))
  }
}
#' Set mfrow Parameter
#'
#' Compute and set a good \code{par("mfrow")} given the
#' number of figures to plot.
#'
#' \code{trellis} and \code{small} may not both be specified as \sQuote{TRUE}.
#' The layout is only chosen when more than one figure is requested and the
#' current layout is still the 1x1 default.
#'
#' @param n numeric. Total number of figures to place in layout.
#' @param trellis logical. Set to \sQuote{TRUE} when a \sQuote{trellis} plot
#' is requested; \code{par()} is then neither consulted nor modified.
#' @param small logical. Set to \sQuote{TRUE} if the plot area should be
#' smaller to accomodate many plots (layouts up to 6x8).
#' @return return numeric vector.
#' If \code{trellis = TRUE} the suggested \sQuote{mfrow} is returned.
#' Otherwise the original \sQuote{mfrow} is returned invisibly.
#' @export
#' @examples
#' oldmfrow <- mfrowSet(8)
mfrowSet <- function(n, trellis=FALSE, small=FALSE) {
  ## && (scalar, short-circuit) rather than & throughout: all of these
  ## conditions operate on length-1 values.
  if(small && trellis) stop('may not specify small=T when trellis=T')
  omf <- mf <- if(trellis) NULL else par('mfrow')
  if(length(mf)==0) mf <- c(1,1)
  if(n > 1 && max(mf)==1) {
    if(small) {
      ## Dense layouts, up to 48 panels.
      mf <- if(n <= 4) {
        c(2,2)
      } else if(n <= 6) {
        c(2,3)
      } else if(n <= 12) {
        c(3,4)
      } else if(n <= 16) {
        c(4,4)
      } else if(n <= 20) {
        c(4,5)
      } else if(n <= 24) {
        c(4,6)
      } else if(n <= 25) {
        c(5,5)
      } else if(n <= 30) {
        c(5,6)
      } else if(n <= 36) {
        c(6,6)
      } else if(n <= 42) {
        c(6,7)
      } else {
        c(6,8)
      }
    } else {
      ## Roomier layouts, capped at 4x4.
      mf <- if(n <= 4) {
        c(2,2)
      } else if(n <= 6) {
        c(2,3)
      } else if(n <= 9) {
        c(3,3)
      } else {
        c(4,3)
      }
      if(n > 12 && n <= 16) {
        mf <- c(4,4)
      }
    }
    if(!trellis) {
      par(mfrow=mf)
    }
  }
  if(trellis) {
    mf
  } else {
    invisible(omf)
  }
}
#' Put Figure
#'
#' Writes a LaTeX figure environment for a generated figure into the
#' panel's .tex file so the figure is included in the report document.
#'
#' @param panel character. Panel name; dots are translated to dashes and
#' \sQuote{.tex} is appended to form the output file name.
#' @param name character. Name for figure, used for the graphics file name
#' and the \sQuote{fig:} label.
#' @param caption character. Short caption for figure.
#' @param longcaption character. Long caption for figure.
#' @param append logical. If \sQuote{TRUE} output will be appended instead of overwritten.
#' @export
putFig <- function(panel, name, caption=NULL, longcaption=NULL, append=TRUE) {
  # Graphics type ('pdf', 'ps' or 'interactive') is a file-wide option.
  gtype <- getOption('rreport.gtype')
  if(gtype=='interactive') {
    # Nothing to write when plotting interactively.
    return(invisible())
  }
  # Dots would confuse LaTeX graphics names, so translate them to dashes.
  # NOTE(review): translate() and TexDirName() are project/Hmisc helpers
  # defined elsewhere - confirm their semantics there.
  panel <- paste(translate(panel, '.', '-'), '.tex', sep='')
  name <- translate(name, '.', '-')
  file <- file.path(TexDirName(), panel)
  # Included graphics file is name + '.' + gtype (e.g. myfig.pdf).
  suffix <- paste('.', gtype, sep='')
  ## if(length(caption)) caption <- latexTranslate(caption)
  ## if(length(longcaption)) longcaption <- latexTranslate(longcaption)
  capt1 <- capt2 <- ""
  if(length(longcaption)) {
    # Short caption goes to the list of figures, long caption below the figure.
    capt1 <- sprintf("\\caption[%s]{%s}\n", caption, longcaption)
  } else if(length(caption)) {
    capt1 <- sprintf("\\caption{%s}\n", caption)
  }
  if(length(caption)) {
    # A label is only emitted when there is a caption to anchor it to.
    capt2 <- sprintf("\\label{fig:%s}\n", name)
  }
  mytex <- sprintf("\\begin{figure}[hbp!]\\leavevmode\\centerline{\\includegraphics{%s%s}}\n%s%s\\end{figure}\n", name, suffix, capt1, capt2)
  cat(mytex, file=file, append=append)
  invisible()
}
#' Plot Initialization
#'
#' Opens a graphics device for \code{file} according to the
#' \code{rreport.gtype} option: \sQuote{pdf}/\sQuote{ps} open the matching
#' device via Hmisc helpers; \sQuote{interactive} opens nothing.
#'
#' @param file character. Character string specifying file prefix.
#' @param \dots Arguments to be passed to \code{\link[Hmisc]{setps}} or \code{\link[Hmisc]{setpdf}}.
#' @export
#' @seealso \code{\link[Hmisc]{ps.slide}}
startPlot <- function(file, ...) {
  gtype <- getOption('rreport.gtype')
  # File names use dashes instead of dots (LaTeX-friendly); translate()
  # is a project/Hmisc helper defined elsewhere.
  file <- translate(file,'.','-')
  if(gtype == 'pdf') {
    # Route output into the pdf/ (resp. ps/) subdirectory via the
    # Hmisc prefix options, then open the device.
    options(setpdfPrefix=file.path('pdf',''))
    setpdf(file, ..., type='char')
  } else if(gtype == 'ps') {
    options(setpsPrefix=file.path('ps',''))
    setps(file, ..., type='char')
  }
  invisible()
}
#' @rdname startPlot
#' @export
endPlot <- function() {
  # Close the device opened by startPlot(), unless plotting interactively
  # (in which case no device was opened).
  if(getOption('rreport.gtype') != 'interactive') {
    dev.off()
  }
  invisible()
}
#' Combine Equal
#'
#' Given a contingency table of counts, combine factors with equal counts.
#'
#' Factor names will be pasted together to make new names. A code and definition will be generated
#' if the new name should exceed \code{maxChar}.
#'
#' @param x numeric. Contingency table or matrix of names and counts, see \code{\link[base]{table}}.
#' @param maxChar numeric. Maximum length of character string. Names exceeding this will be replaced with a letter-code.
#' @return a list with three elements, EXCEPT when no counts are duplicated,
#' in which case the input \code{x} is returned unchanged (callers must
#' handle both shapes)
#' \item{x}{Named vector of code frequencies. The name corresponds to the code.}
#'
#' \item{codes}{Character vector of alpha-code labels.}
#'
#' \item{defs}{Character vector of code definitions.}
#'
#' @export
#' @examples
#' combineEqual(table(rep(991:1010, times=rep(1:4, each=5))))
#' combineEqual(table(rep(991:1010, times=rep(1:4, each=5))), maxChar=10)
combineEqual <- function(x, maxChar=24) {
  xorig <- x
  if(is.matrix(x)) {
    ## Collapse each matrix column to one comparable string of counts.
    x <- apply(x, 2, paste, collapse=',')
  }
  ## Nothing to combine: return the input as-is (note: NOT the list shape).
  if(!any(duplicated(x))) {
    return(xorig)
  }
  ## Group the factor names by identical count (pattern).
  z <- split(names(x), x)
  v <- if(is.matrix(xorig)) {
    names(z)
  } else {
    as.numeric(names(z))
  }
  nam <- codes <- defs <- character(0)
  j <- 0
  ## 52 single-letter codes available: a-z then A-Z.
  all.letters <- c(letters,LETTERS)
  ## seq_along() rather than 1:length(z): safe if z were ever empty.
  for(i in seq_along(z)) {
    a <- z[[i]]
    ac <- paste(a, collapse=', ')
    if(nchar(ac) <= maxChar) {
      ## Combined name is short enough to use directly.
      nam <- c(nam,ac)
    } else {
      ## Too long: substitute a "(letter)" code and record its definition.
      j <- j + 1
      k <- paste('(',all.letters[j],')',sep='')
      nam <- c(nam, k)
      codes <- c(codes, k)
      defs <- c(defs, ac)
    }
  }
  names(v) <- nam
  if(is.matrix(xorig)) {
    ## Re-expand the collapsed count strings back into a numeric matrix.
    v <- matrix(as.numeric(unlist(strsplit(v,','))),ncol=length(v),
                dimnames=list(dimnames(xorig)[[1]], nam))
  }
  list(x=v, codes=codes, defs=defs)
}
#' Make Treatment Key
#'
#' Use treatment levels to generate treatment key in LaTeX.
#'
#' Typically will be called with \code{paramFile} missing as the filename
#' will be generated by package options.
#'
#' @param tlevels vector. unique treatment levels, expected to have length two
#' @param paramFile character. params filename
#' @param append logical. If \sQuote{TRUE} output will be appended to \sQuote{file},
#' otherwise, contents will be overwritten
#' @export
#' @examples
#' makeTreatKey(c('A', 'B'), paramFile='') # prints to standard output
makeTreatKey <- function(tlevels, paramFile, append=FALSE) {
  if(length(tlevels) != 2) {
    stop('expected two levels of treatment')
  }
  if(missing(paramFile)) {
    paramFile <- paramTexFile()
  }
  # \treatkey renders level 1 as a thin black rule and level 2 as a thick
  # gray rule, matching the line styles used in the report's plots.
  key <- paste('\\def\\treatkey{', tlevels[1],
               ':\\rule[.05in]{.25in}{.5pt}; ', tlevels[2],
               ':\\textcolor[gray]{0.7}{\\rule[.05in]{.25in}{1.25pt}}.}\n',
               sep='')
  cat(key, file=paramFile, append=append)
  invisible()
}
#' Put Params
#'
#' Write parameter definitions as LaTeX \code{\\def} macros to the params
#' file so report text can reference computed values by name.
#'
#' Each \code{name = value} pair becomes \code{\\def\\name{value}}; values
#' are rendered with \code{format()}. Calling with no parameters writes a
#' placeholder comment line instead.
#'
#' @param \dots list of name-value pairs. parameter names and their associated formats
#' @param paramFile character. params filename
#' @export
#' @examples
#' putparams(go=1, fish='blue', paramFile='') #prints to standard output
putparams <- function(..., paramFile) {
  if(missing(paramFile)) {
    paramFile <- paramTexFile()
  }
  params <- list(...)
  if(length(params) == 0) {
    # No parameters: leave a version-control placeholder comment.
    cat('% $Id$\n', file=paramFile)
  } else {
    # One \def per parameter, appended so successive calls accumulate.
    for(param in names(params)) {
      line <- paste('\\def\\', param, '{', format(params[[param]]), '}\n', sep='')
      cat(line, file=paramFile, append=TRUE)
    }
  }
  invisible()
}
#' PS-to-PDF Directory
#'
#' Convert every PostScript file under \sQuote{ps/} to a PDF under
#' \sQuote{pdf/} using the external \sQuote{epstopdf} command. A file is
#' only converted when its PDF is missing or older than the .ps source
#' (make-style freshness check).
#'
#' @export
dirps2pdf <- function() {
  ps_files <- dir('ps/', pattern='\\.ps$')
  pdf_files <- sub('ps$', 'pdf', ps_files)
  for(idx in seq_along(ps_files)) {
    psname <- file.path('ps', ps_files[idx])
    pdfname <- file.path('pdf', pdf_files[idx])
    input.date <- file.info(psname)$mtime
    output.date <- file.info(pdfname)$mtime
    # Convert when the PDF does not exist yet (mtime is NA) or is stale.
    if(is.na(output.date) || output.date < input.date) {
      cat('Converting file:', ps_files[idx], '\n')
      cmd <- sprintf("epstopdf %s -o %s", shQuote(psname), shQuote(pdfname))
      system(cmd)
    }
  }
  invisible()
}
#' Publish PDF
#'
#' Uploads report/minutes PDFs plus a generated index.html to a web
#' server via scp, and optionally emails recipients a link and (in a
#' separate message) the password.
#'
#' @param reports named character vector. Names are file basenames
#' (without extension); values are descriptions shown in the index.
#' @param minutes like \code{reports}, for meeting minutes.
#' @param title character. Page heading / email subject.
#' @param server character. Host that receives the files via scp.
#' @param path character. Directory on \code{server}; its last component
#' is assumed to also be the URL path under \code{http://server/}.
#' @param extension character. Extension appended to each report name.
#' @param upload logical. Build the index and scp the files.
#' @param email logical. Send the notification email(s).
#' @param uid character. Username quoted in the email body.
#' @param passwd character. If supplied, sent in a second follow-up email.
#' @param to character vector. Primary recipients.
#' @param cc character vector. Carbon-copy recipients.
#' @param bcc character vector. Blind-carbon-copy recipients.
#' @param sig character vector. Signature lines appended to the message.
#' @param hardcopies logical. Mention hard copies in the email body.
#' @param verbose logical. Print each mail command after running it.
#' @param mailer character. \sQuote{mail} or \sQuote{kmail}.
#' @param extra character. Extra text appended to the message body.
#' @return invisible NULL; called entirely for its side effects.
#' @export
publishPdf <- function(reports, minutes=NULL, title, server, path, extension="pdf",
                       upload=TRUE, email=FALSE, uid=NULL, passwd=NULL,
                       to=NULL, cc=NULL, bcc=NULL, sig=NULL,
                       hardcopies=TRUE, verbose=TRUE,
                       mailer=c('mail','kmail'), extra=NULL) {
  ## E.g. publishPdf(c(report='Closed Meeting Report',
  ##                   Oreport='Open Meeting Report'),title='My Project',
  ##                 server='myserver.edu', path='/home/www/html/myproject')
  ## Be sure to put something like export REPLYTO=foo@place.edu in ~/.bashrc
  ## if using mailer='mail'
  mailer <- match.arg(mailer)
  # kmail reads the body from a file (real newlines); 'mail' receives the
  # body via echo, which interprets the two-character sequence \n.
  nl <- ifelse(mailer=='kmail','\n','\\n')
  if(upload) {
    # Stage index.html plus copies of the report files in a temp directory.
    f <- tempfile()
    if(file.exists(f) && !file.info(f)$isdir) {
      file.remove(f)
    }
    dir.create(f, recursive=TRUE)
    if (extension=="") {sep=""} else {sep="."}
    rn <- paste(names(c(reports,minutes)), extension, sep=sep)
    paths <- file.path(f, c('index.html', basename(rn)))
    info <- file.info(rn)[,c('size','mtime')]
    cat('<html><body bgcolor=white>',
        paste('<h2>', title, '</h2>', sep=''),
        sep='\n', file=paths[1])
    # Index table: one row per file with size, creation date and description.
    i <- with(info, data.frame(Bytes=size, 'Date Created'=mtime,
                               Description=c(reports,minutes),
                               row.names=basename(row.names(info)),
                               check.names=FALSE))
    # NOTE(review): html() is presumably Hmisc::html - confirm.
    z <- html(i, file=paths[1], append=TRUE, link=basename(rn), linkCol='Name',
              linkType='href')
    file.copy(rn, paths[-1], overwrite=TRUE)
    # Make the staged files group-writable / world-unreadable, then upload.
    system(paste('chmod u=rw,g=rw,o=', paste(shQuote(paths), collapse=' ')))
    system(paste('scp ', paste(shQuote(paths), collapse=' '), ' ', server, ':', path, sep=''))
    #file.remove(paths, f)
  }
  if(email) {
    # The last component of 'path' is assumed to be the public URL directory.
    url <- strsplit(path, '/')[[1]]
    url <- url[length(url)]
    url <- paste('http://', server, '/', url, sep='')
    # Assemble the notification body; NULL pieces (hardcopies/extra) vanish.
    cmd <- paste(
      if(length(c(reports,minutes)) ==1) {
        'The following document has'
      } else {
        'The following documents have'
      },
      ' been placed or updated on a secure web page:',nl,#nl,
      paste(c(reports,minutes), collapse=nl), nl, nl,
      'Point your browser to ', url, #nl,
      ' and use the username ', uid,
      ' and the password that will be in the next email. ',
      'For accuracy, copy the password from the e-mail and',
      ' paste it in the proper field in your browser.',nl,nl,
      'Please confirm your ability to open the pdf files within 24 hours by replying to this message.',nl,nl,
      if(hardcopies) {
        'I will bring final hard copies to the meeting.'
      },
      if(length(extra)) {
        paste(nl,nl, extra,sep='')
      },
      sep='')
    if(length(sig)) {
      sig <- paste(sig, collapse=nl)
      cmd <- paste(cmd, nl, '----------', nl, sig, sep='')
    }
    if(mailer=='kmail') {
      # kmail: body from temp file; recipient lists quoted as single strings.
      tf <- tempfile()
      cat(cmd, file=tf)
      to <- paste('"', paste(to, collapse=','), '"', sep='')
      if(length(cc)) {
        cc <- paste(' -c "', paste(cc, collapse=','),'"',sep='')
      }
      if(length(bcc)) {
        bcc <- paste(' -b "', paste(bcc, collapse=','),'"',sep='')
      }
    } else {
      # mail: one -c / -b flag per recipient.
      to <- paste(to, collapse=' ')
      if(length(cc)) {
        cc <- paste(paste(' -c', cc), collapse='')
      }
      if(length(bcc)) {
        bcc <- paste(paste(' -b', bcc),collapse='')
      }
    }
    cmd <- if(mailer=='kmail') {
      paste('kmail -s "', title, '"', cc,
            bcc, ' --msg ', tf, ' ', to, sep='')
    } else {
      paste('echo "', cmd, '" | mail -s "',
            title, ' Reports"', cc, bcc, ' ', to, sep='')
    }
    system(cmd)
    if(verbose) {
      cat('\n\nMail command sent:\n', cmd, '\n')
    }
    # NOTE(review): prn() looks like a leftover Hmisc debugging call that
    # echoes the password to the console - confirm whether intended.
    prn(passwd)
    if(length(passwd)) {
      # The password goes in a second, deliberately vague email.
      cmd <- if(mailer=='kmail') {
        paste('kmail -s "Additional information"', cc, bcc,
              ' --body "', passwd, '" ', to, sep='')
      } else {
        paste('echo ', passwd, ' | mail -s "Additional information"',
              cc, bcc, ' ', to, sep='')
      }
      system(cmd)
      if(verbose) {
        cat('\n\nMail command sent:\n', cmd, '\n')
      }
    }
  }
  invisible()
}
|
/R/Misc.R
|
no_license
|
harrelfe/rreport
|
R
| false
| false
| 14,627
|
r
|
#' Kaplan-Meier Estimates
#'
#' For two strata, estimates the standard error of the difference in two
#' Kaplan-Meier estimates at each value of times, and plots half-width
#' of confidence level for the difference, centered at the midpoint
#' of the survival estimates.
#'
#' Adds vertical segments to an existing plot; a graphics device with a
#' suitable coordinate system must already be open.
#'
#' @param fit survfit object. See \code{\link[rms]{survfit.formula}}.
#' @param times numeric vector. Time value for each record.
#' @param fun function. Function to plot estimates.
#' @param offset numeric. Offset value to apply to \sQuote{x} coordinate points.
#' @param lwd numeric. The line width, passed to \code{segments}.
#' @param lty numeric. The line type, passed to \code{segments}.
#' @export
#' @examples
#' set.seed(20)
#' time <- rep(365, 50)
#' event <- rbinom(50, 1, 1/3)
#' time[event == 1] <- sample(365, sum(event == 1), replace=TRUE)
#' trt <- sample(1:2, 50, replace=TRUE)
#' require('rms')
#' fit <- survfit.formula(Surv(time, event) ~ trt)
#' survplot.survfit(fit)
#' plotKmHalfCL(fit, time)
plotKmHalfCL <- function(fit, times, fun=function(x) x,
                          offset=0, lwd=0.5, lty=1) {
  s <- summary(fit, times=times)
  st <- s$strata
  lev <- levels(st)
  if(length(lev) != 2) {
    stop('only handles 2 strata')
  }
  ## survival estimates and standard errors for each stratum
  s1 <- s$surv[st==lev[1]]
  s2 <- s$surv[st==lev[2]]
  se1 <- s$std.err[st==lev[1]]
  se2 <- s$std.err[st==lev[2]]
  ## SE of the difference, treating the two strata as independent
  se.diff <- sqrt(se1^2 + se2^2)
  clhalf <- 1.96*se.diff     # width of an approximate 0.95 CL for the difference
  midpt <- (s1 + s2)/2
  ## one vertical segment per time point, centered at the midpoint of the two
  ## curves; segments() is vectorized, replacing the original per-index loop
  segments(x0 = offset + times, y0 = fun(midpt - clhalf/2),
           x1 = offset + times, y1 = fun(midpt + clhalf/2),
           lwd = lwd, lty = lty, col = gray(0.7))
  invisible()
}
#' Set mfrow Parameter
#'
#' Compute and set a good \code{par("mfrow")} given the
#' number of figures to plot.
#'
#' \code{trellis} and \code{small} may not both be specified as \sQuote{TRUE}.
#' The layout is only changed when more than one figure is requested and no
#' multi-figure layout is already in effect.
#'
#' @param n numeric. Total number of figures to place in layout.
#' @param trellis logical. Set to \sQuote{TRUE} when a \sQuote{trellis} plot
#' is requested.
#' @param small logical. Set to \sQuote{TRUE} if the plot area should be
#' smaller to accomodate many plots.
#' @return return numeric vector.
#' If \code{trellis = TRUE} the suggested \sQuote{mfrow} is returned.
#' Otherwise the original \sQuote{mfrow} is returned invisibly.
#' @export
#' @examples
#' oldmfrow <- mfrowSet(8)
mfrowSet <- function(n, trellis=FALSE, small=FALSE) {
  if(small && trellis) stop('may not specify small=T when trellis=T')
  ## remember the current layout so callers can restore it later
  omf <- mf <- if(trellis) NULL else par('mfrow')
  if(length(mf) == 0) mf <- c(1,1)
  ## scalar condition: use && (the original used the vectorized &)
  if(n > 1 && max(mf) == 1) {
    if(small) {
      ## compact layouts allowing up to 48 panels; pick the first threshold
      ## that accommodates n (replaces the original long if/else chain)
      breaks  <- c(4, 6, 12, 16, 20, 24, 25, 30, 36, 42, Inf)
      layouts <- list(c(2,2), c(2,3), c(3,4), c(4,4), c(4,5), c(4,6),
                      c(5,5), c(5,6), c(6,6), c(6,7), c(6,8))
      mf <- layouts[[which(n <= breaks)[1]]]
    } else {
      breaks  <- c(4, 6, 9, Inf)
      layouts <- list(c(2,2), c(2,3), c(3,3), c(4,3))
      mf <- layouts[[which(n <= breaks)[1]]]
      ## special case preserved from the original implementation
      if(n > 12 && n <= 16) {
        mf <- c(4,4)
      }
    }
    if(!trellis) {
      par(mfrow=mf)
    }
  }
  if(trellis) {
    mf
  } else {
    invisible(omf)
  }
}
#' Put Figure
#'
#' Include a generated figure within a LaTeX document.
#'
#' Writes a \code{figure} environment (with optional caption and label) to the
#' panel's \sQuote{.tex} file.  Does nothing when the graphics type option is
#' \sQuote{interactive}.
#'
#' @param panel character. Panel name.
#' @param name character. Name for figure.
#' @param caption character. Short caption for figure.
#' @param longcaption character. Long caption for figure.
#' @param append logical. If \sQuote{TRUE} output will be appended instead of overwritten.
#' @export
putFig <- function(panel, name, caption=NULL, longcaption=NULL, append=TRUE) {
  gtype <- getOption('rreport.gtype')
  if(gtype=='interactive') {
    return(invisible())
  }
  ## dots are unsafe in LaTeX file and label names; translate them to dashes
  texfile <- file.path(TexDirName(),
                       paste(translate(panel, '.', '-'), '.tex', sep=''))
  figname <- translate(name, '.', '-')
  ## long captions get the short caption as the list-of-figures entry;
  ## a label is emitted whenever a short caption exists
  cap <- ""
  if(length(longcaption)) {
    cap <- sprintf("\\caption[%s]{%s}\n", caption, longcaption)
  } else if(length(caption)) {
    cap <- sprintf("\\caption{%s}\n", caption)
  }
  lab <- if(length(caption)) sprintf("\\label{fig:%s}\n", figname) else ""
  tex <- sprintf("\\begin{figure}[hbp!]\\leavevmode\\centerline{\\includegraphics{%s%s}}\n%s%s\\end{figure}\n",
                 figname, paste('.', gtype, sep=''), cap, lab)
  cat(tex, file=texfile, append=append)
  invisible()
}
#' Plot Initialization
#'
#' Open a graphics device for the report.  The device type is taken from
#' \code{.Options$rreport.gtype}; any other value opens no device.
#'
#' @param file character. Character string specifying file prefix.
#' @param \dots Arguments to be passed to \code{\link[Hmisc]{setps}} or \code{\link[Hmisc]{setpdf}}.
#' @export
#' @seealso \code{\link[Hmisc]{ps.slide}}
startPlot <- function(file, ...) {
  fname <- translate(file, '.', '-')
  ## dispatch on the configured graphics type; unmatched values (e.g.
  ## 'interactive') fall through and open nothing
  switch(getOption('rreport.gtype'),
         pdf = {
           options(setpdfPrefix=file.path('pdf', ''))
           setpdf(fname, ..., type='char')
         },
         ps = {
           options(setpsPrefix=file.path('ps', ''))
           setps(fname, ..., type='char')
         })
  invisible()
}
#' @rdname startPlot
#' @export
endPlot <- function() {
  ## close the device opened by startPlot() unless plotting interactively
  if(getOption('rreport.gtype') != 'interactive') dev.off()
  invisible()
}
#' Combine Equal
#'
#' Given a contingency table of counts, combine factors with equal counts.
#'
#' Factor names will be pasted together to make new names. A code and definition will be generated
#' if the new name should exceed \code{maxChar}.
#'
#' Note: when \code{x} contains no duplicated counts, the input is returned
#' unchanged (i.e. \emph{not} wrapped in a list); this asymmetric return is
#' preserved for backward compatibility.
#'
#' @param x numeric. Contingency table or matrix of names and counts, see \code{\link[base]{table}}.
#' @param maxChar numeric. Maximum length of character string. Names exceeding this will be replaced with a letter-code.
#' @return a list with three elements
#' \item{x}{Named vector of code frequencies. The name corresponds to the code.}
#'
#' \item{codes}{Character vector of alpha-code labels.}
#'
#' \item{defs}{Character vector of code definitions.}
#'
#' @export
#' @examples
#' combineEqual(table(rep(991:1010, times=rep(1:4, each=5))))
#' combineEqual(table(rep(991:1010, times=rep(1:4, each=5))), maxChar=10)
combineEqual <- function(x, maxChar=24) {
  xorig <- x
  if(is.matrix(x)) {
    ## collapse each matrix column into one comparable string of counts
    x <- apply(x, 2, paste, collapse=',')
  }
  if(!any(duplicated(x))) {
    ## nothing to combine; return the input as-is (historical behavior)
    return(xorig)
  }
  ## group the factor names by identical count (or count pattern)
  z <- split(names(x), x)
  v <- if(is.matrix(xorig)) {
    names(z)
  } else {
    as.numeric(names(z))
  }
  ## combined label per group (vectorized; the original grew vectors with
  ## c() inside a 1:length() loop)
  combined <- unname(vapply(z, paste, character(1), collapse=', '))
  long <- nchar(combined) > maxChar
  ## overly long labels are replaced by "(a)", "(b)", ... letter codes
  all.letters <- c(letters, LETTERS)
  codes <- paste('(', all.letters[seq_len(sum(long))], ')', sep='')
  defs <- combined[long]
  nam <- combined
  nam[long] <- codes
  names(v) <- nam
  if(is.matrix(xorig)) {
    ## re-expand the collapsed count strings into a matrix with the new names
    v <- matrix(as.numeric(unlist(strsplit(v, ','))), ncol=length(v),
                dimnames=list(dimnames(xorig)[[1]], nam))
  }
  list(x=v, codes=codes, defs=defs)
}
#' Make Treatment Key
#'
#' Use treatment levels to generate treatment key in LaTeX.
#'
#' Typically will be called with \code{paramFile} missing as the filename
#' will be generated by package options.
#'
#' @param tlevels vector. unique treatment levels, expected to have length two
#' @param paramFile character. params filename
#' @param append logical. If \sQuote{TRUE} output will be appended to \sQuote{file},
#' otherwise, contents will be overwritten
#' @export
#' @examples
#' makeTreatKey(c('A', 'B'), paramFile='') # prints to standard output
makeTreatKey <- function(tlevels, paramFile, append=FALSE) {
  if(length(tlevels) != 2) {
    stop('expected two levels of treatment')
  }
  if(missing(paramFile)) {
    paramFile <- paramTexFile()
  }
  ## \treatkey draws a thin solid rule for level 1 and a thicker grey rule
  ## for level 2
  key <- paste('\\def\\treatkey{', tlevels[1],
               ':\\rule[.05in]{.25in}{.5pt}; ', tlevels[2],
               ':\\textcolor[gray]{0.7}{\\rule[.05in]{.25in}{1.25pt}}.}\n',
               sep='')
  cat(key, file=paramFile, append=append)
  invisible()
}
#' Put Params
#'
#' Define parameters and provide LaTeX formatting.
#'
#' Each parameter becomes a LaTeX \code{\\def} macro in the parameter file.
#' Called with no parameters, a header line is written (overwriting the file).
#'
#' @param \dots list of name-value pairs. parameter names and their associated formats
#' @param paramFile character. params filename
#' @export
#' @examples
#' putparams(go=1, fish='blue', paramFile='') #prints to standard output
putparams <- function(..., paramFile) {
  if(missing(paramFile)) {
    paramFile <- paramTexFile()
  }
  params <- list(...)
  if(length(params) == 0) {
    ## no parameters: just (re)write the version-header line
    cat('% $Id$\n', file=paramFile)
  } else {
    ## one \def per parameter, appended so repeated calls accumulate
    for(nm in names(params)) {
      cat('\\def\\', nm, '{', format(params[[nm]]), '}\n',
          sep='', file=paramFile, append=TRUE)
    }
  }
  invisible()
}
#' PS-to-PDF Directory
#'
#' Used if want to create PS files and later convert all to PDF
#'
#' Converts every \sQuote{ps/*.ps} file to \sQuote{pdf/*.pdf} with the
#' \sQuote{epstopdf} command, skipping targets that are already newer than
#' their source.
#'
#' @export
dirps2pdf <- function() {
  sources <- dir('ps/', pattern='\\.ps$')
  targets <- sub('ps$', 'pdf', sources)
  for(k in seq_along(sources)) {
    psname  <- file.path('ps', sources[k])
    pdfname <- file.path('pdf', targets[k])
    ## convert only when the pdf is missing (mtime NA) or older than the ps
    out.time <- file.info(pdfname)$mtime
    if(is.na(out.time) || out.time < file.info(psname)$mtime) {
      cat('Converting file:', sources[k], '\n')
      system(sprintf("epstopdf %s -o %s", shQuote(psname), shQuote(pdfname)))
    }
  }
  invisible()
}
#' Publish PDF
#'
#' Upload a set of report files (plus a generated HTML index page) to a web
#' server via \code{scp} and, optionally, announce them by email.
#'
#' When \code{upload=TRUE} an \sQuote{index.html} listing file size, date and
#' description is built in a temporary directory, the report files are copied
#' alongside it, and everything is transferred with \code{scp} to
#' \code{server:path}.  When \code{email=TRUE} a notification message is
#' assembled and sent through the chosen \code{mailer}; if \code{passwd} is
#' given it is sent in a separate, second message.
#'
#' @param reports named character vector; names are the file base names
#'   (without extension), values are human-readable descriptions shown in the
#'   HTML index and the email body
#' @param minutes optional named character vector with the same structure as
#'   \code{reports}, appended to it
#' @param title character. Used for the HTML page heading and the mail subject
#' @param server character. Host name used for \code{scp} and to build the URL
#' @param path character. Remote directory; its last component becomes the URL
#'   path component
#' @param extension character. Extension appended to each report name
#'   (default \code{"pdf"}; \code{""} means no extension is appended)
#' @param upload logical. Build the index page and \code{scp} the files?
#' @param email logical. Send the notification email?
#' @param uid character. Username quoted in the email body
#' @param passwd character. If non-empty, sent to the recipients in a second
#'   email titled \dQuote{Additional information}
#' @param to character vector of recipient addresses
#' @param cc character vector of carbon-copy addresses
#' @param bcc character vector of blind-carbon-copy addresses
#' @param sig character vector of signature lines appended after a separator
#' @param hardcopies logical. If \sQuote{TRUE} a sentence about bringing hard
#'   copies to the meeting is added to the email body
#' @param verbose logical. If \sQuote{TRUE} each mail command is printed after
#'   it is run
#' @param mailer character. Mail client to use: \sQuote{mail} or \sQuote{kmail}
#' @param extra character. Additional text appended to the email body
#' @return invisible \code{NULL}; called for its side effects
#' @export
#' @examples
#' 1
publishPdf <- function(reports, minutes=NULL, title, server, path, extension="pdf",
                       upload=TRUE, email=FALSE, uid=NULL, passwd=NULL,
                       to=NULL, cc=NULL, bcc=NULL, sig=NULL,
                       hardcopies=TRUE, verbose=TRUE,
                       mailer=c('mail','kmail'), extra=NULL) {
    ## E.g. publishPdf(c(report='Closed Meeting Report',
    ## Oreport='Open Meeting Report'),title='My Project',
    ## server='myserver.edu', path='/home/www/html/myproject')
    ## Be sure to put something like export REPLYTO=foo@place.edu in ~/.bashrc
    ## if using mailer='mail'
    mailer <- match.arg(mailer)
    ## kmail is handed a literal message file so real newlines are fine; the
    ## 'mail' body is built inside an echo command, so use escaped "\n" there
    nl <- ifelse(mailer=='kmail','\n','\\n')
    if(upload) {
        ## stage index.html and the report files in a fresh temp directory
        f <- tempfile()
        if(file.exists(f) && !file.info(f)$isdir) {
            file.remove(f)
        }
        dir.create(f, recursive=TRUE)
        if (extension=="") {sep=""} else {sep="."}
        rn <- paste(names(c(reports,minutes)), extension, sep=sep)
        paths <- file.path(f, c('index.html', basename(rn)))
        info <- file.info(rn)[,c('size','mtime')]
        ## minimal HTML index: heading then a table of size/date/description
        cat('<html><body bgcolor=white>',
            paste('<h2>', title, '</h2>', sep=''),
            sep='\n', file=paths[1])
        i <- with(info, data.frame(Bytes=size, 'Date Created'=mtime,
                                   Description=c(reports,minutes),
                                   row.names=basename(row.names(info)),
                                   check.names=FALSE))
        ## html() renders the table with hyperlinks to the report files
        ## (presumably Hmisc::html — confirm)
        z <- html(i, file=paths[1], append=TRUE, link=basename(rn), linkCol='Name',
                  linkType='href')
        file.copy(rn, paths[-1], overwrite=TRUE)
        ## owner/group read-write, no world permissions, before uploading
        system(paste('chmod u=rw,g=rw,o=', paste(shQuote(paths), collapse=' ')))
        system(paste('scp ', paste(shQuote(paths), collapse=' '), ' ', server, ':', path, sep=''))
        #file.remove(paths, f)
    }
    if(email) {
        ## announcement URL = http://<server>/<last component of path>
        url <- strsplit(path, '/')[[1]]
        url <- url[length(url)]
        url <- paste('http://', server, '/', url, sep='')
        cmd <- paste(
            if(length(c(reports,minutes)) ==1) {
                'The following document has'
            } else {
                'The following documents have'
            },
            ' been placed or updated on a secure web page:',nl,#nl,
            paste(c(reports,minutes), collapse=nl), nl, nl,
            'Point your browser to ', url, #nl,
            ' and use the username ', uid,
            ' and the password that will be in the next email. ',
            'For accuracy, copy the password from the e-mail and',
            ' paste it in the proper field in your browser.',nl,nl,
            'Please confirm your ability to open the pdf files within 24 hours by replying to this message.',nl,nl,
            if(hardcopies) {
                'I will bring final hard copies to the meeting.'
            },
            if(length(extra)) {
                paste(nl,nl, extra,sep='')
            },
            sep='')
        if(length(sig)) {
            sig <- paste(sig, collapse=nl)
            cmd <- paste(cmd, nl, '----------', nl, sig, sep='')
        }
        ## per-mailer recipient formatting: kmail wants one quoted,
        ## comma-separated list; 'mail' wants repeated -c/-b flags
        if(mailer=='kmail') {
            tf <- tempfile()
            cat(cmd, file=tf)
            to <- paste('"', paste(to, collapse=','), '"', sep='')
            if(length(cc)) {
                cc <- paste(' -c "', paste(cc, collapse=','),'"',sep='')
            }
            if(length(bcc)) {
                bcc <- paste(' -b "', paste(bcc, collapse=','),'"',sep='')
            }
        } else {
            to <- paste(to, collapse=' ')
            if(length(cc)) {
                cc <- paste(paste(' -c', cc), collapse='')
            }
            if(length(bcc)) {
                bcc <- paste(paste(' -b', bcc),collapse='')
            }
        }
        cmd <- if(mailer=='kmail') {
            paste('kmail -s "', title, '"', cc,
                  bcc, ' --msg ', tf, ' ', to, sep='')
        } else {
            paste('echo "', cmd, '" | mail -s "',
                  title, ' Reports"', cc, bcc, ' ', to, sep='')
        }
        system(cmd)
        if(verbose) {
            cat('\n\nMail command sent:\n', cmd, '\n')
        }
        ## NOTE(review): prn() prints the password to the console; looks like a
        ## leftover debugging aid — confirm before removing
        prn(passwd)
        if(length(passwd)) {
            ## send the password in a separate message from the announcement
            cmd <- if(mailer=='kmail') {
                paste('kmail -s "Additional information"', cc, bcc,
                      ' --body "', passwd, '" ', to, sep='')
            } else {
                paste('echo ', passwd, ' | mail -s "Additional information"',
                      cc, bcc, ' ', to, sep='')
            }
            system(cmd)
            if(verbose) {
                cat('\n\nMail command sent:\n', cmd, '\n')
            }
        }
    }
    invisible()
}
|
#' Simple chronometer.
#' Has a little display and avoids wrapping everything in `system.time()`.
#' @noRd
chrono <- function(start, display = TRUE) {
  if(missing(start)) {
    ## no start point given: return the current clock to time against later
    return(proc.time())
  }
  ## elapsed wall-clock seconds since `start`
  elapsed <- as.numeric((proc.time() - start)[3])
  if(display) message("Execution time: ", round(elapsed, 2), " seconds.")
  elapsed
}
|
/R/chrono.R
|
no_license
|
mlysy/msde
|
R
| false
| false
| 357
|
r
|
#' Simple chronometer.
#' Has a little display and avoids wrapping everything in `system.time()`.
#' @noRd
chrono <- function(start, display = TRUE) {
  if(missing(start)) {
    ## no start point given: return the current clock to time against later
    return(proc.time())
  }
  ## elapsed wall-clock seconds since `start`
  elapsed <- as.numeric((proc.time() - start)[3])
  if(display) message("Execution time: ", round(elapsed, 2), " seconds.")
  elapsed
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rehydratoR.R
\name{rehydratoR}
\alias{rehydratoR}
\title{Get tweets for given statuses (tweet IDs).}
\usage{
rehydratoR(consumer_key, consumer_secret, access_token, access_secret,
status_ids, base_path = NULL, group_start = 1)
}
\arguments{
\item{consumer_key}{Consumer Key (API Key) from https://apps.twitter.com/}
\item{consumer_secret}{Consumer Secret (API Secret) from https://apps.twitter.com/}
\item{access_token}{Access Token from the https://apps.twitter.com/}
\item{access_secret}{Access Token Secret from https://apps.twitter.com/}
\item{status_ids}{data frame of tweet IDs}
\item{base_path}{optional. The base path to use to save the tweets. If set, the function will
write the tweets to files instead of returning the tweets as a variable.}
\item{group_start}{is the group to start at after splitting list of ids. Is useful in case the download was interrupted.}
}
\value{
A tibble of tweets data if base_path is not defined. Nothing is returned if base_path is defined
but the tweets are saved to a file for about every 90,000 tweets.
}
\description{
Get tweets for given statuses (tweet IDs).
}
\examples{
\dontrun{
# Get Twitter api keys from https://apps.twitter.com
consumerKey <- ''
consumerSecret <- ''
accessToken <- ''
accessTokenSecret <- ''
# Read tweet ids
tweet_ids <- data.frame(read.table(tweet_ids_file, numerals = 'no.loss'))
# Download tweets
tweets <- rehydratoR(consumerKey, consumerSecret, accessToken, accessTokenSecret, tweet_ids)
}
}
|
/man/rehydratoR.Rd
|
permissive
|
kevincoakley/rehydratoR
|
R
| false
| true
| 1,563
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rehydratoR.R
\name{rehydratoR}
\alias{rehydratoR}
\title{Get tweets for given statuses (tweet IDs).}
\usage{
rehydratoR(consumer_key, consumer_secret, access_token, access_secret,
status_ids, base_path = NULL, group_start = 1)
}
\arguments{
\item{consumer_key}{Consumer Key (API Key) from https://apps.twitter.com/}
\item{consumer_secret}{Consumer Secret (API Secret) from https://apps.twitter.com/}
\item{access_token}{Access Token from the https://apps.twitter.com/}
\item{access_secret}{Access Token Secret from https://apps.twitter.com/}
\item{status_ids}{data frame of tweet IDs}
\item{base_path}{optional. The base path to use to save the tweets. If set, the function will
write the tweets to files instead of returning the tweets as a variable.}
\item{group_start}{is the group to start at after splitting list of ids. Is useful in case the download was interrupted.}
}
\value{
A tibble of tweets data if base_path is not defined. Nothing is returned if base_path is defined
but the tweets are saved to a file for about every 90,000 tweets.
}
\description{
Get tweets for given statuses (tweet IDs).
}
\examples{
\dontrun{
# Get Twitter api keys from https://apps.twitter.com
consumerKey <- ''
consumerSecret <- ''
accessToken <- ''
accessTokenSecret <- ''
# Read tweet ids
tweet_ids <- data.frame(read.table(tweet_ids_file, numerals = 'no.loss'))
# Download tweets
tweets <- rehydratoR(consumerKey, consumerSecret, accessToken, accessTokenSecret, tweet_ids)
}
}
|
## Functions to cache a matrix together with its (lazily computed) inverse.
##
## Usage:
##   x <- makeCacheMatrix()
##   x$setmat(y)   # store matrix y; any cached inverse is reset
##   x$getmat()    # retrieve the stored matrix
##
## Each call to setmat() resets the cached inverse to an all-NA 1x1 matrix
## (the "not yet computed" sentinel used by cacheSolve()).
makeCacheMatrix <- function(x = matrix()) {
  inv.cache <- matrix()
  list(
    setmat = function(y) {
      x <<- y
      inv.cache <<- matrix()   # invalidate any previously cached inverse
    },
    getmat = function() x,
    setinv = function(inv) inv.cache <<- inv,
    getinv = function() inv.cache
  )
}
## Return the inverse of the matrix cached in 'x' (as made by makeCacheMatrix).
## A previously computed inverse (anything that is not the all-NA sentinel) is
## reused; otherwise the inverse is computed with solve() and stored back via
## x$setinv() for subsequent calls.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!all(is.na(cached))) {
    message("get the result from cache")
    return(cached)
  }
  inv <- solve(x$getmat())
  x$setinv(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
hsalaheldin/ProgrammingAssignment2
|
R
| false
| false
| 1,399
|
r
|
## Functions to cache a matrix together with its (lazily computed) inverse.
##
## Usage:
##   x <- makeCacheMatrix()
##   x$setmat(y)   # store matrix y; any cached inverse is reset
##   x$getmat()    # retrieve the stored matrix
##
## Each call to setmat() resets the cached inverse to an all-NA 1x1 matrix
## (the "not yet computed" sentinel used by cacheSolve()).
makeCacheMatrix <- function(x = matrix()) {
  inv.cache <- matrix()
  list(
    setmat = function(y) {
      x <<- y
      inv.cache <<- matrix()   # invalidate any previously cached inverse
    },
    getmat = function() x,
    setinv = function(inv) inv.cache <<- inv,
    getinv = function() inv.cache
  )
}
## Return the inverse of the matrix cached in 'x' (as made by makeCacheMatrix).
## A previously computed inverse (anything that is not the all-NA sentinel) is
## reused; otherwise the inverse is computed with solve() and stored back via
## x$setinv() for subsequent calls.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!all(is.na(cached))) {
    message("get the result from cache")
    return(cached)
  }
  inv <- solve(x$getmat())
  x$setinv(inv)
  inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WQcompos.R
\name{WQcompos}
\alias{WQcompos}
\title{Combine multiple samples into one composite sample (loads and concentrations)}
\usage{
WQcompos(df.samples, sampleID, parms, volume = "Evolume", bdate, edate, codes)
}
\arguments{
\item{df.samples}{dataframe with sample results and volumes}
\item{sampleID}{character variable name for the IDs for compositing samples (multiple samples will have the same ID)}
\item{parms}{vector Parameters to composite}
\item{volume}{character variable name for the volume, defaults to "Evolume"}
\item{bdate}{character variable name for the beginning of event times for each sample}
\item{edate}{character variable name for the ending of event times for each sample}
\item{codes}{a vector of character variable names for the values that should be pasted together into one string when combining samples (lab IDs are common here)}
}
\value{
IDdf dataframe
}
\description{
function to composite samples weighted by the associated volume
the result is a volume-weighted concentration and summation of volumes
}
\examples{
flowData <- flowData
FIBdata <- FIBdata
FIBcomposData <- Hydrovol(dfQ=flowData,Q="Q",time="pdate",
df.dates=FIBdata,bdate="SSdate",edate="SEdate")
WQcompos(df.samples=FIBcomposData,sampleID="SampleID",
parms=c("Ecoli","Enterococci"), volume="event.vol",
bdate="SSdate",edate="SEdate",codes="SampleID")
}
|
/man/WQcompos.Rd
|
permissive
|
joshuaeveleth/USGSHydroTools
|
R
| false
| true
| 1,464
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WQcompos.R
\name{WQcompos}
\alias{WQcompos}
\title{Combine multiple samples into one composite sample (loads and concentrations)}
\usage{
WQcompos(df.samples, sampleID, parms, volume = "Evolume", bdate, edate, codes)
}
\arguments{
\item{df.samples}{dataframe with sample results and volumes}
\item{sampleID}{character variable name for the IDs for compositing samples (multiple samples will have the same ID)}
\item{parms}{vector Parameters to composite}
\item{volume}{character variable name for the volume, defaults to "Evolume"}
\item{bdate}{character variable name for the beginning of event times for each sample}
\item{edate}{character variable name for the ending of event times for each sample}
\item{codes}{a vector of character variable names for the values that should be pasted together into one string when combining samples (lab IDs are common here)}
}
\value{
IDdf dataframe
}
\description{
function to composite samples weighted by the associated volume
the result is a volume-weighted concentration and summation of volumes
}
\examples{
flowData <- flowData
FIBdata <- FIBdata
FIBcomposData <- Hydrovol(dfQ=flowData,Q="Q",time="pdate",
df.dates=FIBdata,bdate="SSdate",edate="SEdate")
WQcompos(df.samples=FIBcomposData,sampleID="SampleID",
parms=c("Ecoli","Enterococci"), volume="event.vol",
bdate="SSdate",edate="SEdate",codes="SampleID")
}
|
## Run the SARIMAX model-selection pipeline for one country under three
## different model-identification settings, saving each run under a distinct
## result suffix.  get_arima_results() (and presumably tic()/toc() — confirm)
## are defined in R/utils_av.R.
source('./R/utils_av.R')
country_name <- "Bolivia"
# this is the "stata" option (demetra + always include a constant even if D+d = 2). Approx 20 sec in my PC. ARG 30 sec
tic()
arima_res <- get_arima_results(country_name = country_name,
                               use_demetra = TRUE,
                               use_dm_force_constant = TRUE,
                               arima_res_suffix = "_dm_force_const",
                               data_is_log_log = TRUE)
toc()
# this is the "demetra" option (demetra, which does not include a constant when D+d = 2). Approx 20 sec in my PC
tic()
arima_res <- get_arima_results(country_name = country_name,
                               use_demetra = TRUE,
                               use_dm_force_constant = FALSE,
                               arima_res_suffix = "_dm_do_not_force_const",
                               data_is_log_log = TRUE)
toc()
# this uses auto.arima for everything. Approx 220 sec in my PC, ARG 480 sec.
tic()
arima_res <- get_arima_results(country_name = country_name,
                               use_demetra = FALSE,
                               use_dm_force_constant = FALSE,
                               arima_res_suffix = "_auto_r",
                               data_is_log_log = TRUE)
toc()
# if you are curious these are the default values (h_max, n_cv etc)
# get_arima_results <- function(country_name, read_results = FALSE,
#                               data_folder = "./data/excel/",
#                               arima_rds_path = "data/sarimax_objects_",
#                               h_max = 8, final_ext_horizon = c(2019, 12),
#                               train_span = 16, number_of_cv = 8,
#                               test_length = 8, use_demetra = TRUE,
#                               do_auto = FALSE, use_dm_force_constant = FALSE,
#                               is_log_log = TRUE, lambda_0_in_auto = FALSE,
#                               mean_logical_in_auto = TRUE, max_x_lag = 2,
#                               external_data_path = "./data/external/external.xlsx",
#                               arima_res_suffix = "foo")
#
#
|
/R/use_new_get_arima_results.R
|
no_license
|
Allisterh/sarimax_var_search
|
R
| false
| false
| 2,227
|
r
|
## Run the SARIMAX model-selection pipeline for one country under three
## different model-identification settings, saving each run under a distinct
## result suffix.  get_arima_results() (and presumably tic()/toc() — confirm)
## are defined in R/utils_av.R.
source('./R/utils_av.R')
country_name <- "Bolivia"
# this is the "stata" option (demetra + always include a constant even if D+d = 2). Approx 20 sec in my PC. ARG 30 sec
tic()
arima_res <- get_arima_results(country_name = country_name,
                               use_demetra = TRUE,
                               use_dm_force_constant = TRUE,
                               arima_res_suffix = "_dm_force_const",
                               data_is_log_log = TRUE)
toc()
# this is the "demetra" option (demetra, which does not include a constant when D+d = 2). Approx 20 sec in my PC
tic()
arima_res <- get_arima_results(country_name = country_name,
                               use_demetra = TRUE,
                               use_dm_force_constant = FALSE,
                               arima_res_suffix = "_dm_do_not_force_const",
                               data_is_log_log = TRUE)
toc()
# this uses auto.arima for everything. Approx 220 sec in my PC, ARG 480 sec.
tic()
arima_res <- get_arima_results(country_name = country_name,
                               use_demetra = FALSE,
                               use_dm_force_constant = FALSE,
                               arima_res_suffix = "_auto_r",
                               data_is_log_log = TRUE)
toc()
# if you are curious these are the default values (h_max, n_cv etc)
# get_arima_results <- function(country_name, read_results = FALSE,
#                               data_folder = "./data/excel/",
#                               arima_rds_path = "data/sarimax_objects_",
#                               h_max = 8, final_ext_horizon = c(2019, 12),
#                               train_span = 16, number_of_cv = 8,
#                               test_length = 8, use_demetra = TRUE,
#                               do_auto = FALSE, use_dm_force_constant = FALSE,
#                               is_log_log = TRUE, lambda_0_in_auto = FALSE,
#                               mean_logical_in_auto = TRUE, max_x_lag = 2,
#                               external_data_path = "./data/external/external.xlsx",
#                               arima_res_suffix = "foo")
#
#
|
library(tidyverse)
library(cowplot)
library(ggplot2)
library(funk)
library(scales)
library(here)
library(piecewiseSEM)
library(lme4)
setwd(here('grazing-gradients'))
## get diversity
# data load
load("data/wio_herb_benthic_merged.Rdata")
# estimate mean biomass per site per FG
h <- pred %>% filter(FG == 'Herbivore Scraper') %>%
## sum biomass per FG in each transect
group_by(dataset, reef, site, transect,
unique.id, species) %>%
summarise(biom = sum(biomass.kgha)) %>%
## mean species biomass across transects at each site
group_by(unique.id, species) %>%
summarise(biom = mean(biom))
## change names for colnames
com.mat<-tidyr::spread(h, species, biom)
com.mat<-janitor::clean_names(com.mat)
rows<-com.mat[,1]
## drop cols
com.mat<-com.mat[, -c(1)]
## fill NAs
com.mat[is.na(com.mat)]<-0
## matrix format
com.mat<-as.matrix(com.mat)
dim(com.mat)
## estimate diversity
library(vegan)
div<-data.frame(div=diversity(com.mat),
richness=specnumber(com.mat),
unique.id = rows)
div$J <- div$div/log(div$richness)
# save mean sizes
sizes<-pred %>% filter(FG == 'Herbivore Scraper') %>%
## sum biomass per FG in each transect
group_by(dataset, reef, site, transect,
unique.id, species) %>%
summarise(size = mean(length.cm)) %>%
## mean species sizeass across transects at each site
group_by(unique.id) %>%
summarise(size = mean(size))
div$mean.size<-sizes$size
## scraping models
load("results/models/scraping_model.Rdata")
rsquared(m.scraper)
# Marginal Conditional
# 0.1459465 0.6475017
## scraping data
load('results/models/scraper_function.Rdata')
h$resid<-resid(m.scrape)
## attach to t
h$simpson.diversity<-div$div[match(h$unique.id, div$unique_id)] ## simpson is 1 - D.
h$sp.richness<-div$richness[match(h$unique.id, div$unique_id)]
h$evenness<-div$J[match(h$unique.id, div$unique_id)]
h$mean.size<-div$mean.size[match(h$unique.id, div$unique_id)]
## assign seychelles 2017 with mean complexity values for now - needs fixed
h$complexity[h$dataset == 'Seychelles' & h$date == 2017] <- mean(h$complexity)
## plot expected relationships
pdf(file='figures/explore/scraping_diversity.pdf', height =5 ,width=14)
g1<-ggplot(h, aes( sp.richness, scraping, col=dataset))+ geom_point() + theme(legend.position='none')
g2<-ggplot(h, aes( evenness, scraping, col=dataset))+ geom_point() +
theme(legend.position=c(0.6, 0.9), legend.title=element_blank())
g3<-ggplot(h, aes( mean.size, scraping, col=dataset))+ geom_point() +
theme(legend.position='none')
gridExtra::grid.arrange(g1,g2,g3, nrow=1)
dev.off()
## scale vars to keep covariate means = 0. This is helpful for comparing effect sizes when covariates are on different scales.
## (scale() also divides by the SD, so coefficients below are per-SD effects;
## each scaled column becomes a one-column matrix.)
h$hard.coral <- scale(h$hard.coral)
h$macroalgae <- scale(h$macroalgae)
h$complexity <- scale(h$complexity)
h$rubble <- scale(h$rubble)
h$substrate <- scale(h$substrate)
h$fish.biom <- scale(h$fish.biom)
h$simpson.diversity <- scale(h$simpson.diversity)
h$sp.richness <- scale(h$sp.richness)
h$evenness <- scale(h$evenness)
h$mean.size <- scale(h$mean.size)
## make dummy variables for the 3-level management factor
h$fish.dummy<-ifelse(h$management=='Fished', 1, 0)
h$pristine.dummy<-ifelse(h$management=='Unfished', 1, 0)
# we use 2 dummy variables for 3 levels (the remaining level is the baseline)
## Mixed model: do habitat + fishing + diversity covariates explain the scraping residuals?
m<-lmer(resid ~ hard.coral + macroalgae + rubble + substrate + complexity +
fish.biom + fish.dummy + pristine.dummy + ## fixed
evenness + sp.richness + mean.size +
(1 | dataset/reef) , ## random, nested = reefs within datasets
data = h)
summary(m)
rsquared(m)
#visreg::visreg(m)
sjPlot::plot_models(m, axis.lim=c(-0.5, 0.5), show.values = TRUE)   # coefficient (forest) plot
sjPlot::plot_model(m, type='pred', terms='mean.size')               # marginal prediction for mean.size
## residuals vs species richness, coloured by dataset
ggplot(h, aes( sp.richness, resid, col=dataset))+ geom_point() +
theme(legend.position='none')
## diversity-only model on the raw scraping response
m<-lmer(scraping ~ evenness + sp.richness + mean.size +
(1 | dataset/reef) , ## random, nested = reefs within datasets
data = h)
summary(m)
rsquared(m)
## for residuals: same diversity-only predictors, response = model residuals
m<-lmer(resid ~ evenness + sp.richness + mean.size +
(1 | dataset/reef) , ## random, nested = reefs within datasets
data = h)
summary(m)
rsquared(m)
## NOTE(review): par(mfrow=...) does not affect sjPlot (ggplot2-based) output;
## the next three calls each produce a separate plot.
par(mfrow=c(1,3))
sjPlot::plot_model(m, type='pred', terms='sp.richness')
sjPlot::plot_model(m, type='pred', terms='evenness')
sjPlot::plot_model(m, type='pred', terms='mean.size')
## refit without low diversity outliers
## (evenness is scaled above, so -2 means 2 SDs below the mean)
m.sub<-lmer(resid ~ evenness + sp.richness +
(1 | dataset/reef) , ## random, nested = reefs within datasets
data = h[!h$evenness< -2,])
summary(m.sub)
## negative evenness effect is not due to low diversity outliers
par(mfrow=c(2,2))
visreg::visreg(m.sub)
## biomass vs scraping on log-log axes, coloured by richness
## NOTE(review): assumes a `biom` column exists in h at this point -- verify.
ggplot(h, aes( biom, scraping, col=sp.richness))+ geom_point(alpha=0.8) + scale_x_log10() + scale_y_log10() +
scale_color_continuous(low='red', high='green')
with(h, plot(evenness, scraping))
## richness predictors? model species richness against habitat + fishing covariates
m<-lmer(sp.richness ~ hard.coral + macroalgae + rubble + substrate + complexity +
fish.biom + fish.dummy + pristine.dummy + ## fixed
(1 | dataset/reef) , ## random, nested = reefs within datasets
data = h)
summary(m)
par(mfrow=c(3,3))
visreg::visreg(m)
|
/scripts/models/04_scrape_residuals.R
|
no_license
|
jpwrobinson/grazing-gradients
|
R
| false
| false
| 5,229
|
r
|
library(tidyverse)
library(cowplot)
library(ggplot2)
library(funk)
library(scales)
library(here)
library(piecewiseSEM)
library(lme4)
setwd(here('grazing-gradients'))
## get diversity: build a site x species community matrix for scrapers,
## then compute diversity, richness, evenness and mean size per site.
# data load
load("data/wio_herb_benthic_merged.Rdata")   # provides `pred` (fish observations) -- TODO confirm contents
# estimate mean biomass per site per FG
h <- pred %>% filter(FG == 'Herbivore Scraper') %>%
## sum biomass per FG in each transect
group_by(dataset, reef, site, transect,
unique.id, species) %>%
summarise(biom = sum(biomass.kgha)) %>%
## mean species biomass across transects at each site
group_by(unique.id, species) %>%
summarise(biom = mean(biom))
## reshape to wide: one row per site, one column per species
com.mat<-tidyr::spread(h, species, biom)
com.mat<-janitor::clean_names(com.mat)   # note: renames `unique.id` to `unique_id`
rows<-com.mat[,1]   # keep the site ids (first column) before dropping them
## drop cols
com.mat<-com.mat[, -c(1)]
## fill NAs: absent species = zero biomass
com.mat[is.na(com.mat)]<-0
## matrix format
com.mat<-as.matrix(com.mat)
dim(com.mat)
## estimate diversity
library(vegan)
## NOTE(review): diversity() defaults to the Shannon index; downstream code
## labels this column `simpson.diversity` -- confirm which index is intended.
div<-data.frame(div=diversity(com.mat),
richness=specnumber(com.mat),
unique.id = rows)
## Pielou's evenness J = H'/log(S); Inf/NaN where richness == 1 (log(1) = 0)
div$J <- div$div/log(div$richness)
# save mean sizes per site
sizes<-pred %>% filter(FG == 'Herbivore Scraper') %>%
## mean length per species in each transect
group_by(dataset, reef, site, transect,
unique.id, species) %>%
summarise(size = mean(length.cm)) %>%
## mean species size across transects at each site
group_by(unique.id) %>%
summarise(size = mean(size))
## NOTE(review): positional assignment -- assumes div rows and sizes rows are
## sorted identically by site id; verify before trusting mean.size.
div$mean.size<-sizes$size
## scraping models: load previously fitted mixed models and attach the
## diversity metrics computed above to the site-level data frame `h`.
load("results/models/scraping_model.Rdata")   # provides m.scraper (fitted lmer) -- TODO confirm contents
rsquared(m.scraper)   # piecewiseSEM::rsquared -> marginal / conditional R2
# Marginal Conditional
# 0.1459465 0.6475017
## scraping data
## NOTE(review): this load() may also provide (and overwrite) `h` -- the columns
## used below (complexity, hard.coral, ...) are not in the `h` built above; verify.
load('results/models/scraper_function.Rdata')
h$resid<-resid(m.scrape)  # model residuals; assumes rows align with the fitting data -- TODO confirm
## attach diversity metrics to h, matched by site id
## NOTE(review): div's id column is `unique_id` (renamed by janitor::clean_names
## above), while h uses `unique.id` -- the match() calls rely on that; verify.
h$simpson.diversity<-div$div[match(h$unique.id, div$unique_id)] ## simpson is 1 - D.
## NOTE(review): vegan::diversity() defaults to the Shannon index, not Simpson -- confirm intended index.
h$sp.richness<-div$richness[match(h$unique.id, div$unique_id)]
h$evenness<-div$J[match(h$unique.id, div$unique_id)]      # Pielou's J = H'/log(S)
h$mean.size<-div$mean.size[match(h$unique.id, div$unique_id)]
## assign Seychelles 2017 the overall mean complexity for now - needs fixing
## NOTE(review): mean() has no na.rm=TRUE; if complexity contains NA this assigns NA -- confirm.
h$complexity[h$dataset == 'Seychelles' & h$date == 2017] <- mean(h$complexity)
## plot expected relationships between scraping and each diversity metric
pdf(file='figures/explore/scraping_diversity.pdf', height =5 ,width=14)
g1<-ggplot(h, aes( sp.richness, scraping, col=dataset))+ geom_point() + theme(legend.position='none')
g2<-ggplot(h, aes( evenness, scraping, col=dataset))+ geom_point() +
theme(legend.position=c(0.6, 0.9), legend.title=element_blank())
g3<-ggplot(h, aes( mean.size, scraping, col=dataset))+ geom_point() +
theme(legend.position='none')
gridExtra::grid.arrange(g1,g2,g3, nrow=1)   # three panels, one row
dev.off()
## scale vars to keep covariate means = 0. This is helpful for comparing effect sizes when covariates are on different scales.
## (scale() also divides by the SD, so coefficients below are per-SD effects;
## each scaled column becomes a one-column matrix.)
h$hard.coral <- scale(h$hard.coral)
h$macroalgae <- scale(h$macroalgae)
h$complexity <- scale(h$complexity)
h$rubble <- scale(h$rubble)
h$substrate <- scale(h$substrate)
h$fish.biom <- scale(h$fish.biom)
h$simpson.diversity <- scale(h$simpson.diversity)
h$sp.richness <- scale(h$sp.richness)
h$evenness <- scale(h$evenness)
h$mean.size <- scale(h$mean.size)
## make dummy variables for the 3-level management factor
h$fish.dummy<-ifelse(h$management=='Fished', 1, 0)
h$pristine.dummy<-ifelse(h$management=='Unfished', 1, 0)
# we use 2 dummy variables for 3 levels (the remaining level is the baseline)
## Mixed model: do habitat + fishing + diversity covariates explain the scraping residuals?
m<-lmer(resid ~ hard.coral + macroalgae + rubble + substrate + complexity +
fish.biom + fish.dummy + pristine.dummy + ## fixed
evenness + sp.richness + mean.size +
(1 | dataset/reef) , ## random, nested = reefs within datasets
data = h)
summary(m)
rsquared(m)
#visreg::visreg(m)
sjPlot::plot_models(m, axis.lim=c(-0.5, 0.5), show.values = TRUE)   # coefficient (forest) plot
sjPlot::plot_model(m, type='pred', terms='mean.size')               # marginal prediction for mean.size
## residuals vs species richness, coloured by dataset
ggplot(h, aes( sp.richness, resid, col=dataset))+ geom_point() +
theme(legend.position='none')
## diversity-only model on the raw scraping response
m<-lmer(scraping ~ evenness + sp.richness + mean.size +
(1 | dataset/reef) , ## random, nested = reefs within datasets
data = h)
summary(m)
rsquared(m)
## for residuals: same diversity-only predictors, response = model residuals
m<-lmer(resid ~ evenness + sp.richness + mean.size +
(1 | dataset/reef) , ## random, nested = reefs within datasets
data = h)
summary(m)
rsquared(m)
## NOTE(review): par(mfrow=...) does not affect sjPlot (ggplot2-based) output;
## the next three calls each produce a separate plot.
par(mfrow=c(1,3))
sjPlot::plot_model(m, type='pred', terms='sp.richness')
sjPlot::plot_model(m, type='pred', terms='evenness')
sjPlot::plot_model(m, type='pred', terms='mean.size')
## refit without low diversity outliers
## (evenness is scaled above, so -2 means 2 SDs below the mean)
m.sub<-lmer(resid ~ evenness + sp.richness +
(1 | dataset/reef) , ## random, nested = reefs within datasets
data = h[!h$evenness< -2,])
summary(m.sub)
## negative evenness effect is not due to low diversity outliers
par(mfrow=c(2,2))
visreg::visreg(m.sub)
## biomass vs scraping on log-log axes, coloured by richness
## NOTE(review): assumes a `biom` column exists in h at this point -- verify.
ggplot(h, aes( biom, scraping, col=sp.richness))+ geom_point(alpha=0.8) + scale_x_log10() + scale_y_log10() +
scale_color_continuous(low='red', high='green')
with(h, plot(evenness, scraping))
## richness predictors? model species richness against habitat + fishing covariates
m<-lmer(sp.richness ~ hard.coral + macroalgae + rubble + substrate + complexity +
fish.biom + fish.dummy + pristine.dummy + ## fixed
(1 | dataset/reef) , ## random, nested = reefs within datasets
data = h)
summary(m)
par(mfrow=c(3,3))
visreg::visreg(m)
|
## Build a "dummy region" learning map: water cells form a single group
## (label 1); every non-water cell receives its own unique label (2, 3, ...).
## The map is written as a CSV with the grid dimensions taken from the DB.
library(RSQLite)
## CLI: args[1] = SQLite db, args[2] = output CSV path, args[3] = analysis CSV
args <- commandArgs(trailingOnly = TRUE)
file.db <- args[1] #'/home/trn/Desktop/diploma-thesis/R/scripts/data-preparation/learning2.db'
save.dir <- args[2] #'/home/trn/Desktop/diploma-thesis/R/scripts/learning/dummy-region.csv'
file.analysis.addr <- args[3] #'/home/trn/Desktop/diploma-thesis/R/scripts/init-soil/analysis.csv' #to obtain water file
data.water <- read.csv(file.analysis.addr)$water  # assumes a `water` column where 1 == water -- TODO confirm
drv <- dbDriver("SQLite")
con <- dbConnect(drv, dbname = file.db)
## grid dimensions are stored in the inputInfo table
res <- dbSendQuery(con,'select max(row),max(col) from inputInfo')
dim <- as.matrix(fetch(res,n=-1))
row <- dim[1,1]
col <- dim[1,2]
dbClearResult(res)
x <- matrix(Inf,nrow=row,ncol=col)  # Inf marks cells not assigned below
## NOTE(review): linear indexing of a matrix is column-major in R -- assumes
## data.water is ordered column-wise over the grid; verify against the writer.
x[which(data.water==1)] <- 1 #water is one group
## NOTE(review): mask uses != 1 but the RHS length uses < 1; if water values
## other than 0/1 occur the lengths will not match -- confirm values are 0/1 only.
x[which(data.water!=1)] <- 2:(length(data.water[data.water<1])+1) #other regions correspond with cells
print('Saving dummy region learning map')
write.csv(x,save.dir,row.names=FALSE)
dbDisconnect(con)
|
/core/scripts/dummy-region.r
|
no_license
|
martintomas/iterative_prediction_of_pollutants
|
R
| false
| false
| 924
|
r
|
## Build a "dummy region" learning map: water cells form a single group
## (label 1); every non-water cell receives its own unique label (2, 3, ...).
## The map is written as a CSV with the grid dimensions taken from the DB.
library(RSQLite)
## CLI: args[1] = SQLite db, args[2] = output CSV path, args[3] = analysis CSV
args <- commandArgs(trailingOnly = TRUE)
file.db <- args[1] #'/home/trn/Desktop/diploma-thesis/R/scripts/data-preparation/learning2.db'
save.dir <- args[2] #'/home/trn/Desktop/diploma-thesis/R/scripts/learning/dummy-region.csv'
file.analysis.addr <- args[3] #'/home/trn/Desktop/diploma-thesis/R/scripts/init-soil/analysis.csv' #to obtain water file
data.water <- read.csv(file.analysis.addr)$water  # assumes a `water` column where 1 == water -- TODO confirm
drv <- dbDriver("SQLite")
con <- dbConnect(drv, dbname = file.db)
## grid dimensions are stored in the inputInfo table
res <- dbSendQuery(con,'select max(row),max(col) from inputInfo')
dim <- as.matrix(fetch(res,n=-1))
row <- dim[1,1]
col <- dim[1,2]
dbClearResult(res)
x <- matrix(Inf,nrow=row,ncol=col)  # Inf marks cells not assigned below
## NOTE(review): linear indexing of a matrix is column-major in R -- assumes
## data.water is ordered column-wise over the grid; verify against the writer.
x[which(data.water==1)] <- 1 #water is one group
## NOTE(review): mask uses != 1 but the RHS length uses < 1; if water values
## other than 0/1 occur the lengths will not match -- confirm values are 0/1 only.
x[which(data.water!=1)] <- 2:(length(data.water[data.water<1])+1) #other regions correspond with cells
print('Saving dummy region learning map')
write.csv(x,save.dir,row.names=FALSE)
dbDisconnect(con)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.btfit.R
\name{summary.btfit}
\alias{summary.btfit}
\title{Summarizing Bradley-Terry Fits}
\usage{
\method{summary}{btfit}(object, subset = NULL, ref = NULL, SE = FALSE,
...)
}
\arguments{
\item{object}{An object of class "btfit", typically the result \code{ob} of \code{ob <- btfit(..)}. See \code{\link{btfit}}.}
\item{subset}{A condition for selecting one or more subsets of the components. This can either be a character vector of names of the components (i.e. a subset of \code{names(object$pi)}), a single predicate function (that takes a vector of \code{object$pi} as its argument), or a logical vector of the same length as the number of components, (i.e. \code{length(object$pi)}).}
\item{ref}{A reference item. Either a string with the item name, or the number 1, or NULL. If NULL, then the coefficients are constrained such that their mean is zero. If an item name is given, the coefficient estimates are shifted so that the coefficient for the ref item is zero. If there is more than one component, the components that do not include the ref item will be treated as if ref = NULL. If ref = 1, then the first item of each component is made the reference item.}
\item{SE}{Logical. Whether to include the standard error of the estimate in the \code{item_summary} table. Default is \code{FALSE}. \strong{N.B. calculating the standard error can be slow when the number of items is large}. See \code{\link{vcov.btfit}}.}
\item{...}{other arguments}
}
\value{
An S3 object of class \code{"summary.btfit"}. It is a list containing the following components:
\item{item_summary}{A \code{tibble} with columns for the item name, its coefficient, the standard error and the component it is in. Within each component, the items are arranged by estimate, in descending order. Note that the \code{estimate} is NOT the same as the values in \code{summary$pi}. See Details.}
\item{component_summary}{A \code{tibble} with a row for each component in the \code{btfit} object (named according to the original \code{btdata$components}), with the number of items in the component, the number of iterations the fitting algorithm ran for, and whether it converged.}
}
\description{
\code{summary} method for class "btfit"
}
\details{
Note that the values given in the \code{estimate} column of the \code{item_summary} element are NOT the same as the values in \code{object$pi}. Rather, they are the \eqn{\lambda_i}, where \eqn{\lambda_i = \log{\pi_i}}; i.e. they are the coefficients, as found by \code{\link{coef.btfit}}. By default, these are normalised so that mean(\eqn{\lambda_i}) = 0. However, if \code{ref} is not equal to \code{NULL}, then the \eqn{\lambda_i} in the component in which \code{ref} appears are shifted to \eqn{\lambda_i - \lambda_{ref}}, for \eqn{i = 1, \dots, K_c}, where \eqn{K_c} is the number of items in the component in which \code{ref} appears, and \eqn{\lambda_{ref}} is the estimate for the reference item.
}
\examples{
citations_btdata <- btdata(BradleyTerryScalable::citations)
fit1 <- btfit(citations_btdata, 1)
summary(fit1)
toy_df_4col <- codes_to_counts(BradleyTerryScalable::toy_data, c("W1", "W2", "D"))
toy_btdata <- btdata(toy_df_4col)
fit2a <- btfit(toy_btdata, 1)
summary(fit2a)
fit2b <- btfit(toy_btdata, 1.1)
summary(fit2b, SE = TRUE)
fit2c <- btfit(toy_btdata, 1)
summary(fit2c, subset = function(x) "Amy" \%in\% names(x))
summary(fit2c, subset = function(x) length(x) > 3, ref = "Amy")
}
\seealso{
\code{\link{btfit}}, \code{\link{coef.btfit}}, \code{\link{vcov.btfit}}
}
\author{
Ella Kaye
}
|
/man/summary.btfit.Rd
|
no_license
|
cran/BradleyTerryScalable
|
R
| false
| true
| 3,649
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.btfit.R
\name{summary.btfit}
\alias{summary.btfit}
\title{Summarizing Bradley-Terry Fits}
\usage{
\method{summary}{btfit}(object, subset = NULL, ref = NULL, SE = FALSE,
...)
}
\arguments{
\item{object}{An object of class "btfit", typically the result \code{ob} of \code{ob <- btfit(..)}. See \code{\link{btfit}}.}
\item{subset}{A condition for selecting one or more subsets of the components. This can either be a character vector of names of the components (i.e. a subset of \code{names(object$pi)}), a single predicate function (that takes a vector of \code{object$pi} as its argument), or a logical vector of the same length as the number of components, (i.e. \code{length(object$pi)}).}
\item{ref}{A reference item. Either a string with the item name, or the number 1, or NULL. If NULL, then the coefficients are constrained such that their mean is zero. If an item name is given, the coefficient estimates are shifted so that the coefficient for the ref item is zero. If there is more than one component, the components that do not include the ref item will be treated as if ref = NULL. If ref = 1, then the first item of each component is made the reference item.}
\item{SE}{Logical. Whether to include the standard error of the estimate in the \code{item_summary} table. Default is \code{FALSE}. \strong{N.B. calculating the standard error can be slow when the number of items is large}. See \code{\link{vcov.btfit}}.}
\item{...}{other arguments}
}
\value{
An S3 object of class \code{"summary.btfit"}. It is a list containing the following components:
\item{item_summary}{A \code{tibble} with columns for the item name, its coefficient, the standard error and the component it is in. Within each component, the items are arranged by estimate, in descending order. Note that the \code{estimate} is NOT the same as the values in \code{summary$pi}. See Details.}
\item{component_summary}{A \code{tibble} with a row for each component in the \code{btfit} object (named according to the original \code{btdata$components}), with the number of items in the component, the number of iterations the fitting algorithm ran for, and whether it converged.}
}
\description{
\code{summary} method for class "btfit"
}
\details{
Note that the values given in the \code{estimate} column of the \code{item_summary} element are NOT the same as the values in \code{object$pi}. Rather, they are the \eqn{\lambda_i}, where \eqn{\lambda_i = \log{\pi_i}}; i.e. they are the coefficients, as found by \code{\link{coef.btfit}}. By default, these are normalised so that mean(\eqn{\lambda_i}) = 0. However, if \code{ref} is not equal to \code{NULL}, then the \eqn{\lambda_i} in the component in which \code{ref} appears are shifted to \eqn{\lambda_i - \lambda_{ref}}, for \eqn{i = 1, \dots, K_c}, where \eqn{K_c} is the number of items in the component in which \code{ref} appears, and \eqn{\lambda_{ref}} is the estimate for the reference item.
}
\examples{
citations_btdata <- btdata(BradleyTerryScalable::citations)
fit1 <- btfit(citations_btdata, 1)
summary(fit1)
toy_df_4col <- codes_to_counts(BradleyTerryScalable::toy_data, c("W1", "W2", "D"))
toy_btdata <- btdata(toy_df_4col)
fit2a <- btfit(toy_btdata, 1)
summary(fit2a)
fit2b <- btfit(toy_btdata, 1.1)
summary(fit2b, SE = TRUE)
fit2c <- btfit(toy_btdata, 1)
summary(fit2c, subset = function(x) "Amy" \%in\% names(x))
summary(fit2c, subset = function(x) length(x) > 3, ref = "Amy")
}
\seealso{
\code{\link{btfit}}, \code{\link{coef.btfit}}, \code{\link{vcov.btfit}}
}
\author{
Ella Kaye
}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{Dest}
\alias{Dest}
\title{Estimation of Joost's Dest parameter}
\usage{
Dest(x, stratum = "Population", nperm = 0, size.correct = FALSE)
}
\arguments{
\item{x}{A vector of \code{\link{locus}} objects or a \code{data.frame} with \code{locus} objects.}
\item{stratum}{Either a vector of strata variables if \code{x} is a \code{locus} vector or
the name of the column representing strata in \code{x} if it is a \code{data.frame}.}
\item{nperm}{The number of permutations to run for significance of the
estimator.}
\item{size.correct}{A flag indicating that the estimate should be corrected
based upon sample sizes (default=FALSE).}
}
\value{
A \code{data.frame} with Dest, Hs, Ht, and P (if asked for). When multiple
loci are provided, the results also provide a multilocus estimate using the
harmonic mean.
}
\description{
This function estimates the parameter (and potentially the confidence
surrounding its value) for Joost's Dest.
}
\examples{
a1 <- sample( LETTERS[1:5], size=20, replace=TRUE)
a2 <- sample( LETTERS[4:8], size=20, replace=TRUE)
raw_alleles <- matrix( c(a1,a2), ncol=2, byrow=TRUE )
locus <- locus( raw_alleles, type="column")
Population <- c(rep("Pop-A",10),rep("Pop-B",10))
Dest( locus, Population )
a1 <- sample( LETTERS[1:5], size=20, replace=TRUE)
a2 <- sample( LETTERS[4:8], size=20, replace=TRUE)
raw_alleles <- matrix( c(a1,a2), ncol=2, byrow=TRUE )
locus2 <- locus( raw_alleles, type="column")
df <- data.frame( Population, TPI=locus, PGM=locus2 )
Dest( df, nperm=99)
}
\author{
Rodney J. Dyer \email{rjdyer@vcu.edu}
}
|
/man/Dest.Rd
|
no_license
|
hbwxf/gstudio
|
R
| false
| false
| 1,624
|
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{Dest}
\alias{Dest}
\title{Estimation of Joost's Dest parameter}
\usage{
Dest(x, stratum = "Population", nperm = 0, size.correct = FALSE)
}
\arguments{
\item{x}{A vector of \code{\link{locus}} objects or a \code{data.frame} with \code{locus} objects.}
\item{stratum}{Either a vector of strata variables if \code{x} is a \code{locus} vector or
the name of the column representing strata in \code{x} if it is a \code{data.frame}.}
\item{nperm}{The number of permutations to run for significance of the
estimator.}
\item{size.correct}{A flag indicating that the estimate should be corrected
based upon sample sizes (default=FALSE).}
}
\value{
A \code{data.frame} with Dest, Hs, Ht, and P (if asked for). When multiple
loci are provided, the results also provide a multilocus estimate using the
harmonic mean.
}
\description{
This function estimates the parameter (and potentially the confidence
surrounding its value) for Joost's Dest.
}
\examples{
a1 <- sample( LETTERS[1:5], size=20, replace=TRUE)
a2 <- sample( LETTERS[4:8], size=20, replace=TRUE)
raw_alleles <- matrix( c(a1,a2), ncol=2, byrow=TRUE )
locus <- locus( raw_alleles, type="column")
Population <- c(rep("Pop-A",10),rep("Pop-B",10))
Dest( locus, Population )
a1 <- sample( LETTERS[1:5], size=20, replace=TRUE)
a2 <- sample( LETTERS[4:8], size=20, replace=TRUE)
raw_alleles <- matrix( c(a1,a2), ncol=2, byrow=TRUE )
locus2 <- locus( raw_alleles, type="column")
df <- data.frame( Population, TPI=locus, PGM=locus2 )
Dest( df, nperm=99)
}
\author{
Rodney J. Dyer \email{rjdyer@vcu.edu}
}
|
## Exploratory questions on the Facebook users dataset (facebook_user.tsv).
#load the data
#set working directory to the folder containing facebook_user.tsv before running
facebook_data= read.table("facebook_user.tsv", header=TRUE)
library("dplyr")
getwd()
#load data (working copy used for all questions below)
fb= read.table("facebook_user.tsv",header=TRUE)
library("dplyr")
#1.Which day has the highest number of birthdays ?
# build a proper Date column from the separate day / month / year columns
fb$birthdate=as.Date(with(fb,paste(dob_day,dob_month,dob_year,sep="-")),"%d-%m-%Y")
# count users per birthdate and keep the date(s) with the maximum count
# (fix: the original filtered on `count_dob1`, an undefined column, which errors)
data=fb%>% group_by(birthdate) %>% summarise(count_dob=n()) %>% filter(count_dob==max(count_dob))
#birthdate-1988-01-01
#count_dob-656
#2. Who tends to have more friends by average / by median ? Males or Females ?
fb %>% group_by(gender) %>% summarise(mean_friend_count= mean(friend_count), med_frnd_count= median(friend_count))
#female 242. 96
#male 165. 74
#NA 184. 81
#answer-female
#3. Which set of users have been on facebook for a long time ?
# sort by tenure (days on the platform) and show the top 10
fb %>% arrange(desc(tenure)) %>% head(10) %>% select(userid,tenure)
#userid tenure
#1 1419799 3139
#2 1601778 3139
#3 2135122 3139
#4 2137369 3129
#5 2111567 3128
#6 1934957 3101
#7 2032933 3019
#8 2171167 2958
#9 1701874 2926
#10 1574331 2888
#4. Who is most active on facebook ? Who is most active on mobile facebook ? on web facebook ?
#mean of friendships_innitiated , mobile liked by user, www liked by user
#5. Who is using facebook more on mobile than desktop ?
# users whose mobile likes exceed their web (www) likes
df=fb %>% filter(mobile_likes>www_likes) %>% select(userid,mobile_likes,www_likes)
View(df)
|
/Assignment1.R
|
no_license
|
mukullokhande99/DS_ML_R
|
R
| false
| false
| 1,511
|
r
|
## Exploratory questions on the Facebook users dataset (facebook_user.tsv).
#load the data
#set working directory to the folder containing facebook_user.tsv before running
facebook_data= read.table("facebook_user.tsv", header=TRUE)
library("dplyr")
getwd()
#load data (working copy used for all questions below)
fb= read.table("facebook_user.tsv",header=TRUE)
library("dplyr")
#1.Which day has the highest number of birthdays ?
# build a proper Date column from the separate day / month / year columns
fb$birthdate=as.Date(with(fb,paste(dob_day,dob_month,dob_year,sep="-")),"%d-%m-%Y")
# count users per birthdate and keep the date(s) with the maximum count
# (fix: the original filtered on `count_dob1`, an undefined column, which errors)
data=fb%>% group_by(birthdate) %>% summarise(count_dob=n()) %>% filter(count_dob==max(count_dob))
#birthdate-1988-01-01
#count_dob-656
#2. Who tends to have more friends by average / by median ? Males or Females ?
fb %>% group_by(gender) %>% summarise(mean_friend_count= mean(friend_count), med_frnd_count= median(friend_count))
#female 242. 96
#male 165. 74
#NA 184. 81
#answer-female
#3. Which set of users have been on facebook for a long time ?
# sort by tenure (days on the platform) and show the top 10
fb %>% arrange(desc(tenure)) %>% head(10) %>% select(userid,tenure)
#userid tenure
#1 1419799 3139
#2 1601778 3139
#3 2135122 3139
#4 2137369 3129
#5 2111567 3128
#6 1934957 3101
#7 2032933 3019
#8 2171167 2958
#9 1701874 2926
#10 1574331 2888
#4. Who is most active on facebook ? Who is most active on mobile facebook ? on web facebook ?
#mean of friendships_innitiated , mobile liked by user, www liked by user
#5. Who is using facebook more on mobile than desktop ?
# users whose mobile likes exceed their web (www) likes
df=fb %>% filter(mobile_likes>www_likes) %>% select(userid,mobile_likes,www_likes)
View(df)
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{d}
\alias{d}
\title{make data frame alias}
\usage{
d(x)
}
\description{
make data frame alias
}
|
/man/d.Rd
|
no_license
|
Philipp-Neubauer/euphemisms
|
R
| false
| false
| 156
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{d}
\alias{d}
\title{make data frame alias}
\usage{
d(x)
}
\description{
make data frame alias
}
|
#' Search NOAA datasets
#'
#' From the NOAA API docs: All of our data are in datasets. To retrieve any data
#' from us, you must know what dataset it is in.
#'
#' @export
#'
#' @template rnoaa
#' @template rnoaa2
#' @template datasets
#' @param datasetid (optional) Accepts a single valid dataset id. Data returned will be
#' from the dataset specified.
#' @param stationid Accepts a valid station id or a vector or list of station ids
#' @return A data.frame for all datasets, or a list of length two, each with a data.frame.
#' @examples \dontrun{
#' # Get a table of all datasets
#' ncdc_datasets()
#'
#' # Get details from a particular dataset
#' ncdc_datasets(datasetid='ANNUAL')
#'
#' # Get datasets with Temperature at the time of observation (TOBS) data type
#' ncdc_datasets(datatypeid='TOBS')
#' ## two datatypeid's
#' ncdc_datasets(datatypeid=c('TOBS', "ACMH"))
#'
#' # Get datasets with data for a series of the same parameter arg, in this case
#' # stationid's
#' ncdc_datasets(stationid='COOP:310090')
#' ncdc_datasets(stationid=c('COOP:310090','COOP:310184','COOP:310212'))
#'
#' # Multiple datatypeid's
#' ncdc_datasets(datatypeid=c('ACMC','ACMH','ACSC'))
#' ncdc_datasets(datasetid='ANNUAL', datatypeid=c('ACMC','ACMH','ACSC'))
#'
#' # Multiple locationid's
#' ncdc_datasets(locationid="FIPS:30091")
#' ncdc_datasets(locationid=c("FIPS:30103", "FIPS:30091"))
#' }
## Query the NOAA CDO v2 /datasets endpoint and return a list with
## `meta` (result-set metadata) and `data` (a data.frame of datasets),
## classed "ncdc_datasets". Relies on package helpers check_key(),
## noaa_compact() and check_response() defined elsewhere in rnoaa.
ncdc_datasets <- function(datasetid=NULL, datatypeid=NULL, stationid=NULL, locationid=NULL,
  startdate=NULL, enddate=NULL, sortfield=NULL, sortorder=NULL, limit=25, offset=NULL,
  token=NULL, dataset=NULL, page=NULL, year=NULL, month=NULL, ...)
{
  ## Reject arguments that only existed in the old NOAA API v1.
  calls <- names(sapply(match.call(), deparse))[-1]
  calls_vec <- c("dataset", "page", "year", "month") %in% calls
  if (any(calls_vec))
    stop("The parameters dataset, page, year, and month \n have been removed, and were only relavant in the old NOAA API v1. \n\nPlease see documentation for ?ncdc_datasets")
  token <- check_key(token)  # resolve the API token (argument or env var)
  url <- "http://www.ncdc.noaa.gov/cdo-web/api/v2/datasets"
  ## A single datasetid is addressed as a path segment, not a query parameter.
  if (!is.null(datasetid)) url <- paste(url, "/", datasetid, sep = "")
  args <- noaa_compact(list(startdate=startdate,
    enddate=enddate, sortfield=sortfield, sortorder=sortorder,
    limit=limit, offset=offset))  # drop NULL entries
  ## stationid/datatypeid/locationid may be vectors: wrap each element in its
  ## own single-entry named list so the query string repeats the parameter.
  if (!is.null(stationid)) {
    stationid <- lapply(stationid, function(x) list(stationid = x))
  }
  if (!is.null(datatypeid)) {
    datatypeid <- lapply(datatypeid, function(x) list(datatypeid = x))
  }
  if (!is.null(locationid)) {
    locationid <- lapply(locationid, function(x) list(locationid = x))
  }
  args <- c(args, stationid, datatypeid, locationid)
  ## Flatten to a single list of scalars; unlist() appends numeric suffixes to
  ## repeated names, so strip digits to restore e.g. "stationid1" -> "stationid".
  args <- as.list(unlist(args))
  names(args) <- gsub("[0-9]+", "", names(args))
  if (length(args) == 0) args <- NULL
  temp <- GET(url, query = args, add_headers("token" = token), ...)
  tt <- check_response(temp)  # parse the body / surface API errors
  ## check_response() yields a character message on failure -- return empty result.
  if (inherits(tt, "character")) {
    all <- list(meta = NULL, data = NULL)
  } else {
    if (!is.null(datasetid)) {
      ## Single-dataset endpoint: one record, no metadata envelope.
      dat <- data.frame(tt, stringsAsFactors = FALSE)
      all <- list(meta = NULL, data = dat)
    } else {
      dat <- dplyr::bind_rows(lapply(tt$results, function(x) data.frame(x, stringsAsFactors = FALSE)))
      all <- list(meta = tt$metadata$resultset, data = dat)
    }
  }
  structure(all, class = "ncdc_datasets")
}
|
/R/ncdc_datasets.r
|
permissive
|
bestwpw/rnoaa
|
R
| false
| false
| 3,317
|
r
|
#' Search NOAA datasets
#'
#' From the NOAA API docs: All of our data are in datasets. To retrieve any data
#' from us, you must know what dataset it is in.
#'
#' @export
#'
#' @template rnoaa
#' @template rnoaa2
#' @template datasets
#' @param datasetid (optional) Accepts a single valid dataset id. Data returned will be
#' from the dataset specified.
#' @param stationid Accepts a valid station id or a vector or list of station ids
#' @return A data.frame for all datasets, or a list of length two, each with a data.frame.
#' @examples \dontrun{
#' # Get a table of all datasets
#' ncdc_datasets()
#'
#' # Get details from a particular dataset
#' ncdc_datasets(datasetid='ANNUAL')
#'
#' # Get datasets with Temperature at the time of observation (TOBS) data type
#' ncdc_datasets(datatypeid='TOBS')
#' ## two datatypeid's
#' ncdc_datasets(datatypeid=c('TOBS', "ACMH"))
#'
#' # Get datasets with data for a series of the same parameter arg, in this case
#' # stationid's
#' ncdc_datasets(stationid='COOP:310090')
#' ncdc_datasets(stationid=c('COOP:310090','COOP:310184','COOP:310212'))
#'
#' # Multiple datatypeid's
#' ncdc_datasets(datatypeid=c('ACMC','ACMH','ACSC'))
#' ncdc_datasets(datasetid='ANNUAL', datatypeid=c('ACMC','ACMH','ACSC'))
#'
#' # Multiple locationid's
#' ncdc_datasets(locationid="FIPS:30091")
#' ncdc_datasets(locationid=c("FIPS:30103", "FIPS:30091"))
#' }
## Query the NOAA CDO v2 /datasets endpoint and return a list with
## `meta` (result-set metadata) and `data` (a data.frame of datasets),
## classed "ncdc_datasets". Relies on package helpers check_key(),
## noaa_compact() and check_response() defined elsewhere in rnoaa.
ncdc_datasets <- function(datasetid=NULL, datatypeid=NULL, stationid=NULL, locationid=NULL,
  startdate=NULL, enddate=NULL, sortfield=NULL, sortorder=NULL, limit=25, offset=NULL,
  token=NULL, dataset=NULL, page=NULL, year=NULL, month=NULL, ...)
{
  ## Reject arguments that only existed in the old NOAA API v1.
  calls <- names(sapply(match.call(), deparse))[-1]
  calls_vec <- c("dataset", "page", "year", "month") %in% calls
  if (any(calls_vec))
    stop("The parameters dataset, page, year, and month \n have been removed, and were only relavant in the old NOAA API v1. \n\nPlease see documentation for ?ncdc_datasets")
  token <- check_key(token)  # resolve the API token (argument or env var)
  url <- "http://www.ncdc.noaa.gov/cdo-web/api/v2/datasets"
  ## A single datasetid is addressed as a path segment, not a query parameter.
  if (!is.null(datasetid)) url <- paste(url, "/", datasetid, sep = "")
  args <- noaa_compact(list(startdate=startdate,
    enddate=enddate, sortfield=sortfield, sortorder=sortorder,
    limit=limit, offset=offset))  # drop NULL entries
  ## stationid/datatypeid/locationid may be vectors: wrap each element in its
  ## own single-entry named list so the query string repeats the parameter.
  if (!is.null(stationid)) {
    stationid <- lapply(stationid, function(x) list(stationid = x))
  }
  if (!is.null(datatypeid)) {
    datatypeid <- lapply(datatypeid, function(x) list(datatypeid = x))
  }
  if (!is.null(locationid)) {
    locationid <- lapply(locationid, function(x) list(locationid = x))
  }
  args <- c(args, stationid, datatypeid, locationid)
  ## Flatten to a single list of scalars; unlist() appends numeric suffixes to
  ## repeated names, so strip digits to restore e.g. "stationid1" -> "stationid".
  args <- as.list(unlist(args))
  names(args) <- gsub("[0-9]+", "", names(args))
  if (length(args) == 0) args <- NULL
  temp <- GET(url, query = args, add_headers("token" = token), ...)
  tt <- check_response(temp)  # parse the body / surface API errors
  ## check_response() yields a character message on failure -- return empty result.
  if (inherits(tt, "character")) {
    all <- list(meta = NULL, data = NULL)
  } else {
    if (!is.null(datasetid)) {
      ## Single-dataset endpoint: one record, no metadata envelope.
      dat <- data.frame(tt, stringsAsFactors = FALSE)
      all <- list(meta = NULL, data = dat)
    } else {
      dat <- dplyr::bind_rows(lapply(tt$results, function(x) data.frame(x, stringsAsFactors = FALSE)))
      all <- list(meta = tt$metadata$resultset, data = dat)
    }
  }
  structure(all, class = "ncdc_datasets")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{transform}
\alias{transform}
\title{Transformations
Performs log or logit transformations.}
\usage{
transform(x, type = c("identity", "log", "logit", "none", NA_character_))
}
\arguments{
\item{x}{value to transform}
\item{type}{type of transform (log, logit).}
}
\value{
transformed value
}
\description{
Transformations
Performs log or logit transformations.
}
|
/man/transform.Rd
|
no_license
|
cran/confidence
|
R
| false
| true
| 460
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{transform}
\alias{transform}
\title{Transformations
Performs log or logit transformations.}
\usage{
transform(x, type = c("identity", "log", "logit", "none", NA_character_))
}
\arguments{
\item{x}{value to transform}
\item{type}{type of transform (log, logit).}
}
\value{
transformed value
}
\description{
Transformations
Performs log or logit transformations.
}
|
\name{mongo.gridfile.seek}
\alias{mongo.gridfile.seek}
\title{Seek to a position in a mongo.gridfile}
\usage{
mongo.gridfile.seek(gridfile, offset)
}
\arguments{
\item{gridfile}{A (\link{mongo.gridfile}) object.}
\item{offset}{(as.double) The position to which to seek.}
}
\value{
(double) The position set. This may be at the length of
the GridFS file if \code{offset} was greater than that.
}
\description{
Seek to a position in a \link{mongo.gridfile}.\cr This sets
the position at which the next
\code{\link{mongo.gridfile.read}()} will start.
}
\examples{
mongo <- mongo.create()
if (mongo.is.connected(mongo)) {
gridfs <- mongo.gridfs.create(mongo, "grid")
mongo.gridfs.store.file(gridfs, "tests/test.R", "test.R")
gf <- mongo.gridfs.find(gridfs, "test.R")
if( !is.null(gf)){
mongo.gridfile.seek(gf, 256*256*5)
data <- mongo.gridfile.read(gf, 16384)
mongo.gridfile.destroy(gf)
}
mongo.gridfs.destroy(gridfs)
}
}
\seealso{
\code{\link{mongo.gridfs}},\cr
\code{\link{mongo.gridfs.find}},\cr
\link{mongo.gridfile},\cr
\code{\link{mongo.gridfile.get.descriptor}},\cr
\code{\link{mongo.gridfile.get.filename}},\cr
\code{\link{mongo.gridfile.get.length}},\cr
\code{\link{mongo.gridfile.get.chunk.size}},\cr
\code{\link{mongo.gridfile.get.chunk.count}},\cr
\code{\link{mongo.gridfile.get.content.type}},\cr
\code{\link{mongo.gridfile.get.upload.date}},\cr
\code{\link{mongo.gridfile.get.md5}},\cr
\code{\link{mongo.gridfile.get.metadata}},\cr
\code{\link{mongo.gridfile.get.chunk}},\cr
\code{\link{mongo.gridfile.get.chunks}},\cr
\code{\link{mongo.gridfile.read}},\cr
\code{\link{mongo.gridfile.pipe}}.
}
|
/man/mongo.gridfile.seek.Rd
|
no_license
|
StefanoSpada/rmongodb
|
R
| false
| false
| 1,655
|
rd
|
\name{mongo.gridfile.seek}
\alias{mongo.gridfile.seek}
\title{Seek to a position in a mongo.gridfile}
\usage{
mongo.gridfile.seek(gridfile, offset)
}
\arguments{
\item{gridfile}{A (\link{mongo.gridfile}) object.}
\item{offset}{(as.double) The position to which to seek.}
}
\value{
(double) The position set. This may be at the length of
the GridFS file if \code{offset} was greater than that.
}
\description{
Seek to a position in a \link{mongo.gridfile}.\cr This sets
the position at which the next
\code{\link{mongo.gridfile.read}()} will start.
}
\examples{
mongo <- mongo.create()
if (mongo.is.connected(mongo)) {
gridfs <- mongo.gridfs.create(mongo, "grid")
mongo.gridfs.store.file(gridfs, "tests/test.R", "test.R")
gf <- mongo.gridfs.find(gridfs, "test.R")
if( !is.null(gf)){
mongo.gridfile.seek(gf, 256*256*5)
data <- mongo.gridfile.read(gf, 16384)
mongo.gridfile.destroy(gf)
}
mongo.gridfs.destroy(gridfs)
}
}
\seealso{
\code{\link{mongo.gridfs}},\cr
\code{\link{mongo.gridfs.find}},\cr
\link{mongo.gridfile},\cr
\code{\link{mongo.gridfile.get.descriptor}},\cr
\code{\link{mongo.gridfile.get.filename}},\cr
\code{\link{mongo.gridfile.get.length}},\cr
\code{\link{mongo.gridfile.get.chunk.size}},\cr
\code{\link{mongo.gridfile.get.chunk.count}},\cr
\code{\link{mongo.gridfile.get.content.type}},\cr
\code{\link{mongo.gridfile.get.upload.date}},\cr
\code{\link{mongo.gridfile.get.md5}},\cr
\code{\link{mongo.gridfile.get.metadata}},\cr
\code{\link{mongo.gridfile.get.chunk}},\cr
\code{\link{mongo.gridfile.get.chunks}},\cr
\code{\link{mongo.gridfile.read}},\cr
\code{\link{mongo.gridfile.pipe}}.
}
|
#########################################################################################
# Prepared for Gabor's Data Analysis
#
# Data Analysis for Business, Economics, and Policy
# by Gabor Bekes and Gabor Kezdi
# Cambridge University Press 2021
#
# gabors-data-analysis.com
#
# License: Free to share, modify and use for educational purposes.
# Not to be used for commercial purposes.
# Chapter 12
# CH12 Time series simulation
# version 0.9 2020-09-09
#########################################################################################
# ------------------------------------------------------------------------------------------------------
#### SET UP
# It is advised to start a new session for every case study
# CLEAR MEMORY
rm(list=ls())
# Import libraries
library(tidyverse)
# set working directory
# option A: open material as project
# option B: set working directory for da_case_studies
# example: setwd("C:/Users/bekes.gabor/Documents/github/da_case_studies/")
# set data dir, data used
source("set-data-directory.R") # data_dir must be first defined
# alternative: give full path here,
# example data_dir="C:/Users/bekes.gabor/Dropbox (MTA KRTK)/bekes_kezdi_textbook/da_data_repo"
# load theme and functions
source("ch00-tech-prep/theme_bg.R")
source("ch00-tech-prep/da_helper_functions.R")
use_case_dir <- "ch12-time-series-simulations/"
data_out <- use_case_dir
output <- paste0(use_case_dir,"output/")
create_output_if_doesnt_exist(output)
#-----------------------------------------------------------------------------------------
# PART 1
# Random walk simulation
# Generate k random walks across time {0, 1, ... , n_obs}
# set parameters
set.seed(10)
n_obs <- 100 # number of obs (renamed from T, which shadowed the built-in TRUE shorthand)
k <- 5 # nr of random walks generated
initial.value <- 0
# Returns one random walk of length n_obs + 1 starting at initial.value
GetRandomWalk <- function() {
  # Add a standard normal at each step
  initial.value + c(0, cumsum(rnorm(n_obs)))
}
# Matrix of random walks: one column (V1..V5) per simulated walk
values <- replicate(k, GetRandomWalk())
# visualize
rws <- as.data.frame(values)
rws <- rws %>%
  mutate(time=as.numeric(rownames(.)))
# reshape to long format: one (time, walk, value) row per observation
rws <- rws %>%
  gather(var, value, V1:V5)
rws_plot <- ggplot(rws,aes(time, value, color=var)) +
  geom_line (show.legend = FALSE, size =0.8) +
  theme_bg() +
  scale_color_manual(values = c(color[1], color[2], color[3], color[4], color[5])) +
  labs(x = "Time period",
       y = "Value of the simulated variable") +
  scale_x_continuous(expand = c(0.01,0.01), limits = c(0,100), breaks=seq(0,100,10))
rws_plot
save_fig("ch12-figure-1-randomwalks", output, "small")
#-----------------------------------------------------------------------------------------
# PART 2
# Serially correlated vs serially uncorrelated series
# simulation exercises
# rnorm(n, mean = 0, sd = 1)
# serially uncorrelated series/white noise
set.seed(2016)
uncorr <- as.data.frame(ts(rnorm(100, mean=0, sd=1)) )
uncorr <- uncorr %>%
  mutate(t=as.numeric(rownames(.)))
uncorr
whitenoise <- ggplot(uncorr,aes(t, x)) +
  geom_line (show.legend = FALSE, size =0.6, color=color[1]) +
  geom_hline(yintercept=0,
             color = color[2], size=1)+
  labs(x = "Time period",
       y = "Value of the simulated variable") +
  theme_bg() +
  scale_y_continuous(expand = c(0.01,0.01)) +
  scale_x_continuous(expand = c(0.01,0.01),breaks=seq(0,100,10))
whitenoise
save_fig("ch12-figure-9a-serialcorr-whitenoise", output, "small")
# serially correlated series, rho=0.8 (AR(1): x_t = rho * x_{t-1} + e_t)
set.seed(2016)
rho=0.8
E <- rnorm(100, 0, 1)
x <- numeric()
x[1] <- E[1]
for(i in 2:100) x[i] <- rho*x[i-1] + E[i]
# BUGFIX: build the plotting frame from the AR(1) series x itself.
# Previously it was built from the innovations E; the plot's aes(t, x)
# only worked because ggplot fell back to the global variable x.
corr08 <- data.frame(x = x)
corr08 <- corr08 %>%
  mutate(t=as.numeric(rownames(.)))
corr08_graph <- ggplot(corr08,aes(t, x)) +
  geom_line (show.legend = FALSE, size =0.6, color=color[1]) +
  geom_hline(yintercept=0,
             color = color[2], size=1)+
  labs(x = "Time period",
       y = "Value of the simulated variable") +
  theme_bg() +
  scale_y_continuous(expand = c(0.01,0.01)) +
  scale_x_continuous(expand = c(0.01,0.01),breaks=seq(0,100,10))
corr08_graph
save_fig("ch12-figure-9b-serialcorr-corr08", output, "small")
|
/ch12-time-series-simulations/ch12-randomwalk-serialcorr-simull.R
|
no_license
|
LIKE4986/da_case_studies
|
R
| false
| false
| 4,120
|
r
|
#########################################################################################
# Prepared for Gabor's Data Analysis
#
# Data Analysis for Business, Economics, and Policy
# by Gabor Bekes and Gabor Kezdi
# Cambridge University Press 2021
#
# gabors-data-analysis.com
#
# License: Free to share, modify and use for educational purposes.
# Not to be used for commercial purposes.
# Chapter 12
# CH12 Time series simulation
# version 0.9 2020-09-09
#########################################################################################
# ------------------------------------------------------------------------------------------------------
#### SET UP
# It is advised to start a new session for every case study
# CLEAR MEMORY
rm(list=ls())
# Import libraries
library(tidyverse)
# set working directory
# option A: open material as project
# option B: set working directory for da_case_studies
# example: setwd("C:/Users/bekes.gabor/Documents/github/da_case_studies/")
# set data dir, data used
source("set-data-directory.R") # data_dir must be first defined
# alternative: give full path here,
# example data_dir="C:/Users/bekes.gabor/Dropbox (MTA KRTK)/bekes_kezdi_textbook/da_data_repo"
# load theme and functions
source("ch00-tech-prep/theme_bg.R")
source("ch00-tech-prep/da_helper_functions.R")
use_case_dir <- "ch12-time-series-simulations/"
data_out <- use_case_dir
output <- paste0(use_case_dir,"output/")
create_output_if_doesnt_exist(output)
#-----------------------------------------------------------------------------------------
# PART 1
# Random walk simulation
# Generate k random walks across time {0, 1, ... , T}
# set parameters
# NOTE(review): T is used as a variable name below; it shadows the built-in
# shorthand for TRUE. Consider renaming it (e.g. n_obs).
set.seed (10)
T <- 100 # number of obs
k <- 5 # nr of random walks generated
initial.value <- 0
# create a function returning one random walk of length T + 1
GetRandomWalk <- function() {
# Add a standard normal at each step
initial.value + c(0, cumsum(rnorm(T)))
}
# Matrix of random walks: one column (V1..V5) per simulated walk
values <- replicate(k, GetRandomWalk())
# visualize
rws <- as.data.frame(values)
rws <- rws %>%
mutate(time=as.numeric(rownames(.)))
# reshape to long format: one (time, walk, value) row per observation
rws <- rws %>%
gather(var, value, V1:V5)
rws_plot <- ggplot(rws,aes(time, value, color=var)) +
geom_line (show.legend = FALSE, size =0.8) +
theme_bg() +
scale_color_manual(values = c(color[1], color[2], color[3], color[4], color[5])) +
labs(x = "Time period",
y = "Value of the simulated variable") +
scale_x_continuous(expand = c(0.01,0.01), limits = c(0,100), breaks=seq(0,100,10))
rws_plot
save_fig("ch12-figure-1-randomwalks", output, "small")
#-----------------------------------------------------------------------------------------
# PART 2
# Serially correlated vs serially uncorrelated series
# simulation exercises
# rnorm(n, mean = 0, sd = 1)
# serially uncorrelated series/white noise
set.seed(2016)
uncorr <- as.data.frame(ts(rnorm(100, mean=0, sd=1)) )
uncorr <- uncorr %>%
mutate(t=as.numeric(rownames(.)))
uncorr
whitenoise <- ggplot(uncorr,aes(t, x)) +
geom_line (show.legend = FALSE, size =0.6, color=color[1]) +
geom_hline(yintercept=0,
color = color[2], size=1)+
labs(x = "Time period",
y = "Value of the simulated variable") +
theme_bg() +
scale_y_continuous(expand = c(0.01,0.01)) +
scale_x_continuous(expand = c(0.01,0.01),breaks=seq(0,100,10))
whitenoise
save_fig("ch12-figure-9a-serialcorr-whitenoise", output, "small")
# serially correlated series, rho=0.8 (AR(1): x_t = rho * x_{t-1} + e_t)
set.seed(2016)
rho=0.8
E <- rnorm(100, 0, 1)
x <- numeric()
x[1] <- E[1]
for(i in 2:100) x[i] <- rho*x[i-1] + E[i]
# NOTE(review): corr08 is built from the innovations E, not the AR(1) series x;
# the plot below maps y to `x`, which ggplot resolves from the global
# environment, so it works only by accident. Consider building the frame
# from x directly.
E <- as.data.frame(E)
corr08 <- E %>%
mutate(t=as.numeric(rownames(.)))
corr08_graph <- ggplot(corr08,aes(t, x)) +
geom_line (show.legend = FALSE, size =0.6, color=color[1]) +
geom_hline(yintercept=0,
color = color[2], size=1)+
labs(x = "Time period",
y = "Value of the simulated variable") +
theme_bg() +
scale_y_continuous(expand = c(0.01,0.01)) +
scale_x_continuous(expand = c(0.01,0.01),breaks=seq(0,100,10))
corr08_graph
save_fig("ch12-figure-9b-serialcorr-corr08", output, "small")
|
# Download and unpack the UCI household power consumption data set.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip","household_power_consumption.zip")
unzip("household_power_consumption.zip")
library(dplyr)
# Read only 1st 2 days of February 2007.
# grep keeps rows whose leading Date field (d/m/yyyy) is 1/2/2007 or 2/2/2007,
# so the full file is never loaded into R.
# NOTE(review): tbl_df() is deprecated in current dplyr; as_tibble() is the
# drop-in replacement — confirm the dplyr version in use before changing.
household.power.consumption <- tbl_df(read.table(pipe('grep "^[1-2]/2/2007" "household_power_consumption.txt"'), sep=";"))
# Assign proper column names by reading them in from the file.
# The header line contains no whitespace, so read.table yields it as one
# field; it is then split on ";" to recover the individual column names.
colnames(household.power.consumption) <- (read.table("household_power_consumption.txt", nrow=1)[[1]] %>%
as.character() %>%
strsplit(split=";"))[[1]]
library(lubridate)
# Combine Date and Time into a single POSIXct datetime column (d/m/y h:m:s).
household.power.consumption <- mutate(household.power.consumption,
datetime = with(household.power.consumption,
paste(Date, Time))
%>% dmy_hms())
# 2x2 grid of panels written to a 480x480 PNG.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
# Global Active Power vs. Datetime.
with(household.power.consumption, plot(datetime, Global_active_power, type='n', xlab= "", ylab="Global Active Power"))
with(household.power.consumption, lines(datetime, Global_active_power, type='l'))
# Voltage vs. Datetime.
with(household.power.consumption, plot(datetime, Voltage, type='n', xlab= "datetime", ylab="Voltage"))
with(household.power.consumption, lines(datetime, Voltage, type='l'))
# Submetering vs. Datetime: three series overlaid on one panel.
with(household.power.consumption, plot(datetime, Sub_metering_1, type='n', xlab= "", ylab="Energy sub metering"))
with(household.power.consumption, lines(datetime, Sub_metering_1, type='l'))
with(household.power.consumption, lines(datetime, Sub_metering_2, type='l', col="red"))
with(household.power.consumption, lines(datetime, Sub_metering_3, type='l', col="blue"))
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=1, col=c("Black","Red","Blue"), bty="n")
# Global_Reactive_Power vs. Datetime.
with(household.power.consumption, plot(datetime, Global_reactive_power, type='n', xlab= "datetime"))
with(household.power.consumption, lines(datetime, Global_reactive_power, type='l'))
dev.off()
|
/plot4.R
|
no_license
|
yongish/ExData_Plotting1
|
R
| false
| false
| 2,210
|
r
|
# Download and unpack the UCI household power consumption data set.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip","household_power_consumption.zip")
unzip("household_power_consumption.zip")
library(dplyr)
# Read only 1st 2 days of February 2007.
# grep keeps rows whose leading Date field (d/m/yyyy) is 1/2/2007 or 2/2/2007.
# NOTE(review): tbl_df() is deprecated in current dplyr; as_tibble() is the
# drop-in replacement — confirm the dplyr version in use before changing.
household.power.consumption <- tbl_df(read.table(pipe('grep "^[1-2]/2/2007" "household_power_consumption.txt"'), sep=";"))
# Assign proper column names by reading them in from the file.
# The header line is read as one field and split on ";".
colnames(household.power.consumption) <- (read.table("household_power_consumption.txt", nrow=1)[[1]] %>%
as.character() %>%
strsplit(split=";"))[[1]]
library(lubridate)
# Combine Date and Time into a single POSIXct datetime column (d/m/y h:m:s).
household.power.consumption <- mutate(household.power.consumption,
datetime = with(household.power.consumption,
paste(Date, Time))
%>% dmy_hms())
# 2x2 grid of panels written to a 480x480 PNG.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
# Global Active Power vs. Datetime.
with(household.power.consumption, plot(datetime, Global_active_power, type='n', xlab= "", ylab="Global Active Power"))
with(household.power.consumption, lines(datetime, Global_active_power, type='l'))
# Voltage vs. Datetime.
with(household.power.consumption, plot(datetime, Voltage, type='n', xlab= "datetime", ylab="Voltage"))
with(household.power.consumption, lines(datetime, Voltage, type='l'))
# Submetering vs. Datetime: three series overlaid on one panel.
with(household.power.consumption, plot(datetime, Sub_metering_1, type='n', xlab= "", ylab="Energy sub metering"))
with(household.power.consumption, lines(datetime, Sub_metering_1, type='l'))
with(household.power.consumption, lines(datetime, Sub_metering_2, type='l', col="red"))
with(household.power.consumption, lines(datetime, Sub_metering_3, type='l', col="blue"))
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=1, col=c("Black","Red","Blue"), bty="n")
# Global_Reactive_Power vs. Datetime.
with(household.power.consumption, plot(datetime, Global_reactive_power, type='n', xlab= "datetime"))
with(household.power.consumption, lines(datetime, Global_reactive_power, type='l'))
dev.off()
|
# The high level actions performed by the VIT Canvas. The details of how these
# methods accomplish their goals is set by loadDetails, which chooses the
# appropriate method based on the type of data and the statistic being examined
#initialize_actions <- function(e){
#' Creates the initial plot of the data
PLOT_DATA <- function(canvas)
  warning("PLOT_DATA details must be set by loadPlotDetails()")
#' Creates the initial plot of the sample
PLOT_SAMPLE <- function(canvas)
  warning("PLOT_SAMPLE details must be set by loadDetails()")
#' Puts titles for each of the plots on the canvas
SHOW_LABELS <- function(canvas)
  warning("SHOW_LABELS details must be set by loadDetails()")
#' Animates the construction of the sample from the data
ANIMATE_SAMPLE <- function(canvas)
  warning("ANIMATE_SAMPLE details must be set by loadDetails()")
#' Animates the tracking of a sample (or resample)
TRACK_SAMPLE <- function(canvas)
  warning("TRACK_SAMPLE details must be set by loadDetails()")
#' Method used to calculate a single statistic from a single group of observations
CALC_STAT <- function(canvas)
  warning("CALC_STAT details must be set by loadDetails()")
#' Method used to fade plots in order to emphasize those remaining
FADE_PLOTS <- function(canvas)
  warning("FADE_PLOTS details must be set by loadDetails()")
#' Creates a plot of the data parameter
PLOT_DATA_STAT <- function(canvas)
  warning("PLOT_DATA_STAT details must be set by loadDetails()")  # message fixed: previously said "PLOT_STAT"
#' Creates a plot of the sample statistic
PLOT_SAMPLE_STAT <- function(canvas)
  warning("PLOT_SAMPLE_STAT details must be set by loadDetails()")  # message fixed: previously said "PLOT_STAT"
#' Creates a plot of the distribution of sample statistics
PLOT_STAT_DIST <- function(canvas)
  warning("PLOT_STAT_DIST details must be set by loadDetails()")
#' Animates the addition of the current sample statistic to the statistic distribution
ANIMATE_STAT <- function(canvas)
  warning("ANIMATE_STAT details must be set by loadDetails()")
#' Displays any final conclusions or information required by the method whenever a statistic is added to the statistic distribution
DISPLAY_RESULT <- function(canvas)
  warning("DISPLAY_RESULT details must be set by loadDetails()")
#' Displays a second round of conclusions if required by the method
DISPLAY_RESULT_2 <- function(canvas)
  warning("DISPLAY_RESULT_2 details must be set by loadDetails()")
#' Manages the methods' performance of 1000 complete runs. At the moment this has its own method because running 1000 methods the usual way would be time prohibitive.
HANDLE_1000 <- function(e)
  warning("HANDLE_1000 details must be set by loadDetails()")
#' Placeholder for graph-path lookup; semantics are set by loadPlotDetails()
GRAPHPATH <- function(plot.name, number)
  warning("GRAPHPATH details must be set by loadPlotDetails()")  # message fixed: previously said "loadPlotDetalis"
test_function <- function(canvas)
  warning("test_function details must be set by loadDetails()")
#}
#' Returns all actions to their original empty placeholder values, with the
#' exception of PLOT_DATA (the original comment said "PLOT_DETAILS", but it is
#' PLOT_DATA that is left untouched). Since clear_actions() is only called when
#' a new variable is added, it will be followed by e$build_canvas, which will
#' change PLOT_DATA if necessary.
#' NOTE(review): GRAPHPATH and test_function are not reset here, while
#' MISCELLANEOUS is reset despite not appearing in the initialisation block
#' above — confirm this asymmetry is intentional.
clear_actions <- function(e) {
# `<<-` assigns in an enclosing (ultimately global/package) environment,
# overwriting the action placeholders defined at the top level.
PLOT_SAMPLE <<- function(canvas)
warning("PLOT_SAMPLE details must be set by loadDetails()")
SHOW_LABELS <<- function(canvas)
warning("SHOW_LABELS details must be set by loadDetails()")
ANIMATE_SAMPLE <<- function(canvas)
warning("ANIMATE_SAMPLE details must be set by loadDetails()")
TRACK_SAMPLE <<- function(canvas)
warning("TRACK_SAMPLE details must be set by loadDetails()")
CALC_STAT <<- function(canvas)
warning("CALC_STAT details must be set by loadDetails()")
FADE_PLOTS <<- function(canvas)
warning("FADE_PLOTS details must be set by loadDetails()")
PLOT_DATA_STAT <<- function(canvas)
warning("PLOT_STAT details must be set by loadDetails()")
PLOT_SAMPLE_STAT <<- function(canvas)
warning("PLOT_STAT details must be set by loadDetails()")
PLOT_STAT_DIST <<- function(canvas)
warning("PLOT_STAT_DIST details must be set by loadDetails()")
ANIMATE_STAT <<- function(canvas)
warning("ANIMATE_STAT details must be set by loadDetails()")
DISPLAY_RESULT <<- function(canvas)
warning("DISPLAY_RESULT details must be set by loadDetails()")
DISPLAY_RESULT_2 <<- function(canvas)
warning("DISPLAY_RESULT_2 details must be set by loadDetails()")
HANDLE_1000 <<- function(e)
warning("HANDLE_1000 details must be set by loadDetails()")
MISCELLANEOUS <<- function(env)
warning("MISCELLANEOUS details must be set by loadDetails()")
# Mark the environment as needing loadDetails() to be run again.
e$loaded <- FALSE
}
|
/R/actions.R
|
no_license
|
garrettgman/Visual-Inference-Tools
|
R
| false
| false
| 4,699
|
r
|
# The high level actions performed by the VIT Canvas. The details of how these
# methods accomplish their goals is set by loadDetails, which chooses the
# appropriate method based on the type of data and the statistic being examined.
# NOTE(review): several placeholder warning messages below are inconsistent
# with the function names (PLOT_DATA_STAT / PLOT_SAMPLE_STAT say "PLOT_STAT";
# GRAPHPATH says "loadPlotDetalis") — confirm and align the strings.
#initialize_actions <- function(e){
#' Creates the initial plot of the data
PLOT_DATA <- function(canvas)
warning("PLOT_DATA details must be set by loadPlotDetails()")
#' Creates the initial plot of the sample
PLOT_SAMPLE <- function(canvas)
warning("PLOT_SAMPLE details must be set by loadDetails()")
#' Puts titles for each of the plots on the canvas
SHOW_LABELS <- function(canvas)
warning("SHOW_LABELS details must be set by loadDetails()")
#' Animates the construction of the sample from the data
ANIMATE_SAMPLE <- function(canvas)
warning("ANIMATE_SAMPLE details must be set by loadDetails()")
#' Animates the tracking of a sample (or resample)
TRACK_SAMPLE <- function(canvas)
warning("TRACK_SAMPLE details must be set by loadDetails()")
#' Method used to calculate a single statistic from a single group of observations
CALC_STAT <- function(canvas)
warning("CALC_STAT details must be set by loadDetails()")
#' Method used to fade plots in order to emphasize those remaining
FADE_PLOTS <- function(canvas)
warning("FADE_PLOTS details must be set by loadDetails()")
#' Creates a plot of the data parameter
PLOT_DATA_STAT <- function(canvas)
warning("PLOT_STAT details must be set by loadDetails()")
#' Creates a plot of the sample statistic
PLOT_SAMPLE_STAT <- function(canvas)
warning("PLOT_STAT details must be set by loadDetails()")
#' Creates a plot of the distribution of sample statistics
PLOT_STAT_DIST <- function(canvas)
warning("PLOT_STAT_DIST details must be set by loadDetails()")
#' Animates the addition of the current sample statistic to the statistic distribution
ANIMATE_STAT <- function(canvas)
warning("ANIMATE_STAT details must be set by loadDetails()")
#' Displays any final conclusions or information required by the method whenever a statistic is added to the statistic distribution
DISPLAY_RESULT <- function(canvas)
warning("DISPLAY_RESULT details must be set by loadDetails()")
#' Displays a second round of conclusions if required by the method
DISPLAY_RESULT_2 <- function(canvas)
warning("DISPLAY_RESULT_2 details must be set by loadDetails()")
#' Manages the methods' performance of 1000 complete runs. At the moment this has its own method because running 1000 methods the usual way would be time prohibitive.
HANDLE_1000 <- function(e)
warning("HANDLE_1000 details must be set by loadDetails()")
GRAPHPATH <- function(plot.name, number)
warning("GRAPHPATH details must be set by loadPlotDetalis()")
test_function <- function(canvas)
warning("test_function details must be set by loadDetails()")
#}
#' Returns all actions to their original empty placeholder values, with the
#' exception of PLOT_DATA (the original comment said "PLOT_DETAILS"). Since
#' clear_actions() is only called when a new variable is added, it will be
#' followed by e$build_canvas, which will change PLOT_DATA if necessary.
clear_actions <- function(e) {
# `<<-` assigns in an enclosing (ultimately global/package) environment,
# overwriting the action placeholders defined at the top level.
PLOT_SAMPLE <<- function(canvas)
warning("PLOT_SAMPLE details must be set by loadDetails()")
SHOW_LABELS <<- function(canvas)
warning("SHOW_LABELS details must be set by loadDetails()")
ANIMATE_SAMPLE <<- function(canvas)
warning("ANIMATE_SAMPLE details must be set by loadDetails()")
TRACK_SAMPLE <<- function(canvas)
warning("TRACK_SAMPLE details must be set by loadDetails()")
CALC_STAT <<- function(canvas)
warning("CALC_STAT details must be set by loadDetails()")
FADE_PLOTS <<- function(canvas)
warning("FADE_PLOTS details must be set by loadDetails()")
PLOT_DATA_STAT <<- function(canvas)
warning("PLOT_STAT details must be set by loadDetails()")
PLOT_SAMPLE_STAT <<- function(canvas)
warning("PLOT_STAT details must be set by loadDetails()")
PLOT_STAT_DIST <<- function(canvas)
warning("PLOT_STAT_DIST details must be set by loadDetails()")
ANIMATE_STAT <<- function(canvas)
warning("ANIMATE_STAT details must be set by loadDetails()")
DISPLAY_RESULT <<- function(canvas)
warning("DISPLAY_RESULT details must be set by loadDetails()")
DISPLAY_RESULT_2 <<- function(canvas)
warning("DISPLAY_RESULT_2 details must be set by loadDetails()")
HANDLE_1000 <<- function(e)
warning("HANDLE_1000 details must be set by loadDetails()")
MISCELLANEOUS <<- function(env)
warning("MISCELLANEOUS details must be set by loadDetails()")
# Mark the environment as needing loadDetails() to be run again.
e$loaded <- FALSE
}
|
#' Cluster robust standard errors
#'
#' Takes a fitted model and a cluster variable and returns the coefficient
#' test with CR1 cluster-robust standard errors, plus the N used.
#' @param model A model, e.g. from \code{lm} (fit with
#'   \code{na.action = "na.exclude"} so \code{predict()} keeps one entry per
#'   original row; see the length check below)
#' @param cluster A variable identifying each observation's cluster
#' @return A list: \code{[[1]]} the \code{coeftest} table, \code{[[2]]} N.
#' @keywords Cluster, Robust
#' @export
#' @examples
#' cluster_robust(lm(time~temp, data = beaver1), beaver1$day)
#'
cluster_robust <- function(model, cluster){
  # Namespaced calls instead of require(): attaching packages inside a
  # function is a side effect callers should not depend on.
  if (!requireNamespace("sandwich", quietly = TRUE) ||
      !requireNamespace("lmtest", quietly = TRUE)) {
    stop("Packages 'sandwich' and 'lmtest' are required.", call. = FALSE)
  }
  not.miss <- !is.na(predict(model))
  if (length(not.miss) != length(cluster)) {
    stop("check your data: cluster variable has different N than model")
  }
  M <- length(unique(cluster[not.miss]))  # number of clusters
  N <- length(cluster[not.miss])          # observations used
  K <- model$rank                         # estimated parameters
  if (M < 50) {
    warning("Fewer than 50 clusters, variances may be unreliable (could try block bootstrap instead).")
  }
  # CR1 small-sample degrees-of-freedom correction
  dfc <- (M/(M - 1)) * ((N - 1)/(N - K))
  # Sum estimating-equation contributions within each cluster
  uj <- apply(sandwich::estfun(model), 2, function(x) tapply(x, cluster, sum, na.rm = TRUE))
  vcovCL <- dfc * sandwich::sandwich(model, meat = crossprod(uj)/N)
  out <- list()
  out[[1]] <- lmtest::coeftest(model, vcovCL)
  out[[2]] <- N
  return(out)
}
#' Runs lm and returns cluster robust standard errors
#'
#' Fits a linear model (optionally weighted) with na.action = "na.exclude"
#' and passes it to cluster_robust() together with the cluster column.
#' @param formula An lm formula of the form Y~X (string or formula; coerced
#'   via as.formula)
#' @param data A dataframe
#' @param cluster_name Name (string) of the column in `data` identifying clusters
#' @param weights weights used in lm
#' @return A list: [[1]] coeftest table with clustered SEs, [[2]] N used.
#' @keywords Cluster, Robust
#' @export
#' @examples
#' lm_cluster_robust("time~temp", beaver1, "day")
#'
lm_cluster_robust <- function(formula, data, cluster_name, weights = NULL){
# na.exclude keeps predictions aligned with the original rows, which the
# length check in cluster_robust() relies on
if(is.null(weights)) model <- lm(formula = as.formula(formula), data = data, na.action="na.exclude")
if(!is.null(weights)) model <- lm(formula = as.formula(formula), weights = weights, data = data, na.action="na.exclude")
# data[cluster_name][[1]] extracts the cluster column as a plain vector
cluster_robust(model, data[cluster_name][[1]])
}
#' Generates nice output after cluster robust
#'
#' Formats estimates, clustered standard errors and N from
#' lm_cluster_robust output into a single-column character matrix, with
#' optional significance stars.
#' @param X a list output from lm_cluster_robust ([[1]] coefficient matrix
#'   with columns estimate/SE/t/p, [[2]] N)
#' @param stars add stars
#' @param alpha a vector of alpha levels for stars (descending order)
#' @param signs a symbol to be added for every alpha level satisfied
#' @param coefrows rows of coefficients to grab
#' @param round number of decimal digits for estimates and standard errors
#' @keywords Cluster, Robust
#' @export
#' @examples
#' output_function(lm_cluster_robust("time~temp", beaver1, "day"), coefrows = 2)
#'
output_function <- function(X, stars = TRUE,
                            alpha = c(0.1, 0.05, 0.01),
                            signs = "*",
                            coefrows = c(2:4),
                            round = 3
){
  # BUGFIX: honor the `round` argument (it was declared but the body
  # hard-coded 3 digits). Alias it so base::round is called unambiguously.
  digits <- round
  ncoefs <- length(coefrows)
  # Interleave estimate rows and SE rows, plus one final row for N
  out <- names <- matrix(NA, 2*ncoefs+1)
  out[2*(1:ncoefs)-1] <- round(X[[1]][coefrows,1], digits)
  out[2*(1:ncoefs)] <- paste("(", round(X[[1]][coefrows,2], digits), ")", sep ="")
  out[2*ncoefs+1] <- X[[2]]
  if(stars) {
    # Append one star per significance threshold met (p-values are column 4)
    for(j in 1:length(alpha)){
      out[2*(1:ncoefs)-1][X[[1]][coefrows,4] <= alpha[j]] <-
        paste(out[2*(1:ncoefs)-1][X[[1]][coefrows,4] <= alpha[j]], signs, sep ="")
    }}
  names[2*(1:ncoefs)-1] <- rownames(X[[1]])[coefrows]
  names[2*(1:ncoefs)] <- paste("sd_", rownames(X[[1]])[coefrows], sep = "")
  names[2*ncoefs+1] <- "N"
  rownames(out) <- names
  out
}
# END #
|
/R/0 helper cluster_robust.R
|
no_license
|
macartan/VDW-H-SDS_2018
|
R
| false
| false
| 3,127
|
r
|
#' Cluster robust standard errors
#'
#' Take a model and cluster variable and returns summary output:
#' a list with the coeftest table (CR1 clustered SEs) and the N used.
#' @param model A model eg from lm (fit with na.action="na.exclude" so that
#'   predict() keeps one entry per original row; see the length check below)
#' @param cluster A variable identifying each observation's cluster
#' @keywords Cluster, Robust
#' @export
#' @examples
#' cluster_robust(lm(time~temp, data = beaver1), beaver1$day)
#'
cluster_robust <- function(model, cluster){
# NOTE(review): require() inside a function attaches packages as a side
# effect; requireNamespace() plus namespaced calls would be cleaner.
require(sandwich, quietly = TRUE)
require(lmtest, quietly = TRUE)
not.miss<- !is.na(predict(model))
if(length(not.miss)!=length(cluster)){
stop("check your data: cluster variable has different N than model")
}
# M = number of clusters, N = observations used, K = estimated parameters
M <- length(unique(cluster[not.miss]))
N <- length(cluster[not.miss])
K <- model$rank
if(M<50){
warning("Fewer than 50 clusters, variances may be unreliable (could try block bootstrap instead).")
}
# CR1 small-sample degrees-of-freedom correction
dfc <- (M/(M - 1)) * ((N - 1)/(N - K))
# Sum estimating-equation contributions within each cluster
uj <- apply(estfun(model), 2, function(x) tapply(x, cluster, sum, na.rm=TRUE));
vcovCL <- dfc * sandwich(model, meat = crossprod(uj)/N)
out <- list()
out[[1]] <- coeftest(model, vcovCL)
out[[2]] <- N
return(out)
}
#' Runs lm and returns cluster robust standard errors
#'
#' Fits a linear model (optionally weighted) with na.action = "na.exclude"
#' and passes it to cluster_robust() together with the cluster column.
#' @param formula An lm formula of the form Y~X (string or formula; coerced
#'   via as.formula)
#' @param data A dataframe
#' @param cluster_name Name (string) of the column in `data` identifying clusters
#' @param weights weights used in lm
#' @return A list: [[1]] coeftest table with clustered SEs, [[2]] N used.
#' @keywords Cluster, Robust
#' @export
#' @examples
#' lm_cluster_robust("time~temp", beaver1, "day")
#'
lm_cluster_robust <- function(formula, data, cluster_name, weights = NULL){
# na.exclude keeps predictions aligned with the original rows, which the
# length check in cluster_robust() relies on
if(is.null(weights)) model <- lm(formula = as.formula(formula), data = data, na.action="na.exclude")
if(!is.null(weights)) model <- lm(formula = as.formula(formula), weights = weights, data = data, na.action="na.exclude")
# data[cluster_name][[1]] extracts the cluster column as a plain vector
cluster_robust(model, data[cluster_name][[1]])
}
#' Generates nice output after cluster robust
#'
#' Formats estimates, clustered standard errors and N from
#' lm_cluster_robust output into a single-column character matrix.
#' @param X a list output from lm_cluster_robust ([[1]] coefficient matrix
#'   with columns estimate/SE/t/p, [[2]] N)
#' @param coefrows rows of coefficients to grab
#' @param stars add stars
#' @param alpha a vector of alpha levels for stars (descending order)
#' @param signs a symbol to be added for every alpha level satisfied
#' @param round intended number of digits. NOTE(review): currently unused —
#'   the body hard-codes 3 digits below; confirm and wire it through.
#' @keywords Cluster, Robust
#' @export
#' @examples
#' output_function(lm_cluster_robust("time~temp", beaver1, "day"), coefrows = 2)
#'
output_function <- function(X, stars = TRUE,
                            alpha = c(0.1, 0.05, 0.01),
                            signs = "*",
                            coefrows = c(2:4),
                            round = 3
){
ncoefs <- length(coefrows)
# Interleave estimate rows and SE rows, plus one final row for N
out <- names <- matrix(NA, 2*ncoefs+1)
out[2*(1:ncoefs)-1] <- round(X[[1]][coefrows,1],3)
out[2*(1:ncoefs)] <- paste("(", round(X[[1]][coefrows,2],3), ")", sep ="")
out[2*ncoefs+1] <- X[[2]]
if(stars) {
# Append one star per significance threshold met (p-values are column 4)
for(j in 1: length(alpha)){
out[2*(1:ncoefs)-1][X[[1]][coefrows,4] <= alpha[j]] <-
paste(out[2*(1:ncoefs)-1][X[[1]][coefrows,4] <= alpha[j]], signs, sep ="")
}}
names[2*(1:ncoefs)-1] <- rownames(X[[1]])[coefrows]
names[2*(1:ncoefs)] <- paste("sd_", rownames(X[[1]])[coefrows], sep = "")
names[2*ncoefs+1] <- "N"
rownames(out) <- names
out
}
# END #
|
# Fit naive Bayes classifiers on three variants of the training data and save
# the predicted probabilities for the second class on the matching test sets.
load('Dane/train.rda')
load('Dane/train_interpunction.rda')
load('Dane/train_Boruta.rda')
load('Dane/test.rda')
load('Dane/test_interpunction.rda')
load('Dane/test_Boruta.rda')
library(e1071)
# laplace = 0.2 applies additive smoothing; column 348 is dropped from the
# predictors — presumably an ID/non-feature column, confirm upstream.
bayesAll <- naiveBayes(Classes~.,data=train[,-348], laplace=0.2)
bayesAll_interpunction <- naiveBayes(Classes~.,
data=train_interpunction[,-348], laplace=0.2)
bayesAll_interpunction_Boruta <- naiveBayes(Classes~.,
data=train_Boruta, laplace=0.2)
# type="raw" returns class probabilities; [,2] keeps the second class column.
bayes_prawd_test <- predict(bayesAll, newdata=test[,-c(348:349)], type="raw")[,2]
# NOTE(review): this line drops columns with list-indexing ([-c(...)], no
# comma) while the line above uses [, -c(...)] — same effect on a data.frame,
# but confirm the inconsistency is intentional.
bayes_prawd_test_interpunction <- predict(bayesAll_interpunction, newdata=test_interpunction[-c(348:349)], type="raw")[,2]
bayes_prawd_test_interpunction_Boruta <- predict(bayesAll_interpunction_Boruta, newdata=test_Boruta, type="raw")[,2]
# NOTE(review): ROCR is loaded but not used anywhere in this script.
library(ROCR)
save(bayes_prawd_test, file = "Predykcje/bayes_prawd_test.rda")
save(bayes_prawd_test_interpunction, file = "Predykcje/bayes_prawd_test_interpunction.rda")
save(bayes_prawd_test_interpunction_Boruta, file = "Predykcje/bayes_prawd_test_interpunction_Boruta.rda")
|
/R_codes/11_naiveBayes.R
|
no_license
|
mi2-warsaw/eRka-Onet-findTeam
|
R
| false
| false
| 1,079
|
r
|
# Fit naive Bayes classifiers on three variants of the training data and save
# the predicted probabilities for the second class on the matching test sets.
load('Dane/train.rda')
load('Dane/train_interpunction.rda')
load('Dane/train_Boruta.rda')
load('Dane/test.rda')
load('Dane/test_interpunction.rda')
load('Dane/test_Boruta.rda')
library(e1071)
# laplace = 0.2 applies additive smoothing; column 348 is dropped from the
# predictors — presumably an ID/non-feature column, confirm upstream.
bayesAll <- naiveBayes(Classes~.,data=train[,-348], laplace=0.2)
bayesAll_interpunction <- naiveBayes(Classes~.,
data=train_interpunction[,-348], laplace=0.2)
bayesAll_interpunction_Boruta <- naiveBayes(Classes~.,
data=train_Boruta, laplace=0.2)
# type="raw" returns class probabilities; [,2] keeps the second class column.
bayes_prawd_test <- predict(bayesAll, newdata=test[,-c(348:349)], type="raw")[,2]
bayes_prawd_test_interpunction <- predict(bayesAll_interpunction, newdata=test_interpunction[-c(348:349)], type="raw")[,2]
bayes_prawd_test_interpunction_Boruta <- predict(bayesAll_interpunction_Boruta, newdata=test_Boruta, type="raw")[,2]
# NOTE(review): ROCR is loaded but not used anywhere in this script.
library(ROCR)
save(bayes_prawd_test, file = "Predykcje/bayes_prawd_test.rda")
save(bayes_prawd_test_interpunction, file = "Predykcje/bayes_prawd_test_interpunction.rda")
save(bayes_prawd_test_interpunction_Boruta, file = "Predykcje/bayes_prawd_test_interpunction_Boruta.rda")
|
\name{WPGM.select}
\alias{WPGM.select}
\title{
Winsorized Poisson Graphical Model (WPGM)
}
\description{
Fitting the WPGM using efficient, parallel algorithm named Poisson Graphical Lasso.
This algorithm employs neighborhood selection to infer network structure.
Stability selection method "star" was used in selecting the optimal network.
}
\usage{
WPGM.select(X, R=max(X), method="star", N=100, beta=0.05,
            lambda.path=NULL, nlams=20, ncores=4, parallel=FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{pxn data matrix}
\item{R}{threshold value for truncating, default to be the maximum value of the input data matrix}
\item{method}{stability selection method; currently only "star" is implemented, default to "star"}
\item{N}{number of iterations for stability selection, default to 100}
\item{beta}{threshold value on the sparsity of the network, used to filter out overly dense networks}
\item{lambda.path}{vector of lambda values used for regularization; computed from the data when NULL}
\item{nlams}{number of lambda values for regularization}
\item{ncores}{number of (maximum) cores to use for parallel execution, default to 4}
\item{parallel}{logical value indicating whether the process should be run in parallel, default to FALSE}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
A list of five elements:
\item{v}{vector of length \code{nlams} giving the variability measured by the stability selection}
\item{lambda.path}{vector of lambda values used for regularization}
\item{opt.lambda}{lambda value that gives the optimal network (the network with the largest variability that is still below \code{beta})}
\item{network}{a list of pxp coefficient matrices, one per regularization value.}
\item{opt.index}{index of the regularization value that gives the optimal network}
}
\references{
G.I. Allen and Z. Liu, 2012, A Log-Linear Graphical Model for Inferring Genetic Networks from High-Throughput Sequencing Data, \emph{The IEEE International Conference on Bioinformatics and Biomedicine (BIBM 2012)}.
E. Yang, P.K. Ravikumar, G.I. Allen, and Z. Liu, 2012, Graphical Models via Generalized Linear Models, \emph{NIPS}, \bold{vol. 25}, pp. 1367--1375.
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (X, R = max(X), method = "star", N = 100, beta = 0.05,
lambda.path = NULL, nlams = 20, ncores = 4, parallel = F)
{
if (is.null(lambda.path)) {
lmax = lambdaMax(t(X))
lambda.path = exp(seq(log(lmax), log(1e-04), l = nlams))
}
b = min(c(10 * sqrt(ncol(X)), 0.8 * ncol(X)))
ghat = list()
ghat.path = list()
ghat.path$path = vector("list", length(lambda.path))
v = c()
for (i in 1:N) {
cat(paste("WPGM: Conducting sampling ... in progress: ",
floor(100 * (i/N)), "\%", collapse = ""), "\r")
flush.console()
index = sample(1:ncol(X), b, replace = F)
ghat.path$raw = WPGM.network(X[, index], R, nlams = length(lambda.path),
lambda = lambda.path, parallel = parallel, ncores = ncores)
for (j in 1:length(lambda.path)) {
tmp = ghat.path$raw[[j]]
tmp[abs(tmp) < 1e-06] = 0
tmp[abs(tmp) > 1e-06] = 1
diag(tmp) = 0
if (is.null(ghat.path$path[[j]])) {
ghat.path$path[[j]] = tmp
}
else {
ghat.path$path[[j]] = ghat.path$path[[j]] + tmp
}
}
}
for (i in 1:length(lambda.path)) {
D = ghat.path$path[[i]]
D = D/N
D = 2 * D * (1 - D)
v = c(v, mean(D[upper.tri(D)]))
}
v = cummax(v)
ghat$v = v
ghat$lambda.path = lambda.path
ghat$opt.lambda = lambda.path[which(v == max(v[v < beta]))]
ghat$network = WPGM.network(X, R, nlams = length(lambda.path),
lambda = lambda.path, parallel = T)
ghat$opt.index = which(v == max(v[v < beta]))
cat("\nWPGM Completed. \n")
return(ghat)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/PGMs/Package_Trial/T3/expMRF/man/WPGM.select.Rd
|
no_license
|
zhandong/XFam
|
R
| false
| false
| 4,355
|
rd
|
\name{WPGM.select}
\alias{WPGM.select}
\title{
Winsorized Poisson Graphical Model (WPGM)
}
\description{
Fits the WPGM using an efficient, parallel algorithm called the Poisson Graphical Lasso.
The algorithm employs neighborhood selection to infer the network structure.
The stability selection method "star" is used to select the optimal network.
}
\usage{
WPGM.select(X, R=max(X), N=100, beta=0.05, lmin=0.0001, nlams=20,
lambda.path=NULL, parallel=FALSE, ncores = 4)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{pxn data matrix}
\item{R}{threshold value for truncating; defaults to the maximum value of the input data matrix}
\item{N}{number of iteration for stability selection, default to 100}
\item{beta}{threshold value on sparsity of the network to filter out dense network}
\item{lmin}{minimum lambda value, default to 0.0001}
\item{nlams}{number of lambda for regularization}
\item{lambda.path}{vector lambda used for regularization}
\item{parallel}{logical value indicating whether the process should be run in parallel across multiple threads, default to FALSE}
\item{ncores}{number of (maximum) cores to use for parallel execution, default to 4}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
A list of five elements:
\item{v}{vector of (nlams) variability measured from the stability selection}
\item{lambda.path}{vector lambda used for regularization}
\item{opt.lambda}{lambda value that gives the optimal network (network with maximum variability)}
\item{network}{a list of pxp coefficient matrix along the regularization.}
\item{opt.index}{index of the regularization value that gives the optimal network}
}
\references{
G.I. Allen and Z. Liu, 2012, A Log-Linear Graphical Model for Inferring Genetic Networks from High-Throughput Sequencing Data, \emph{The IEEE International Conference on Bioinformatics and Biomedicine (BIBM 2012)}.
E. Yang, P.K. Ravikumar, G.I. Allen, and Z. Liu, 2012, Graphical Models via Generalized Linear Models, \emph{NIPS}, \bold{vol. 25}, pp. 1367--1375.
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (X, R = max(X), method = "star", N = 100, beta = 0.05,
lambda.path = NULL, nlams = 20, ncores = 4, parallel = F)
{
if (is.null(lambda.path)) {
lmax = lambdaMax(t(X))
lambda.path = exp(seq(log(lmax), log(1e-04), l = nlams))
}
b = min(c(10 * sqrt(ncol(X)), 0.8 * ncol(X)))
ghat = list()
ghat.path = list()
ghat.path$path = vector("list", length(lambda.path))
v = c()
for (i in 1:N) {
cat(paste("WPGM: Conducting sampling ... in progress: ",
floor(100 * (i/N)), "\%", collapse = ""), "\r")
flush.console()
index = sample(1:ncol(X), b, replace = F)
ghat.path$raw = WPGM.network(X[, index], R, nlams = length(lambda.path),
lambda = lambda.path, parallel = parallel, ncores = ncores)
for (j in 1:length(lambda.path)) {
tmp = ghat.path$raw[[j]]
tmp[abs(tmp) < 1e-06] = 0
tmp[abs(tmp) > 1e-06] = 1
diag(tmp) = 0
if (is.null(ghat.path$path[[j]])) {
ghat.path$path[[j]] = tmp
}
else {
ghat.path$path[[j]] = ghat.path$path[[j]] + tmp
}
}
}
for (i in 1:length(lambda.path)) {
D = ghat.path$path[[i]]
D = D/N
D = 2 * D * (1 - D)
v = c(v, mean(D[upper.tri(D)]))
}
v = cummax(v)
ghat$v = v
ghat$lambda.path = lambda.path
ghat$opt.lambda = lambda.path[which(v == max(v[v < beta]))]
ghat$network = WPGM.network(X, R, nlams = length(lambda.path),
lambda = lambda.path, parallel = T)
ghat$opt.index = which(v == max(v[v < beta]))
cat("\nWPGM Completed. \n")
return(ghat)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_bbox_centroid.R
\name{get_bbox_centroid}
\alias{get_bbox_centroid}
\title{Get the great-circle centroid for a given bounding box.}
\usage{
get_bbox_centroid(bbox)
}
\arguments{
\item{bbox}{The bounding box to find a centroid for. If not already
a \code{\link{terrainr_bounding_box}} object, will be converted.}
}
\value{
A \code{\link{terrainr_coordinate_pair}}.
}
\description{
Get the great-circle centroid for a given bounding box.
}
\examples{
get_bbox_centroid(
list(
c(lat = 44.04905, lng = -74.01188),
c(lat = 44.17609, lng = -73.83493)
)
)
}
\seealso{
Other utilities:
\code{\link{addbuff}},
\code{\link{calc_haversine_distance}()},
\code{\link{convert_distance}()},
\code{\link{deg_to_rad}()},
\code{\link{get_bbox}()},
\code{\link{point_from_distance}()},
\code{\link{rad_to_deg}()}
}
\concept{utilities}
\keyword{internal}
|
/man/get_bbox_centroid.Rd
|
permissive
|
mikejohnson51/terrainr
|
R
| false
| true
| 929
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_bbox_centroid.R
\name{get_bbox_centroid}
\alias{get_bbox_centroid}
\title{Get the great-circle centroid for a given bounding box.}
\usage{
get_bbox_centroid(bbox)
}
\arguments{
\item{bbox}{The bounding box to find a centroid for. If not already
a \code{\link{terrainr_bounding_box}} object, will be converted.}
}
\value{
A \code{\link{terrainr_coordinate_pair}}.
}
\description{
Get the great-circle centroid for a given bounding box.
}
\examples{
get_bbox_centroid(
list(
c(lat = 44.04905, lng = -74.01188),
c(lat = 44.17609, lng = -73.83493)
)
)
}
\seealso{
Other utilities:
\code{\link{addbuff}},
\code{\link{calc_haversine_distance}()},
\code{\link{convert_distance}()},
\code{\link{deg_to_rad}()},
\code{\link{get_bbox}()},
\code{\link{point_from_distance}()},
\code{\link{rad_to_deg}()}
}
\concept{utilities}
\keyword{internal}
|
library(shiny)
library(foreign)
library(rCharts)
library(dplyr)
library(Hmisc)
library(reshape)
library(zoo)
rm(list=ls())
###change the max size of the file (5MB by defoult)
options(shiny.maxRequestSize=100*1024^2) ###check what would be a reasonable here
##########################
#Function to get the data
#########################
# Build cumulative van Westendorp price-sensitivity curves from survey data.
#
# Args:
#   file: data.frame of survey responses (already read from SPSS upstream).
#   too_expensive, too_cheap, cheap, expensive: names of the columns holding
#     the four van Westendorp price questions.
#   weight: name of a weighting column, or "not applicable" to run unweighted
#     (a constant weight of 1 is created instead).
#   filter_variable, filter_value: optional subsetting -- keep only rows where
#     filter_variable equals filter_value; "not applicable" disables it.
#   user_treshold: numeric cap on admissible prices; rows with any answer above
#     it are dropped.  NOTE(review): the default is the STRING "not applicable",
#     yet the code gates on is.na(user_treshold), which is FALSE for a string,
#     so with the default the threshold branch still runs and compares numbers
#     against a character value -- confirm callers pass NA or a number.
#
# Returns: long-format data.frame with columns "sum" (price point), "variable"
#   (measure label) and "value" (cumulative percentage, rounded to 2 decimals).
myfunction <- function(file=NULL,too_expensive=NULL, too_cheap=NULL, cheap=NULL, expensive=NULL,
weight="not applicable", filter_variable="not applicable",filter_value="not applicable", user_treshold="not applicable" )
{
# file is already a data.frame; the original read call is kept for reference:
#file<-read.spss(file_name, use.value.labels=FALSE, to.data.frame=TRUE)
# Create a dummy weight of 1 when no weighting variable was chosen.
if(weight=="not applicable"){
file$weight<- 1
}
### Apply the optional filter: select the needed columns (plus the filter
### column), drop incomplete rows, then keep only rows matching filter_value.
if(filter_variable!="not applicable"){
if (weight=="not applicable"){
cols_filt <- c(too_expensive,too_cheap, expensive, cheap, filter_variable, "weight")
file<-file[,cols_filt]
file<- file[complete.cases(file), ]
} else
{
cols_filt <- c(too_expensive,too_cheap, expensive, cheap, filter_variable, weight)
file<-file[,cols_filt]
file<- file[complete.cases(file), ]
}
cols_filt <- c("too_expensive","too_cheap", "expensive", "cheap", "filter", "weight")
colnames(file) <- cols_filt
file<-file[file$filter == filter_value, ]
cols_filt <- c("too_expensive","too_cheap", "expensive", "cheap", "weight")
file<-file[,cols_filt]
}
### No filter: just select the needed columns, in this exact order.
if(filter_variable=="not applicable"){
if(weight == "not applicable")
{
cols <- c(too_expensive,too_cheap, expensive, cheap, "weight")
file<-file[,cols]
} else{
cols <- c(too_expensive,too_cheap, expensive, cheap, weight)
file<-file[,cols]
}
}
### Standardise the column names (order was fixed by the selections above).
cols_new <- c("too_expensive","too_cheap", "expensive", "cheap", "weight")
colnames(file) <- cols_new
### Recode negative answers (presumably SPSS missing-value codes -- confirm)
### to NA, then drop incomplete rows.
file$too_expensive[file$too_expensive<0] <- NA
file$too_cheap[file$too_cheap<0] <- NA
file$expensive[file$expensive<0] <- NA
file$cheap[file$cheap<0] <- NA
file<- file[complete.cases(file), ]
### If a threshold was supplied, drop rows with any answer above it.
if(is.na(user_treshold)==FALSE){
file<-file[!file$too_expensive > user_treshold, ]
file<-file[!file$too_cheap > user_treshold, ]
file<-file[!file$expensive > user_treshold, ]
file<-file[!file$cheap > user_treshold, ]
}
### Weighted cumulative frequency of "too expensive" by price (ascending
### curve: weight of respondents answering at or below each price).
freq_too_exp<-as.data.frame(as.matrix((cumsum(wtd.table(file$too_expensive, weights=file$weight, type='table', normwt=FALSE, na.rm=TRUE)))))
freq_too_exp$value<-row.names(freq_too_exp)
colnames(freq_too_exp)<-c("sum", "val")
freq_too_exp$val<-as.integer(freq_too_exp$val)
### "Too cheap" is a descending curve: sum[i] = total weight minus the
### cumulative weight below the i-th observed price.
freq_too_cheap<-as.data.frame(as.matrix((cumsum(wtd.table(file$too_cheap, weights=file$weight, type='table', normwt=FALSE, na.rm=TRUE)))))
freq_too_cheap$value<-row.names(freq_too_cheap)
colnames(freq_too_cheap)<-c("sum1", "val")
freq_too_cheap$val<-as.integer(freq_too_cheap$val)
freq_too_cheap$sum[1]<-freq_too_cheap$sum1 [nrow(freq_too_cheap)]
for (i in 2:(nrow(freq_too_cheap)))
{
freq_too_cheap$sum[i]<- (freq_too_cheap$sum[1]) - (freq_too_cheap$sum1[i-1])
}
freq_too_cheap<-freq_too_cheap[, c("sum","val")]
### "Expensive": ascending cumulative curve, same construction as "too expensive".
freq_exp<-as.data.frame(as.matrix((cumsum(wtd.table(file$expensive, weights=file$weight, type='table', normwt=FALSE, na.rm=TRUE)))))
freq_exp$value<-row.names(freq_exp)
colnames(freq_exp)<-c("sum", "val")
freq_exp$val<-as.integer(freq_exp$val)
### "Cheap": descending curve, same construction as "too cheap".
freq_cheap<-as.data.frame(as.matrix((cumsum(wtd.table(file$cheap, weights=file$weight, type='table', normwt=FALSE, na.rm=TRUE)))))
freq_cheap$value<-row.names(freq_cheap)
colnames(freq_cheap)<-c("sum1", "val")
freq_cheap$val<-as.integer(freq_cheap$val)
freq_cheap$sum[1]<-freq_cheap$sum1 [nrow(freq_cheap)]
for (i in 2:(nrow(freq_cheap)))
{
freq_cheap$sum[i]<- (freq_cheap$sum[1]) - (freq_cheap$sum1[i-1])
}
freq_cheap<-freq_cheap[, c("sum","val")]
### Build the grid of all integer prices from 0 to the observed (or capped) maximum.
a<-max(file$too_cheap, na.rm=TRUE)
b<-max(file$too_expensive, na.rm=TRUE)
c<-max(file$cheap, na.rm=TRUE)
d<-max(file$expensive, na.rm=TRUE)
#### A user-supplied threshold also caps the price grid.
if(is.na(user_treshold)==TRUE){max_val = max(a,b,c,d)
}else {max_val = user_treshold}
val<-c(0:max_val)
val<-as.data.frame(val)
### Convert weighted counts into percentages of the total weight.
n<-sum(file$weight)
freq_too_exp$sum<- freq_too_exp$sum/n*100.00
freq_too_cheap$sum<- freq_too_cheap$sum/n*100.00
freq_exp$sum<- freq_exp$sum/n*100.00
freq_cheap$sum<- freq_cheap$sum/n*100.00
### Merge the four curves onto the price grid (NA where a price was never observed).
val<-left_join(val, freq_too_exp, by = "val")
val<-left_join(val, freq_too_cheap, by = "val")
val<-left_join(val, freq_exp, by = "val")
val<-left_join(val, freq_cheap, by = "val")
### NOTE(review): the first column holds the PRICE but is renamed "sum" here;
### the melt(id="sum") below depends on this naming.
cols_freq<-c("sum", "too_expensive", "too_cheap", "expensive", "cheap")
colnames(val) <- cols_freq
### Drop every local except val (ls() inside a function lists its own frame).
rm(list=setdiff(ls(), "val"))
### Anchor the endpoints: at price 0 the ascending curves are 0% and the
### descending ones 100%; reversed at the maximum price.
val[1,2]<-0.00
val[1,3]<-100.00
val[1,4]<-0.00
val[1,5]<-100.00
val[nrow(val),2]<-100.00
val[nrow(val),3]<-0.00
val[nrow(val),4]<-100.00
val[nrow(val),5]<-0.00
### Fill unobserved prices: carry forward for ascending curves, backward for
### descending ones (zoo::na.locf).
val$too_expensive <- na.locf(val$too_expensive)
val$too_cheap <- na.locf(val$too_cheap, fromLast = TRUE)
val$expensive <- na.locf(val$expensive)
val$cheap <- na.locf(val$cheap, fromLast = TRUE)
### Reshape to long format and prettify the two "too_*" labels.
### NOTE(review): "expensive" and "cheap" keep their raw names -- confirm
### whether that is intentional for the display.
val <- melt(val, id="sum")
val$variable<-as.character(val$variable)
val$variable[val$variable == "too_expensive"] <- "Too expensive"
val$variable[val$variable == "too_cheap"] <- "Too cheap"
val$variable<-as.factor(val$variable)
val$value<-round(val$value,2)
return(val)
}
################
##server function
##################
# Shiny server: upload an SPSS file, map the four van Westendorp questions
# (plus optional weighting and filter variables) to columns, and render the
# resulting price-sensitivity table on demand.
shinyServer(function(session, input, output) {

  # Uploaded SPSS file as a data.frame; NULL until the user picks a file.
  Dataset <- reactive({
    upload <- input$datafile
    if (is.null(upload)) {
      return(NULL)
    }
    read.spss(upload$datapath, use.value.labels = FALSE, to.data.frame = TRUE)
  })

  # Build a renderUI for a single-choice column selector.  `extra` appends
  # additional choices (e.g. "not applicable"); `pick` overrides the initial
  # selection (defaults to the full column vector, as in the plain selectors).
  make_selector <- function(id, label, extra = NULL, pick = NULL) {
    renderUI({
      if (identical(Dataset(), '') || identical(Dataset(), data.frame())) return(NULL)
      cols <- names(Dataset())
      selectInput(id, label,
                  choices = c(cols, extra),
                  selected = if (is.null(pick)) cols else pick,
                  multiple = FALSE)
    })
  }

  ### choice of 4 variables -- to do: TRY TO HIDE FILTER VALUE BUTTON IF NO
  ### FILTER VARIABLE SELECTED
  output$too_expensive   <- make_selector("too_expensive", "TOO EXPENSIVE variable:")
  output$too_cheap       <- make_selector("too_cheap", "TOO CHEAP variable:")
  output$expensive       <- make_selector("expensive", "EXPENSIVE variable:")
  output$cheap           <- make_selector("cheap", "CHEAP variable:")
  output$filter_variable <- make_selector("filter_variable", "Filter variable:",
                                          extra = "not applicable", pick = "not applicable")
  output$weight          <- make_selector("weight", "Weighting variable:",
                                          extra = "not applicable", pick = "not applicable")

  # Run the (potentially slow) analysis only when the go button is pressed.
  data_funct <- eventReactive(input$goButton, {
    myfunction(
      file = Dataset(),
      too_expensive = input$too_expensive,
      too_cheap = input$too_cheap,
      cheap = input$cheap,
      expensive = input$expensive,
      weight = input$weight,
      filter_variable = input$filter_variable,
      filter_value = input$filter_value,
      user_treshold = input$user_treshold
    )
  })

  # Interactive results table.
  output$table <- DT::renderDataTable(datatable(
    data_funct(),
    filter = "top",
    options = list(pageLength = 16, autoWidth = TRUE, lengthMenu = c(5, 10, 16)),
    rownames = FALSE
  ))
})
|
/server.R
|
no_license
|
Dorota-Lewandowska/YG_van_Wastendorp
|
R
| false
| false
| 8,814
|
r
|
library(shiny)
library(foreign)
library(rCharts)
library(dplyr)
library(Hmisc)
library(reshape)
library(zoo)
rm(list=ls())
###change the max size of the file (5MB by defoult)
options(shiny.maxRequestSize=100*1024^2) ###check what would be a reasonable here
##########################
#Function to get the data
#########################
# Build cumulative van Westendorp price-sensitivity curves from survey data.
#
# Args:
#   file: data.frame of survey responses (already read from SPSS upstream).
#   too_expensive, too_cheap, cheap, expensive: names of the columns holding
#     the four van Westendorp price questions.
#   weight: name of a weighting column, or "not applicable" to run unweighted
#     (a constant weight of 1 is created instead).
#   filter_variable, filter_value: optional subsetting -- keep only rows where
#     filter_variable equals filter_value; "not applicable" disables it.
#   user_treshold: numeric cap on admissible prices; rows with any answer above
#     it are dropped.  NOTE(review): the default is the STRING "not applicable",
#     yet the code gates on is.na(user_treshold), which is FALSE for a string,
#     so with the default the threshold branch still runs and compares numbers
#     against a character value -- confirm callers pass NA or a number.
#
# Returns: long-format data.frame with columns "sum" (price point), "variable"
#   (measure label) and "value" (cumulative percentage, rounded to 2 decimals).
myfunction <- function(file=NULL,too_expensive=NULL, too_cheap=NULL, cheap=NULL, expensive=NULL,
weight="not applicable", filter_variable="not applicable",filter_value="not applicable", user_treshold="not applicable" )
{
# file is already a data.frame; the original read call is kept for reference:
#file<-read.spss(file_name, use.value.labels=FALSE, to.data.frame=TRUE)
# Create a dummy weight of 1 when no weighting variable was chosen.
if(weight=="not applicable"){
file$weight<- 1
}
### Apply the optional filter: select the needed columns (plus the filter
### column), drop incomplete rows, then keep only rows matching filter_value.
if(filter_variable!="not applicable"){
if (weight=="not applicable"){
cols_filt <- c(too_expensive,too_cheap, expensive, cheap, filter_variable, "weight")
file<-file[,cols_filt]
file<- file[complete.cases(file), ]
} else
{
cols_filt <- c(too_expensive,too_cheap, expensive, cheap, filter_variable, weight)
file<-file[,cols_filt]
file<- file[complete.cases(file), ]
}
cols_filt <- c("too_expensive","too_cheap", "expensive", "cheap", "filter", "weight")
colnames(file) <- cols_filt
file<-file[file$filter == filter_value, ]
cols_filt <- c("too_expensive","too_cheap", "expensive", "cheap", "weight")
file<-file[,cols_filt]
}
### No filter: just select the needed columns, in this exact order.
if(filter_variable=="not applicable"){
if(weight == "not applicable")
{
cols <- c(too_expensive,too_cheap, expensive, cheap, "weight")
file<-file[,cols]
} else{
cols <- c(too_expensive,too_cheap, expensive, cheap, weight)
file<-file[,cols]
}
}
### Standardise the column names (order was fixed by the selections above).
cols_new <- c("too_expensive","too_cheap", "expensive", "cheap", "weight")
colnames(file) <- cols_new
### Recode negative answers (presumably SPSS missing-value codes -- confirm)
### to NA, then drop incomplete rows.
file$too_expensive[file$too_expensive<0] <- NA
file$too_cheap[file$too_cheap<0] <- NA
file$expensive[file$expensive<0] <- NA
file$cheap[file$cheap<0] <- NA
file<- file[complete.cases(file), ]
### If a threshold was supplied, drop rows with any answer above it.
if(is.na(user_treshold)==FALSE){
file<-file[!file$too_expensive > user_treshold, ]
file<-file[!file$too_cheap > user_treshold, ]
file<-file[!file$expensive > user_treshold, ]
file<-file[!file$cheap > user_treshold, ]
}
### Weighted cumulative frequency of "too expensive" by price (ascending
### curve: weight of respondents answering at or below each price).
freq_too_exp<-as.data.frame(as.matrix((cumsum(wtd.table(file$too_expensive, weights=file$weight, type='table', normwt=FALSE, na.rm=TRUE)))))
freq_too_exp$value<-row.names(freq_too_exp)
colnames(freq_too_exp)<-c("sum", "val")
freq_too_exp$val<-as.integer(freq_too_exp$val)
### "Too cheap" is a descending curve: sum[i] = total weight minus the
### cumulative weight below the i-th observed price.
freq_too_cheap<-as.data.frame(as.matrix((cumsum(wtd.table(file$too_cheap, weights=file$weight, type='table', normwt=FALSE, na.rm=TRUE)))))
freq_too_cheap$value<-row.names(freq_too_cheap)
colnames(freq_too_cheap)<-c("sum1", "val")
freq_too_cheap$val<-as.integer(freq_too_cheap$val)
freq_too_cheap$sum[1]<-freq_too_cheap$sum1 [nrow(freq_too_cheap)]
for (i in 2:(nrow(freq_too_cheap)))
{
freq_too_cheap$sum[i]<- (freq_too_cheap$sum[1]) - (freq_too_cheap$sum1[i-1])
}
freq_too_cheap<-freq_too_cheap[, c("sum","val")]
### "Expensive": ascending cumulative curve, same construction as "too expensive".
freq_exp<-as.data.frame(as.matrix((cumsum(wtd.table(file$expensive, weights=file$weight, type='table', normwt=FALSE, na.rm=TRUE)))))
freq_exp$value<-row.names(freq_exp)
colnames(freq_exp)<-c("sum", "val")
freq_exp$val<-as.integer(freq_exp$val)
### "Cheap": descending curve, same construction as "too cheap".
freq_cheap<-as.data.frame(as.matrix((cumsum(wtd.table(file$cheap, weights=file$weight, type='table', normwt=FALSE, na.rm=TRUE)))))
freq_cheap$value<-row.names(freq_cheap)
colnames(freq_cheap)<-c("sum1", "val")
freq_cheap$val<-as.integer(freq_cheap$val)
freq_cheap$sum[1]<-freq_cheap$sum1 [nrow(freq_cheap)]
for (i in 2:(nrow(freq_cheap)))
{
freq_cheap$sum[i]<- (freq_cheap$sum[1]) - (freq_cheap$sum1[i-1])
}
freq_cheap<-freq_cheap[, c("sum","val")]
### Build the grid of all integer prices from 0 to the observed (or capped) maximum.
a<-max(file$too_cheap, na.rm=TRUE)
b<-max(file$too_expensive, na.rm=TRUE)
c<-max(file$cheap, na.rm=TRUE)
d<-max(file$expensive, na.rm=TRUE)
#### A user-supplied threshold also caps the price grid.
if(is.na(user_treshold)==TRUE){max_val = max(a,b,c,d)
}else {max_val = user_treshold}
val<-c(0:max_val)
val<-as.data.frame(val)
### Convert weighted counts into percentages of the total weight.
n<-sum(file$weight)
freq_too_exp$sum<- freq_too_exp$sum/n*100.00
freq_too_cheap$sum<- freq_too_cheap$sum/n*100.00
freq_exp$sum<- freq_exp$sum/n*100.00
freq_cheap$sum<- freq_cheap$sum/n*100.00
### Merge the four curves onto the price grid (NA where a price was never observed).
val<-left_join(val, freq_too_exp, by = "val")
val<-left_join(val, freq_too_cheap, by = "val")
val<-left_join(val, freq_exp, by = "val")
val<-left_join(val, freq_cheap, by = "val")
### NOTE(review): the first column holds the PRICE but is renamed "sum" here;
### the melt(id="sum") below depends on this naming.
cols_freq<-c("sum", "too_expensive", "too_cheap", "expensive", "cheap")
colnames(val) <- cols_freq
### Drop every local except val (ls() inside a function lists its own frame).
rm(list=setdiff(ls(), "val"))
### Anchor the endpoints: at price 0 the ascending curves are 0% and the
### descending ones 100%; reversed at the maximum price.
val[1,2]<-0.00
val[1,3]<-100.00
val[1,4]<-0.00
val[1,5]<-100.00
val[nrow(val),2]<-100.00
val[nrow(val),3]<-0.00
val[nrow(val),4]<-100.00
val[nrow(val),5]<-0.00
### Fill unobserved prices: carry forward for ascending curves, backward for
### descending ones (zoo::na.locf).
val$too_expensive <- na.locf(val$too_expensive)
val$too_cheap <- na.locf(val$too_cheap, fromLast = TRUE)
val$expensive <- na.locf(val$expensive)
val$cheap <- na.locf(val$cheap, fromLast = TRUE)
### Reshape to long format and prettify the two "too_*" labels.
### NOTE(review): "expensive" and "cheap" keep their raw names -- confirm
### whether that is intentional for the display.
val <- melt(val, id="sum")
val$variable<-as.character(val$variable)
val$variable[val$variable == "too_expensive"] <- "Too expensive"
val$variable[val$variable == "too_cheap"] <- "Too cheap"
val$variable<-as.factor(val$variable)
val$value<-round(val$value,2)
return(val)
}
################
##server function
##################
# Shiny server: upload an SPSS file, map the four van Westendorp questions
# (plus optional weighting and filter variables) to columns, and render the
# resulting price-sensitivity table on demand.
shinyServer(function(session, input, output) {

  # Uploaded SPSS file as a data.frame; NULL until the user picks a file.
  Dataset <- reactive({
    upload <- input$datafile
    if (is.null(upload)) {
      return(NULL)
    }
    read.spss(upload$datapath, use.value.labels = FALSE, to.data.frame = TRUE)
  })

  # Build a renderUI for a single-choice column selector.  `extra` appends
  # additional choices (e.g. "not applicable"); `pick` overrides the initial
  # selection (defaults to the full column vector, as in the plain selectors).
  make_selector <- function(id, label, extra = NULL, pick = NULL) {
    renderUI({
      if (identical(Dataset(), '') || identical(Dataset(), data.frame())) return(NULL)
      cols <- names(Dataset())
      selectInput(id, label,
                  choices = c(cols, extra),
                  selected = if (is.null(pick)) cols else pick,
                  multiple = FALSE)
    })
  }

  ### choice of 4 variables -- to do: TRY TO HIDE FILTER VALUE BUTTON IF NO
  ### FILTER VARIABLE SELECTED
  output$too_expensive   <- make_selector("too_expensive", "TOO EXPENSIVE variable:")
  output$too_cheap       <- make_selector("too_cheap", "TOO CHEAP variable:")
  output$expensive       <- make_selector("expensive", "EXPENSIVE variable:")
  output$cheap           <- make_selector("cheap", "CHEAP variable:")
  output$filter_variable <- make_selector("filter_variable", "Filter variable:",
                                          extra = "not applicable", pick = "not applicable")
  output$weight          <- make_selector("weight", "Weighting variable:",
                                          extra = "not applicable", pick = "not applicable")

  # Run the (potentially slow) analysis only when the go button is pressed.
  data_funct <- eventReactive(input$goButton, {
    myfunction(
      file = Dataset(),
      too_expensive = input$too_expensive,
      too_cheap = input$too_cheap,
      cheap = input$cheap,
      expensive = input$expensive,
      weight = input$weight,
      filter_variable = input$filter_variable,
      filter_value = input$filter_value,
      user_treshold = input$user_treshold
    )
  })

  # Interactive results table.
  output$table <- DT::renderDataTable(datatable(
    data_funct(),
    filter = "top",
    options = list(pageLength = 16, autoWidth = TRUE, lengthMenu = c(5, 10, 16)),
    rownames = FALSE
  ))
})
|
# *****************Hierarchal Clustering*****************
library(fields)
library(ggplot2)
# dist() calculates the pairwise distances
# hclust() takes the pairwise dist. variable and gives a dendogram
# To view your dendogram you need plot() to plot it
# plot with as.dendogram() for a neater one
# heatmap() creates a heatmap
# *****************K-means Clustering*****************
# generalized minimum distance func for three clusters
# Squared Euclidean distance from each point (x, y) to each cluster centroid.
#
# Generalized from the original hard-coded three clusters: the number of
# clusters is now taken from length(cx), so any number of centroids works.
# With three centroids the result is identical to the original.
#
# Args:
#   x, y:   numeric vectors of point coordinates (same length).
#   cx, cy: numeric vectors of centroid coordinates (same length).
#
# Returns: a length(cx) x length(x) matrix; entry [k, i] is the squared
#   distance from point i to centroid k.
distMin <- function(x, y, cx, cy){
  distTmp <- matrix(NA, nrow = length(cx), ncol = length(x))
  for (k in seq_along(cx)) {
    distTmp[k, ] <- (x - cx[k])^2 + (y - cy[k])^2
  }
  return(distTmp)
}
# Manual k-means demo: 100 uniform random points, 3 clusters, Lloyd's
# algorithm implemented by hand, then the same clustering via kmeans().
# sample x and y points
x <- c(runif(100,0,100))
y <- c(runif(100,0,100))
plot(x,y,type = "n")
# Starting centroid guesses for the three clusters.
cx <- c(50,25,75)
cy <- c(25,75,25)
minD <- distMin(x,y,cx,cy)
# Lloyd iterations: assign each point to its nearest centroid, recompute
# centroids as cluster means, repeat.  200 passes is far more than needed for
# convergence; each pass overplots onto the same device.
for(i in 1:200){
points(cx,cy,col=c("red","blue","green"),pch=3,cex=2,lwd=2)
minD <- distMin(x,y,cx,cy)
# Column-wise argmin: index (1..3) of the nearest centroid for each point.
newClust <- apply(minD,2,which.min)
cols <- c("red","blue","green")
# Plot points coloured by their current cluster assignment.
points(x,y,pch=19,col=cols[newClust])
# Recompute each centroid as the mean of its assigned points.
cx <- tapply(x,newClust,mean)
cy <- tapply(y,newClust,mean)
# You have the new centroid, repeat the process
}
# dev.off()
# The same result in one call with the built-in kmeans().
km <- kmeans(cbind(x,y),3)
plot(x,y,type="n")
points(x,y,col=km$cluster,pch = 19)
# NOTE(review): col=km$centers passes centroid COORDINATES as colours;
# col=seq_len(nrow(km$centers)) was probably intended -- confirm.
points(km$centers,col=km$centers,pch = 21, cex = 2.5)
# *****************Extras*****************
# which.min is a fn that can be used with apply to get min from row/col
# image fn can be used to display data
# ***************************************************
|
/Graphs/Clustering.R
|
no_license
|
greywind7/R-language-notes
|
R
| false
| false
| 1,770
|
r
|
# *****************Hierarchal Clustering*****************
library(fields)
library(ggplot2)
# dist() calculates the pairwise distances
# hclust() takes the pairwise dist. variable and gives a dendogram
# To view your dendogram you need plot() to plot it
# plot with as.dendogram() for a neater one
# heatmap() creates a heatmap
# *****************K-means Clustering*****************
# generalized minimum distance func for three clusters
# Squared Euclidean distance from each point (x, y) to each cluster centroid.
#
# Generalized from the original hard-coded three clusters: the number of
# clusters is now taken from length(cx), so any number of centroids works.
# With three centroids the result is identical to the original.
#
# Args:
#   x, y:   numeric vectors of point coordinates (same length).
#   cx, cy: numeric vectors of centroid coordinates (same length).
#
# Returns: a length(cx) x length(x) matrix; entry [k, i] is the squared
#   distance from point i to centroid k.
distMin <- function(x, y, cx, cy){
  distTmp <- matrix(NA, nrow = length(cx), ncol = length(x))
  for (k in seq_along(cx)) {
    distTmp[k, ] <- (x - cx[k])^2 + (y - cy[k])^2
  }
  return(distTmp)
}
# Manual k-means demo: 100 uniform random points, 3 clusters, Lloyd's
# algorithm implemented by hand, then the same clustering via kmeans().
# sample x and y points
x <- c(runif(100,0,100))
y <- c(runif(100,0,100))
plot(x,y,type = "n")
# Starting centroid guesses for the three clusters.
cx <- c(50,25,75)
cy <- c(25,75,25)
minD <- distMin(x,y,cx,cy)
# Lloyd iterations: assign each point to its nearest centroid, recompute
# centroids as cluster means, repeat.  200 passes is far more than needed for
# convergence; each pass overplots onto the same device.
for(i in 1:200){
points(cx,cy,col=c("red","blue","green"),pch=3,cex=2,lwd=2)
minD <- distMin(x,y,cx,cy)
# Column-wise argmin: index (1..3) of the nearest centroid for each point.
newClust <- apply(minD,2,which.min)
cols <- c("red","blue","green")
# Plot points coloured by their current cluster assignment.
points(x,y,pch=19,col=cols[newClust])
# Recompute each centroid as the mean of its assigned points.
cx <- tapply(x,newClust,mean)
cy <- tapply(y,newClust,mean)
# You have the new centroid, repeat the process
}
# dev.off()
# The same result in one call with the built-in kmeans().
km <- kmeans(cbind(x,y),3)
plot(x,y,type="n")
points(x,y,col=km$cluster,pch = 19)
# NOTE(review): col=km$centers passes centroid COORDINATES as colours;
# col=seq_len(nrow(km$centers)) was probably intended -- confirm.
points(km$centers,col=km$centers,pch = 21, cex = 2.5)
# *****************Extras*****************
# which.min is a fn that can be used with apply to get min from row/col
# image fn can be used to display data
# ***************************************************
|
##' @title Simulation 1
##' @description Simulate an RNA-seq count matrix (1000 genes x 45 samples in
##'   three groups of 15) containing five modules of ten correlated genes each,
##'   using empirical means and dispersions from breast cancer data.
##' @param alpha gene correlation parameter, larger alpha creates larger correlation
##' @param sim_disp gene dispersion from breast cancer data (same length and
##'   order as \code{empirical_dist})
##' @param empirical_dist average gene expression from breast cancer data
##' @return data with correlated genes for simulation 3.
##' @references Rahman T, Li Y, Ma T, et al. A sparse negative binomial mixture model for clustering RNA-seq count data[J]. arXiv preprint arXiv:1912.02399, 2019.
##' @export
##'
Sim.Corr<-function(alpha=0.5,sim_disp,empirical_dist){
  # Keep genes below the 70th expression percentile.  Compute the mask ONCE and
  # apply it to both vectors.  (Bug fix: the original first shortened
  # empirical_dist and then reused the shortened vector as a logical subscript
  # for sim_disp, which recycled the index and misaligned dispersions with means.)
  quantile<-quantile(empirical_dist,0.70)
  keep<-empirical_dist<quantile
  sim_disp<-sim_disp[keep]
  empirical_dist<-empirical_dist[keep]
  ngenes=1000
  percent_DE=0.15
  # Sample gene means/dispersions (with replacement, index-aligned) from the pool.
  select_ind<-sample(c(1:length(empirical_dist)),size = ngenes,replace = TRUE)
  select_mu<-empirical_dist[select_ind]
  select_disp<-sim_disp[select_ind]
  # Differential-expression indicator: the first 15% of genes are DE.
  de<-rep(0,ngenes)
  de[1:as.integer(percent_DE*ngenes)]<-1
  # Per-sample library-size factors, one vector per group of 15 samples.
  lib1<-runif(15,min=0.90,1.10)
  lib2<-runif(15,min=0.90,1.10)
  lib3<-runif(15,min=0.90,1.10)
  lib<-c(lib1,lib2,lib3)
  # Log2 fold changes (truncated normal, >= 0.5) times per-gene group sign
  # patterns; non-DE genes get the all-zero pattern.
  lfc<-rtruncnorm(ngenes,a=0.5,mean=1,sd=1)
  comb<-matrix(c(rep(c(-1,0,1),as.integer(sum(de)/3)),rep(c(0,1,1),as.integer(sum(de)/3)),rep(c(1,-1,0),sum(de)-2*as.integer(sum(de)/3)),rep(c(0,0,0),ngenes-sum(de))),ncol=3,byrow = TRUE)
  power_comb<-(lfc*comb)
  # Per-gene, per-group log2 means; expand to one column per sample.
  log_mu_matrix<-log(select_mu,base=2)+power_comb
  log_matrix<-t(apply(log_mu_matrix,1,function(x) rep(x,each=15)))
  # Five modules of 10 genes each get correlated expression within each group.
  ind_module_matrix<-matrix(c(1:10,51:60,101:110,151:160,801:810),ncol=5)
  for(ind_mod in 1:5){
    log_mu<-log_mu_matrix[ind_module_matrix[,ind_mod],]
    # Per group: draw a covariance from an inverse-Wishart whose scale mixes
    # identity (weight 1-alpha) and all-ones (weight alpha), normalise it to a
    # correlation matrix, and sample correlated log2-means for the 15 samples.
    param_inwish<-(1-alpha)*diag(1,nrow=10,ncol=10)+alpha*matrix(1,nrow=10,ncol=10)
    sigma_prime<-riwish(60,param_inwish)
    sigma<-cov2cor(sigma_prime)
    log_matrix[ind_module_matrix[,ind_mod],1:15]<- t(rmvnorm(15,log_mu[1:10,1],sigma=sigma))
    param_inwish<-(1-alpha)*diag(1,nrow=10,ncol=10)+alpha*matrix(1,nrow=10,ncol=10)
    sigma_prime<-riwish(60,param_inwish)
    sigma<-cov2cor(sigma_prime)
    log_matrix[ind_module_matrix[,ind_mod],16:30]<- t(rmvnorm(15,log_mu[,2],sigma=sigma))
    param_inwish<-(1-alpha)*diag(1,nrow=10,ncol=10)+alpha*matrix(1,nrow=10,ncol=10)
    sigma_prime<-riwish(60,param_inwish)
    sigma<-cov2cor(sigma_prime)
    log_matrix[ind_module_matrix[,ind_mod],31:45]<- t(rmvnorm(15,log_mu[,3],sigma=sigma))
  }
  # Negative binomial counts: mean = library factor * 2^(log2 mean),
  # size = the gene's sampled dispersion.
  sim_matrix<-matrix(,nrow=ngenes,ncol=45)
  for(ind_gene in 1:ngenes){
    for(sample_ind in 1:45){
      sim_matrix[ind_gene,sample_ind]<-rnbinom(1,mu=lib[sample_ind]*2^(log_matrix[ind_gene,sample_ind]),size=select_disp[ind_gene])
    }
  }
  data<-sim_matrix
  return(data)
}
|
/snbClust/R/Sim.Corr.R
|
no_license
|
YujiaLi1994/snbClust
|
R
| false
| false
| 3,347
|
r
|
##' @title Simulation 1
##' @description Simulate an RNA-seq count matrix (1000 genes x 45 samples in
##'   three groups of 15) containing five modules of ten correlated genes each,
##'   using empirical means and dispersions from breast cancer data.
##' @param alpha gene correlation parameter, larger alpha creates larger correlation
##' @param sim_disp gene dispersion from breast cancer data (same length and
##'   order as \code{empirical_dist})
##' @param empirical_dist average gene expression from breast cancer data
##' @return data with correlated genes for simulation 3.
##' @references Rahman T, Li Y, Ma T, et al. A sparse negative binomial mixture model for clustering RNA-seq count data[J]. arXiv preprint arXiv:1912.02399, 2019.
##' @export
##'
Sim.Corr<-function(alpha=0.5,sim_disp,empirical_dist){
  # Keep genes below the 70th expression percentile.  Compute the mask ONCE and
  # apply it to both vectors.  (Bug fix: the original first shortened
  # empirical_dist and then reused the shortened vector as a logical subscript
  # for sim_disp, which recycled the index and misaligned dispersions with means.)
  quantile<-quantile(empirical_dist,0.70)
  keep<-empirical_dist<quantile
  sim_disp<-sim_disp[keep]
  empirical_dist<-empirical_dist[keep]
  ngenes=1000
  percent_DE=0.15
  # Sample gene means/dispersions (with replacement, index-aligned) from the pool.
  select_ind<-sample(c(1:length(empirical_dist)),size = ngenes,replace = TRUE)
  select_mu<-empirical_dist[select_ind]
  select_disp<-sim_disp[select_ind]
  # Differential-expression indicator: the first 15% of genes are DE.
  de<-rep(0,ngenes)
  de[1:as.integer(percent_DE*ngenes)]<-1
  # Per-sample library-size factors, one vector per group of 15 samples.
  lib1<-runif(15,min=0.90,1.10)
  lib2<-runif(15,min=0.90,1.10)
  lib3<-runif(15,min=0.90,1.10)
  lib<-c(lib1,lib2,lib3)
  # Log2 fold changes (truncated normal, >= 0.5) times per-gene group sign
  # patterns; non-DE genes get the all-zero pattern.
  lfc<-rtruncnorm(ngenes,a=0.5,mean=1,sd=1)
  comb<-matrix(c(rep(c(-1,0,1),as.integer(sum(de)/3)),rep(c(0,1,1),as.integer(sum(de)/3)),rep(c(1,-1,0),sum(de)-2*as.integer(sum(de)/3)),rep(c(0,0,0),ngenes-sum(de))),ncol=3,byrow = TRUE)
  power_comb<-(lfc*comb)
  # Per-gene, per-group log2 means; expand to one column per sample.
  log_mu_matrix<-log(select_mu,base=2)+power_comb
  log_matrix<-t(apply(log_mu_matrix,1,function(x) rep(x,each=15)))
  # Five modules of 10 genes each get correlated expression within each group.
  ind_module_matrix<-matrix(c(1:10,51:60,101:110,151:160,801:810),ncol=5)
  for(ind_mod in 1:5){
    log_mu<-log_mu_matrix[ind_module_matrix[,ind_mod],]
    # Per group: draw a covariance from an inverse-Wishart whose scale mixes
    # identity (weight 1-alpha) and all-ones (weight alpha), normalise it to a
    # correlation matrix, and sample correlated log2-means for the 15 samples.
    param_inwish<-(1-alpha)*diag(1,nrow=10,ncol=10)+alpha*matrix(1,nrow=10,ncol=10)
    sigma_prime<-riwish(60,param_inwish)
    sigma<-cov2cor(sigma_prime)
    log_matrix[ind_module_matrix[,ind_mod],1:15]<- t(rmvnorm(15,log_mu[1:10,1],sigma=sigma))
    param_inwish<-(1-alpha)*diag(1,nrow=10,ncol=10)+alpha*matrix(1,nrow=10,ncol=10)
    sigma_prime<-riwish(60,param_inwish)
    sigma<-cov2cor(sigma_prime)
    log_matrix[ind_module_matrix[,ind_mod],16:30]<- t(rmvnorm(15,log_mu[,2],sigma=sigma))
    param_inwish<-(1-alpha)*diag(1,nrow=10,ncol=10)+alpha*matrix(1,nrow=10,ncol=10)
    sigma_prime<-riwish(60,param_inwish)
    sigma<-cov2cor(sigma_prime)
    log_matrix[ind_module_matrix[,ind_mod],31:45]<- t(rmvnorm(15,log_mu[,3],sigma=sigma))
  }
  # Negative binomial counts: mean = library factor * 2^(log2 mean),
  # size = the gene's sampled dispersion.
  sim_matrix<-matrix(,nrow=ngenes,ncol=45)
  for(ind_gene in 1:ngenes){
    for(sample_ind in 1:45){
      sim_matrix[ind_gene,sample_ind]<-rnbinom(1,mu=lib[sample_ind]*2^(log_matrix[ind_gene,sample_ind]),size=select_disp[ind_gene])
    }
  }
  data<-sim_matrix
  return(data)
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
source("setup_import.R")
# Define UI: a single fluid page with a date-range slider and a day-of-week
# checkbox filter up top, then a tabset holding the plot and the summary
# table. `daylist` is created by setup_import.R (sourced above).
ui <- fluidPage(
  # # Application title
  # titlePanel("My NYT Crossword Times"),
  title = "My NYT Crossword Times",
  fluidRow(
    # Input: Specification of range within an interval
    column(6, offset = 0, style='padding-left:40px',
           sliderInput("daterange", "Date range:",
                       min = as.Date("2019-01-07"), max = as.Date(Sys.Date()),
                       value = c(as.Date("2019-01-07"), as.Date(Sys.Date())),
                       timeFormat="%Y-%m-%d",
                       width = "95%")
    ),
    # Input: Days to include (checkbox per day of week; all checked by default)
    column(4, offset = 1, style='padding-left:40px',
           checkboxGroupInput("incldays", "Days to include",
                              daylist,
                              selected = daylist,
                              width = "95%",
                              inline = TRUE)
    )
  ),
  hr(),
  # Output area: scatter/box plot tab and per-day summary table tab.
  tabsetPanel(
    tabPanel("Plot", plotOutput("xwplot", width="100%")),
    tabPanel("Table", tableOutput("table"))
  )
)
# Define server logic ----
# Renders (1) a per-day summary table of solve times and (2) a scatter plot
# of solve times over the selected date range with marginal per-day boxplots.
# Relies on objects created by setup_import.R (xwords_df, daylist,
# format_duration, Nstreak), none of which are visible in this file.
server <- function(input, output, session) {
  # autoWidth=TRUE and scrollX=T required for fluidRow columns to work properly
  # https://stackoverflow.com/questions/34850382/setting-column-width-in-r-shiny-datatable-does-not-work-in-case-of-lots-of-colum
  # NOTE(review): this is a plain local variable named `options`; it is never
  # passed to any render call below, so it currently has no effect — confirm
  # whether it was meant for a DT/datatable renderer.
  options = list(autoWidth = TRUE,
                 scrollX = T)
  # Reactive expression to create data frame of all input values.
  # NOTE(review): sliderValues is defined but never used in this server.
  sliderValues <- reactive({
    data.frame(
      Name = c("DateRange"),
      Value = input$daterange,
      stringsAsFactors = FALSE)
  })
  # Summary table: fastest/median/mean/slowest solve time per day of week,
  # with the date of the fastest and slowest solve appended in parentheses.
  output$table <- renderTable({
    # Get the records in the date range we've specified
    DateRange <- input$daterange
    thisrange_df = xwords_df[xwords_df$PuzzleDate>=as.Date(DateRange[1]) & xwords_df$PuzzleDate<=as.Date(DateRange[2]),]
    thisrange_df <- thisrange_df[order(thisrange_df$PuzzleDate),]
    out <- thisrange_df %>%
      group_by(Day) %>%
      summarise(Fastest = as.duration(min(SolveTime)),
                Median = median(SolveTime),
                Mean = mean(SolveTime),
                Slowest = max(SolveTime))
    # Order rows by day-of-week (daylist order) rather than alphabetically.
    out$Day <- factor(out$Day, levels=c(daylist))
    out <- out[order(out$Day), ]
    fastest <- format_duration(out$Fastest)
    slowest <- format_duration(out$Slowest)
    # Append the date of the first record matching each extreme.
    # NOTE(review): assumes all 7 days appear in the selected range; with
    # fewer distinct days present, indexing 1:7 would hit NA rows — confirm.
    for (d in 1:7) {
      fastest[d] <- paste(fastest[d], " (", thisrange_df$PuzzleDate[as.numeric(thisrange_df$SolveTime)==as.numeric(out$Fastest[d])][1], ")", sep="")
      slowest[d] <- paste(slowest[d], " (", thisrange_df$PuzzleDate[as.numeric(thisrange_df$SolveTime)==as.numeric(out$Slowest[d])][1], ")", sep="")
    }
    out$Fastest <- fastest
    out$Median <- format_duration(ceiling(out$Median))
    out$Mean <- format_duration(ceiling(out$Mean))
    out$Slowest <- slowest
    out
  })
  # Main plot: per-puzzle solve times (minutes) over time, colored by day,
  # with loess trend lines and a marginal boxplot per day on the right.
  output$xwplot <- renderPlot({
    # Get the records in the date range we've specified
    DateRange <- input$daterange
    thisrange_df = xwords_df[xwords_df$PuzzleDate>=as.Date(DateRange[1]) & xwords_df$PuzzleDate<=as.Date(DateRange[2]),]
    thisrange_df = thisrange_df[order(thisrange_df$PuzzleDate),]
    # Drop rows for any unchecked day of the week.
    for (d in daylist) {
      if (!any(input$incldays==d)) {
        thisrange_df = thisrange_df[thisrange_df$Day != d,]
      }
    }
    # Ancillaries
    give.n <- function(x){
      return(c(y = median(x)*1.05, label = length(x)))
      # experiment with the multiplier to find the perfect position
    }
    give.n_zero <- function(x){
      return(c(y = median(x)*0, label = length(x)))
    }
    give.MS_text <- function(x) {
      strptime(x, format="%M:%S")
    }
    # y-position (minutes) just above the slowest solve for day y.
    give.DOW_max_ypos <- function(x,y){
      max_time <- max(x$SolveTime[x$Day2==y])
      #return(as.numeric(max_time, units="mins")+1.25)
      return(max_time/60+1.25)
    }
    # y-position (minutes) just below the fastest solve for day y.
    give.DOW_min_ypos <- function(x,y){
      min_time <- min(x$SolveTime[x$Day2==y])
      #return(as.numeric(min_time, units="mins")-1.25)
      return(min_time/60-1.25)
    }
    # "M:S" label for the slowest solve of day y.
    give.DOW_max_label <- function(x,y){
      max_time <- max(x$SolveTime[x$Day2==y])
      max_time_min <- floor(as.numeric(max_time, units="mins"))
      max_time_sec <- as.numeric(max_time, units="secs") %% 60
      max_time_string <- paste(max_time_min, max_time_sec, sep=":")
      return(max_time_string)
    }
    # Date of the slowest solve of day y.
    give.DOW_max_date <- function(x,y){
      x2 <- x[x$Day2==y,]
      x3 <- x2[order(-x2$SolveTime),]
      return(x3$PuzzleDate[1])
    }
    # Date of the fastest solve of day y.
    give.DOW_min_date <- function(x,y){
      x2 <- x[x$Day2==y,]
      x3 <- x2[order(x2$SolveTime),]
      return(x3$PuzzleDate[1])
    }
    # ggplot elements
    theme_just_ymajors <- theme(panel.grid.minor = element_blank(),
                                panel.grid.major.x = element_blank(),
                                panel.grid.major = element_line(color = "#E0E0E0"),
                                panel.background = element_blank())
    outlier_label_size = 4.5
    # Set up color palette, removing unchecked days but keeping the colors the same
    mypalette = brewer.pal(n=7, name="Paired")
    mypalette = mypalette[is.element(daylist,input$incldays)]
    # Scatter plot with smoothed line: Current streak ----
    # http://www.sthda.com/sthda/RDoc/images/rcolorbrewer.png
    pmain <- ggplot(thisrange_df, aes(x=PuzzleDate, y=as.integer(SolveTime)/60, color = Day2)) +
      geom_point(shape=1, stroke = 0.8) +
      geom_smooth(method = "loess", se=F) +
      theme_just_ymajors +
      ggtitle(sprintf("Current streak: %d days (showing %d days)", Nstreak, length(thisrange_df$SolveTime))) +
      ylab("Time (minutes)") +
      theme(legend.key=element_blank(),
            plot.title = element_text(size=14, face="bold"),
            axis.title.x = element_blank(),
            axis.text.x = element_text(size=14, angle=45, hjust=1),
            axis.text.y = element_text(size=14),
            axis.title.y = element_text(size=14, margin=margin(r=10)),
            axis.ticks.y = element_blank(),
            axis.ticks.length = unit(0.25, "cm"),
            axis.ticks.x = element_line(color="#E0E0E0"),
            legend.text=element_text(size=14)) +
      scale_color_manual(values=mypalette) +
      labs(color = element_blank()) +
      scale_y_continuous(limits = c(-2, max(thisrange_df$SolveTime)/60+1.5),
                         expand = c(0,0)) +
      scale_x_date(expand=c(0,0))
    # Annotation: a day's fastest time, rotated 90°, placed near the bottom
    # of that day's marginal boxplot.
    annot1 <- function(x, this_day){
      min_time <- min(x$SolveTime[x$Day2==this_day])
      this_label <- sprintf("%d:%02d", floor(min_time/60), min_time %% 60)
      out <- annotate("text",
                      x=this_day,
                      y=give.DOW_min_ypos(thisrange_df, this_day),
                      label=this_label,
                      size=outlier_label_size,
                      angle = 90,
                      hjust=0.8)
      return(out)
    }
    # Marginal per-day boxplots attached to the y axis (cowplot axis_canvas).
    ybox <- axis_canvas(pmain, axis = "y") +
      theme_just_ymajors +
      geom_boxplot(data = thisrange_df, aes(x=Day2, y=as.integer(SolveTime)/60, color = Day2)) +
      scale_x_discrete() +
      scale_color_manual(values=mypalette) +
      expand_limits(x = -0.1)
    for (d in daylist) {
      if (any(input$incldays==d)) {
        ybox <- ybox + annot1(thisrange_df, d)
      }
    }
    p1 <- insert_yaxis_grob(pmain, ybox, grid::unit(1.5, "in"), position = "right")
    ggdraw(p1)
  })
}
# Run the application: combine the UI and server defined above.
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
samsrabin/crossword-plot
|
R
| false
| false
| 8,635
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
source("setup_import.R")
# Define UI: a single fluid page with a date-range slider and a day-of-week
# checkbox filter up top, then a tabset holding the plot and the summary
# table. `daylist` is created by setup_import.R (sourced above).
ui <- fluidPage(
  # # Application title
  # titlePanel("My NYT Crossword Times"),
  title = "My NYT Crossword Times",
  fluidRow(
    # Input: Specification of range within an interval
    column(6, offset = 0, style='padding-left:40px',
           sliderInput("daterange", "Date range:",
                       min = as.Date("2019-01-07"), max = as.Date(Sys.Date()),
                       value = c(as.Date("2019-01-07"), as.Date(Sys.Date())),
                       timeFormat="%Y-%m-%d",
                       width = "95%")
    ),
    # Input: Days to include (checkbox per day of week; all checked by default)
    column(4, offset = 1, style='padding-left:40px',
           checkboxGroupInput("incldays", "Days to include",
                              daylist,
                              selected = daylist,
                              width = "95%",
                              inline = TRUE)
    )
  ),
  hr(),
  # Output area: scatter/box plot tab and per-day summary table tab.
  tabsetPanel(
    tabPanel("Plot", plotOutput("xwplot", width="100%")),
    tabPanel("Table", tableOutput("table"))
  )
)
# Define server logic ----
# Renders (1) a per-day summary table of solve times and (2) a scatter plot
# of solve times over the selected date range with marginal per-day boxplots.
# Relies on objects created by setup_import.R (xwords_df, daylist,
# format_duration, Nstreak), none of which are visible in this file.
server <- function(input, output, session) {
  # autoWidth=TRUE and scrollX=T required for fluidRow columns to work properly
  # https://stackoverflow.com/questions/34850382/setting-column-width-in-r-shiny-datatable-does-not-work-in-case-of-lots-of-colum
  # NOTE(review): this is a plain local variable named `options`; it is never
  # passed to any render call below, so it currently has no effect — confirm
  # whether it was meant for a DT/datatable renderer.
  options = list(autoWidth = TRUE,
                 scrollX = T)
  # Reactive expression to create data frame of all input values.
  # NOTE(review): sliderValues is defined but never used in this server.
  sliderValues <- reactive({
    data.frame(
      Name = c("DateRange"),
      Value = input$daterange,
      stringsAsFactors = FALSE)
  })
  # Summary table: fastest/median/mean/slowest solve time per day of week,
  # with the date of the fastest and slowest solve appended in parentheses.
  output$table <- renderTable({
    # Get the records in the date range we've specified
    DateRange <- input$daterange
    thisrange_df = xwords_df[xwords_df$PuzzleDate>=as.Date(DateRange[1]) & xwords_df$PuzzleDate<=as.Date(DateRange[2]),]
    thisrange_df <- thisrange_df[order(thisrange_df$PuzzleDate),]
    out <- thisrange_df %>%
      group_by(Day) %>%
      summarise(Fastest = as.duration(min(SolveTime)),
                Median = median(SolveTime),
                Mean = mean(SolveTime),
                Slowest = max(SolveTime))
    # Order rows by day-of-week (daylist order) rather than alphabetically.
    out$Day <- factor(out$Day, levels=c(daylist))
    out <- out[order(out$Day), ]
    fastest <- format_duration(out$Fastest)
    slowest <- format_duration(out$Slowest)
    # Append the date of the first record matching each extreme.
    # NOTE(review): assumes all 7 days appear in the selected range; with
    # fewer distinct days present, indexing 1:7 would hit NA rows — confirm.
    for (d in 1:7) {
      fastest[d] <- paste(fastest[d], " (", thisrange_df$PuzzleDate[as.numeric(thisrange_df$SolveTime)==as.numeric(out$Fastest[d])][1], ")", sep="")
      slowest[d] <- paste(slowest[d], " (", thisrange_df$PuzzleDate[as.numeric(thisrange_df$SolveTime)==as.numeric(out$Slowest[d])][1], ")", sep="")
    }
    out$Fastest <- fastest
    out$Median <- format_duration(ceiling(out$Median))
    out$Mean <- format_duration(ceiling(out$Mean))
    out$Slowest <- slowest
    out
  })
  # Main plot: per-puzzle solve times (minutes) over time, colored by day,
  # with loess trend lines and a marginal boxplot per day on the right.
  output$xwplot <- renderPlot({
    # Get the records in the date range we've specified
    DateRange <- input$daterange
    thisrange_df = xwords_df[xwords_df$PuzzleDate>=as.Date(DateRange[1]) & xwords_df$PuzzleDate<=as.Date(DateRange[2]),]
    thisrange_df = thisrange_df[order(thisrange_df$PuzzleDate),]
    # Drop rows for any unchecked day of the week.
    for (d in daylist) {
      if (!any(input$incldays==d)) {
        thisrange_df = thisrange_df[thisrange_df$Day != d,]
      }
    }
    # Ancillaries
    give.n <- function(x){
      return(c(y = median(x)*1.05, label = length(x)))
      # experiment with the multiplier to find the perfect position
    }
    give.n_zero <- function(x){
      return(c(y = median(x)*0, label = length(x)))
    }
    give.MS_text <- function(x) {
      strptime(x, format="%M:%S")
    }
    # y-position (minutes) just above the slowest solve for day y.
    give.DOW_max_ypos <- function(x,y){
      max_time <- max(x$SolveTime[x$Day2==y])
      #return(as.numeric(max_time, units="mins")+1.25)
      return(max_time/60+1.25)
    }
    # y-position (minutes) just below the fastest solve for day y.
    give.DOW_min_ypos <- function(x,y){
      min_time <- min(x$SolveTime[x$Day2==y])
      #return(as.numeric(min_time, units="mins")-1.25)
      return(min_time/60-1.25)
    }
    # "M:S" label for the slowest solve of day y.
    give.DOW_max_label <- function(x,y){
      max_time <- max(x$SolveTime[x$Day2==y])
      max_time_min <- floor(as.numeric(max_time, units="mins"))
      max_time_sec <- as.numeric(max_time, units="secs") %% 60
      max_time_string <- paste(max_time_min, max_time_sec, sep=":")
      return(max_time_string)
    }
    # Date of the slowest solve of day y.
    give.DOW_max_date <- function(x,y){
      x2 <- x[x$Day2==y,]
      x3 <- x2[order(-x2$SolveTime),]
      return(x3$PuzzleDate[1])
    }
    # Date of the fastest solve of day y.
    give.DOW_min_date <- function(x,y){
      x2 <- x[x$Day2==y,]
      x3 <- x2[order(x2$SolveTime),]
      return(x3$PuzzleDate[1])
    }
    # ggplot elements
    theme_just_ymajors <- theme(panel.grid.minor = element_blank(),
                                panel.grid.major.x = element_blank(),
                                panel.grid.major = element_line(color = "#E0E0E0"),
                                panel.background = element_blank())
    outlier_label_size = 4.5
    # Set up color palette, removing unchecked days but keeping the colors the same
    mypalette = brewer.pal(n=7, name="Paired")
    mypalette = mypalette[is.element(daylist,input$incldays)]
    # Scatter plot with smoothed line: Current streak ----
    # http://www.sthda.com/sthda/RDoc/images/rcolorbrewer.png
    pmain <- ggplot(thisrange_df, aes(x=PuzzleDate, y=as.integer(SolveTime)/60, color = Day2)) +
      geom_point(shape=1, stroke = 0.8) +
      geom_smooth(method = "loess", se=F) +
      theme_just_ymajors +
      ggtitle(sprintf("Current streak: %d days (showing %d days)", Nstreak, length(thisrange_df$SolveTime))) +
      ylab("Time (minutes)") +
      theme(legend.key=element_blank(),
            plot.title = element_text(size=14, face="bold"),
            axis.title.x = element_blank(),
            axis.text.x = element_text(size=14, angle=45, hjust=1),
            axis.text.y = element_text(size=14),
            axis.title.y = element_text(size=14, margin=margin(r=10)),
            axis.ticks.y = element_blank(),
            axis.ticks.length = unit(0.25, "cm"),
            axis.ticks.x = element_line(color="#E0E0E0"),
            legend.text=element_text(size=14)) +
      scale_color_manual(values=mypalette) +
      labs(color = element_blank()) +
      scale_y_continuous(limits = c(-2, max(thisrange_df$SolveTime)/60+1.5),
                         expand = c(0,0)) +
      scale_x_date(expand=c(0,0))
    # Annotation: a day's fastest time, rotated 90°, placed near the bottom
    # of that day's marginal boxplot.
    annot1 <- function(x, this_day){
      min_time <- min(x$SolveTime[x$Day2==this_day])
      this_label <- sprintf("%d:%02d", floor(min_time/60), min_time %% 60)
      out <- annotate("text",
                      x=this_day,
                      y=give.DOW_min_ypos(thisrange_df, this_day),
                      label=this_label,
                      size=outlier_label_size,
                      angle = 90,
                      hjust=0.8)
      return(out)
    }
    # Marginal per-day boxplots attached to the y axis (cowplot axis_canvas).
    ybox <- axis_canvas(pmain, axis = "y") +
      theme_just_ymajors +
      geom_boxplot(data = thisrange_df, aes(x=Day2, y=as.integer(SolveTime)/60, color = Day2)) +
      scale_x_discrete() +
      scale_color_manual(values=mypalette) +
      expand_limits(x = -0.1)
    for (d in daylist) {
      if (any(input$incldays==d)) {
        ybox <- ybox + annot1(thisrange_df, d)
      }
    }
    p1 <- insert_yaxis_grob(pmain, ybox, grid::unit(1.5, "in"), position = "right")
    ggdraw(p1)
  })
}
# Run the application: combine the UI and server defined above.
shinyApp(ui = ui, server = server)
|
# Bagging (random forest) and boosting demos.
# Expects data frames `trainData`, `testData`, and `iristrain` (each with a
# `Species` factor column) to already exist in the workspace.
library(randomForest)
# BUG FIX: the argument is `ntree` (lowercase); the original `nTree=500` was
# silently swallowed by `...`, so the forest ran with the default tree count.
model<- randomForest(Species~., data=trainData, ntree=500)
summary(model)
# Confusion matrix of predicted vs. observed species on the test set.
prediction <- predict(model, newdata=testData, type='class')
table(prediction, testData$Species)
importance(model)  # variable-importance scores
library(adabag)
# BUG FIX: adabag::boosting's bootstrap-sampling flag is `boos`, not `boost`.
iris.adaboost <- boosting(Species~., data=iristrain, boos=TRUE, mfinal=5)
iris.adaboost
?boosting
|
/Bagging_and_Boosting.R
|
no_license
|
vkrit/chula_datamining
|
R
| false
| false
| 344
|
r
|
# Bagging (random forest) and boosting demos.
# Expects data frames `trainData`, `testData`, and `iristrain` (each with a
# `Species` factor column) to already exist in the workspace.
library(randomForest)
# BUG FIX: the argument is `ntree` (lowercase); the original `nTree=500` was
# silently swallowed by `...`, so the forest ran with the default tree count.
model<- randomForest(Species~., data=trainData, ntree=500)
summary(model)
# Confusion matrix of predicted vs. observed species on the test set.
prediction <- predict(model, newdata=testData, type='class')
table(prediction, testData$Species)
importance(model)  # variable-importance scores
library(adabag)
# BUG FIX: adabag::boosting's bootstrap-sampling flag is `boos`, not `boost`.
iris.adaboost <- boosting(Species~., data=iristrain, boos=TRUE, mfinal=5)
iris.adaboost
?boosting
|
#' Various utility functions
#'
#' Miscellaneous set of functions that can be used with results from the
#' package.
#'
#' Function \code{extract.indices} extracts the parameter indices from the
#' parameter index matrices (PIMS) for a particular type of \code{parameter}
#' that match a set of group numbers and rows and columns that are defined in
#' the dataframe \code{df}. It returns a vector of indices which can be used to
#' specify the set of real parameters to be extracted by
#' \code{\link{covariate.predictions}} using the index column in \code{data} or
#' the \code{indices} argument.
#'
#' Function \code{nat.surv} produces estimates of natural survival (Sn) from
#' total survival (S) and recovery rate (r) from a joint live-dead model in
#' which all harvest recoveries are reported. In that case, Taylor et al 2005
#' suggest the following estimator of natural survival Sn=S + (1-S)*r. The
#' arguments for the function are a mark \code{model} object and a dataframe
#' \code{df} that defines the set of groups and times (row,col) for the natural
#' survival computations. It returns a list with elements: 1) \code{Sn} - a
#' vector of estimates for natural survival; one for each entry in \code{df}
#' and 2) \code{vcv} - a variance-covariance matrix for the estimates of
#' natural survival.
#'
#' Function \code{pop.est} produces estimates of abundance using a vector of
#' counts of animals captured (\code{ns}) and estimates of capture
#' probabilities (\code{ps}). The estimates can be aggregated or averaged
#' using the \code{design} matrix argument. If individual estimates are
#' needed, use an nxn identity matrix for design where n is the length of
#' \code{ns}. To get a total of all the estimates use a nx1 column matrix of
#' 1s. Any other \code{design} matrix can be specified to subset, aggregate
#' and/or average the estimates. The argument \code{p.vcv} is needed to
#' compute the variance-covariance matrix for the abundance estimates using the
#' formula described in Taylor et al. (2002). The function returns a list with
#' elements: 1) \code{Nhat} - a vector of abundance estimates and 2) \code{vcv}
#' - variance-covariance matrix for the abundance estimates.
#'
#' Function \code{compute.Sn} creates list structure for natural survival using
#' \code{nat.surv} to be used for model averaging natural survival estimates
#' (e.g., \code{model.average(compute.Sn(x,df,criterion))}). It returns a list
#' with elements estimates, vcv, weight: 1) estimates - matrix of estimates of
#' natural survival, 2)vcv - list of var-cov matrix for the estimates, and 3)
#' weight - vector of model weights.
#'
#' Function \code{search.output.files} searches for occurrence of a specific
#' string in output files associated with models in a marklist x. It returns a
#' vector of model numbers in the marklist which have an output file containing
#' the string.
#'
#' @usage extract.indices(model,parameter,df)
#'
#' nat.surv(model,df)
#'
#' pop.est(ns,ps,design,p.vcv)
#'
#' compute.Sn(x,df,criterion)
#'
#' logitCI(x,se)
#'
#' search.output.files(x,string)
#'
#' @aliases extract.indices nat.surv pop.est compute.Sn search.output.files logitCI
#' @param model a mark model object
#' @param parameter character string for a type of parameter for that model
#' (eg, "Phi","p")
#' @param df dataframe containing the columns group, row, column which specify
#' the group number, the row number and column number of the PIM
#' @param ns vector of counts of animals captured
#' @param ps vector of capture probability estimates which match counts
#' @param design design matrix that specifies how counts will be aggregated
#' @param p.vcv variance-covariance matrix for capture probability estimates
#' @param x marklist of models for compute.Sn and a vector of real estimates for logitCI
#' @param se vector of std errors for real estimates
#' @param criterion vector of model selection criterion values (eg AICc)
#' @param string string to be found in output files contained in models in x
#' @author Jeff Laake
#' @export extract.indices nat.surv pop.est compute.Sn logitCI search.output.files
#' @references TAYLOR, M. K., J. LAAKE, H. D. CLUFF, M. RAMSAY and F. MESSIER.
#' 2002. Managing the risk from hunting for the Viscount Melville Sound polar
#' bear population. Ursus 13: 185-202.
#'
#' TAYLOR, M. K., J. LAAKE, P. D. MCLOUGHLIN, E. W. BORN, H. D. CLUFF, S. H.
#' FERGUSON, A. ROSING-ASVID, R. SCHWEINSBURG and F. MESSIER. 2005. Demography
#' and viability of a hunted population of polar bears. Arctic 58: 203-214.
#' @examples
#'
#' # Example of computing N-hat for occasions 2 to 7 for the p=~time model
#' data(dipper)
#' md=mark(dipper,model.parameters=list(p=list(formula=~time),
#' Phi=list(formula=~1)))
#' # Create a matrix from the capture history strings
#' xmat=matrix(as.numeric(unlist(strsplit(dipper$ch,""))),
#' ncol=nchar(dipper$ch[1]))
#' # sum number of captures in each column but don't use the first
#' # column because p[1] can't be estimated
#' ns=colSums(xmat)[-1]
#' # extract the indices and then get covariate predictions for p(2),...,p(7)
#' # which are row-colums 1-6 in PIM for p
#' p.indices=extract.indices(md,"p",df=data.frame(group=rep(1,6),
#' row=1:6,col=1:6))
#' p.list=covariate.predictions(md,data=data.frame(index=p.indices))
#' # call pop.est using diagonal design matrix to get
#' # separate estimate for each occasion
#' pop.est(ns,p.list$estimates$estimate,
#' design=diag(1,ncol=6,nrow=6),p.list$vcv)
#'
extract.indices=function(model,parameter,df)
{
  # Extracts the parameter indices from a model for a particular parameter
  # as defined by the dataframe df which contains columns group, row, column
  #
  # Arguments:
  #  model     - mark model object
  #  parameter - character string for a type of parameter for that model (eg, "Phi","p")
  #  df        - dataframe containing the columns group, row, column which specify
  #              the group number, the row number and column number of the PIM
  #
  # Value: vector of indices which can be used to specify the set of real
  #        parameters to be extracted
  #
  if(!valid.parameters(model$model,parameter))stop()
  indices=vector("numeric",length=nrow(df))
  for(i in seq_len(nrow(df)))
  {
    group=df$group[i]
    irow=df$row[i]
    jcol=df$col[i]
    # Bounds checks against the model structure and the requested PIM.
    if(group > model$number.of.groups)
      stop(paste("Specified group number", group, " is greater than number of groups", model$number.of.groups))
    if(irow > nrow(model$pims[[parameter]][[group]]$pim))
      # BUG FIX: report the offending row number (was incorrectly jcol)
      stop(paste("Specified row number", irow, " is greater than number of rows", nrow(model$pims[[parameter]][[group]]$pim)))
    if(jcol > ncol(model$pims[[parameter]][[group]]$pim))
      stop(paste("Specified column number", jcol, " is greater than number of columns", ncol(model$pims[[parameter]][[group]]$pim)))
    indices[i]=model$pims[[parameter]][[group]]$pim[irow,jcol]
  }
  return(indices)
}
nat.surv=function(model,df)
{
  # Computes estimates of natural survival as Sn=S+(1-S)*r See Taylor et al 2005
  #
  # Arguments:
  #  model - mark model object; must have class c("mark","Burnham") or
  #          c("mark","Barker"), the joint live-dead models containing
  #          both S and r parameters
  #  df    - dataframe containing the columns group, row, column which specify
  #          the group number, the row number and column number of the PIM
  #
  # Value: list with elements Sn and vcv
  #  Sn  - a vector of estimates for natural survival; one for each entry in df
  #  vcv - a var-cov matrix for the estimates of natural survival (delta method)
  #
  if(class(model)[1]!="mark" | !(class(model)[2]=="Burnham" | class(model)[2]=="Barker"))
    stop("This function only works with Burnham or Barker model for RMark")
  # Look up real-parameter indices of r and S for the requested PIM cells.
  r.indices=extract.indices(model,"r",df)
  S.indices=extract.indices(model,"S",df)
  npar=nrow(df)
  # covar$estimates$estimate holds the r estimates in positions 1:npar and
  # the S estimates in (npar+1):(2*npar); covar$vcv is their joint covariance.
  covar=covariate.predictions(model,data.frame(index=c(r.indices,S.indices)))
  # Sn = S + r*(1-S)
  Sn=covar$estimates$estimate[(npar+1):(2*npar)]+covar$estimates$estimate[1:npar]*(1-covar$estimates$estimate[(npar+1):(2*npar)])
  # Delta method: dSn/dr = 1-S (first npar columns), dSn/dS = 1-r (last npar).
  partial=matrix(0,nrow=length(Sn),ncol=2*length(Sn))
  partial[cbind(1:length(Sn),1:length(Sn))]=1-covar$estimates$estimate[(npar+1):(2*npar)]
  partial[cbind(1:length(Sn),(length(Sn)+1):(2*length(Sn)))]=1-covar$estimates$estimate[1:npar]
  vcv=partial%*%covar$vcv%*%t(partial)
  return(list(Sn=Sn,vcv=vcv))
}
pop.est=function(ns,ps,design,p.vcv)
{
  # Horvitz-Thompson-style abundance estimates (n/p), aggregated through a
  # design matrix; variance formula follows Taylor et al. 2002.
  #
  # Arguments:
  #  ns     - vector of counts of animals captured
  #  ps     - vector of capture probabilities which match counts
  #  design - design matrix that specifies how counts will be aggregated
  #  p.vcv  - variance-covariance matrix for capture probabilities
  #
  # Value: list with elements Nhat and vcv
  #  Nhat - a vector of estimates for abundance
  #  vcv  - a var-cov matrix for the estimates of abundance
  #
  # Validate conformable dimensions before computing anything.
  if(length(ns)!=length(ps))stop("Length of ps must match length of ns")
  if(length(ps)!=nrow(p.vcv) | length(ps)!=ncol(p.vcv))stop("Length of ps must match dimensions of p.vcv")
  if(length(ns)!=nrow(design))stop("Length of ns must match number of rows in design")
  # Per-cell estimates n/p, aggregated via the design matrix.
  raw.est=ns/ps
  Nhat=t(raw.est)%*%design
  # Variance has two parts: binomial sampling variance of the counts, plus a
  # component propagated from the capture-probability covariance matrix.
  binom.var=as.vector(t(ns*(1-ps)/ps^2)%*%design)
  grad=ns/ps^2
  Nhat.vcv=diag(binom.var,nrow=ncol(design),ncol=ncol(design))+
    t(design)%*%(outer(grad,grad,"*")*p.vcv)%*%design
  return(list(Nhat=Nhat,vcv=Nhat.vcv))
}
compute.Sn=function(x,df,criterion)
{
  # Computes list structure for natural survival using nat.surv to
  # be used for model averaging (model.average(compute.Sn(x,df,criterion))
  #
  # Arguments:
  #  x         - marklist of models; elements are either in-memory mark
  #              objects (lists) or file paths to saved models (see the
  #              is.list branch below)
  #  df        - dataframe containing the columns group, row, column which specify
  #              the group number, the row number and column number of the PIM
  #  criterion - vector of model selection criterion values (eg AICc)
  #
  # Value: list with elements estimates, vcv, weight
  #  estimates - matrix of estimates of natural survival (one row per model)
  #  vcv       - list of var-cov matrices for the estimates
  #  weight    - model weights
  #
  # Akaike weights: exp(-0.5 * delta-criterion), normalized to sum to 1.
  weight=criterion
  weight=weight-min(weight)
  weight=exp(-.5*weight)/sum(exp(-.5*weight))
  # Row names of model.table give the positions of the models within x.
  modelnums=as.numeric(row.names(x$model.table))
  Sn.estimates=matrix(0,nrow=nrow(x$model.table),ncol=nrow(df))
  Sn.vcv=vector("list",length=nrow(x$model.table))
  # Placeholder; load() below restores an object named `model` for
  # externally-stored models.
  model=NULL
  for (i in 1:nrow(x$model.table))
  {
    if(is.list(x[[1]]))
      Sn.list=nat.surv(x[[modelnums[i]]],df)
    else
    {
      load(x[[modelnums[i]]])
      Sn.list=nat.surv(model,df)
    }
    Sn.estimates[i,]=Sn.list$Sn
    Sn.vcv[[i]]=Sn.list$vcv
  }
  return(list(estimates=Sn.estimates,vcv=Sn.vcv,weight=weight))
}
logitCI=function(x,se)
{
  # 95% confidence interval for real parameters bounded in (0,1), computed on
  # the logit scale and back-transformed so the interval stays inside (0,1).
  #
  # Arguments:
  #  x  - vector of real estimates
  #  se - vector of se estimates
  #
  # Value: a dataframe with x, se and the confidence interval (lcl, ucl)
  #
  eta=log(x/(1-x))       # logit link values
  grad=1/x+1/(1-x)       # derivative of logit(x) w.r.t. x
  if(length(x)==1)
  {
    # Scalar case: simple delta-method standard error on the link scale.
    se.eta=sqrt(grad^2*se^2)
  }
  else
  {
    # Vector case: propagate a diagonal var-cov matrix through the Jacobian.
    jac=diag(grad,nrow=length(grad))
    vcv=diag(se^2)
    se.eta=sqrt(diag(jac%*%vcv%*%t(jac)))
  }
  # Back-transform the Wald interval with the inverse logit.
  lcl=plogis(eta-1.96*se.eta)
  ucl=plogis(eta+1.96*se.eta)
  return(data.frame(estimate=x,se=se,lcl=lcl,ucl=ucl))
}
search.output.files=function(x,string)
{
  # Searches the MARK output file of every model in marklist x for `string`
  # and reports which models matched, printing the matching lines as it goes.
  #
  # Arguments:
  #  x      - marklist of models
  #  string - string to be found in output files
  #
  # Value: vector of model numbers in the marklist which have an output file
  #        containing the string.
  #
  matched=NULL
  for(model.num in seq_len(nrow(x$model.table)))
  {
    # Each model's output file is its `output` basename plus ".out".
    out.lines=readLines(paste0(x[[model.num]]$output,".out"))
    hits=grep(string,out.lines)
    if(length(hits)>0)
    {
      matched=c(matched,model.num)
      cat("\nModel ",model.num," Messages: ",paste(out.lines[hits],sep="\n"))
    }
  }
  return(matched)
}
|
/RMark/R/utility.r
|
no_license
|
wchallenger/RMark
|
R
| false
| false
| 12,374
|
r
|
#' Various utility functions
#'
#' Miscellaneous set of functions that can be used with results from the
#' package.
#'
#' Function \code{extract.indices} extracts the parameter indices from the
#' parameter index matrices (PIMS) for a particular type of \code{parameter}
#' that match a set of group numbers and rows and columns that are defined in
#' the dataframe \code{df}. It returns a vector of indices which can be used to
#' specify the set of real parameters to be extracted by
#' \code{\link{covariate.predictions}} using the index column in \code{data} or
#' the \code{indices} argument.
#'
#' Function \code{nat.surv} produces estimates of natural survival (Sn) from
#' total survival (S) and recovery rate (r) from a joint live-dead model in
#' which all harvest recoveries are reported. In that case, Taylor et al 2005
#' suggest the following estimator of natural survival Sn=S + (1-S)*r. The
#' arguments for the function are a mark \code{model} object and a dataframe
#' \code{df} that defines the set of groups and times (row,col) for the natural
#' survival computations. It returns a list with elements: 1) \code{Sn} - a
#' vector of estimates for natural survival; one for each entry in \code{df}
#' and 2) \code{vcv} - a variance-covariance matrix for the estimates of
#' natural survival.
#'
#' Function \code{pop.est} produces estimates of abundance using a vector of
#' counts of animals captured (\code{ns}) and estimates of capture
#' probabilities (\code{ps}). The estimates can be aggregated or averaged
#' using the \code{design} matrix argument. If individual estimates are
#' needed, use an nxn identity matrix for design where n is the length of
#' \code{ns}. To get a total of all the estimates use a nx1 column matrix of
#' 1s. Any other \code{design} matrix can be specified to subset, aggregate
#' and/or average the estimates. The argument \code{p.vcv} is needed to
#' compute the variance-covariance matrix for the abundance estimates using the
#' formula described in Taylor et al. (2002). The function returns a list with
#' elements: 1) \code{Nhat} - a vector of abundance estimates and 2) \code{vcv}
#' - variance-covariance matrix for the abundance estimates.
#'
#' Function \code{compute.Sn} creates list structure for natural survival using
#' \code{nat.surv} to be used for model averaging natural survival estimates
#' (e.g., \code{model.average(compute.Sn(x,df,criterion))}). It returns a list
#' with elements estimates, vcv, weight: 1) estimates - matrix of estimates of
#' natural survival, 2)vcv - list of var-cov matrix for the estimates, and 3)
#' weight - vector of model weights.
#'
#' Function \code{search.output.files} searches for occurrence of a specific
#' string in output files associated with models in a marklist x. It returns a
#' vector of model numbers in the marklist which have an output file containing
#' the string.
#'
#' @usage extract.indices(model,parameter,df)
#'
#' nat.surv(model,df)
#'
#' pop.est(ns,ps,design,p.vcv)
#'
#' compute.Sn(x,df,criterion)
#'
#' logitCI(x,se)
#'
#' search.output.files(x,string)
#'
#' @aliases extract.indices nat.surv pop.est compute.Sn search.output.files logitCI
#' @param model a mark model object
#' @param parameter character string for a type of parameter for that model
#' (eg, "Phi","p")
#' @param df dataframe containing the columns group, row, column which specify
#' the group number, the row number and column number of the PIM
#' @param ns vector of counts of animals captured
#' @param ps vector of capture probability estimates which match counts
#' @param design design matrix that specifies how counts will be aggregated
#' @param p.vcv variance-covariance matrix for capture probability estimates
#' @param x marklist of models for compute.Sn and a vector of real estimates for logitCI
#' @param se vector of std errors for real estimates
#' @param criterion vector of model selection criterion values (eg AICc)
#' @param string string to be found in output files contained in models in x
#' @author Jeff Laake
#' @export extract.indices nat.surv pop.est compute.Sn logitCI search.output.files
#' @references TAYLOR, M. K., J. LAAKE, H. D. CLUFF, M. RAMSAY and F. MESSIER.
#' 2002. Managing the risk from hunting for the Viscount Melville Sound polar
#' bear population. Ursus 13: 185-202.
#'
#' TAYLOR, M. K., J. LAAKE, P. D. MCLOUGHLIN, E. W. BORN, H. D. CLUFF, S. H.
#' FERGUSON, A. ROSING-ASVID, R. SCHWEINSBURG and F. MESSIER. 2005. Demography
#' and viability of a hunted population of polar bears. Arctic 58: 203-214.
#' @examples
#'
#' # Example of computing N-hat for occasions 2 to 7 for the p=~time model
#' data(dipper)
#' md=mark(dipper,model.parameters=list(p=list(formula=~time),
#' Phi=list(formula=~1)))
#' # Create a matrix from the capture history strings
#' xmat=matrix(as.numeric(unlist(strsplit(dipper$ch,""))),
#' ncol=nchar(dipper$ch[1]))
#' # sum number of captures in each column but don't use the first
#' # column because p[1] can't be estimated
#' ns=colSums(xmat)[-1]
#' # extract the indices and then get covariate predictions for p(2),...,p(7)
#' # which are row-colums 1-6 in PIM for p
#' p.indices=extract.indices(md,"p",df=data.frame(group=rep(1,6),
#' row=1:6,col=1:6))
#' p.list=covariate.predictions(md,data=data.frame(index=p.indices))
#' # call pop.est using diagonal design matrix to get
#' # separate estimate for each occasion
#' pop.est(ns,p.list$estimates$estimate,
#' design=diag(1,ncol=6,nrow=6),p.list$vcv)
#'
extract.indices=function(model,parameter,df)
{
# Extracts the real parameter indices from a mark model for a particular
# parameter type, as defined by the dataframe df which gives the PIM cell
# (group, row, column) for each requested index.
#
# Arguments:
#  model     - mark model object
#  parameter - character string for a type of parameter for that model (eg, "Phi","p")
#  df        - dataframe containing the columns group, row, col which specify
#              the group number, the row number and column number of the PIM
#
# Value: vector of indices which can be used to specify the set of real parameters
#        to be extracted
#
	if(!valid.parameters(model$model,parameter))stop()
	indices=vector("numeric",length=nrow(df))
	for(i in seq_len(nrow(df)))
	{
	   group=df$group[i]
	   irow=df$row[i]
	   jcol=df$col[i]
	   if(group > model$number.of.groups)
		   stop(paste("Specified group number", group, " is greater than number of groups", model$number.of.groups))
	   # Bug fix: the row-range error previously reported jcol instead of irow
	   if(irow > nrow(model$pims[[parameter]][[group]]$pim))
		   stop(paste("Specified row number", irow, " is greater than number of rows", nrow(model$pims[[parameter]][[group]]$pim)))
	   if(jcol > ncol(model$pims[[parameter]][[group]]$pim))
		   stop(paste("Specified column number", jcol, " is greater than number of columns", ncol(model$pims[[parameter]][[group]]$pim)))
	   indices[i]=model$pims[[parameter]][[group]]$pim[irow,jcol]
	}
	return(indices)
}
nat.surv=function(model,df)
{
# Computes estimates of natural survival as Sn = S + r*(1-S); see Taylor et al 2005.
#
# Arguments:
#
#  model - mark model object (must be a Burnham or Barker model)
#  df    - dataframe containing the columns group, row, col which specify
#          the group number, the row number and column number of the PIM
#
# Value: list with elements Sn and vcv
#  Sn  - a vector of estimates for natural survival; one for each entry in df
#  vcv - a var-cov matrix for the estimates of natural survival
#
	# Use a scalar condition (&&/||) and inherits()/%in% rather than
	# vectorized | on positional class() indexing, which is fragile.
	if(!inherits(model,"mark") || !any(c("Burnham","Barker") %in% class(model)))
		stop("This function only works with Burnham or Barker model for RMark")
	r.indices=extract.indices(model,"r",df)
	S.indices=extract.indices(model,"S",df)
	npar=nrow(df)
	# Real estimates come back stacked: r values first, then S values
	covar=covariate.predictions(model,data.frame(index=c(r.indices,S.indices)))
	r.hat=covar$estimates$estimate[1:npar]
	S.hat=covar$estimates$estimate[(npar+1):(2*npar)]
	Sn=S.hat+r.hat*(1-S.hat)
	# Delta method: dSn/dr = 1-S (first npar columns), dSn/dS = 1-r (last npar)
	partial=matrix(0,nrow=npar,ncol=2*npar)
	partial[cbind(1:npar,1:npar)]=1-S.hat
	partial[cbind(1:npar,(npar+1):(2*npar))]=1-r.hat
	vcv=partial%*%covar$vcv%*%t(partial)
	return(list(Sn=Sn,vcv=vcv))
}
pop.est=function(ns,ps,design,p.vcv)
{
# Computes abundance estimates by aggregating Horvitz-Thompson-type ratios
# ns/ps through the columns of a design matrix; see Taylor et al 2002.
#
# Arguments:
#
#  ns     - vector of counts of animals captured
#  ps     - vector of capture probabilities which match the counts
#  design - design matrix that specifies how counts will be aggregated
#  p.vcv  - variance-covariance matrix for the capture probabilities
#
# Value: list with elements Nhat and vcv
#  Nhat - a vector of estimates for abundance
#  vcv  - a var-cov matrix for the estimates of abundance
#
	if(length(ns)!=length(ps))stop("Length of ps must match length of ns")
	if(length(ps)!=nrow(p.vcv) | length(ps)!=ncol(p.vcv))stop("Length of ps must match dimensions of p.vcv")
	if(length(ns)!=nrow(design))stop("Length of ns must match number of rows in design")
	# Point estimates: aggregate per-occasion ratios through the design matrix
	ht.ratio=ns/ps
	Nhat=t(ht.ratio)%*%design
	# Variance has two parts: a sampling component on the diagonal and a
	# component propagating the uncertainty in the capture probabilities
	sampling.var=as.vector(t(ns*(1-ps)/ps^2)%*%design)
	grad=ns/ps^2
	Nhat.vcv=diag(sampling.var,nrow=ncol(design),ncol=ncol(design)) +
			t(design)%*%(outer(grad,grad,"*")*p.vcv)%*%design
	return(list(Nhat=Nhat,vcv=Nhat.vcv))
}
compute.Sn=function(x,df,criterion)
{
# Builds the list structure of natural survival estimates across all models in
# a marklist, suitable for model averaging via
# model.average(compute.Sn(x,df,criterion)).
#
# Arguments:
#
#  x         - marklist of models (in-memory model lists, or file names to load)
#  df        - dataframe containing the columns group, row, col which specify
#              the group number, the row number and column number of the PIM
#  criterion - vector of model selection criterion values (eg AICc)
#
# Value: list with elements estimates, vcv, weight
#  estimates - matrix of estimates of natural survival (one row per model)
#  vcv       - list of var-cov matrices for the estimates
#  weight    - model weights derived from the criterion values
#
	# Akaike-type weights from criterion differences
	delta=criterion-min(criterion)
	weight=exp(-.5*delta)/sum(exp(-.5*delta))
	modelnums=as.numeric(row.names(x$model.table))
	nmodels=nrow(x$model.table)
	Sn.estimates=matrix(0,nrow=nmodels,ncol=nrow(df))
	Sn.vcv=vector("list",length=nmodels)
	model=NULL
	for (i in seq_len(nmodels))
	{
	   if(is.list(x[[1]]))
		  Sn.list=nat.surv(x[[modelnums[i]]],df)
	   else
	   {
		  # Models stored externally: load() restores an object named "model"
		  load(x[[modelnums[i]]])
		  Sn.list=nat.surv(model,df)
	   }
	   Sn.estimates[i,]=Sn.list$Sn
	   Sn.vcv[[i]]=Sn.list$vcv
	}
	return(list(estimates=Sn.estimates,vcv=Sn.vcv,weight=weight))
}
logitCI=function(x,se)
{
# Computes a 95% confidence interval for real parameters bounded on (0,1)
# by transforming to the logit scale, applying a normal interval there, and
# back-transforming with the inverse logit (plogis).
#
# Arguments:
#
#  x  - vector of real estimates (each strictly between 0 and 1)
#  se - vector of se estimates, same length as x
#
# Value:
#
#  A dataframe with columns estimate, se, lcl, ucl
#
	link.values=log(x/(1-x))
	# Derivative of the logit: d/dx log(x/(1-x)) = 1/x + 1/(1-x)
	deriv.link.values=1/x+1/(1-x)
	# Delta method with a diagonal var-cov for x reduces to an elementwise
	# product; this replaces the previous O(n^2) dense-matrix construction
	# (diag(deriv) %*% diag(se^2) %*% t(diag(deriv))) with the numerically
	# equivalent vectorized form, for both scalar and vector inputs.
	se.links=sqrt((deriv.link.values*se)^2)
	lcl=plogis(link.values-1.96*se.links)
	ucl=plogis(link.values+1.96*se.links)
	return(data.frame(estimate=x,se=se,lcl=lcl,ucl=ucl))
}
search.output.files=function(x,string)
{
#
# Scans the MARK output file of every model in a marklist for a given string
# and reports which models contain it.
#
# Arguments:
#
#  x      - marklist of models
#  string - string (treated as a regular expression by grep) to be found
#           in the output files
#
# Value:
#
#  Vector of model numbers in the marklist whose output file contains the
#  string (NULL when none match). Matching lines are echoed with cat().
#
	indices=NULL
	nmodels=nrow(x$model.table)
	for(i in seq_len(nmodels))
	{
		outfile=paste(x[[i]]$output,".out",sep="")
		filelines=readLines(outfile)
		hits=grep(string,filelines)
		if(length(hits)>0)
		{
			indices=c(indices,i)
			cat("\nModel ",i," Messages: ",paste(filelines[hits],sep="\n"))
		}
	}
	return(indices)
}
|
library(stringr)
# Assemble, per gene, the first guide list (comma-separated entries in column 1
# of each outcome file) that contains no guide already present in the library
# (includedGuides). Results accumulate row-wise into TOTRES as
# (gene, guide) pairs.
Letter_dirs<-dir('../../RESULTS/CTTV015_Landers_Sabatini_Library_analysis/outComes/')
nLetters<-length(Letter_dirs)
# Loads the includedGuides vector saved by the commented-out code below
load('../../DATA/CTTV015_Landers_Sabatini_Library_analysis/R/alreadyincludedGuides.rdata')
# includedGuides<-read.table('../../DATA/CTTV015_Landers_Sabatini_Library_analysis/internal/Copy of aac7041_SM_Table_S1-toFrancesco2.txt',sep='\t',header = TRUE,stringsAsFactors = FALSE)
# includedGuides<-includedGuides[which(includedGuides$Present.in.the.current.library.=='Yes' | includedGuides$BbsI.sites.=='Yes'),1]
# save(includedGuides,file='../../DATA/CTTV015_Landers_Sabatini_Library_analysis/alreadyincludedGuides.rdata')
iniy<-1  # flag: 1 until the first bunch has been stored in TOTRES
for (i in 1:nLetters){
  currentFC<-dir(paste('../../RESULTS/CTTV015_Landers_Sabatini_Library_analysis/outComes/',Letter_dirs[i],sep=''))
  nfiles<-length(currentFC)
  for (j in 1:nfiles){
    print(c(Letter_dirs[i],currentFC[j]))
    currentFN<-paste('../../RESULTS/CTTV015_Landers_Sabatini_Library_analysis/outComes/',Letter_dirs[i],'/',currentFC[j],sep='')
    fileContent<-read.table(currentFN,sep='\t',header = TRUE,stringsAsFactors = FALSE)
    # Scan rows starting at row 2 until a guide list with no overlap with
    # includedGuides is found (flag<-0 terminates the scan).
    # NOTE(review): if every row overlaps, flag eventually indexes past the
    # last row and fileContent[flag,1] yields NA -- confirm files always
    # contain a non-overlapping row.
    flag<-2
    while(flag>0){
      currentList<-fileContent[flag,1]
      currentList<-unlist(str_split(currentList,","))
      if(sum(is.element(currentList,includedGuides))==0){flag<-0}
      else{flag<-flag+1}
    }
    # NOTE(review): currentList is non-empty even when the scan ran off the
    # table, so this branch is effectively always taken -- verify intended.
    if (length(currentList)>0){
      currentGene<-unlist(str_split(currentFC[j],'.txt'))[1]
      # assumes each guide list holds exactly 5 guides -- TODO confirm
      currentBunch<-cbind(rep(currentGene,5),currentList)
    }else{
      currentBunch<-NULL
    }
    if (iniy == 1){
      TOTRES<-currentBunch
      iniy<-0
    }else{
      TOTRES<-rbind(TOTRES,currentBunch)
    }
  }
}
|
/Lander_Sabatini_old_Analysis/AssembleResults.R
|
no_license
|
francescojm/OT-CPI-Pipelines
|
R
| false
| false
| 1,928
|
r
|
library(stringr)
# Assemble, per gene, the first guide list (comma-separated entries in column 1
# of each outcome file) that contains no guide already present in the library
# (includedGuides). Results accumulate row-wise into TOTRES as
# (gene, guide) pairs.
Letter_dirs<-dir('../../RESULTS/CTTV015_Landers_Sabatini_Library_analysis/outComes/')
nLetters<-length(Letter_dirs)
# Loads the includedGuides vector saved by the commented-out code below
load('../../DATA/CTTV015_Landers_Sabatini_Library_analysis/R/alreadyincludedGuides.rdata')
# includedGuides<-read.table('../../DATA/CTTV015_Landers_Sabatini_Library_analysis/internal/Copy of aac7041_SM_Table_S1-toFrancesco2.txt',sep='\t',header = TRUE,stringsAsFactors = FALSE)
# includedGuides<-includedGuides[which(includedGuides$Present.in.the.current.library.=='Yes' | includedGuides$BbsI.sites.=='Yes'),1]
# save(includedGuides,file='../../DATA/CTTV015_Landers_Sabatini_Library_analysis/alreadyincludedGuides.rdata')
iniy<-1  # flag: 1 until the first bunch has been stored in TOTRES
for (i in 1:nLetters){
  currentFC<-dir(paste('../../RESULTS/CTTV015_Landers_Sabatini_Library_analysis/outComes/',Letter_dirs[i],sep=''))
  nfiles<-length(currentFC)
  for (j in 1:nfiles){
    print(c(Letter_dirs[i],currentFC[j]))
    currentFN<-paste('../../RESULTS/CTTV015_Landers_Sabatini_Library_analysis/outComes/',Letter_dirs[i],'/',currentFC[j],sep='')
    fileContent<-read.table(currentFN,sep='\t',header = TRUE,stringsAsFactors = FALSE)
    # Scan rows starting at row 2 until a guide list with no overlap with
    # includedGuides is found (flag<-0 terminates the scan).
    # NOTE(review): if every row overlaps, flag eventually indexes past the
    # last row and fileContent[flag,1] yields NA -- confirm files always
    # contain a non-overlapping row.
    flag<-2
    while(flag>0){
      currentList<-fileContent[flag,1]
      currentList<-unlist(str_split(currentList,","))
      if(sum(is.element(currentList,includedGuides))==0){flag<-0}
      else{flag<-flag+1}
    }
    # NOTE(review): currentList is non-empty even when the scan ran off the
    # table, so this branch is effectively always taken -- verify intended.
    if (length(currentList)>0){
      currentGene<-unlist(str_split(currentFC[j],'.txt'))[1]
      # assumes each guide list holds exactly 5 guides -- TODO confirm
      currentBunch<-cbind(rep(currentGene,5),currentList)
    }else{
      currentBunch<-NULL
    }
    if (iniy == 1){
      TOTRES<-currentBunch
      iniy<-0
    }else{
      TOTRES<-rbind(TOTRES,currentBunch)
    }
  }
}
|
library(CvM2SL2Test)
library(MASS)
library(verification)
OPAM.project.dataset.RNAi <- function(
# Single-sample gene-set projection for RNAi screen data: reads a GCT dataset,
# normalizes each sample, scores every selected gene set per sample with
# OPAM.Projection.RNAi, optionally combines *_UP/*_DN gene-set pairs, and
# writes the gene-set-by-sample score matrix to output.ds (GCT format).
   input.ds,
   output.ds,
   gene.set.databases,
   gene.set.selection = "ALL", # "ALL" or list with names of gene sets
   sample.norm.type = "rank", # "rank", "log" or "log.rank"
   weight = 0.25,
   statistic = "area.under.RES",
   output.score.type = "ES", # "ES" or "NES"
   nperm = 200, # number of random permutations for NES case
   combine.mode = "combine.off", # "combine.off" do not combine *_UP and *_DN versions in
                                 # a single score. "combine.replace" combine *_UP and
                                 # *_DN versions in a single score that replaces the individual
                                 # *_UP and *_DN versions. "combine.add" combine *_UP and
                                 # *_DN versions in a single score and add it but keeping
                                 # the individual *_UP and *_DN versions.
   min.overlap = 1,
   correl.type = "rank") # "rank", "z.score", "symm.rank"
{ #----------------------------------------------------------------------------------------
  # Load libraries
  library(gtools)
  library(verification)
  library(RColorBrewer)
  # Read input dataset
  dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  # gene.names <- dataset$row.names # in Ataris or hairpin gct files the gene symbols are in the descs column
  gene.names <- dataset$descs
  gene.descs <- dataset$descs
  sample.names <- dataset$names
  Ns <- length(m[1,])  # number of samples (columns)
  Ng <- length(m[,1])  # number of features (rows)
  temp <- strsplit(input.ds, split="/") # Extract input file name
  s <- length(temp[[1]])
  input.file.name <- temp[[1]][s]
  temp <- strsplit(input.file.name, split=".gct")
  input.file.prefix <- temp[[1]][1]
  # Sample normalization: per-column rank and/or log transform of the data matrix
  if (sample.norm.type == "rank") {
    for (j in 1:Ns) { # column rank normalization
      m[,j] <- rank(m[,j], ties.method = "average")
    }
    m <- 10000*m/Ng
  } else if (sample.norm.type == "log.rank") {
    for (j in 1:Ns) { # column rank normalization
      m[,j] <- rank(m[,j], ties.method = "average")
    }
    m <- log(10000*m/Ng + exp(1))
  } else if (sample.norm.type == "log") {
    m[m < 1] <- 1
    m <- log(m + exp(1))
  }
  # Read gene set databases: first pass sizes the storage, second pass fills it
  max.G <- 0
  max.N <- 0
  for (gsdb in gene.set.databases) {
    GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
    max.G <- max(max.G, max(GSDB$size.G))
    max.N <- max.N + GSDB$N.gs
  }
  N.gs <- 0
  gs <- matrix("null", nrow=max.N, ncol=max.G)
  gs.names <- vector(length=max.N, mode="character")
  gs.descs <- vector(length=max.N, mode="character")
  size.G <- vector(length=max.N, mode="numeric")
  start <- 1
  for (gsdb in gene.set.databases) {
    GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
    N.gs <- GSDB$N.gs
    gs.names[start:(start + N.gs - 1)] <- GSDB$gs.names
    gs.descs[start:(start + N.gs - 1)] <- GSDB$gs.desc
    size.G[start:(start + N.gs - 1)] <- GSDB$size.G
    gs[start:(start + N.gs - 1), 1:max(GSDB$size.G)] <- GSDB$gs[1:N.gs, 1:max(GSDB$size.G)]
    start <- start + N.gs
  }
  N.gs <- max.N
  # Select desired gene sets
  if (gene.set.selection[1] != "ALL") {
    locs <- match(gene.set.selection, gs.names)
    N.gs <- sum(!is.na(locs))
    if(N.gs > 1) {
      gs <- gs[locs,]
    } else {
      gs <- t(as.matrix(gs[locs,])) # Force vector to matrix if only one gene set specified
    }
    gs.names <- gs.names[locs]
    gs.descs <- gs.descs[locs]
    size.G <- size.G[locs]
  }
  # Loop over gene sets: one projection score per (gene set, sample) pair
  score.matrix <- matrix(0, nrow=N.gs, ncol=Ns)
  for (gs.i in 1:N.gs) {
    #browser()
    gene.set <- gs[gs.i, 1:size.G[gs.i]]
    gene.overlap <- intersect(gene.set, gene.names)
    print(paste(gs.i, "gene set:", gs.names[gs.i], " overlap=", length(gene.overlap)))
    if (length(gene.overlap) < min.overlap) {
      # Too little overlap with the dataset: mark for pruning below
      score.matrix[gs.i, ] <- rep(NA, Ns)
      next
    } else {
      gene.set.locs <- match(gene.overlap, gene.set)
      gene.names.locs <- match(gene.overlap, gene.names)
      msig <- m[gene.names.locs,]
      msig.names <- gene.names[gene.names.locs]
      if (output.score.type == "ES") {
        OPAM <- OPAM.Projection.RNAi(data.array = m, gene.names = gene.names, n.cols = Ns,
                                     n.rows = Ng, weight = weight, statistic = statistic,
                                     gene.set = gene.overlap, nperm = 1, correl.type = correl.type)
        score.matrix[gs.i,] <- OPAM$ES.vector
      } else if (output.score.type == "NES") {
        OPAM <- OPAM.Projection.RNAi(data.array = m, gene.names = gene.names, n.cols = Ns,
                                     n.rows = Ng, weight = weight, statistic = statistic,
                                     gene.set = gene.overlap, nperm = nperm, correl.type = correl.type)
        score.matrix[gs.i,] <- OPAM$NES.vector
      }
    }
  }
  # Prune gene sets whose overlap was below min.overlap (rows set to NA above)
  locs <- !is.na(score.matrix[,1])
  print(paste("N.gs before overlap prunning:", N.gs))
  N.gs <- sum(locs)
  print(paste("N.gs after overlap prunning:", N.gs))
  # NOTE(review): if only one gene set survives pruning this subsetting drops
  # the matrix to a vector -- confirm downstream data.frame handling.
  score.matrix <- score.matrix[locs,]
  gs.names <- gs.names[locs]
  gs.descs <- gs.descs[locs]
  initial.up.entries <- 0
  final.up.entries <- 0
  initial.dn.entries <- 0
  final.dn.entries <- 0
  combined.entries <- 0
  other.entries <- 0
  if (combine.mode == "combine.off") {
    score.matrix.2 <- score.matrix
    gs.names.2 <- gs.names
    gs.descs.2 <- gs.descs
  } else if ((combine.mode == "combine.replace") || (combine.mode == "combine.add")) {
    # Combine *_UP and *_DN gene-set pairs into a single UP-minus-DN score
    score.matrix.2 <- NULL
    gs.names.2 <- NULL
    gs.descs.2 <- NULL
    k <- 1
    for (i in 1:N.gs) {
      temp <- strsplit(gs.names[i], split="_")
      body <- paste(temp[[1]][seq(1, length(temp[[1]]) -1)], collapse="_")
      suffix <- tail(temp[[1]], 1)
      print(paste("i:", i, "gene set:", gs.names[i], "body:", body, "suffix:", suffix))
      if (suffix == "UP") { # This is an "UP" gene set
        initial.up.entries <- initial.up.entries + 1
        target <- paste(body, "DN", sep="_")
        loc <- match(target, gs.names)
        if (!is.na(loc)) { # found corresponding "DN" gene set: create combined entry
          score <- score.matrix[i,] - score.matrix[loc,]
          score.matrix.2 <- rbind(score.matrix.2, score)
          gs.names.2 <- c(gs.names.2, body)
          gs.descs.2 <- c(gs.descs.2, paste(gs.descs[i], "combined UP & DN"))
          combined.entries <- combined.entries + 1
          if (combine.mode == "combine.add") { # also add the "UP entry
            score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
            gs.names.2 <- c(gs.names.2, gs.names[i])
            gs.descs.2 <- c(gs.descs.2, gs.descs[i])
            final.up.entries <- final.up.entries + 1
          }
        } else { # did not find corresponding "DN" gene set: create "UP" entry
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.up.entries <- final.up.entries + 1
        }
      } else if (suffix == "DN") { # This is a "DN" gene set
        initial.dn.entries <- initial.dn.entries + 1
        target <- paste(body, "UP", sep="_")
        loc <- match(target, gs.names)
        if (is.na(loc)) { # did not find corresponding "UP" gene set: create "DN" entry
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.dn.entries <- final.dn.entries + 1
        } else { # it found corresponding "UP" gene set
          if (combine.mode == "combine.add") { # create "DN" entry
            score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
            gs.names.2 <- c(gs.names.2, gs.names[i])
            gs.descs.2 <- c(gs.descs.2, gs.descs[i])
            final.dn.entries <- final.dn.entries + 1
          }
        }
      } else { # This is neither "UP nor "DN" gene set: create individual entry
        score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
        gs.names.2 <- c(gs.names.2, gs.names[i])
        gs.descs.2 <- c(gs.descs.2, gs.descs[i])
        other.entries <- other.entries + 1
      }
    } # end for loop over gene sets
    print(paste("initial.up.entries:", initial.up.entries))
    print(paste("final.up.entries:", final.up.entries))
    print(paste("initial.dn.entries:", initial.dn.entries))
    print(paste("final.dn.entries:", final.dn.entries))
    print(paste("other.entries:", other.entries))
    print(paste("combined.entries:", combined.entries))
    print(paste("total entries:", length(score.matrix.2[,1])))
  }
  # NOTE(review): score.matrix.2 is only defined for the three recognized
  # combine.mode values; any other value errors here -- confirm intended.
  V.GCT <- data.frame(score.matrix.2)
  names(V.GCT) <- sample.names
  row.names(V.GCT) <- gs.names.2
  write.gct(gct.data.frame = V.GCT, descs = gs.descs.2, filename = output.ds)
} # end of OPAM.project.RNAi.dataset
OPAM.project.dataset.4 <- function(
# Single-sample gene-set projection of a GCT expression dataset: normalizes
# each sample, scores every selected gene set per sample with
# OPAM.Projection.3, optionally combines *_UP/*_DN gene-set pairs, and writes
# the gene-set-by-sample score matrix to output.ds (GCT format).
   input.ds,
   output.ds,
   gene.set.databases,
   gene.set.selection = "ALL", # "ALL" or list with names of gene sets
   sample.norm.type = "rank", # "rank", "log" or "log.rank"
   weight = 0.25,
   statistic = "area.under.RES",
   output.score.type = "ES", # "ES" or "NES"
   nperm = 200, # number of random permutations for NES case
   combine.mode = "combine.off", # "combine.off" do not combine *_UP and *_DN versions in
                                 # a single score. "combine.replace" combine *_UP and
                                 # *_DN versions in a single score that replaces the individual
                                 # *_UP and *_DN versions. "combine.add" combine *_UP and
                                 # *_DN versions in a single score and add it but keeping
                                 # the individual *_UP and *_DN versions.
   min.overlap = 1,
   correl.type = "rank") # "rank", "z.score", "symm.rank"
{ #----------------------------------------------------------------------------------------
  # Load libraries
  library(gtools)
  library(verification)
  library(RColorBrewer)
  # Read input dataset
  dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  gene.names <- dataset$row.names
  gene.descs <- dataset$descs
  sample.names <- dataset$names
  Ns <- length(m[1,])  # number of samples (columns)
  Ng <- length(m[,1])  # number of genes (rows)
  temp <- strsplit(input.ds, split="/") # Extract input file name
  s <- length(temp[[1]])
  input.file.name <- temp[[1]][s]
  temp <- strsplit(input.file.name, split=".gct")
  input.file.prefix <- temp[[1]][1]
  # Sample normalization: per-column rank and/or log transform of the data matrix
  if (sample.norm.type == "rank") {
    for (j in 1:Ns) { # column rank normalization
      m[,j] <- rank(m[,j], ties.method = "average")
    }
    m <- 10000*m/Ng
  } else if (sample.norm.type == "log.rank") {
    for (j in 1:Ns) { # column rank normalization
      m[,j] <- rank(m[,j], ties.method = "average")
    }
    m <- log(10000*m/Ng + exp(1))
  } else if (sample.norm.type == "log") {
    m[m < 1] <- 1
    m <- log(m + exp(1))
  }
  # Read gene set databases: first pass sizes the storage, second pass fills it
  max.G <- 0
  max.N <- 0
  for (gsdb in gene.set.databases) {
    GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
    max.G <- max(max.G, max(GSDB$size.G))
    max.N <- max.N + GSDB$N.gs
  }
  N.gs <- 0
  gs <- matrix("null", nrow=max.N, ncol=max.G)
  gs.names <- vector(length=max.N, mode="character")
  gs.descs <- vector(length=max.N, mode="character")
  size.G <- vector(length=max.N, mode="numeric")
  start <- 1
  for (gsdb in gene.set.databases) {
    GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
    N.gs <- GSDB$N.gs
    gs.names[start:(start + N.gs - 1)] <- GSDB$gs.names
    gs.descs[start:(start + N.gs - 1)] <- GSDB$gs.desc
    size.G[start:(start + N.gs - 1)] <- GSDB$size.G
    gs[start:(start + N.gs - 1), 1:max(GSDB$size.G)] <- GSDB$gs[1:N.gs, 1:max(GSDB$size.G)]
    start <- start + N.gs
  }
  N.gs <- max.N
  # Select desired gene sets
  if (gene.set.selection[1] != "ALL") {
    locs <- match(gene.set.selection, gs.names)
    N.gs <- sum(!is.na(locs))
    if(N.gs > 1) {
      gs <- gs[locs,]
    } else {
      gs <- t(as.matrix(gs[locs,])) # Force vector to matrix if only one gene set specified
    }
    gs.names <- gs.names[locs]
    gs.descs <- gs.descs[locs]
    size.G <- size.G[locs]
  }
  # Loop over gene sets: one projection score per (gene set, sample) pair
  score.matrix <- matrix(0, nrow=N.gs, ncol=Ns)
  for (gs.i in 1:N.gs) {
    #browser()
    gene.set <- gs[gs.i, 1:size.G[gs.i]]
    gene.overlap <- intersect(gene.set, gene.names)
    print(paste(gs.i, "gene set:", gs.names[gs.i], " overlap=", length(gene.overlap)))
    if (length(gene.overlap) < min.overlap) {
      # Too little overlap with the dataset: mark for pruning below
      score.matrix[gs.i, ] <- rep(NA, Ns)
      next
    } else {
      gene.set.locs <- match(gene.overlap, gene.set)
      gene.names.locs <- match(gene.overlap, gene.names)
      msig <- m[gene.names.locs,]
      msig.names <- gene.names[gene.names.locs]
      if (output.score.type == "ES") {
        OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
                                  n.rows = Ng, weight = weight, statistic = statistic,
                                  gene.set = gene.overlap, nperm = 1, correl.type = correl.type)
        score.matrix[gs.i,] <- OPAM$ES.vector
      } else if (output.score.type == "NES") {
        OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
                                  n.rows = Ng, weight = weight, statistic = statistic,
                                  gene.set = gene.overlap, nperm = nperm, correl.type = correl.type)
        score.matrix[gs.i,] <- OPAM$NES.vector
      }
    }
  }
  # Prune gene sets whose overlap was below min.overlap (rows set to NA above)
  locs <- !is.na(score.matrix[,1])
  print(paste("N.gs before overlap prunning:", N.gs))
  N.gs <- sum(locs)
  print(paste("N.gs after overlap prunning:", N.gs))
  # NOTE(review): if only one gene set survives pruning this subsetting drops
  # the matrix to a vector -- confirm downstream data.frame handling.
  score.matrix <- score.matrix[locs,]
  gs.names <- gs.names[locs]
  gs.descs <- gs.descs[locs]
  initial.up.entries <- 0
  final.up.entries <- 0
  initial.dn.entries <- 0
  final.dn.entries <- 0
  combined.entries <- 0
  other.entries <- 0
  if (combine.mode == "combine.off") {
    score.matrix.2 <- score.matrix
    gs.names.2 <- gs.names
    gs.descs.2 <- gs.descs
  } else if ((combine.mode == "combine.replace") || (combine.mode == "combine.add")) {
    # Combine *_UP and *_DN gene-set pairs into a single UP-minus-DN score
    score.matrix.2 <- NULL
    gs.names.2 <- NULL
    gs.descs.2 <- NULL
    k <- 1
    for (i in 1:N.gs) {
      temp <- strsplit(gs.names[i], split="_")
      body <- paste(temp[[1]][seq(1, length(temp[[1]]) -1)], collapse="_")
      suffix <- tail(temp[[1]], 1)
      print(paste("i:", i, "gene set:", gs.names[i], "body:", body, "suffix:", suffix))
      if (suffix == "UP") { # This is an "UP" gene set
        initial.up.entries <- initial.up.entries + 1
        target <- paste(body, "DN", sep="_")
        loc <- match(target, gs.names)
        if (!is.na(loc)) { # found corresponding "DN" gene set: create combined entry
          score <- score.matrix[i,] - score.matrix[loc,]
          score.matrix.2 <- rbind(score.matrix.2, score)
          gs.names.2 <- c(gs.names.2, body)
          gs.descs.2 <- c(gs.descs.2, paste(gs.descs[i], "combined UP & DN"))
          combined.entries <- combined.entries + 1
          if (combine.mode == "combine.add") { # also add the "UP entry
            score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
            gs.names.2 <- c(gs.names.2, gs.names[i])
            gs.descs.2 <- c(gs.descs.2, gs.descs[i])
            final.up.entries <- final.up.entries + 1
          }
        } else { # did not find corresponding "DN" gene set: create "UP" entry
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.up.entries <- final.up.entries + 1
        }
      } else if (suffix == "DN") { # This is a "DN" gene set
        initial.dn.entries <- initial.dn.entries + 1
        target <- paste(body, "UP", sep="_")
        loc <- match(target, gs.names)
        if (is.na(loc)) { # did not find corresponding "UP" gene set: create "DN" entry
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.dn.entries <- final.dn.entries + 1
        } else { # it found corresponding "UP" gene set
          if (combine.mode == "combine.add") { # create "DN" entry
            score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
            gs.names.2 <- c(gs.names.2, gs.names[i])
            gs.descs.2 <- c(gs.descs.2, gs.descs[i])
            final.dn.entries <- final.dn.entries + 1
          }
        }
      } else { # This is neither "UP nor "DN" gene set: create individual entry
        score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
        gs.names.2 <- c(gs.names.2, gs.names[i])
        gs.descs.2 <- c(gs.descs.2, gs.descs[i])
        other.entries <- other.entries + 1
      }
    } # end for loop over gene sets
    print(paste("initial.up.entries:", initial.up.entries))
    print(paste("final.up.entries:", final.up.entries))
    print(paste("initial.dn.entries:", initial.dn.entries))
    print(paste("final.dn.entries:", final.dn.entries))
    print(paste("other.entries:", other.entries))
    print(paste("combined.entries:", combined.entries))
    print(paste("total entries:", length(score.matrix.2[,1])))
  }
  # NOTE(review): score.matrix.2 is only defined for the three recognized
  # combine.mode values; any other value errors here -- confirm intended.
  V.GCT <- data.frame(score.matrix.2)
  names(V.GCT) <- sample.names
  row.names(V.GCT) <- gs.names.2
  write.gct(gct.data.frame = V.GCT, descs = gs.descs.2, filename = output.ds)
} # end of OPAM.project.dataset.4
OPAM.project.dataset.5 <- function(
input.ds,
output.ds,
gene.set.databases,
gene.set.selection = "ALL", # "ALL" or list with names of gene sets
sample.norm.type = "rank", # "rank", "log" or "log.rank"
weight = 0.25,
statistic = "area.under.RES",
output.score.type = "ES", # "ES" or "NES"
nperm = 200, # number of random permutations for NES case
combine.mode = "combine.off", # "combine.off" do not combine *_UP and *_DN versions in
# a single score. "combine.replace" combine *_UP and
# *_DN versions in a single score that replaces the individual
# *_UP and *_DN versions. "combine.add" combine *_UP and
# *_DN versions in a single score and add it but keeping
# the individual *_UP and *_DN versions.
min.overlap = 1,
correl.type = "rank") # "rank", "z.score", "symm.rank"
{ #----------------------------------------------------------------------------------------
# Load libraries
library(gtools)
library(verification)
library(RColorBrewer)
# Read input dataset
dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
m <- data.matrix(dataset$ds)
gene.names <- dataset$row.names
gene.descs <- dataset$descs
sample.names <- dataset$names
Ns <- length(m[1,])
Ng <- length(m[,1])
temp <- strsplit(input.ds, split="/") # Extract input file name
s <- length(temp[[1]])
input.file.name <- temp[[1]][s]
temp <- strsplit(input.file.name, split=".gct")
input.file.prefix <- temp[[1]][1]
# Sample normalization
if (sample.norm.type == "rank") {
for (j in 1:Ns) { # column rank normalization
m[,j] <- rank(m[,j], ties.method = "average")
}
m <- 10000*m/Ng
} else if (sample.norm.type == "log.rank") {
for (j in 1:Ns) { # column rank normalization
m[,j] <- rank(m[,j], ties.method = "average")
}
m <- log(10000*m/Ng + exp(1))
} else if (sample.norm.type == "log") {
m[m < 1] <- 1
m <- log(m + exp(1))
}
# Read gene set databases
max.G <- 0
max.N <- 0
for (gsdb in gene.set.databases) {
GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
max.G <- max(max.G, max(GSDB$size.G))
max.N <- max.N + GSDB$N.gs
}
N.gs <- 0
gs <- matrix("null", nrow=max.N, ncol=max.G)
gs.names <- vector(length=max.N, mode="character")
gs.descs <- vector(length=max.N, mode="character")
size.G <- vector(length=max.N, mode="numeric")
start <- 1
for (gsdb in gene.set.databases) {
GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
N.gs <- GSDB$N.gs
gs.names[start:(start + N.gs - 1)] <- GSDB$gs.names
gs.descs[start:(start + N.gs - 1)] <- GSDB$gs.desc
size.G[start:(start + N.gs - 1)] <- GSDB$size.G
gs[start:(start + N.gs - 1), 1:max(GSDB$size.G)] <- GSDB$gs[1:N.gs, 1:max(GSDB$size.G)]
start <- start + N.gs
}
N.gs <- max.N
# Select desired gene sets
if (gene.set.selection[1] != "ALL") {
locs <- match(gene.set.selection, gs.names)
print(rbind(gene.set.selection, locs))
N.gs <- sum(!is.na(locs))
if(N.gs > 1) {
gs <- gs[locs,]
} else {
gs <- t(as.matrix(gs[locs,])) # Force vector to matrix if only one gene set specified
}
gs.names <- gs.names[locs]
gs.descs <- gs.descs[locs]
size.G <- size.G[locs]
}
# Check for redundant gene sets
tab <- as.data.frame(table(gs.names))
ind <- order(tab[, "Freq"], decreasing=T)
tab <- tab[ind,]
print(tab[1:10,])
print(paste("Total gene sets:", length(gs.names)))
print(paste("Unique gene sets:", length(unique(gs.names))))
# Loop over gene sets
score.matrix <- matrix(0, nrow=N.gs, ncol=Ns)
for (gs.i in 1:N.gs) {
#browser()
gene.set <- gs[gs.i, 1:size.G[gs.i]]
gene.overlap <- intersect(gene.set, gene.names)
print(paste(gs.i, "gene set:", gs.names[gs.i], " overlap=", length(gene.overlap)))
if (length(gene.overlap) < min.overlap) {
score.matrix[gs.i, ] <- rep(NA, Ns)
next
} else {
gene.set.locs <- match(gene.overlap, gene.set)
gene.names.locs <- match(gene.overlap, gene.names)
msig <- m[gene.names.locs,]
msig.names <- gene.names[gene.names.locs]
if (output.score.type == "ES") {
OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
n.rows = Ng, weight = weight, statistic = statistic,
gene.set = gene.overlap, nperm = 1, correl.type = correl.type)
score.matrix[gs.i,] <- OPAM$ES.vector
} else if (output.score.type == "NES") {
OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
n.rows = Ng, weight = weight, statistic = statistic,
gene.set = gene.overlap, nperm = nperm, correl.type = correl.type)
score.matrix[gs.i,] <- OPAM$NES.vector
}
}
}
locs <- !is.na(score.matrix[,1])
print(paste("N.gs before overlap prunning:", N.gs))
N.gs <- sum(locs)
print(paste("N.gs after overlap prunning:", N.gs))
score.matrix <- score.matrix[locs,]
gs.names <- gs.names[locs]
gs.descs <- gs.descs[locs]
initial.up.entries <- 0
final.up.entries <- 0
initial.dn.entries <- 0
final.dn.entries <- 0
combined.entries <- 0
other.entries <- 0
if (combine.mode == "combine.off") {
score.matrix.2 <- score.matrix
gs.names.2 <- gs.names
gs.descs.2 <- gs.descs
} else if ((combine.mode == "combine.replace") || (combine.mode == "combine.add")) {
score.matrix.2 <- NULL
gs.names.2 <- NULL
gs.descs.2 <- NULL
k <- 1
for (i in 1:N.gs) {
temp <- strsplit(gs.names[i], split="_")
body <- paste(temp[[1]][seq(1, length(temp[[1]]) -1)], collapse="_")
suffix <- tail(temp[[1]], 1)
print(paste("i:", i, "gene set:", gs.names[i], "body:", body, "suffix:", suffix))
if (suffix == "UP") { # This is an "UP" gene set
initial.up.entries <- initial.up.entries + 1
target <- paste(body, "DN", sep="_")
loc <- match(target, gs.names)
if (!is.na(loc)) { # found corresponding "DN" gene set: create combined entry
score <- score.matrix[i,] - score.matrix[loc,]
score.matrix.2 <- rbind(score.matrix.2, score)
gs.names.2 <- c(gs.names.2, body)
gs.descs.2 <- c(gs.descs.2, paste(gs.descs[i], "combined UP & DN"))
combined.entries <- combined.entries + 1
if (combine.mode == "combine.add") { # also add the "UP entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.up.entries <- final.up.entries + 1
}
} else { # did not find corresponding "DN" gene set: create "UP" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.up.entries <- final.up.entries + 1
}
} else if (suffix == "DN") { # This is a "DN" gene set
initial.dn.entries <- initial.dn.entries + 1
target <- paste(body, "UP", sep="_")
loc <- match(target, gs.names)
if (is.na(loc)) { # did not find corresponding "UP" gene set: create "DN" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.dn.entries <- final.dn.entries + 1
} else { # it found corresponding "UP" gene set
if (combine.mode == "combine.add") { # create "DN" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.dn.entries <- final.dn.entries + 1
}
}
} else { # This is neither "UP nor "DN" gene set: create individual entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
other.entries <- other.entries + 1
}
} # end for loop over gene sets
print(paste("initial.up.entries:", initial.up.entries))
print(paste("final.up.entries:", final.up.entries))
print(paste("initial.dn.entries:", initial.dn.entries))
print(paste("final.dn.entries:", final.dn.entries))
print(paste("other.entries:", other.entries))
print(paste("combined.entries:", combined.entries))
print(paste("total entries:", length(score.matrix.2[,1])))
}
# Check for redundant gene sets
tab <- as.data.frame(table(gs.names.2))
ind <- order(tab[, "Freq"], decreasing=T)
tab <- tab[ind,]
print(tab[1:20,])
print(paste("Total gene sets:", length(gs.names.2)))
print(paste("Unique gene sets:", length(unique(gs.names.2))))
V.GCT <- data.frame(score.matrix.2)
names(V.GCT) <- sample.names
row.names(V.GCT) <- gs.names.2
write.gct(gct.data.frame = V.GCT, descs = gs.descs.2, filename = output.ds)
} # end of OPAM.project.dataset.5
# OPAM.Projection.2
# Computes a single-sample enrichment score (ssGSEA-style projection) for ONE
# gene set across every sample (column) of an expression matrix, delegating the
# enrichment-score computation to GSEA.EnrichmentScore5 (defined elsewhere in
# this file).
#
# Arguments:
#   data.array  - numeric matrix, genes (rows) x samples (columns)
#   gene.names  - character vector of row (gene) identifiers for data.array
#   n.cols      - number of samples (columns) in data.array
#   n.rows      - number of genes (rows) in data.array
#   weight      - exponent applied to the correlation/expression weights;
#                 0 gives the unweighted (classic KS-style) statistic
#   statistic   - enrichment statistic name, forwarded to GSEA.EnrichmentScore5
#   gene.set    - character vector of gene identifiers in the set
#   nperm       - number of gene-label permutations for the NES null
#                 distribution; 0 skips permutation (NES := ES, p := 1)
#   correl.type - how to build the weight vector from ranked expression values
#
# Returns a list with per-sample vectors:
#   ES.vector    - observed enrichment scores
#   NES.vector   - ES normalized by the mean of same-sign permuted scores
#   p.val.vector - one-sided permutation p-value, floored at 1/nperm
OPAM.Projection.2 <- function(
data.array,
gene.names,
n.cols,
n.rows,
weight = 0,
statistic = "Kolmogorov-Smirnov", # "Kolmogorov-Smirnov", # "Kolmogorov-Smirnov", "Cramer-von-Mises",
# "Anderson-Darling", "Zhang_A", "Zhang_C", "Zhang_K",
# "area.under.RES", or "Wilcoxon"
gene.set,
nperm = 200,
correl.type = "rank") # "rank", "z.score", "symm.rank"
{
# Per-sample outputs, filled in the loop below.
ES.vector <- vector(length=n.cols)
NES.vector <- vector(length=n.cols)
p.val.vector <- vector(length=n.cols)
correl.vector <- vector(length=n.rows, mode="numeric")
# Compute ES score for signatures in each sample
# print("Computing GSEA.....")
# phi[s, r] holds the permuted (null) ES for sample s, permutation r.
phi <- array(0, c(n.cols, nperm))
for (sample.index in 1:n.cols) {
# Rank order of genes by decreasing expression in this sample.
gene.list <- order(data.array[, sample.index], decreasing=T)
# print(paste("Computing observed enrichment for UP signature in sample:", sample.index, sep=" "))
# Positions of the gene-set members within gene.names (first match only;
# NA for set members absent from the dataset).
gene.set2 <- match(gene.set, gene.names)
if (weight == 0) {
# Unweighted statistic: every gene contributes equally.
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
if (correl.type == "rank") {
# Expression values in ranked order serve as weights.
correl.vector <- data.array[gene.list, sample.index]
} else if (correl.type == "symm.rank") {
# Symmetrized variant: values below the median are reflected so the
# weight profile is symmetric around the mid-rank value.
correl.vector <- data.array[gene.list, sample.index]
correl.vector <- ifelse(correl.vector > correl.vector[ceiling(n.rows/2)],
correl.vector,
correl.vector + correl.vector - correl.vector[ceiling(n.rows/2)])
} else if (correl.type == "z.score") {
# Standardized expression values as weights.
x <- data.array[gene.list, sample.index]
correl.vector <- (x - mean(x))/sd(x)
}
}
# Observed enrichment score for this sample.
GSEA.results <- GSEA.EnrichmentScore5(gene.list=gene.list, gene.set=gene.set2,
statistic = statistic, alpha = weight, correl.vector = correl.vector)
ES.vector[sample.index] <- GSEA.results$ES
if (nperm == 0) {
# No permutations requested: report the raw ES and a trivial p-value.
NES.vector[sample.index] <- ES.vector[sample.index]
p.val.vector[sample.index] <- 1
} else {
# Build the null distribution by shuffling gene labels.
for (r in 1:nperm) {
reshuffled.gene.labels <- sample(1:n.rows)
if (weight == 0) {
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
# NOTE(review): permuted weights always use raw expression order
# regardless of correl.type — presumably intentional; confirm.
correl.vector <- data.array[reshuffled.gene.labels, sample.index]
}
GSEA.results <- GSEA.EnrichmentScore5(gene.list=reshuffled.gene.labels, gene.set=gene.set2,
statistic = statistic, alpha = weight, correl.vector = correl.vector)
phi[sample.index, r] <- GSEA.results$ES
}
# Normalize against same-sign permuted scores (standard GSEA convention).
if (ES.vector[sample.index] >= 0) {
pos.phi <- phi[sample.index, phi[sample.index, ] >= 0]
# Fallback constant avoids an empty null distribution (mean of NULL).
if (length(pos.phi) == 0) pos.phi <- 0.5
pos.m <- mean(pos.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/pos.m
s <- sum(pos.phi >= ES.vector[sample.index])/length(pos.phi)
# Floor the p-value at 1/nperm rather than reporting 0.
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
} else {
neg.phi <- phi[sample.index, phi[sample.index, ] < 0]
if (length(neg.phi) == 0) neg.phi <- 0.5
neg.m <- mean(neg.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/abs(neg.m)
s <- sum(neg.phi <= ES.vector[sample.index])/length(neg.phi)
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
}
}
}
return(list(ES.vector = ES.vector, NES.vector = NES.vector, p.val.vector = p.val.vector))
} # end of OPAM.Projection.2
# OPAM.Projection.3
# Faster variant of OPAM.Projection.2: the enrichment-score computation of
# GSEA.EnrichmentScore5 is inlined (twice: once for the observed score, once
# inside the permutation loop) to avoid function-call overhead. Only the
# "Kolmogorov-Smirnov" and "area.under.RES" statistics are handled by the
# inlined kernel.
#
# Arguments and return value are the same as OPAM.Projection.2:
#   data.array  - numeric matrix, genes (rows) x samples (columns)
#   gene.names  - character vector of row (gene) identifiers
#   n.cols/n.rows - matrix dimensions (samples / genes)
#   weight      - weighting exponent; 0 = unweighted
#   statistic   - "Kolmogorov-Smirnov" or "area.under.RES"
#   gene.set    - character vector of gene identifiers in the set
#   nperm       - permutations for the NES null; 0 skips permutation
#   correl.type - "rank", "symm.rank" or "z.score" weight construction
# Returns list(ES.vector, NES.vector, p.val.vector), one entry per sample.
OPAM.Projection.3 <- function(
data.array,
gene.names,
n.cols,
n.rows,
weight = 0,
statistic = "Kolmogorov-Smirnov", # "Kolmogorov-Smirnov", # "Kolmogorov-Smirnov", "Cramer-von-Mises",
# "Anderson-Darling", "Zhang_A", "Zhang_C", "Zhang_K",
# "area.under.RES", or "Wilcoxon"
gene.set,
nperm = 200,
correl.type = "rank") # "rank", "z.score", "symm.rank"
# Runs a 2-3x faster (2-2.5x for ES statistic and 2.5-3x faster for area.under.ES statsitic)
# version of GSEA.EnrichmentScore.5 internally that avoids overhead from the function call.
{
# Per-sample outputs.
ES.vector <- vector(length=n.cols)
NES.vector <- vector(length=n.cols)
p.val.vector <- vector(length=n.cols)
correl.vector <- vector(length=n.rows, mode="numeric")
# Compute ES score for signatures in each sample
# print("Computing GSEA.....")
# phi[s, r]: permuted (null) ES for sample s, permutation r.
phi <- array(0, c(n.cols, nperm))
for (sample.index in 1:n.cols) {
# Rank order of genes by decreasing expression in this sample.
gene.list <- order(data.array[, sample.index], decreasing=T)
# print(paste("Computing observed enrichment for UP signature in sample:", sample.index, sep=" "))
# Positions of gene-set members within gene.names (first match only).
gene.set2 <- match(gene.set, gene.names)
if (weight == 0) {
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
if (correl.type == "rank") {
correl.vector <- data.array[gene.list, sample.index]
} else if (correl.type == "symm.rank") {
# Reflect sub-median values to symmetrize the weight profile.
correl.vector <- data.array[gene.list, sample.index]
correl.vector <- ifelse(correl.vector > correl.vector[ceiling(n.rows/2)],
correl.vector,
correl.vector + correl.vector - correl.vector[ceiling(n.rows/2)])
} else if (correl.type == "z.score") {
x <- data.array[gene.list, sample.index]
correl.vector <- (x - mean(x))/sd(x)
}
}
### Olga's Additions ###
# Inlined enrichment-score kernel (observed score).
# ptm.new = proc.time()
tag.indicator <- sign(match(gene.list, gene.set2, nomatch=0)) # notice that the sign is 0 (no tag) or 1 (tag)
no.tag.indicator <- 1 - tag.indicator
N <- length(gene.list)
Nh <- length(gene.set2)
Nm <- N - Nh
orig.correl.vector <- correl.vector
if (weight == 0) correl.vector <- rep(1, N) # unweighted case
# Ranked positions of set members; weights restricted to those positions.
ind = which(tag.indicator==1)
correl.vector <- abs(correl.vector[ind])^weight
sum.correl = sum(correl.vector)
up = correl.vector/sum.correl # "up" represents the peaks in the mountain plot
gaps = (c(ind-1, N) - c(0, ind)) # gaps between ranked pathway genes
down = gaps/Nm
# Running enrichment evaluated only at set-member positions (peaks),
# with "valleys" the running value just before each peak.
RES = cumsum(c(up,up[Nh])-down)
valleys = RES[1:Nh]-up
max.ES = max(RES)
min.ES = min(valleys)
if( statistic == "Kolmogorov-Smirnov" ){
# KS statistic: the extreme deviation (signed) of the running sum.
if( max.ES > -min.ES ){
ES <- signif(max.ES, digits=5)
arg.ES <- which.max(RES)
} else{
ES <- signif(min.ES, digits=5)
arg.ES <- which.min(RES)
}
}
if( statistic == "area.under.RES"){
if( max.ES > -min.ES ){
arg.ES <- which.max(RES)
} else{
arg.ES <- which.min(RES)
}
# Trapezoid-style area under the piecewise-linear running sum.
gaps = gaps+1
RES = c(valleys,0) * (gaps) + 0.5*( c(0,RES[1:Nh]) - c(valleys,0) ) * (gaps)
ES = sum(RES)
}
GSEA.results = list(ES = ES, arg.ES = arg.ES, RES = RES, indicator = tag.indicator)
# new.time <<- new.time + (proc.time() - ptm.new)
### End Olga's Additions ###
#GSEA.results <- GSEA.EnrichmentScore5(gene.list=gene.list, gene.set=gene.set2,
# statistic = statistic, alpha = weight, correl.vector = correl.vector)
ES.vector[sample.index] <- GSEA.results$ES
if (nperm == 0) {
# No permutations: raw ES, trivial p-value.
NES.vector[sample.index] <- ES.vector[sample.index]
p.val.vector[sample.index] <- 1
} else {
# Null distribution via gene-label shuffling, same inlined kernel.
for (r in 1:nperm) {
reshuffled.gene.labels <- sample(1:n.rows)
if (weight == 0) {
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
# NOTE(review): permuted weights use raw expression order regardless
# of correl.type — presumably intentional; confirm.
correl.vector <- data.array[reshuffled.gene.labels, sample.index]
}
# GSEA.results <- GSEA.EnrichmentScore5(gene.list=reshuffled.gene.labels, gene.set=gene.set2,
# statistic = statistic, alpha = weight, correl.vector = correl.vector)
### Olga's Additions ###
tag.indicator <- sign(match(reshuffled.gene.labels, gene.set2, nomatch=0)) # notice that the sign is 0 (no tag) or 1 (tag)
no.tag.indicator <- 1 - tag.indicator
N <- length(reshuffled.gene.labels)
Nh <- length(gene.set2)
Nm <- N - Nh
# orig.correl.vector <- correl.vector
if (weight == 0) correl.vector <- rep(1, N) # unweighted case
ind <- which(tag.indicator==1)
correl.vector <- abs(correl.vector[ind])^weight
sum.correl <- sum(correl.vector)
up = correl.vector/sum.correl
gaps = (c(ind-1, N) - c(0, ind))
down = gaps/Nm
RES = cumsum(c(up,up[Nh])-down)
valleys = RES[1:Nh]-up
max.ES = max(RES)
min.ES = min(valleys)
if( statistic == "Kolmogorov-Smirnov" ){
if( max.ES > -min.ES ){
ES <- signif(max.ES, digits=5)
arg.ES <- which.max(RES)
} else{
ES <- signif(min.ES, digits=5)
arg.ES <- which.min(RES)
}
}
if( statistic == "area.under.RES"){
if( max.ES > -min.ES ){
arg.ES <- which.max(RES)
} else{
arg.ES <- which.min(RES)
}
gaps = gaps+1
RES = c(valleys,0) * (gaps) + 0.5*( c(0,RES[1:Nh]) - c(valleys,0) ) * (gaps)
ES = sum(RES)
}
GSEA.results = list(ES = ES, arg.ES = arg.ES, RES = RES, indicator = tag.indicator)
### End Olga's Additions ###
phi[sample.index, r] <- GSEA.results$ES
}
# Normalize against same-sign permuted scores; p-value floored at 1/nperm.
if (ES.vector[sample.index] >= 0) {
pos.phi <- phi[sample.index, phi[sample.index, ] >= 0]
if (length(pos.phi) == 0) pos.phi <- 0.5
pos.m <- mean(pos.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/pos.m
s <- sum(pos.phi >= ES.vector[sample.index])/length(pos.phi)
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
} else {
neg.phi <- phi[sample.index, phi[sample.index, ] < 0]
if (length(neg.phi) == 0) neg.phi <- 0.5
neg.m <- mean(neg.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/abs(neg.m)
s <- sum(neg.phi <= ES.vector[sample.index])/length(neg.phi)
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
}
}
}
return(list(ES.vector = ES.vector, NES.vector = NES.vector, p.val.vector = p.val.vector))
} # end of OPAM.Projection.3
# OPAM.Projection.RNAi
# RNAi-screen variant of OPAM.Projection.3 (same inlined enrichment kernel).
# The only code difference from OPAM.Projection.3 is how gene.set2 is built:
# instead of match(gene.set, gene.names) (first match only), it collects ALL
# positions of gene.names that appear in gene.set — presumably to handle
# datasets where the same gene identifier occurs on multiple rows (e.g.
# multiple hairpins per gene in an RNAi screen); confirm with callers.
#
# Arguments and return value as in OPAM.Projection.3:
# returns list(ES.vector, NES.vector, p.val.vector), one entry per sample.
OPAM.Projection.RNAi <- function(
data.array,
gene.names,
n.cols,
n.rows,
weight = 0,
statistic = "Kolmogorov-Smirnov", # "Kolmogorov-Smirnov", # "Kolmogorov-Smirnov", "Cramer-von-Mises",
# "Anderson-Darling", "Zhang_A", "Zhang_C", "Zhang_K",
# "area.under.RES", or "Wilcoxon"
gene.set,
nperm = 200,
correl.type = "rank") # "rank", "z.score", "symm.rank"
# Runs a 2-3x faster (2-2.5x for ES statistic and 2.5-3x faster for area.under.ES statsitic)
# version of GSEA.EnrichmentScore.5 internally that avoids overhead from the function call.
{
# Per-sample outputs.
ES.vector <- vector(length=n.cols)
NES.vector <- vector(length=n.cols)
p.val.vector <- vector(length=n.cols)
correl.vector <- vector(length=n.rows, mode="numeric")
# Compute ES score for signatures in each sample
# print("Computing GSEA.....")
# phi[s, r]: permuted (null) ES for sample s, permutation r.
phi <- array(0, c(n.cols, nperm))
for (sample.index in 1:n.cols) {
# Rank order of genes by decreasing expression/score in this sample.
gene.list <- order(data.array[, sample.index], decreasing=T)
# print(paste("Computing observed enrichment for UP signature in sample:", sample.index, sep=" "))
# gene.set2 <- match(gene.set, gene.names)
# All row positions whose gene name is in the set (keeps duplicate rows).
gene.set2 <- seq(1:length(gene.names))[!is.na(match(gene.names, gene.set))]
if (weight == 0) {
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
if (correl.type == "rank") {
correl.vector <- data.array[gene.list, sample.index]
} else if (correl.type == "symm.rank") {
# Reflect sub-median values to symmetrize the weight profile.
correl.vector <- data.array[gene.list, sample.index]
correl.vector <- ifelse(correl.vector > correl.vector[ceiling(n.rows/2)],
correl.vector,
correl.vector + correl.vector - correl.vector[ceiling(n.rows/2)])
} else if (correl.type == "z.score") {
x <- data.array[gene.list, sample.index]
correl.vector <- (x - mean(x))/sd(x)
}
}
### Olga's Additions ###
# Inlined enrichment-score kernel (observed score).
# ptm.new = proc.time()
tag.indicator <- sign(match(gene.list, gene.set2, nomatch=0)) # notice that the sign is 0 (no tag) or 1 (tag)
no.tag.indicator <- 1 - tag.indicator
N <- length(gene.list)
Nh <- length(gene.set2)
Nm <- N - Nh
orig.correl.vector <- correl.vector
if (weight == 0) correl.vector <- rep(1, N) # unweighted case
ind = which(tag.indicator==1)
correl.vector <- abs(correl.vector[ind])^weight
sum.correl = sum(correl.vector)
up = correl.vector/sum.correl # "up" represents the peaks in the mountain plot
gaps = (c(ind-1, N) - c(0, ind)) # gaps between ranked pathway genes
down = gaps/Nm
# Running enrichment evaluated at set-member positions; valleys are the
# running values just before each peak.
RES = cumsum(c(up,up[Nh])-down)
valleys = RES[1:Nh]-up
max.ES = max(RES)
min.ES = min(valleys)
if( statistic == "Kolmogorov-Smirnov" ){
# KS statistic: extreme signed deviation of the running sum.
if( max.ES > -min.ES ){
ES <- signif(max.ES, digits=5)
arg.ES <- which.max(RES)
} else{
ES <- signif(min.ES, digits=5)
arg.ES <- which.min(RES)
}
}
if( statistic == "area.under.RES"){
if( max.ES > -min.ES ){
arg.ES <- which.max(RES)
} else{
arg.ES <- which.min(RES)
}
# Trapezoid-style area under the piecewise-linear running sum.
gaps = gaps+1
RES = c(valleys,0) * (gaps) + 0.5*( c(0,RES[1:Nh]) - c(valleys,0) ) * (gaps)
ES = sum(RES)
}
GSEA.results = list(ES = ES, arg.ES = arg.ES, RES = RES, indicator = tag.indicator)
# new.time <<- new.time + (proc.time() - ptm.new)
### End Olga's Additions ###
#GSEA.results <- GSEA.EnrichmentScore5(gene.list=gene.list, gene.set=gene.set2,
# statistic = statistic, alpha = weight, correl.vector = correl.vector)
ES.vector[sample.index] <- GSEA.results$ES
if (nperm == 0) {
# No permutations: raw ES, trivial p-value.
NES.vector[sample.index] <- ES.vector[sample.index]
p.val.vector[sample.index] <- 1
} else {
# Null distribution via gene-label shuffling, same inlined kernel.
for (r in 1:nperm) {
reshuffled.gene.labels <- sample(1:n.rows)
if (weight == 0) {
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
correl.vector <- data.array[reshuffled.gene.labels, sample.index]
}
# GSEA.results <- GSEA.EnrichmentScore5(gene.list=reshuffled.gene.labels, gene.set=gene.set2,
# statistic = statistic, alpha = weight, correl.vector = correl.vector)
### Olga's Additions ###
tag.indicator <- sign(match(reshuffled.gene.labels, gene.set2, nomatch=0)) # notice that the sign is 0 (no tag) or 1 (tag)
no.tag.indicator <- 1 - tag.indicator
N <- length(reshuffled.gene.labels)
Nh <- length(gene.set2)
Nm <- N - Nh
# orig.correl.vector <- correl.vector
if (weight == 0) correl.vector <- rep(1, N) # unweighted case
ind <- which(tag.indicator==1)
correl.vector <- abs(correl.vector[ind])^weight
sum.correl <- sum(correl.vector)
up = correl.vector/sum.correl
gaps = (c(ind-1, N) - c(0, ind))
down = gaps/Nm
RES = cumsum(c(up,up[Nh])-down)
valleys = RES[1:Nh]-up
max.ES = max(RES)
min.ES = min(valleys)
if( statistic == "Kolmogorov-Smirnov" ){
if( max.ES > -min.ES ){
ES <- signif(max.ES, digits=5)
arg.ES <- which.max(RES)
} else{
ES <- signif(min.ES, digits=5)
arg.ES <- which.min(RES)
}
}
if( statistic == "area.under.RES"){
if( max.ES > -min.ES ){
arg.ES <- which.max(RES)
} else{
arg.ES <- which.min(RES)
}
gaps = gaps+1
RES = c(valleys,0) * (gaps) + 0.5*( c(0,RES[1:Nh]) - c(valleys,0) ) * (gaps)
ES = sum(RES)
}
GSEA.results = list(ES = ES, arg.ES = arg.ES, RES = RES, indicator = tag.indicator)
### End Olga's Additions ###
phi[sample.index, r] <- GSEA.results$ES
}
# Normalize against same-sign permuted scores; p-value floored at 1/nperm.
if (ES.vector[sample.index] >= 0) {
pos.phi <- phi[sample.index, phi[sample.index, ] >= 0]
if (length(pos.phi) == 0) pos.phi <- 0.5
pos.m <- mean(pos.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/pos.m
s <- sum(pos.phi >= ES.vector[sample.index])/length(pos.phi)
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
} else {
neg.phi <- phi[sample.index, phi[sample.index, ] < 0]
if (length(neg.phi) == 0) neg.phi <- 0.5
neg.m <- mean(neg.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/abs(neg.m)
s <- sum(neg.phi <= ES.vector[sample.index])/length(neg.phi)
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
}
}
}
return(list(ES.vector = ES.vector, NES.vector = NES.vector, p.val.vector = p.val.vector))
} # end of OPAM.Projection.RNAi
# OPAM.project.dataset.2
# Projects a GCT expression dataset onto a collection of gene sets and writes
# the resulting gene-set x sample score matrix to `output.ds` (GCT format).
# Scores are computed per gene set via OPAM.Projection.2 (ES or NES).
#
# Arguments:
#   input.ds           - path to the input expression dataset (GCT)
#   output.ds          - path for the output score matrix (GCT)
#   gene.set.databases - one or more gene-set database files (read with
#                        Read.GeneSets.db)
#   gene.set.selection - "ALL" or a character vector of gene-set names
#   sample.norm.type   - per-sample normalization: "rank", "log" or "log.rank"
#   weight             - weighting exponent passed to OPAM.Projection.2
#   statistic          - enrichment statistic passed to OPAM.Projection.2
#   output.score.type  - "ES" (observed) or "NES" (permutation-normalized)
#   nperm              - permutations for the NES null distribution
#   combine.mode       - how paired *_UP / *_DN gene sets are merged (see below)
#   correl.type        - weight construction mode passed to OPAM.Projection.2
#
# Side effects: loads gtools/verification/RColorBrewer, prints progress, and
# writes the output GCT file. No value is returned.
OPAM.project.dataset.2 <- function(
input.ds,
output.ds,
gene.set.databases,
gene.set.selection = "ALL", # "ALL" or list with names of gene sets
sample.norm.type = "rank", # "rank", "log" or "log.rank"
weight = 0.25,
statistic = "area.under.RES",
output.score.type = "ES", # "ES" or "NES"
nperm = 200, # number of random permutations for NES case
combine.mode = "combine.off", # "combine.off" do not combine *_UP and *_DN versions in
# a single score. "combine.replace" combine *_UP and
# *_DN versions in a single score that replaces the individual
# *_UP and *_DN versions. "combine.add" combine *_UP and
# *_DN versions in a single score and add it but keeping
# the individual *_UP and *_DN versions.
correl.type = "rank") # "rank", "z.score", "symm.rank"
{ #----------------------------------------------------------------------------------------
# Load libraries
library(gtools)
library(verification)
library(RColorBrewer)
# Read input dataset
dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
m <- data.matrix(dataset$ds)
gene.names <- dataset$row.names
gene.descs <- dataset$descs
sample.names <- dataset$names
Ns <- length(m[1,]) # number of samples (columns)
Ng <- length(m[,1]) # number of genes (rows)
temp <- strsplit(input.ds, split="/") # Extract input file name
s <- length(temp[[1]])
input.file.name <- temp[[1]][s]
temp <- strsplit(input.file.name, split=".gct")
input.file.prefix <- temp[[1]][1]
# Sample normalization: put every sample on a comparable scale before scoring.
if (sample.norm.type == "rank") {
for (j in 1:Ns) { # column rank normalization
m[,j] <- rank(m[,j], ties.method = "average")
}
m <- 10000*m/Ng
} else if (sample.norm.type == "log.rank") {
for (j in 1:Ns) { # column rank normalization
m[,j] <- rank(m[,j], ties.method = "average")
}
m <- log(10000*m/Ng + exp(1))
} else if (sample.norm.type == "log") {
m[m < 1] <- 1 # clamp before log so values stay finite and >= 1
m <- log(m + exp(1))
}
# Read gene set databases: first pass sizes the containers.
max.G <- 0 # largest gene-set size across databases
max.N <- 0 # total number of gene sets across databases
for (gsdb in gene.set.databases) {
GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
max.G <- max(max.G, max(GSDB$size.G))
max.N <- max.N + GSDB$N.gs
}
N.gs <- 0
gs <- matrix("null", nrow=max.N, ncol=max.G)
gs.names <- vector(length=max.N, mode="character")
gs.descs <- vector(length=max.N, mode="character")
size.G <- vector(length=max.N, mode="numeric")
start <- 1
# Second pass: concatenate all databases into the preallocated containers.
for (gsdb in gene.set.databases) {
GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
N.gs <- GSDB$N.gs
gs.names[start:(start + N.gs - 1)] <- GSDB$gs.names
gs.descs[start:(start + N.gs - 1)] <- GSDB$gs.desc
size.G[start:(start + N.gs - 1)] <- GSDB$size.G
gs[start:(start + N.gs - 1), 1:max(GSDB$size.G)] <- GSDB$gs[1:N.gs, 1:max(GSDB$size.G)]
start <- start + N.gs
}
N.gs <- max.N
# Select desired gene sets
if (gene.set.selection[1] != "ALL") {
locs <- match(gene.set.selection, gs.names)
N.gs <- sum(!is.na(locs))
if (N.gs > 1) {
gs <- gs[locs,]
} else {
# Single-row subsetting drops the matrix to a vector; restore matrix
# shape so gs[gs.i, ...] below keeps working (mirrors
# OPAM.project.dataset.3 / .5).
gs <- t(as.matrix(gs[locs,])) # Force vector to matrix if only one gene set specified
}
gs.names <- gs.names[locs]
gs.descs <- gs.descs[locs]
size.G <- size.G[locs]
}
# Loop over gene sets, scoring each against every sample.
score.matrix <- matrix(0, nrow=N.gs, ncol=Ns)
for (gs.i in 1:N.gs) {
gene.set <- gs[gs.i, 1:size.G[gs.i]]
gene.overlap <- intersect(gene.set, gene.names)
print(paste(gs.i, "gene set:", gs.names[gs.i], " overlap=", length(gene.overlap)))
if (length(gene.overlap) == 0) {
# No overlap with the dataset: fill with tiny random values so
# downstream code does not see an all-zero (degenerate) row.
score.matrix[gs.i, ] <- runif(Ns, min=1E-06, max=1.1E-06)
next
} else {
gene.set.locs <- match(gene.overlap, gene.set)
gene.names.locs <- match(gene.overlap, gene.names)
msig <- m[gene.names.locs,]
msig.names <- gene.names[gene.names.locs]
if (output.score.type == "ES") {
# Observed enrichment score only (single permutation is a no-op).
OPAM <- OPAM.Projection.2(data.array = m, gene.names = gene.names, n.cols = Ns,
n.rows = Ng, weight = weight, statistic = statistic,
gene.set = gene.overlap, nperm = 1, correl.type = correl.type)
score.matrix[gs.i,] <- OPAM$ES.vector
} else if (output.score.type == "NES") {
# Permutation-normalized enrichment score.
OPAM <- OPAM.Projection.2(data.array = m, gene.names = gene.names, n.cols = Ns,
n.rows = Ng, weight = weight, statistic = statistic,
gene.set = gene.overlap, nperm = nperm, correl.type = correl.type)
score.matrix[gs.i,] <- OPAM$NES.vector
}
}
}
# Optionally combine paired *_UP / *_DN gene sets into a single score.
initial.up.entries <- 0
final.up.entries <- 0
initial.dn.entries <- 0
final.dn.entries <- 0
combined.entries <- 0
other.entries <- 0
if (combine.mode == "combine.off") {
score.matrix.2 <- score.matrix
gs.names.2 <- gs.names
gs.descs.2 <- gs.descs
} else if ((combine.mode == "combine.replace") || (combine.mode == "combine.add")) {
score.matrix.2 <- NULL
gs.names.2 <- NULL
gs.descs.2 <- NULL
k <- 1
for (i in 1:N.gs) {
# Split "BODY_SUFFIX" on the last underscore-delimited token.
temp <- strsplit(gs.names[i], split="_")
body <- paste(temp[[1]][seq(1, length(temp[[1]]) -1)], collapse="_")
suffix <- tail(temp[[1]], 1)
print(paste("i:", i, "gene set:", gs.names[i], "body:", body, "suffix:", suffix))
if (suffix == "UP") { # This is an "UP" gene set
initial.up.entries <- initial.up.entries + 1
target <- paste(body, "DN", sep="_")
loc <- match(target, gs.names)
if (!is.na(loc)) { # found corresponding "DN" gene set: create combined entry
# Combined score = UP score minus DN score.
score <- score.matrix[i,] - score.matrix[loc,]
score.matrix.2 <- rbind(score.matrix.2, score)
gs.names.2 <- c(gs.names.2, body)
gs.descs.2 <- c(gs.descs.2, paste(gs.descs[i], "combined UP & DN"))
combined.entries <- combined.entries + 1
if (combine.mode == "combine.add") { # also add the "UP entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.up.entries <- final.up.entries + 1
}
} else { # did not find corresponding "DN" gene set: create "UP" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.up.entries <- final.up.entries + 1
}
} else if (suffix == "DN") { # This is a "DN" gene set
initial.dn.entries <- initial.dn.entries + 1
target <- paste(body, "UP", sep="_")
loc <- match(target, gs.names)
if (is.na(loc)) { # did not find corresponding "UP" gene set: create "DN" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.dn.entries <- final.dn.entries + 1
} else { # it found corresponding "UP" gene set
# The combined entry was (or will be) emitted while processing the
# matching "UP" set; keep the individual "DN" entry only in
# "combine.add" mode.
if (combine.mode == "combine.add") { # create "DN" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.dn.entries <- final.dn.entries + 1
}
}
} else { # This is neither "UP nor "DN" gene set: create individual entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
other.entries <- other.entries + 1
}
} # end for loop over gene sets
print(paste("initial.up.entries:", initial.up.entries))
print(paste("final.up.entries:", final.up.entries))
print(paste("initial.dn.entries:", initial.dn.entries))
print(paste("final.dn.entries:", final.dn.entries))
print(paste("other.entries:", other.entries))
print(paste("combined.entries:", combined.entries))
print(paste("total entries:", length(score.matrix.2[,1])))
}
# Write the gene-set x sample score matrix in GCT format.
V.GCT <- data.frame(score.matrix.2)
names(V.GCT) <- sample.names
row.names(V.GCT) <- gs.names.2
write.gct(gct.data.frame = V.GCT, descs = gs.descs.2, filename = output.ds)
} # end of OPAM.project.dataset.2
OPAM.project.dataset.3 <- function(
input.ds,
output.ds,
gene.set.databases,
gene.set.selection = "ALL", # "ALL" or list with names of gene sets
sample.norm.type = "rank", # "rank", "log" or "log.rank"
weight = 0.25,
statistic = "area.under.RES",
output.score.type = "ES", # "ES" or "NES"
nperm = 200, # number of random permutations for NES case
combine.mode = "combine.off", # "combine.off" do not combine *_UP and *_DN versions in
# a single score. "combine.replace" combine *_UP and
# *_DN versions in a single score that replaces the individual
# *_UP and *_DN versions. "combine.add" combine *_UP and
# *_DN versions in a single score and add it but keeping
# the individual *_UP and *_DN versions.
correl.type = "rank") # "rank", "z.score", "symm.rank"
{ #----------------------------------------------------------------------------------------
# Load libraries
library(gtools)
library(verification)
library(RColorBrewer)
# Read input dataset
dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
m <- data.matrix(dataset$ds)
gene.names <- dataset$row.names
gene.descs <- dataset$descs
sample.names <- dataset$names
Ns <- length(m[1,])
Ng <- length(m[,1])
temp <- strsplit(input.ds, split="/") # Extract input file name
s <- length(temp[[1]])
input.file.name <- temp[[1]][s]
temp <- strsplit(input.file.name, split=".gct")
input.file.prefix <- temp[[1]][1]
# Sample normalization
if (sample.norm.type == "rank") {
for (j in 1:Ns) { # column rank normalization
m[,j] <- rank(m[,j], ties.method = "average")
}
m <- 10000*m/Ng
} else if (sample.norm.type == "log.rank") {
for (j in 1:Ns) { # column rank normalization
m[,j] <- rank(m[,j], ties.method = "average")
}
m <- log(10000*m/Ng + exp(1))
} else if (sample.norm.type == "log") {
m[m < 1] <- 1
m <- log(m + exp(1))
}
# Read gene set databases
max.G <- 0
max.N <- 0
for (gsdb in gene.set.databases) {
GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
max.G <- max(max.G, max(GSDB$size.G))
max.N <- max.N + GSDB$N.gs
}
N.gs <- 0
gs <- matrix("null", nrow=max.N, ncol=max.G)
gs.names <- vector(length=max.N, mode="character")
gs.descs <- vector(length=max.N, mode="character")
size.G <- vector(length=max.N, mode="numeric")
start <- 1
for (gsdb in gene.set.databases) {
GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
N.gs <- GSDB$N.gs
gs.names[start:(start + N.gs - 1)] <- GSDB$gs.names
gs.descs[start:(start + N.gs - 1)] <- GSDB$gs.desc
size.G[start:(start + N.gs - 1)] <- GSDB$size.G
gs[start:(start + N.gs - 1), 1:max(GSDB$size.G)] <- GSDB$gs[1:N.gs, 1:max(GSDB$size.G)]
start <- start + N.gs
}
N.gs <- max.N
# Select desired gene sets
if (gene.set.selection[1] != "ALL") {
locs <- match(gene.set.selection, gs.names)
N.gs <- sum(!is.na(locs))
if(N.gs > 1) {
gs <- gs[locs,]
} else {
gs <- t(as.matrix(gs[locs,])) # Force vector to matrix if only one gene set specified
}
gs.names <- gs.names[locs]
gs.descs <- gs.descs[locs]
size.G <- size.G[locs]
}
# Loop over gene sets
score.matrix <- matrix(0, nrow=N.gs, ncol=Ns)
for (gs.i in 1:N.gs) {
#browser()
gene.set <- gs[gs.i, 1:size.G[gs.i]]
gene.overlap <- intersect(gene.set, gene.names)
print(paste(gs.i, "gene set:", gs.names[gs.i], " overlap=", length(gene.overlap)))
if (length(gene.overlap) == 0) {
score.matrix[gs.i, ] <- runif(Ns, min=1E-06, max=1.1E-06)
next
} else {
gene.set.locs <- match(gene.overlap, gene.set)
gene.names.locs <- match(gene.overlap, gene.names)
msig <- m[gene.names.locs,]
msig.names <- gene.names[gene.names.locs]
if (output.score.type == "ES") {
OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
n.rows = Ng, weight = weight, statistic = statistic,
gene.set = gene.overlap, nperm = 1, correl.type = correl.type)
score.matrix[gs.i,] <- OPAM$ES.vector
} else if (output.score.type == "NES") {
OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
n.rows = Ng, weight = weight, statistic = statistic,
gene.set = gene.overlap, nperm = nperm, correl.type = correl.type)
score.matrix[gs.i,] <- OPAM$NES.vector
}
}
}
initial.up.entries <- 0
final.up.entries <- 0
initial.dn.entries <- 0
final.dn.entries <- 0
combined.entries <- 0
other.entries <- 0
if (combine.mode == "combine.off") {
score.matrix.2 <- score.matrix
gs.names.2 <- gs.names
gs.descs.2 <- gs.descs
} else if ((combine.mode == "combine.replace") || (combine.mode == "combine.add")) {
score.matrix.2 <- NULL
gs.names.2 <- NULL
gs.descs.2 <- NULL
k <- 1
for (i in 1:N.gs) {
temp <- strsplit(gs.names[i], split="_")
body <- paste(temp[[1]][seq(1, length(temp[[1]]) -1)], collapse="_")
suffix <- tail(temp[[1]], 1)
print(paste("i:", i, "gene set:", gs.names[i], "body:", body, "suffix:", suffix))
if (suffix == "UP") { # This is an "UP" gene set
initial.up.entries <- initial.up.entries + 1
target <- paste(body, "DN", sep="_")
loc <- match(target, gs.names)
if (!is.na(loc)) { # found corresponding "DN" gene set: create combined entry
score <- score.matrix[i,] - score.matrix[loc,]
score.matrix.2 <- rbind(score.matrix.2, score)
gs.names.2 <- c(gs.names.2, body)
gs.descs.2 <- c(gs.descs.2, paste(gs.descs[i], "combined UP & DN"))
combined.entries <- combined.entries + 1
if (combine.mode == "combine.add") { # also add the "UP entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.up.entries <- final.up.entries + 1
}
} else { # did not find corresponding "DN" gene set: create "UP" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.up.entries <- final.up.entries + 1
}
} else if (suffix == "DN") { # This is a "DN" gene set
initial.dn.entries <- initial.dn.entries + 1
target <- paste(body, "UP", sep="_")
loc <- match(target, gs.names)
if (is.na(loc)) { # did not find corresponding "UP" gene set: create "DN" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.dn.entries <- final.dn.entries + 1
} else { # it found corresponding "UP" gene set
if (combine.mode == "combine.add") { # create "DN" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.dn.entries <- final.dn.entries + 1
}
}
} else { # This is neither "UP nor "DN" gene set: create individual entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
other.entries <- other.entries + 1
}
} # end for loop over gene sets
print(paste("initial.up.entries:", initial.up.entries))
print(paste("final.up.entries:", final.up.entries))
print(paste("initial.dn.entries:", initial.dn.entries))
print(paste("final.dn.entries:", final.dn.entries))
print(paste("other.entries:", other.entries))
print(paste("combined.entries:", combined.entries))
print(paste("total entries:", length(score.matrix.2[,1])))
}
V.GCT <- data.frame(score.matrix.2)
names(V.GCT) <- sample.names
row.names(V.GCT) <- gs.names.2
write.gct(gct.data.frame = V.GCT, descs = gs.descs.2, filename = output.ds)
} # end of OPAM.project.dataset.2
# Match projection (signature) scores to sample phenotypes.
#
# Reads a projection dataset (GCT) and a phenotype file (CLS). For each
# phenotype track it: (1) orders samples by class, (2) selects the top marker
# models per class according to markers.metric ("ROC", "T.TEST" or
# "MEAN.DIFF"), (3) hierarchically clusters samples within each class, and
# (4) renders one annotated heatmap page per phenotype into
# <results.dir>/<prefix>.PHEN.MARKERS.<metric>.pdf. If markers.file is
# non-NULL the selected marker sub-matrix is also written out in GCT format.
OPAM.match.projection.to.phenotypes <- function(
    input.ds,                         # projection dataset (GCT file)
    input.cls,                        # phenotype assignments (CLS file)
    results.dir,                      # directory that receives the PDF
    normalize.score = T,              # row-normalize scores before analysis?
    normalization.type = "zero.one",  # "zero.one", "z.score" or "r.z.score"
    markers.num = 5,                  # number of markers to report per class
    user.colors = NA,                 # optional user-supplied phenotype colors
    markers.metric = "ROC",           # "ROC", "T.TEST" (or "MEAN.DIFF")
    markers.file = NULL,              # optional GCT output with the marker rows
    sort.phenotypes = T,              # reorder phenotype tracks by association
    sort.decreasing = T,              # T = decreasing, F = increasing
    sort.expression = T,              # sort samples by class & cluster in-class
    sort.decreasing.genes = T,        # marker sort direction (NA keeps order)
    legend = T,
    char.res = 1,
    only.up = F,                      # report only up-markers for each class
    cmap.type = 3,
    show.desc = T,                    # append metric/p-val/class/desc to rows
    row.norm = T)
{
    library(gtools)
    library(verification)
    library(ROCR)
    library(MASS)
    library(RColorBrewer)
    library(heatmap.plus)

    dataset <- MSIG.Gct2Frame(filename = input.ds)  # Read projection dataset (GCT format)
    m <- data.matrix(dataset$ds)
    model.names <- dataset$row.names
    model.descs <- dataset$descs
    # BUG FIX: the original had a dangling "n.models <-" that chained into the
    # next assignment, making n.models equal to the number of samples. It must
    # be the number of models (rows), as in the sibling functions.
    n.models <- length(m[,1])
    Ns <- length(m[1,])
    for (i in 1:n.models) {
        if (sd(m[i,]) == 0) {
            val <- m[i, 1]
            # add small noise to flat profiles so downstream stats are defined
            m[i,] <- m[i,] + runif(n=Ns, min= val - 0.001, max=val + 0.001)
        }
    }
    dim(m)
    sample.names <- dataset$names
    temp <- strsplit(input.ds, split="/") # Extract test file name
    s <- length(temp[[1]])
    test.file.name <- temp[[1]][s]
    temp <- strsplit(test.file.name, split=".gct")
    test.file.prefix <- temp[[1]][1]

    # Normalize each model's score vector across samples
    if (normalize.score == T) {
        if (normalization.type == "zero.one") {
            for (i in 1:n.models) {
                m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
            }
        } else if (normalization.type == "z.score") {
            for (i in 1:n.models) {
                m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
            }
        } else if (normalization.type == "r.z.score") {
            for (i in 1:n.models) {
                m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
            }
        }
    }

    CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
    cls.labels <- CLS$class.v
    cls.phen <- CLS$phen
    cls.list <- CLS$class.list
    if (is.vector(cls.labels)) {
        n.phen <- 1
    } else {
        n.phen <- length(cls.labels[,1])
    }
    # BUG FIX: test only the first element; is.na() on a whole color vector is
    # not a valid scalar if() condition (sibling functions use user.colors[1]).
    if (!is.na(user.colors[1])) {
        c.test <- user.colors
    } else {
        if (!is.null(CLS$col.phen)) {
            c.test <- CLS$col.phen
        } else {
            c.test <- c(brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Set1"),
                        brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
                        brewer.pal(n=8, name="BuGn"),
                        brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                        brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                        brewer.pal(n=8, name="BuGn"),
                        brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                        brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                        brewer.pal(n=8, name="BuGn"))
        }
    }
    if (!is.null(CLS$phen.names)) {
        phen.names <- CLS$phen.names
    } else {
        phen.names <- "NA"
    }
    cls.phen.index <- unlist(cls.phen)
    cls.phen.colors <- c.test[1:length(cls.phen.index)]
    n.classes <- vector(length=n.phen, mode="numeric")
    if (n.phen == 1) {
        max.classes <- length(cls.phen)
        n.classes[1] <- max.classes
    } else {
        max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
        for (i in 1:n.phen) {
            n.classes[i] <- length(cls.phen[[i]])
        }
    }
    x <- rbind(sample.names, cls.list, cls.labels)
    print("before loop")
    print(x)
    print(cls.phen)
    print(phen.names)
    filename <- paste(results.dir, test.file.prefix, ".PHEN.MARKERS.", markers.metric, ".pdf", sep="")
    pdf(file=filename, height = 10, width = 10)

    # Loop over phenotypes: one heatmap page per phenotype track
    for (k.phen in 1:n.phen) {
        if (is.vector(cls.labels)) {
            k.phen.labels <- cls.labels
            k.phen.list <- cls.list
        } else {
            k.phen.labels <- as.vector(cls.labels[k.phen,])
            k.phen.list <- as.vector(cls.list[k.phen,])
        }
        # Sort samples according to the current phenotype
        if (sort.expression == T) {
            phen.index <- order(k.phen.labels, decreasing=sort.decreasing)
        } else {
            phen.index <- seq(1, length(k.phen.labels))
        }
        if (is.vector(cls.labels)) {
            cls.labels2 <- cls.labels[phen.index]
            cls.list2 <- cls.list[phen.index]
        } else {
            cls.labels2 <- cls.labels[, phen.index]
            cls.list2 <- cls.list[, phen.index]
        }
        k.phen.labels <- k.phen.labels[phen.index]
        k.phen.list <- k.phen.list[phen.index]
        sample.names2 <- sample.names[phen.index]
        m2 <- m[, phen.index]
        x <- rbind(sample.names2, cls.list2, cls.labels2)
        print(paste("inside loop phen=", k.phen))
        print(x)
        print(cls.phen)
        print(phen.names)

        # Find markers for each class of the current phenotype
        if (is.vector(cls.labels2)) {
            classes <- unique(cls.list2)
        } else {
            classes <- unique(cls.list2[k.phen, ])
        }
        # With more than two classes only up-markers are meaningful
        if (length(classes) > 2) {
            k.only.up <- T
        } else {
            k.only.up <- only.up
        }
        if (length(classes) == 2) classes <- classes[1]
        markers <- NULL
        markers.descs <- NULL
        metric.list <- NULL
        p.val.list <- NULL
        k.class <- NULL
        for (k in classes) {
            # Binary class membership: 0 = in class k, 1 = rest
            if (is.vector(cls.labels2)) {
                bin.class <- ifelse(cls.list2 == k, 0, 1)
            } else {
                bin.class <- ifelse(cls.list2[k.phen, ] == k, 0, 1)
            }
            if (markers.metric == "T.TEST") {
                metric <- vector(length=n.models, mode="numeric")
                p.val <- vector(length=n.models, mode="numeric")
                for (i in 1:n.models) {
                    temp <- split(m2[i, ], bin.class)
                    x <- temp[[1]]
                    y <- temp[[2]]
                    metric[i] <- signif(t.test(x=x, y=y)$statistic, digits=3)
                    p.val[i] <- signif(t.test(x=x, y=y)$p.value, digits=3)
                }
            } else if (markers.metric == "ROC") {
                bin.class <- ifelse(bin.class == 1, 0, 1)  # flip so 1 = in class k
                metric <- vector(length=n.models, mode="numeric")
                p.val <- vector(length=n.models, mode="numeric")
                for (i in 1:n.models) {
                    m.score <- m2[i,]
                    m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
                    if (length(table(bin.class)) > 1) {
                        perf.auc <- roc.area(bin.class, m.score.norm)
                        metric[i] <- signif(perf.auc$A, digits=3)
                        p.val[i] <- signif(perf.auc$p.value, digits=3)
                    } else {
                        # degenerate (single-class) split: no ROC defined
                        metric[i] <- 1
                        p.val[i] <- 1
                    }
                }
            } else if (markers.metric == "MEAN.DIFF") {
                bin.class <- ifelse(bin.class == 1, 0, 1)
                metric <- vector(length=n.models, mode="numeric")
                p.val <- vector(length=n.models, mode="numeric")
                for (i in 1:n.models) {
                    temp <- split(m2[i, ], bin.class)
                    x <- temp[[1]]
                    y <- temp[[2]]
                    metric[i] <- signif(mean(x) - mean(y), digits=3)
                    p.val[i] <- signif(t.test(x=x, y=y)$p.value, digits=3)
                }
            }
            if (is.na(sort.decreasing.genes)) {
                metric.order <- seq(1, length(metric))
            } else {
                metric.order <- order(metric, decreasing=sort.decreasing.genes)
            }
            # BUG FIX: use k.only.up (computed above, forced TRUE for >2
            # classes); the original tested only.up and left k.only.up dead.
            if (k.only.up == TRUE) {
                k.markers.num <- ifelse(markers.num > n.models, n.models, markers.num)
                markers <- c(markers, model.names[metric.order][1:k.markers.num])
                markers.descs <- c(markers.descs, model.descs[metric.order][1:k.markers.num])
                metric.list <- c(metric.list, metric[metric.order][1:k.markers.num])
                p.val.list <- c(p.val.list, p.val[metric.order][1:k.markers.num])
                k.class <- c(k.class, rep(k, k.markers.num))
            } else {
                # Take k.markers.num from the top (class k) and from the
                # bottom ("not k") of the ranked list
                k.markers.num <- ifelse(length(classes)*markers.num > n.models, floor(n.models/length(classes)),
                                        markers.num)
                markers <- c(markers, model.names[metric.order][1:k.markers.num],
                             model.names[metric.order][(length(model.names) - k.markers.num +1):length(model.names)])
                markers.descs <- c(markers.descs, model.descs[metric.order][1:k.markers.num],
                                   model.descs[metric.order][(length(model.names) - k.markers.num + 1):length(model.names)])
                metric.list <- c(metric.list, metric[metric.order][1:k.markers.num],
                                 metric[metric.order][(length(model.names) - k.markers.num + 1):length(model.names)])
                p.val.list <- c(p.val.list, p.val[metric.order][1:k.markers.num],
                                p.val[metric.order][(length(model.names) - k.markers.num + 1):length(model.names)])
                k.class <- c(k.class, rep(k, k.markers.num), rep(paste("not", k), k.markers.num))
            }
        }
        V3 <- m2[markers,]
        print(V3)
        print(markers)
        if (show.desc == T) {
            model.descs2 <- paste(metric.list, p.val.list, k.class, markers.descs)
        } else {
            model.descs2 <- paste(metric.list, p.val.list)
        }
        height <- ifelse(length(markers) + n.phen >= 9, 10, (length(markers) + n.phen)*0.44 + 5)

        # Hierarchically cluster (hclust, complete linkage) samples inside
        # each phenotype class so visually similar samples sit together
        if (sort.expression == T) {
            for (j in unique(k.phen.labels)) {
                V4 <- V3[ , k.phen.labels == j]
                sn <- sample.names2[k.phen.labels == j]
                if (is.vector(cls.labels)) {
                    clab <- cls.labels2[k.phen.labels == j]
                    clis <- cls.list2[k.phen.labels == j]
                } else {
                    clab <- cls.labels2[, k.phen.labels == j]
                    clis <- cls.list2[, k.phen.labels == j]
                }
                l.phen <- sum(k.phen.labels == j)
                if (l.phen > 1) {
                    dist.matrix <- dist(t(V4))
                    HC <- hclust(dist.matrix, method="complete")
                    HC.order <- HC$order
                    V4 <- V4[ , HC.order]
                    sn <- sn[HC.order]
                    if (is.vector(cls.labels2)) {
                        clab <- clab[HC.order]
                        clis <- clis[HC.order]
                    } else {
                        clab <- clab[, HC.order]
                        clis <- clis[, HC.order]
                    }
                }
                V3[ , k.phen.labels == j] <- V4
                sample.names2[k.phen.labels == j] <- sn
                if (is.vector(cls.labels2)) {
                    cls.labels2[k.phen.labels == j] <- clab
                    cls.list2[k.phen.labels == j] <- clis
                } else {
                    cls.labels2[, k.phen.labels == j] <- clab
                    cls.list2[, k.phen.labels == j] <- clis
                }
            }
        }
        x <- rbind(sample.names2, cls.list2, cls.labels2)
        print(paste("inside loop after in-class sort phen=", k.phen))
        print(x)
        print(cls.phen)
        print(phen.names)

        # Recompute cls.phen and cls.labels2 as order may have changed
        cls.phen2 <- list(NULL)
        if (is.vector(cls.labels2)) {
            classes <- unique(cls.list2)
            cls.phen2 <- classes
            cls.labels2 <- match(cls.list2, cls.phen2)
        } else {
            for (kk in 1:length(cls.list2[, 1])) {
                classes <- unique(cls.list2[kk,])
                cls.phen2[[kk]] <- classes
                cls.labels2[kk,] <- match(cls.list2[kk,], cls.phen2[[kk]])
            }
        }
        x <- rbind(sample.names2, cls.list2, cls.labels2)
        print(paste("inside loop after cls.phen renorm phen=", k.phen))
        print(cls.phen2)
        print(phen.names)

        library(gmodels)
        # Optionally reorder phenotype tracks by their (chi-square) association
        # with the current phenotype; degenerate tables get a neutral score
        if (!is.vector(cls.labels2)) {
            if (sort.phenotypes == T) {
                phen.score <- vector(length=n.phen, mode="numeric")
                for (k.lab in 1:n.phen) {
                    tab <- table(as.vector(cls.list2[k.lab,]), k.phen.list)
                    print(tab)
                    if ((length(tab[,1]) > 1) && (length(tab[1,]) > 1)) {
                        CT <- CrossTable(tab, chisq=T)
                        phen.score[k.lab] <- CT$chisq$p.value
                        print(phen.score[k.lab])
                    } else {
                        phen.score[k.lab] <- 0.50
                        print(phen.score[k.lab])
                    }
                }
                phen.order <- order(phen.score, decreasing= T)
                print(phen.order)
                cls.labels2 <- cls.labels2[phen.order,]
                cls.phen2 <- cls.phen2[phen.order]
                phen.names2 <- phen.names[phen.order]
                main.string <- paste(test.file.prefix, " - ", phen.names2[n.phen], markers.metric, " order")
            } else {
                phen.names2 <- phen.names
                main.string <- paste(test.file.prefix, " - ", phen.names2[k.phen], markers.metric, " order")
            }
        } else {
            phen.names2 <- phen.names[1]
            main.string <- paste(test.file.prefix, " - ", phen.names2, markers.metric, " order")
        }
        x <- rbind(sample.names2, cls.list2, cls.labels2)
        print(paste("inside loop after phen sort before figure phen=", k.phen))
        print(x)
        print(cls.phen2)
        print(phen.names2)
        phen.list <- unlist(cls.phen2)
        colors.list <- cls.phen.colors[match(phen.list, cls.phen.index)]
        print(rbind(phen.list, colors.list))
        if (show.desc == T) {
            # make row names unique by appending their index
            markers <- paste(markers, seq(1, length(markers)), sep="_")
        }
        MSIG.HeatMapPlot.7(V = V3, row.names = markers,
                           row.names2 = model.descs2, col.labels = cls.labels2,
                           col.classes = cls.phen2, phen.cmap = colors.list, phen.names = phen.names2,
                           col.names = sample.names2, main = main.string, xlab=" ", ylab=" ",
                           row.norm = row.norm,
                           cmap.type = cmap.type, char.rescale = char.res, legend=legend)
        V3 <- data.frame(V3)
        colnames(V3) <- sample.names2
        row.names(V3) <- markers
        if (!is.null(markers.file)) {
            write.gct(gct.data.frame = V3, descs = model.descs2, filename = markers.file)
        }
    } # end loop over phenotypes
    dev.off()
}
# Sort a projection dataset by the score of one named model.
#
# Samples (columns) are ordered by the score of `model`; the remaining models
# (rows) are then ordered by their correlation with that model. Each model row
# is annotated with its ROC AUC (and p-value) against a binary phenotype
# derived from target.phen/target.class. The annotated heatmap is written to
# <results.dir>/<prefix>.SORT.PROJ.pdf and, optionally, the sorted matrix to
# output.dataset (GCT format).
OPAM.sort.projection.by.score.2 <- function(
    input.ds,                         # projection dataset (GCT file)
    input.cls,                        # phenotype assignments (CLS file)
    results.dir,                      # directory that receives the PDF
    normalize.score = T,              # row-normalize scores before analysis?
    normalization.type = "zero.one",  # "zero.one", "z.score" or "r.z.score"
    model,                            # name of the model to sort columns by
    target.phen = NA,                 # phenotype row defining the binary class
    target.class = NA,                # class treated as positive (1)
    user.colors = NA,                 # optional user-supplied phenotype colors
    decreasing.order = T,             # sort direction for the model's scores
    output.dataset = NA,              # optional GCT output of sorted scores
    char.rescale = 1,
    cmap.type = 3,
    row.norm = T)
{
    library(gtools)
    library(verification)
    library(ROCR)
    library(MASS)
    library(RColorBrewer)
    library(heatmap.plus)

    dataset <- MSIG.Gct2Frame(filename = input.ds)  # Read projection dataset (GCT format)
    m <- data.matrix(dataset$ds)
    model.names <- dataset$row.names
    model.descs <- dataset$descs
    Ns <- length(m[1,])
    dim(m)
    sample.names <- dataset$names
    n.models <- length(m[,1])
    temp <- strsplit(input.ds, split="/") # Extract test file name
    s <- length(temp[[1]])
    test.file.name <- temp[[1]][s]
    temp <- strsplit(test.file.name, split=".gct")
    test.file.prefix <- temp[[1]][1]
    char.res <- 0.013 * n.models + 0.65

    # Normalize each model's score vector across samples
    if (normalize.score == T) {
        if (normalization.type == "zero.one") {
            for (i in 1:n.models) {
                m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
            }
        } else if (normalization.type == "z.score") {
            for (i in 1:n.models) {
                m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
            }
        } else if (normalization.type == "r.z.score") {
            for (i in 1:n.models) {
                m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
            }
        }
    }

    CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
    cls.labels <- CLS$class.v
    cls.phen <- CLS$phen
    cls.list <- CLS$class.list
    if (is.vector(cls.labels)) {
        n.phen <- 1
    } else {
        n.phen <- length(cls.labels[,1])
    }
    if (!is.na(user.colors[1])) {
        c.test <- user.colors
    } else {
        if (!is.null(CLS$col.phen)) {
            c.test <- CLS$col.phen
        } else {
            c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                        brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
                        brewer.pal(n=8, name="BuGn"),
                        brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                        brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                        brewer.pal(n=8, name="BuGn"),
                        brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                        brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                        brewer.pal(n=8, name="BuGn"))
        }
    }
    if (!is.null(CLS$phen.names)) {
        phen.names <- CLS$phen.names
    } else {
        phen.names <- "NA"
    }
    cls.phen.index <- unlist(cls.phen)
    cls.phen.colors <- c.test[1:length(cls.phen.index)]
    n.classes <- vector(length=n.phen, mode="numeric")
    if (n.phen == 1) {
        max.classes <- length(cls.phen)
        n.classes[1] <- max.classes
    } else {
        max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
        for (i in 1:n.phen) {
            n.classes[i] <- length(cls.phen[[i]])
        }
    }
    filename <- paste(results.dir, test.file.prefix, ".SORT.PROJ", sep="")
    pdf(file=paste(filename, ".pdf", sep=""), height = 8.5, width = 11)

    # Sort samples by the chosen model's score
    loc <- match(model, model.names)
    print(c("loc:", loc))
    s.order <- order(m[loc,], decreasing = decreasing.order)
    m2 <- m[, s.order]
    sample.names2 <- sample.names[s.order]
    if (is.vector(cls.labels)) {
        cls.labels2 <- cls.labels[s.order]
        cls.list2 <- cls.list[s.order]
    } else {
        cls.labels2 <- cls.labels[, s.order]
        cls.list2 <- cls.list[, s.order]
    }
    # Recompute cls.phen and cls.labels2 as order may have changed
    cls.phen2 <- NULL
    if (is.vector(cls.labels)) {
        classes <- unique(cls.list2)
        cls.phen2 <- classes
        cls.labels2 <- match(cls.list2, cls.phen2)
    } else {
        for (kk in 1:length(cls.list2[, 1])) {
            classes <- unique(cls.list2[kk,])
            cls.phen2 <- c(cls.phen2, classes)
            cls.labels2[kk,] <- match(cls.list2[kk,], classes)
        }
    }
    # Order models by correlation with the chosen model
    correl <- cor(t(m2))[, loc]
    m.order <- order(correl, decreasing=T)
    correl2 <- correl[m.order]
    m2 <- m2[m.order,]
    model.names2 <- model.names[m.order]
    model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
    phen.list <- unlist(cls.phen2)
    colors.list <- cls.phen.colors[match(phen.list, cls.phen.index)]
    # Binary phenotype used for the per-model ROC annotation
    if (!is.na(target.phen)) {
        bin.class <- ifelse(cls.list2[target.phen,] == target.class, 1, 0)
    } else {
        # NOTE(review): this default assumes cls.list2 is a matrix (multi-
        # phenotype CLS); a single-phenotype vector CLS would fail here.
        bin.class <- ifelse(cls.list2[1,] == cls.list2[1,1], 1, 0)
    }
    # BUG FIX: label the *sorted* sample names (sample.names2), not the
    # original unsorted sample.names, so labels match the sorted columns.
    sample.names2 <- paste(sample.names2, bin.class, sep="_")
    print(bin.class)
    print(paste("n models:", n.models))
    # Annotate every model row with ROC AUC (and p-value) vs. bin.class;
    # this overwrites the correlation-based descriptions computed above
    for (i in 1:n.models) {
        print(paste(i, model.names2[i]))
        m.score <- m2[i,]
        m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
        print(m.score.norm)
        if (length(unique(bin.class)) > 1) {
            perf.auc <- roc.area(bin.class, m.score.norm)
            roc <- signif(perf.auc$A, digits=3)
            p.val <- signif(perf.auc$p.value, digits=3)
        } else {
            roc <- p.val <- "-"  # degenerate class: ROC undefined
        }
        print(paste("ROC=", roc, " p-val=", p.val))
        model.descs2[i] <- paste(roc, " (", p.val, ")")
    }
    MSIG.HeatMapPlot.7(V = m2, row.names = model.names2,
                       row.names2 = model.descs2, col.labels = cls.labels2,
                       col.classes = cls.phen2, phen.cmap = colors.list, phen.names = phen.names,
                       col.names = sample.names2, main = " ", xlab=" ", ylab=" ", row.norm = row.norm,
                       cmap.type = cmap.type, char.rescale = char.rescale, legend=T)
    dev.off()
    if (!is.na(output.dataset)) {
        V.GCT <- data.frame(m2)
        colnames(V.GCT) <- sample.names2
        row.names(V.GCT) <- model.names2
        write.gct.2(gct.data.frame = V.GCT, descs = model.descs2, filename =output.dataset)
    }
}
# Calls MSIG.HeatMapPlot.8 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2 does.
#
# The best model is chosen automatically as the one with the highest ROC AUC
# against the target.phen/target.class binary phenotype; samples are then
# sorted by that model's score and phenotype tracks are re-ranked by their
# ROC association with it.
OPAM.sort.projection.by.score.3 <- function(
    input.ds,                         # projection dataset (GCT file)
    input.cls,                        # phenotype assignments (CLS file)
    results.dir,                      # directory that receives the PDF
    normalize.score = T,              # row-normalize scores before analysis?
    normalization.type = "zero.one",  # "zero.one", "z.score" or "r.z.score"
    target.phen = NA,                 # phenotype row defining the binary class
    target.class = NA,                # class treated as positive (1)
    user.colors = NA,                 # optional user-supplied phenotype colors
    decreasing.order = T,
    output.dataset = NA,              # optional GCT output of sorted scores
    char.rescale = 1,
    cmap.type = 3,
    row.norm = T)
{
    library(gtools)
    library(verification)
    library(ROCR)
    library(MASS)
    library(RColorBrewer)
    library(heatmap.plus)

    dataset <- MSIG.Gct2Frame(filename = input.ds)  # Read projection dataset (GCT format)
    m <- data.matrix(dataset$ds)
    model.names <- dataset$row.names
    Ns <- length(m[1,])
    dim(m)
    sample.names <- dataset$names
    n.models <- length(m[,1])
    temp <- strsplit(input.ds, split="/") # Extract test file name
    s <- length(temp[[1]])
    test.file.name <- temp[[1]][s]
    temp <- strsplit(test.file.name, split=".gct")
    test.file.prefix <- temp[[1]][1]
    char.res <- 0.013 * n.models + 0.65

    # Normalize each model's score vector across samples
    if (normalize.score == T) {
        if (normalization.type == "zero.one") {
            for (i in 1:n.models) {
                m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
            }
        } else if (normalization.type == "z.score") {
            for (i in 1:n.models) {
                m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
            }
        } else if (normalization.type == "r.z.score") {
            for (i in 1:n.models) {
                m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
            }
        }
    }

    CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
    cls.labels <- CLS$class.v
    cls.phen <- CLS$phen
    cls.list <- CLS$class.list
    if (is.vector(cls.labels)) {
        n.phen <- 1
    } else {
        n.phen <- length(cls.labels[,1])
    }
    if (!is.na(user.colors[1])) {
        c.test <- user.colors
    } else {
        if (!is.null(CLS$col.phen)) {
            c.test <- CLS$col.phen
        } else {
            c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                        brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
                        brewer.pal(n=8, name="BuGn"),
                        brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                        brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                        brewer.pal(n=8, name="BuGn"),
                        brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                        brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                        brewer.pal(n=8, name="BuGn"))
        }
    }
    if (!is.null(CLS$phen.names)) {
        phen.names <- CLS$phen.names
    } else {
        phen.names <- "NA"
    }
    cls.phen.index <- unlist(cls.phen)
    cls.phen.colors <- c.test[1:length(cls.phen.index)]
    print("cls.phen.colors:")
    print(cls.phen.colors)
    n.classes <- vector(length=n.phen, mode="numeric")
    if (n.phen == 1) {
        max.classes <- length(cls.phen)
        n.classes[1] <- max.classes
    } else {
        max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
        for (i in 1:n.phen) {
            n.classes[i] <- length(cls.phen[[i]])
        }
    }
    filename <- paste(results.dir, test.file.prefix, ".SORT.PROJ", sep="")
    pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 17 )

    # Score every model by ROC AUC against the target binary phenotype
    roc.list <- vector( length=n.models, mode="numeric" )
    p.val.list <- vector( length=n.models, mode="numeric" )
    if (!is.na(target.phen)) {
        bin.class <- ifelse(cls.list[target.phen,] == target.class, 1, 0)
    } else {
        # BUG FIX: the original referenced cls.list2 here, which is not
        # defined until after the sort below; use the unsorted cls.list.
        bin.class <- ifelse(cls.list[1,] == cls.list[1,1], 1, 0)
    }
    model.descs2 <- vector(length = n.models, mode="character")
    for (i in 1:n.models) {
        m.score <- m[i,]
        m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
        if (length(unique(bin.class)) > 1) {
            perf.auc <- roc.area(bin.class, m.score.norm)
            roc <- signif(perf.auc$A, digits=3)
            p.val <- signif(perf.auc$p.value, digits=3)
        } else {
            roc <- p.val <- "-"  # degenerate class: ROC undefined
        }
        print(paste("ROC=", roc, " p-val=", p.val))
        roc.list[i] <- roc
        p.val.list[i] <- p.val
        model.descs2[i] <- paste(roc, " (", p.val, ")")
    }
    # Rank models by AUC; the top-ranked model drives the sample sort
    m.order <- order(roc.list, decreasing=TRUE)
    model.descs2 <- model.descs2[m.order]
    loc <- m.order[1]
    m2 <- m[m.order, ]
    model.names <- model.names[m.order]
    print(c("loc:", loc))
    s.order <- order(m[loc,], decreasing = TRUE)
    m2 <- m2[, s.order]
    sample.names2 <- sample.names[s.order]
    if (is.vector(cls.labels)) {
        cls.labels2 <- cls.labels[s.order]
        cls.list2 <- cls.list[s.order]
    } else {
        cls.labels2 <- cls.labels[, s.order]
        cls.list2 <- cls.list[, s.order]
    }

    # Score each phenotype track by ROC against the winning model's profile.
    # Entries equal to "WT" count as unaltered (0), everything else as 1.
    m.score <- m2[1,]
    m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
    roc.list <- vector(mode="numeric", length=n.phen)
    phen.descs <- vector(mode="character", length=n.phen)
    for( i in 1:n.phen ){
        bin.gene <- ifelse( cls.list2[i,]=="WT", 0, 1)
        if (length(unique(bin.gene)) > 1) {
            perf.auc <- roc.area(bin.gene, m.score.norm)
            roc <- signif(perf.auc$A, digits=3)
            p.val <- signif(perf.auc$p.value, digits=3)
        } else {
            roc <- "-"
            p.val <- "-"
        }
        print(paste("ROC=", roc, " p-val=", p.val))
        roc.list[i] <- roc
        phen.descs[i] <- paste(roc, " (", p.val, ")")
    }
    g.order <- c(1, 2, order(roc.list[3:n.phen], decreasing=TRUE)+2) # skip PATHWAY.MUT and COPY.NUMBER
    # Keep only the top 40 phenotype tracks.
    # NOTE(review): assumes the CLS has at least 40 phenotype rows and that
    # rows 1-2 are the PATHWAY.MUT / COPY.NUMBER tracks — confirm upstream.
    phen.descs2 <- phen.descs[g.order][1:40]
    cls.list2 <- cls.list2[g.order,][1:40,]
    phen.names <- phen.names[g.order][1:40]
    # Recompute cls.phen and cls.labels2 as order may have changed
    cls.phen2 <- NULL
    if (is.vector(cls.labels)) {
        classes <- unique(cls.list2)
        cls.phen2 <- classes
        cls.labels2 <- match(cls.list2, cls.phen2)
    } else {
        for (kk in 1:length(cls.list2[, 1])) {
            classes <- unique(cls.list2[kk,])
            cls.phen2 <- c(cls.phen2, classes)
            cls.labels2[kk,] <- match(cls.list2[kk,], classes)
        }
    }
    cls.labels2 <- cls.labels2[1:40,]
    phen.list <- unlist(cls.phen2)
    # Fixed color convention for alteration classes; anything else is gray
    colors.list <- rep( "gray", length(phen.list))
    colors.list[phen.list=="MUT"] <- cls.phen.colors[1]
    colors.list[phen.list=="DEL"] <- cls.phen.colors[3]
    colors.list[phen.list=="AMP"] <- cls.phen.colors[4]
    colors.list[phen.list=="ALT"] <- cls.phen.colors[5]
    print("cls.phen2:")
    print(unlist(cls.phen2))
    print("cls.phen:")
    print(unlist(cls.phen))
    print("colors.list:")
    print(colors.list)
    MSIG.HeatMapPlot.8(V = m2, row.names = model.names,
                       row.names2 = model.descs2,
                       col.labels = cls.labels2,
                       col.classes = cls.phen2, phen.cmap = colors.list, phen.names = phen.names,
                       phen.names2 = phen.descs2,
                       col.names = sample.names2, main = " ", xlab=" ", ylab=" ", row.norm = row.norm,
                       cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
    dev.off()
    if (!is.na(output.dataset)) {
        V.GCT <- m2
        colnames(V.GCT) <- sample.names2
        # BUG FIX: the original assigned undefined model.names2; the reordered
        # names are stored in model.names here.
        row.names(V.GCT) <- model.names
        write.gct(gct.data.frame = V.GCT, descs = model.descs2, filename =output.dataset)
    }
}
OPAM.sort.projection.by.score.4 <- function(
   input.ds,                        # GCT file of signature projection scores (rows = models/signatures)
   input.cls,                       # CLS phenotype file with one row per genomic aberration (values "WT"/"MUT"/"DEL"/"AMP"/...)
   tissue               = "NA",     # tissue label, used only in plot titles
   results.dir,                     # output directory for the "<prefix>.3Passes.ROC.pdf" plot
   normalize.score      = T,        # if TRUE, row-normalize the score matrix before analysis
   normalization.type   = "zero.one", # "zero.one" (min-max), "z.score" (mean/sd) or "r.z.score" (median/mad)
   model                = "NA",     # fallback signature name to sort by when ROC values cannot be computed
   target.phen          = NA,       # if non-NA, pass 1 derives PATHWAY.MUT from the union of known aberrations
   target.class         = NA,       # retained for interface compatibility (used by related .2/.3 variants)
   user.colors          = NA,       # optional explicit phenotype color map; otherwise CLS colors or brewer palettes
   decreasing.order     = T,        # sample sort direction in the no-ROC fallback path
   output.dataset       = NA,       # if non-NA, write the pass-3 sorted matrix as a GCT file here
   char.rescale         = 1,        # character rescale factor passed to MSIG.HeatMapPlot.9
   cmap.type            = 3,        # color map type passed to MSIG.HeatMapPlot.9
   row.norm             = T,        # row normalization flag passed to MSIG.HeatMapPlot.9
   u.gene.names.known   = "NA"      # character vector of known pathway gene/aberration phenotype names (pass 1)
)
# Calls MSIG.HeatMapPlot.9 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2 does.
# However, it *will* use "model" if it cannot calculate p-values on the gene signatures, which
# happens when every cell line has a genomic aberration.
#
# Runs 3 passes on the data:
# 1st pass: looks at the genes and copy number alterations specified by u.gene.names.known
# 2nd pass: looks at only the top abnormalities (using an ROC cutoff) from the 1st pass, and adjusts
#           the PATHWAY.MUT vector accordingly (only according to the genes, not by copy number data)
# 3rd pass: Takes the winning signature from the 2nd pass and then looks at all the genes available
#
# Side effects: prints per-model/per-phenotype ROC tables to the console and writes a 3-page
# PDF of heatmaps (one per pass) into results.dir; optionally writes a sorted GCT dataset.
{
   library(gtools)
   library(verification)   # roc.area(), roc.plot()
   library(ROCR)
   library(MASS)
   library(RColorBrewer)
   library(heatmap.plus)

   # ---- Read inputs -------------------------------------------------------
   dataset <- MSIG.Gct2Frame(filename = input.ds)  # Read gene expression dataset (GCT format)
   m <- data.matrix(dataset$ds)
   model.names <- dataset$row.names
#   model.descs <- dataset$descs
   Ns <- length(m[1,])
   dim(m)
   sample.names <- dataset$names
   n.models <- length(m[,1])

   temp <- strsplit(input.ds, split="/")  # Extract test file name
   s <- length(temp[[1]])
   test.file.name <- temp[[1]][s]
   temp <- strsplit(test.file.name, split=".gct")
   test.file.prefix <-  temp[[1]][1]
   char.res <-  0.013 * n.models + 0.65

   # ---- Normalize scores (per row/model) ----------------------------------
   if (normalize.score == T) {
      if (normalization.type == "zero.one") {
         for (i in 1:n.models) {
            m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
         }
      } else if (normalization.type == "z.score") {
         for (i in 1:n.models) {
            m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
         }
      } else if (normalization.type == "r.z.score") {
         # robust z-score: median/MAD instead of mean/sd
         for (i in 1:n.models) {
            m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
         }
      }
   }

   # ---- Read phenotypes (one row per aberration) --------------------------
   CLS <- MSIG.ReadPhenFile.2(file = input.cls)  # Read phenotype file (CLS format)
   cls.labels <- CLS$class.v
   cls.phen <- CLS$phen
   cls.list <- CLS$class.list

   if (is.vector(cls.labels)) {
      n.phen <- 1
   } else {
      n.phen <- length(cls.labels[,1])
   }

   # Color map: user-supplied > CLS-embedded > repeated brewer palettes
   if (!is.na(user.colors[1])) {
      c.test <- user.colors
   } else {
      if (!is.null(CLS$col.phen)) {
         c.test <- CLS$col.phen
      } else {
         c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                     brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
                     brewer.pal(n=8, name="BuGn"),
                     brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                     brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                     brewer.pal(n=8, name="BuGn"),
                     brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                     brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                     brewer.pal(n=8, name="BuGn"))
      }
   }

   if (!is.null(CLS$phen.names)) {
      phen.names <- CLS$phen.names
   } else {
      phen.names <- "NA"
   }

   cls.phen.index <- unlist(cls.phen)
   cls.phen.colors <- c.test[1:length(cls.phen.index)]
#   print("cls.phen.colors:")
#   print(cls.phen.colors)

   n.classes <- vector(length=n.phen, mode="numeric")
   if (n.phen == 1) {
      max.classes <- length(cls.phen)
      n.classes[1] <- max.classes
   } else {
      max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
      for (i in 1:n.phen) {
         n.classes[i] <- length(cls.phen[[i]])
      }
   }

   # ======================= Pass 1 =========================================
   # Restrict to the user-specified known aberrations (plus PATHWAY.MUT) and
   # rank all signatures by ROC against the pathway-aberration indicator.
   print("--- Begin Pass 1 ---")
#   model.names.original = model.names
#   m.original = m
   phen.pass1 = c( "PATHWAY.MUT", u.gene.names.known)
   n.phen.pass1 = length(u.gene.names.known)+1
   ind.phen.pass1 = which( phen.names %in% phen.pass1 )
   phen.pass1 = phen.names[ind.phen.pass1]
   roc.list.pass1 = vector( length=n.models, mode="numeric" )
   p.val.list.pass1 = vector( length=n.models, mode="numeric" )
   cls.list.pass1 = cls.list[ind.phen.pass1,]
   cls.list.pass1.2 = ifelse(cls.list.pass1 == "WT", 0, 1)   # binary: any aberration vs wild type
   cls.labels.pass1 = cls.labels[ind.phen.pass1,]
#   browser()
   if (!is.na(target.phen) && length(phen.pass1) > 2 ) {
      # PATHWAY.MUT = 1 when the sample carries ANY of the known aberrations
#      bin.class.pass1 = apply( cls.list2.pass1.2[3:n.phen,], MARGIN=2, FUN=sum)/(n.phen-2)
#      bin.class.pass1 = ( bin.class.pass1 - min(bin.class.pass1))/(max(bin.class.pass1) - min(bin.class.pass1))
#      bin.class.pass1 <- ifelse(cls.list[target.phen,] == target.class, 1, 0)
      bin.class.pass1 <- ifelse(apply( cls.list.pass1.2[2:n.phen.pass1,], MARGIN=2, FUN=sum) > 0, 1, 0)
      cls.labels.pass1[1,] = bin.class.pass1
      cls.list.pass1[1,] = ifelse(bin.class.pass1 == 1, "MUT", "WT")
#      if( length(unique(bin.class.pass1)) == 1) {
#         cls.list.3 = ifelse( cls.list == "DEL" | cls.list == "AMP", 1, 0)
#         copy.number.pass1 = ifelse( apply(cls.list.3[3:n.phen,], MARGIN=2, FUN=sum) >= 1, "ALT", "WT")
#         copy.class.pass1 = ifelse( copy.number.pass1 == "ALT", 1, 0)
#         bin.class.pass1 = copy.class.pass1
#         print( "Calculating p-value with respect to copy number alterations")
#      }
   } else if (length(phen.pass1)==2 ) {
      # Single known aberration: use it directly as the binary class.
      # NOTE(review): this indexes the FULL cls.list (row 2), not the pass-1
      # subset cls.list.pass1 — presumably they coincide here; confirm.
      bin.class.pass1 = ifelse(cls.list[2,]== "WT", 0,1)
      cls.labels.pass1[1,] = bin.class.pass1
      cls.list.pass1[1,] = ifelse(bin.class.pass1 == 1, "MUT", "WT")
   } else {
      # NOTE(review): 'cls.list2' is not defined anywhere in this function at
      # this point — this branch would fail if reached. Kept as-is pending
      # confirmation of the intended reference (likely cls.phen[[1]][1]).
      bin.class.pass1 <- ifelse(cls.list[1,] == cls.list2[1,1], 1, 0)
   }
#   browser()
   #pdf("ROCplots.pdf")

   # Score every model/signature against the pass-1 binary class
   model.descs2.pass1 = vector(length = n.models, mode="character")
   for (i in 1:n.models) {
      m.score <- m[i,]
      m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
#      browser()
      if (length(unique(bin.class.pass1)) > 1) {
         perf.auc <- roc.area(bin.class.pass1, m.score.norm)
         roc <- signif(perf.auc$A, digits=3)
         p.val <- signif(perf.auc$p.value, digits=3)
         roc.list.pass1[i] = perf.auc$A
         p.val.list.pass1[i] = perf.auc$p.value
         roc.plot(bin.class.pass1, m.score.norm)
      } else {
         # Degenerate class (all samples identical): ROC undefined
         roc <- p.val <- "-"
         roc.list.pass1[i] = NA
         p.val.list.pass1[i] = NA
      }
      print(paste(format(rownames(m)[i], width=30), "ROC=", roc, " p-val=", p.val))
      model.descs2.pass1[i] <- paste(roc, " (", p.val, ")")
   }
   # FIX: only close a graphics device if one is actually open; the matching
   # pdf() above is commented out, so an unconditional dev.off() errors on the
   # null device whenever roc.plot() was never called.
   if (dev.cur() > 1) dev.off()
#   browser()
   if( is.na(roc.list.pass1[1]) ){
      # No ROC computable (every sample aberrant): fall back to sorting by the
      # user-specified "model" signature and ordering models by correlation to it.
      loc <- match(model, model.names)
      s.order.pass1 <- order(m[loc,], decreasing = decreasing.order)
#      loc = s.order.pass1[1]
      m2.pass1 <- m[, s.order.pass1]
      correl <- cor(t(m2.pass1))[, loc]
      m.order.pass1 <- order(correl, decreasing=T)
      m2.pass1 <- m2.pass1[m.order.pass1, ]
   } else{
      # Rank models by ROC (best first), then sort samples by the top model's score
      m.order.pass1 = order(roc.list.pass1, decreasing=TRUE, na.last=TRUE)
      m2.pass1 <- m[m.order.pass1, ]
      s.order.pass1 <- order(m2.pass1[1,], decreasing = TRUE )
      m2.pass1 <- m2.pass1[, s.order.pass1]
   }
#   m2.pass1 <- m2.pass1[m.order.pass1, ]
   model.descs2.pass1 = model.descs2.pass1[m.order.pass1]
   sample.names2.pass1 <- colnames(m2.pass1)
   model.names.pass1 <- rownames(m2.pass1)

   # Reorder phenotype rows/labels to the new sample order
   if (is.vector(cls.labels)) {
      cls.labels2.pass1 <- cls.labels.pass1[s.order.pass1]
      cls.list2.pass1 <- cls.list.pass1[s.order.pass1]
   } else {
      cls.labels2.pass1 <- cls.labels.pass1[, s.order.pass1]
      cls.list2.pass1 <- cls.list.pass1[, s.order.pass1]
   }
   #browser()
#   browser()
   # Score each pass-1 phenotype against the TOP model's (normalized) score
   m.score.pass1 <- m2.pass1[1,]
   m.score.norm.pass1 <- (m.score.pass1 - min(m.score.pass1))/(max(m.score.pass1) - min(m.score.pass1))
   roc.list.phen.pass1 = vector(mode="numeric", length=n.phen)
   p.val.list.phen.pass1 = vector(mode="numeric", length=n.phen)
   phen.descs.pass1 = vector(mode="character", length=n.phen)
   for( i in 1:n.phen.pass1 ){
      bin.gene = ifelse( cls.list2.pass1[i,]=="WT", 0, 1)
      if (length(unique(bin.gene)) > 1) {
         perf.auc <- roc.area(bin.gene, m.score.norm.pass1)
#         if( perf.auc$A < 0.5 ){
##            browser()
#            roc = signif(1 - perf.auc$A, digits=3)
#            p.val = signif(1- perf.auc$p.val, digits=3)
#            abnormality = unique(cls.list2.pass1[i,])[which(unique(cls.list2.pass1[i,]) != "WT")]
##            cls.list2.pass1[i,] = ifelse( cls.list2.pass1[i,] == "WT", abnormality, "WT" )
#            phen.names[i] = paste(phen.names[i], "-opposite", sep="")
#            roc.list.phen.pass1[i] = 1 - perf.auc$A
#            p.val.list.phen.pass1[i] = perf.auc$p.val # Don't want to use these "opposite" genomic aberrations in Pass 2
#                                                       #   because they make PATHWAY.MUT+COPY.NUMBER too dense
#         } else{
            roc <- signif(perf.auc$A, digits=3)
            p.val <- signif(perf.auc$p.value, digits=3)
            roc.list.phen.pass1[i] = perf.auc$A
            # FIX: use the full element name; the original relied on `$p.val`
            # partial-matching roc.area's `p.value` component.
            p.val.list.phen.pass1[i] = perf.auc$p.value
#         }
      } else {
         roc <- "-"
         p.val <- "-"
         roc.list.phen.pass1[i] = NA
         p.val.list.phen.pass1[i] = NA
      }
      print(paste(format(phen.pass1[i], width=12), "ROC=", roc, " p-val=", p.val))
#      p.val.list[i] = p.val
      phen.descs.pass1[i] <- paste(roc, " (", p.val, ")")
   }
#   browser()
   # Reorder phenotypes by ROC, keeping PATHWAY.MUT pinned in position 1
   g.order.pass1 = c(1, order(roc.list.phen.pass1[2:n.phen.pass1], decreasing=TRUE, na.last=TRUE)+1) # keep PATHWAY.MUT as 1
   roc.list.phen.pass1 = roc.list.phen.pass1[g.order.pass1]
   p.val.list.phen.pass1 = p.val.list.phen.pass1[g.order.pass1]
   phen.descs2.pass1 = phen.descs.pass1[g.order.pass1][1:n.phen.pass1]
   cls.list2.pass1 = cls.list2.pass1[g.order.pass1,][1:n.phen.pass1,]
   phen.names.pass1 = phen.pass1[g.order.pass1][1:n.phen.pass1]

   # Recompute cls.list2 as some mutations or copy numbers may have been removed
   # Recompute cls.phen and cls.labels2 as order may have changed
   cls.phen2.pass1 <- NULL
   if (is.vector(cls.labels)) {
      classes <- unique(cls.list2.pass1)
      cls.phen2.pass1 <- classes
      cls.labels2.pass1 <- match(cls.list2.pass1, cls.phen2.pass1)
   } else {
      for (kk in 1:length(cls.list2.pass1[, 1])) {
         classes <- unique(cls.list2.pass1[kk,])
#         cls.phen2[[kk]] <- classes
         cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
         cls.labels2.pass1[kk,] <- match(cls.list2.pass1[kk,], classes)
      }
   }
   cls.labels2.pass1 = cls.labels2.pass1[1:n.phen.pass1,]
#   browser()
#   correl <- cor(t(m2))[, loc]
#   m.order <- order(correl, decreasing=decreasing.order)
#   correl2 <- correl[m.order]
#   model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))

   # Per-class colors: gray default, fixed colors for MUT/DEL/AMP/ALT
   phen.list.pass1 <- unlist(cls.phen2.pass1)
#   colors.list <- ifelse(unlist(cls.phen2) == target.class,
#                         ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
#                                ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
   colors.list.pass1 = rep( "gray", length(phen.list.pass1))
   colors.list.pass1[phen.list.pass1=="MUT"] = cls.phen.colors[1]
   colors.list.pass1[phen.list.pass1=="DEL"] = cls.phen.colors[3]
   colors.list.pass1[phen.list.pass1=="AMP"] = cls.phen.colors[4]
   colors.list.pass1[phen.list.pass1=="ALT"] = cls.phen.colors[5]
#   print("cls.phen2:")
#   print(unlist(cls.phen2))
#
#   print("cls.phen:")
#   print(unlist(cls.phen))
#
#   print("colors.list:")
#   print(colors.list)
#   browser()

   # Open the multi-page PDF that collects the heatmap of each pass
   filename <- paste(results.dir, test.file.prefix, ".3Passes.ROC", sep="")
#   pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 9)
   pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 17 )
#   windows(width=12, height=8)
   MSIG.HeatMapPlot.9(V = m2.pass1, row.names = model.names.pass1,
                      row.names2 = model.descs2.pass1,
                      col.labels = cls.labels2.pass1,
                      col.classes = cls.phen2.pass1,
                      phen.cmap = colors.list.pass1, phen.names = phen.names.pass1,
                      phen.names2 = phen.descs2.pass1,
                      col.names = sample.names2.pass1, main = paste(tissue, "- Phase 1: Known KRAS Pathway Abnormalities (ROC)"),
                      xlab="  ", ylab="  ", row.norm = row.norm,
                      cmap.type = cmap.type, char.rescale = char.rescale,  legend=F)
#   dev.off()
#   break

   # ======================= Pass 2 =========================================
   # Keep only phenotypes whose pass-1 ROC clears a threshold, rebuild
   # PATHWAY.MUT from that subset, and re-rank all signatures against it.
   print( "--- Begin Pass 2 ---")
#   browser()
#   p.val.threshold = 0.1
   roc.threshold = 0.65
#   ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
   ind.top.roc = which(roc.list.phen.pass1[-1] >= roc.threshold)+1
   if( length(ind.top.roc) > 0){
      ind.roc.threshold = c(1, ind.top.roc)
   }else{
      # Nothing clears 0.65: relax the cutoff once to 0.6
      roc.threshold = 0.6
      ind.top.roc = which(roc.list.phen.pass1[-1] >= roc.threshold)+1
      ind.roc.threshold = c(1, ind.top.roc)
   }
#   if( length(ind.top.pval) > 0 ){
#      ind.p.val.threshold = c(1, ind.top.pval)
#   } else if( length(ind.top.pval) < 1 ) {
#      p.val.threshold = 0.15
#      ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
#      ind.p.val.threshold = c(1, ind.top.pval) }
#   if( length(ind.top.pval) < 1){
#      p.val.threshold = 0.2
#      ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
#      ind.p.val.threshold = c(1, ind.top.pval)
#   }
#   if( length(ind.top.pval) < 1){
#      p.val.threshold = 0.25
#      ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
#      ind.p.val.threshold = c(1, ind.top.pval)
#   }
#   if( length(ind.top.pval) < 1){
#      p.val.threshold = 0.3
#      ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
#      ind.p.val.threshold = c(1, ind.top.pval)
#   }
#   if( length( ind.top.pval) < 1 ) {
#      ind.top = which(!is.na(p.val.list.phen.pass1[-1]))+1
#      ind.p.val.threshold = c( 1, ind.top )
#   }
   n.phen.pass2 = length(ind.roc.threshold)
#   phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
   cls.list2.pass2 = cls.list2.pass1[ind.roc.threshold,]
   phen.names.pass2 = phen.names.pass1[ind.roc.threshold]
#   phen.names.pass2[1] = "PATHWAY.MUT + COPY.NUMBER"
   cls.labels2.pass2 = cls.labels2.pass1[ind.roc.threshold,]
#   browser()
   cls.list2.pass2.2 = ifelse( cls.list2.pass2 == "WT", 0, 1)
   cls.list2.pass2.3 = ifelse( cls.list2.pass2 == "DEL" | cls.list2.pass2 == "AMP", 1, 0)
   # Rebuild PATHWAY.MUT from the surviving phenotypes only
   if( n.phen.pass2 > 2 ){
      pathway.mut.pass2 = ifelse( apply(cls.list2.pass2.2[2:n.phen.pass2,], MARGIN=2, FUN=sum) >= 1, "MUT", "WT")
#      copy.number.pass2 = ifelse( apply(cls.list2.pass2.3[3:n.phen.pass2,], MARGIN=2, FUN=sum) >= 1, "ALT", "WT")
   } else{
      pathway.mut.pass2 = ifelse( cls.list2.pass2.2[2,] == 1, "MUT", "WT")
#      copy.number.pass2 = ifelse( cls.list2.pass2.3[3,] == 1, "ALT", "WT")
   }
#   browser()
   bin.class.pass2 = ifelse( pathway.mut.pass2 == "MUT", 1, 0 )
#   copy.class.pass2 = ifelse( copy.number.pass2 == "ALT", 1, 0)
#   if( length(unique(bin.class.pass2)) == 1) {
#      bin.class.pass2 = copy.class.pass2
#      print( "Calculating p-value with respect to copy number alterations")
#   }
   cls.list2.pass2[1,] = pathway.mut.pass2
#   cls.list2.pass2[2,] = copy.number.pass2
   roc.list.pass2 = vector( length=n.models, mode="numeric" )
   p.val.list.pass2 = vector( length=n.models, mode="numeric" )
#   browser()

   # Re-score every model against the rebuilt pass-2 class
   model.descs2.pass2 = vector(length = n.models, mode="character")
   for (i in 1:n.models) {
      m.score <- m2.pass1[i,]
      m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
#      browser()
      if (length(unique(bin.class.pass2)) > 1) {
         perf.auc <- roc.area(bin.class.pass2, m.score.norm)
         roc <- signif(perf.auc$A, digits=3)
         p.val <- signif(perf.auc$p.value, digits=3)
         roc.list.pass2[i] = perf.auc$A
         p.val.list.pass2[i] = perf.auc$p.value
      } else {
         roc <- p.val <- "-"
         roc.list.pass2[i] = NA
         p.val.list.pass2[i] = NA
      }
      print(paste(format(rownames(m2.pass1)[i], width=30), "ROC=", roc, " p-val=", p.val))
      model.descs2.pass2[i] <- paste(roc, " (", p.val, ")")
   }
#   browser()
   # Re-sort models by pass-2 ROC and samples by the new top model's score
   m.order.pass2 = order(roc.list.pass2, decreasing=TRUE, na.last=TRUE)
   model.descs2.pass2 = model.descs2.pass2[m.order.pass2]
#   loc.pass2 = m.order.pass2[1]
   m2.pass2 <- m2.pass1[m.order.pass2, ]
   model.names.pass2 <- rownames(m2.pass2)
#   print(c("loc.pass2:", loc.pass2))
   s.order.pass2 <- order(m2.pass2[1,], decreasing = TRUE)
   m2.pass2 <- m2.pass2[, s.order.pass2]
   sample.names2.pass2 <- colnames(m2.pass2)
   if (is.vector(cls.labels)) {
      cls.labels2.pass2 <- cls.labels2.pass2[s.order.pass2]
      cls.list2.pass2 <- cls.list2.pass2[s.order.pass2]
   } else {
      cls.labels2.pass2 <- cls.labels2.pass2[, s.order.pass2]
      cls.list2.pass2 <- cls.list2.pass2[, s.order.pass2]
   }
#   browser()
#   browser()
   # Score the retained phenotypes against the pass-2 top model
   m.score.pass2 <- m2.pass2[1,]
   m.score.norm.pass2 <- (m.score.pass2 - min(m.score.pass2))/(max(m.score.pass2) - min(m.score.pass2))
   roc.list.phen.pass2 = vector(mode="numeric", length=n.phen.pass2)
   phen.descs.pass2 = vector(mode="character", length=n.phen.pass2)
   for( i in 1:n.phen.pass2 ){
      bin.gene = ifelse( cls.list2.pass2[i,]=="WT", 0, 1)
      if (length(unique(bin.gene)) > 1) {
         perf.auc <- roc.area(bin.gene, m.score.norm.pass2)
#         if( perf.auc$A < 0.5 ){
##            browser()
#            roc = signif(1 - perf.auc$A, digits=3)
#            p.val = signif(1 - perf.auc$A, digits=3)
#            abnormality = unique(cls.list2.pass2[i,])[which(unique(cls.list2.pass2[i,]) != "WT")]
#            cls.list2.pass2 = ifelse( cls.list2.pass2[i,] == "WT", abnormality, "WT" )
#            phen.names.pass2[i] = paste(phen.names.pass2[i], "-opposite")
#         } else{
            roc <- signif(perf.auc$A, digits=3)
            p.val <- signif(perf.auc$p.value, digits=3)
            roc.list.phen.pass2[i] = perf.auc$A
#         }
      } else {
         roc <- "-"
         p.val <- "-"
         roc.list.phen.pass2[i] = NA
      }
      print(paste(format(phen.names.pass2[i], width=12), "ROC=", roc, " p-val=", p.val))
#      p.val.list[i] = p.val
      phen.descs.pass2[i] <- paste(roc, " (", p.val, ")")
   }
#   browser()
   # Reorder phenotypes by ROC, keeping PATHWAY.MUT pinned in position 1
   g.order.pass2 = c(1, order(roc.list.phen.pass2[2:n.phen.pass2], decreasing=TRUE, na.last=TRUE)+1) # skip PATHWAY.MUT
   phen.descs2.pass2 = phen.descs.pass2[g.order.pass2]
   cls.list2.pass2 = cls.list2.pass2[g.order.pass2,]
   phen.names.pass2 = phen.names.pass2[g.order.pass2]
#   browser()
   # Recompute cls.list2 as some mutations or copy numbers may have been removed
   # Recompute cls.phen and cls.labels2 as order may have changed
   cls.phen2.pass2 <- NULL
   if (is.vector(cls.labels)) {
      classes <- unique(as.vector(cls.list2.pass2))
      cls.phen2.pass2 <- classes
      cls.labels2.pass2 <- match(cls.list2.pass2, cls.phen2.pass2)
   } else {
      for (kk in 1:length(cls.list2.pass2[, 1])) {
         classes <- unique(cls.list2.pass2[kk,])
#         cls.phen2[[kk]] <- classes
         cls.phen2.pass2 <- c(cls.phen2.pass2, classes)
         cls.labels2.pass2[kk,] <- match(cls.list2.pass2[kk,], classes)
      }
   }
   cls.labels2.pass2 = cls.labels2.pass2[1:n.phen.pass2,]
#   browser()
#   correl <- cor(t(m2))[, loc]
#   m.order <- order(correl, decreasing=decreasing.order)
#   correl2 <- correl[m.order]
#   model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
   phen.list.pass2 <- unlist(cls.phen2.pass2)
#   colors.list <- ifelse(unlist(cls.phen2) == target.class,
#                         ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
#                                ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
   colors.list.pass2 = rep( "gray", length(phen.list.pass2))
   colors.list.pass2[phen.list.pass2=="MUT"] = cls.phen.colors[1]
   colors.list.pass2[phen.list.pass2=="DEL"] = cls.phen.colors[3]
   colors.list.pass2[phen.list.pass2=="AMP"] = cls.phen.colors[4]
   colors.list.pass2[phen.list.pass2=="ALT"] = cls.phen.colors[5]
   MSIG.HeatMapPlot.9(V = m2.pass2, row.names = model.names.pass2,
                      row.names2 = model.descs2.pass2,
                      col.labels = cls.labels2.pass2,
                      col.classes = cls.phen2.pass2,
                      phen.cmap = colors.list.pass2, phen.names = phen.names.pass2,
                      phen.names2 = phen.descs2.pass2,
                      col.names = sample.names2.pass2, main = paste(tissue, "- Phase 2: only ROC >=", roc.threshold,"from 1st pass (ROC)"),
                      xlab="  ", ylab="  ", row.norm = row.norm,
                      cmap.type = cmap.type, char.rescale = char.rescale,  legend=F)

   # ======================= Pass 3 =========================================
   # Using the pass-2 winning signature's sample order, evaluate ALL available
   # phenotypes, keep the top-scoring ones (up to 40 rows), and rebuild the
   # combined PATHWAY.MUT / COPY.NUMBER summary rows.
   print( "--- Begin Pass 3 ---")
#   browser()
   m2.pass3 = m2.pass2
   model.names.pass3 = rownames(m2.pass3)
   sample.names2.pass3 = colnames(m2.pass3)
#   model.descs2.pass3 = model.descs2.pass2
   n.phen.pass3 = 40   # hard cap on phenotype rows shown in the pass-3 heatmap
#   phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
   # Apply the cumulative sample orderings of passes 1 and 2 to the FULL phenotype set
   cls.list2.pass3 = cls.list[, s.order.pass1][, s.order.pass2]
   cls.labels2.pass3 = cls.labels[, s.order.pass1][, s.order.pass2]
#   browser()
   phen.names.pass3 = phen.names
   m.score.pass3 <- m2.pass3[1,]
   m.score.norm.pass3 <- (m.score.pass3 - min(m.score.pass3))/(max(m.score.pass3) - min(m.score.pass3))
   roc.list.phen.pass3 = vector(mode="numeric", length=n.phen)
   phen.descs.pass3 = vector(mode="character", length=n.phen)
   p.val.list.phen.pass3 = vector(mode="numeric", length=n.phen)
   for( i in 1:n.phen ){
      bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
      if (length(unique(bin.gene)) > 1) {
         perf.auc <- roc.area(bin.gene, m.score.norm.pass3)
#         if( perf.auc$A < 0.5 ){
##           browser()
#            roc = signif(1 - perf.auc$A, digits=3)
#            p.val = signif(1- perf.auc$p.value, digits=3)
#            abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
#            cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
#            phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
#            roc.list.phen.pass3[i] = 1-perf.auc$A
#            p.val.list.phen.pass3[i] = 1- perf.auc$p.value
#         } else{
            roc <- signif(perf.auc$A, digits=3)
            p.val <- signif(perf.auc$p.value, digits=3)
            roc.list.phen.pass3[i] = perf.auc$A
            p.val.list.phen.pass3[i] = perf.auc$p.value
#         }
      } else {
         roc <- NA
         p.val <- NA
         roc.list.phen.pass3[i] = NA
         p.val.list.phen.pass3[i] = NA
      }
#      print(paste("ROC=", roc, " p-val=", p.val))
#      p.val.list[i] = p.val
      phen.descs.pass3[i] <- paste(roc, " (", p.val, ")")
   }
#   browser()
#   p.val.threshold = 0.1
   roc.threshold = 0.65
   # Count phenotypes (beyond the two summary rows) clearing the ROC cutoff
   len = length(which(roc.list.phen.pass3[-1:-2] >= roc.threshold)) + 2
#   len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
#   if( len == 2 ){
#      p.val.threshold = 0.15
#      len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
#   }
#   if( len == 2 ){
#      p.val.threshold = 0.2
#      len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
#   }
   if( len > 40 ) len = 40   # respect the display cap
#   g.order.pass3.1 = c(1, 2, order(p.val.list.phen.pass3[3:n.phen], decreasing=FALSE, na.last=TRUE)+2 )
   g.order.pass3 = c(1, 2, order(p.val.list.phen.pass3[-1:-2], decreasing=FALSE, na.last=TRUE)+2 )[1:len] # skip PATHWAY.MUT and COPY.NUMBER
   phen.descs2.pass3 = phen.descs.pass3[g.order.pass3]
   cls.list2.pass3 = cls.list2.pass3[g.order.pass3,]
   cls.labels2.pass3 = cls.labels2.pass3[g.order.pass3,]
   phen.names.pass3 = phen.names.pass3[g.order.pass3]
   # Rebuild the two summary rows from the retained phenotypes:
   # row 1 = any mutation OR copy-number alteration; row 2 = any DEL/AMP
   cls.list.mut = ifelse(cls.list2.pass3[-1:-2,] == "MUT", 1, 0)
   cls.list.alt = ifelse(cls.list2.pass3[-1:-2,] == "DEL" | cls.list2.pass3[-1:-2,] == "AMP", 1, 0)
#   browser()
   if( !is.vector(cls.list.alt) ){
      cls.list.mut.sum = apply(cls.list.mut, MARGIN=2, FUN=sum)
      cls.list.alt.sum = apply(cls.list.alt, MARGIN=2, FUN=sum)
      cls.list.mut.sum = ifelse(cls.list.mut.sum + cls.list.alt.sum > 0, 1, 0)
      cls.list2.pass3[1,] = ifelse( cls.list.mut.sum >= 1, "MUT", "WT")
      cls.list2.pass3[2,] = ifelse( cls.list.alt.sum >= 1, "ALT", "WT")
      bin.class.pass3 = cls.list.mut.sum
   } else{
      # Only one phenotype survived: vectors, not matrices
      cls.list2.pass3[1,] = ifelse(cls.list.mut == 1, "MUT", "WT")
      cls.list2.pass3[2,] = ifelse(cls.list.alt == 1, "ALT", "WT")
      bin.class.pass3 = cls.list.mut
   }
   for( i in 1:2 ){  # Recalculate ROC and p-value for PATHWAY.MUT and COPY.NUMBER
      bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
      if (length(unique(bin.gene)) > 1) {
         perf.auc <- roc.area(bin.gene, m.score.norm.pass3)
#         if( perf.auc$A < 0.5 ){
##           browser()
#            roc = signif(1 - perf.auc$A, digits=3)
#            p.val = signif(1- perf.auc$p.value, digits=3)
#            abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
#            cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
#            phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
#            roc.list.phen.pass3[i] = 1-perf.auc$A
#            p.val.list.phen.pass3[i] = 1- perf.auc$p.value
#         } else{
            roc <- signif(perf.auc$A, digits=3)
            p.val <- signif(perf.auc$p.value, digits=3)
            roc.list.phen.pass3[i] = perf.auc$A
            p.val.list.phen.pass3[i] = perf.auc$p.value
#         }
      } else {
         roc <- NA
         p.val <- NA
         roc.list.phen.pass3[i] = NA
         p.val.list.phen.pass3[i] = NA
      }
      print(paste(format(phen.names.pass3[i], width=12), "ROC=", roc, " p-val=", p.val))
#      p.val.list[i] = p.val
      phen.descs2.pass3[i] <- paste(roc, " (", p.val, ")")
   }
#   browser()
   # Final model ranking against the rebuilt pass-3 class
   model.descs2.pass3 = vector(length = n.models, mode="character")
   roc.list.pass3 = vector(length=n.models, mode="double")
   p.val.list.pass3 = vector(length=n.models, mode="double")
   for (i in 1:n.models) {
      m.score <- m2.pass3[i,]
      m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
#      browser()
      if (length(unique(bin.class.pass3)) > 1) {
         perf.auc <- roc.area(bin.class.pass3, m.score.norm)
         roc <- signif(perf.auc$A, digits=3)
         p.val <- signif(perf.auc$p.value, digits=3)
         roc.list.pass3[i] = perf.auc$A
         p.val.list.pass3[i] = perf.auc$p.value
      } else {
         roc <- p.val <- "-"
         roc.list.pass3[i] = NA
         p.val.list.pass3[i] = NA
      }
      print(paste(format(rownames(m2.pass3)[i], width=30), "ROC=", roc, " p-val=", p.val))
      model.descs2.pass3[i] <- paste(roc, " (", p.val, ")")
   }
   m.order.pass3 = order(roc.list.pass3, decreasing=TRUE)
   m2.pass3 = m2.pass3[m.order.pass3,]
   model.descs2.pass3 = model.descs2.pass3[m.order.pass3]
   s.order.pass3 = order(m2.pass3[1,], decreasing=TRUE)
   m2.pass3 = m2.pass3[,s.order.pass3]
   sample.names2.pass3 = colnames(m2.pass3)
   model.names.pass3 = rownames(m2.pass3)
   cls.phen2.pass3 <- NULL
   if (is.vector(cls.labels)) {
      classes <- unique(as.vector(cls.list2.pass3))
      cls.phen2.pass3 <- classes
      cls.labels2.pass3 <- match(cls.list2.pass3, cls.phen2.pass3)
   } else {
#      browser()
      for (kk in 1:length(cls.list2.pass3[, 1])) {
#         browser()
         classes <- unique(cls.list2.pass3[kk,])
#         cls.phen2[[kk]] <- classes
         cls.phen2.pass3 <- c(cls.phen2.pass3, classes)
         cls.labels2.pass3[kk,] <- match(cls.list2.pass3[kk,], classes)
      }
   }
   # NOTE(review): unlike passes 1/2, cls.labels2.pass3 is NOT re-sorted by
   # s.order.pass3 here — presumably intentional since pass-3 sample order is
   # expected to match pass 2; confirm against MSIG.HeatMapPlot.9 output.
#   cls.labels2.pass3 = cls.labels2.pass3[1:n.phen.pass3,]
#   browser()
#   correl <- cor(t(m2))[, loc]
#   m.order <- order(correl, decreasing=decreasing.order)
#   correl2 <- correl[m.order]
#   model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
   phen.list.pass3 <- unlist(cls.phen2.pass3)
#   colors.list <- ifelse(unlist(cls.phen2) == target.class,
#                         ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
#                                ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
   colors.list.pass3 = rep( "gray", length(phen.list.pass3))
   colors.list.pass3[phen.list.pass3=="MUT"] = cls.phen.colors[1]
   colors.list.pass3[phen.list.pass3=="DEL"] = cls.phen.colors[3]
   colors.list.pass3[phen.list.pass3=="AMP"] = cls.phen.colors[4]
   colors.list.pass3[phen.list.pass3=="ALT"] = cls.phen.colors[5]
   phen.names.pass3[1] = "PATHWAY.MUT+COPY.NUMBER"
#   browser()
   MSIG.HeatMapPlot.9(V = m2.pass3, row.names = model.names.pass3,
                      row.names2 = model.descs2.pass3,
                      col.labels = cls.labels2.pass3,
                      col.classes = cls.phen2.pass3,
                      phen.cmap = colors.list.pass3, phen.names = phen.names.pass3,
                      phen.names2 = phen.descs2.pass3,
                      col.names = sample.names2.pass3, main = paste(tissue, "- 3rd Pass: Top signature from 2nd pass with all genes ( ROC >=", roc.threshold, ") (ROC)"),
                      xlab="  ", ylab="  ", row.norm = row.norm,
                      cmap.type = cmap.type, char.rescale = char.rescale,  legend=F)
   dev.off()

   # Optionally write the final (pass-3) sorted matrix as a GCT dataset.
   # FIX: the original referenced m2 / sample.names2 / model.names2 /
   # model.descs2, none of which exist in this function (copy-paste residue
   # from OPAM.sort.projection.by.score.2); use the pass-3 results instead.
   if (!is.na(output.dataset)) {
      V.GCT <- m2.pass3
      colnames(V.GCT) <- sample.names2.pass3
      row.names(V.GCT) <- model.names.pass3
      write.gct(gct.data.frame = V.GCT, descs = model.descs2.pass3, filename =output.dataset)
   }
}
OPAM.sort.projection.by.score.5 <- function(
input.ds,
input.cls,
tissue = "NA",
results.dir,
normalize.score = T,
normalization.type = "zero.one",
model = "NA",
target.phen = NA,
target.class = NA,
user.colors = NA,
decreasing.order = T,
output.dataset = NA,
char.rescale = 1,
cmap.type = 3,
row.norm = T,
u.gene.names.known = "NA"
)
# Calls MSIG.HeatMapPlot.9 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2 does.
# However, it *will* use "model" if it cannot calculate p-values on the gene signatures, which
# happens when every cell line has a genomic aberration.
#
# Runs 3 passes on the data:
# 1st pass: looks at the genes and copy number alterations specified by u.gene.names.known
# 2nd pass: looks at only the top abnormalities (using a p-value cutoff) from the 1st pass, and adjusts
# the PATHWAY.MUT vector accordingly (only according to the genes, not by copy number data)
# 3rd pass: Takes the winning signature from the 2nd pass and then looks all the genes available
#
# Very similar to OPAM.sort.projection.by.score.4, however this version uses rec.area instead of
# roc.area to calculate REC/ROC scores and p-values for PATHWAY.MUT, the vector of total genomic aberrations
# in all samples
{
library(gtools)
library(verification)
library(ROCR)
library(MASS)
library(RColorBrewer)
library(heatmap.plus)
dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
m <- data.matrix(dataset$ds)
model.names <- dataset$row.names
# model.descs <- dataset$descs
Ns <- length(m[1,])
dim(m)
sample.names <- dataset$names
n.models <- length(m[,1])
temp <- strsplit(input.ds, split="/") # Extract test file name
s <- length(temp[[1]])
test.file.name <- temp[[1]][s]
temp <- strsplit(test.file.name, split=".gct")
test.file.prefix <- temp[[1]][1]
char.res <- 0.013 * n.models + 0.65
# normalize scores
if (normalize.score == T) {
if (normalization.type == "zero.one") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
}
} else if (normalization.type == "z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
}
} else if (normalization.type == "r.z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
}
}
}
CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
cls.labels <- CLS$class.v
cls.phen <- CLS$phen
cls.list <- CLS$class.list
if (is.vector(cls.labels)) {
n.phen <- 1
} else {
n.phen <- length(cls.labels[,1])
}
if (!is.na(user.colors[1])) {
c.test <- user.colors
} else {
if (!is.null(CLS$col.phen)) {
c.test <- CLS$col.phen
} else {
c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"))
}
}
if (!is.null(CLS$phen.names)) {
phen.names <- CLS$phen.names
} else {
phen.names <- "NA"
}
cls.phen.index <- unlist(cls.phen)
cls.phen.colors <- c.test[1:length(cls.phen.index)]
# print("cls.phen.colors:")
# print(cls.phen.colors)
n.classes <- vector(length=n.phen, mode="numeric")
if (n.phen == 1) {
max.classes <- length(cls.phen)
n.classes[1] <- max.classes
} else {
max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
for (i in 1:n.phen) {
n.classes[i] <- length(cls.phen[[i]])
}
}
# model.names.original = model.names
# m.original = m
phen.pass1 = c( "PATHWAY.MUT", u.gene.names.known)
n.phen.pass1 = length(u.gene.names.known)+1
ind.phen.pass1 = which( phen.names %in% phen.pass1 )
phen.pass1 = phen.names[ind.phen.pass1]
roc.list.pass1 = vector( length=n.models, mode="numeric" )
p.val.list.pass1 = vector( length=n.models, mode="numeric" )
cls.list.pass1 = cls.list[ind.phen.pass1,]
cls.list.pass1.2 = ifelse(cls.list.pass1 == "WT", 0, 1)
cls.labels.pass1 = cls.labels[ind.phen.pass1,]
# browser()
if (!is.na(target.phen)) {
bin.class.pass1 = apply( cls.list.pass1.2[-1,], MARGIN=2, FUN=sum)
# Normalize bin.class.pass1
if( length(unique(bin.class.pass1)) > 1){
bin.class.pass1 = ( bin.class.pass1 - min(bin.class.pass1))/(max(bin.class.pass1) - min(bin.class.pass1))
} else if ( length(unique(bin.class.pass1)) > 1){
bin.class = rep(1, length(cls.list[1,]))
}
# bin.class.pass1 <- ifelse(cls.list[target.phen,] == target.class, 1, 0)
# bin.class.pass1 <- ifelse(apply( cls.list.pass1.2[2:n.phen.pass1,], MARGIN=2, FUN=sum) > 0, 1, 0)
# cls.labels.pass1[1,] = bin.class.pass1
cls.list.pass1[1,] = ifelse(bin.class.pass1 > 0, "MUT", "WT")
# if( length(unique(bin.class.pass1)) == 1) {
# cls.list.3 = ifelse( cls.list == "DEL" | cls.list == "AMP", 1, 0)
# copy.number.pass1 = ifelse( apply(cls.list.3[3:n.phen,], MARGIN=2, FUN=sum) >= 1, "ALT", "WT")
# copy.class.pass1 = ifelse( copy.number.pass1 == "ALT", 1, 0)
# bin.class.pass1 = copy.class.pass1
# print( "Calculating p-value with respect to copy number alterations")
# }
} else {
bin.class.pass1 <- ifelse(cls.list[1,] == cls.list2[1,1], 1, 0)
}
# browser()
model.descs2.pass1 = vector(length = n.models, mode="character")
for (i in 1:n.models) {
m.score <- m[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass1)) > 1) {
perf.auc <- rec.area(bin.class.pass1, m.score.norm)
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.pass1[i] = perf.auc$A
p.val.list.pass1[i] = perf.auc$p.value
} else {
roc <- p.val <- "-"
roc.list.pass1[i] = NA
p.val.list.pass1[i] = NA
}
# print(paste("REC=", roc, " p-val=", p.val))
model.descs2.pass1[i] <- paste(roc, " (", p.val, ")")
}
# browser()
if( is.na(roc.list.pass1[1]) ){
loc <- match(model, model.names)
s.order.pass1 <- order(m[loc,], decreasing = decreasing.order)
# loc = s.order.pass1[1]
m2.pass1 <- m[, s.order.pass1]
correl <- cor(t(m2.pass1))[, loc]
m.order.pass1 <- order(correl, decreasing=T)
m2.pass1 <- m2.pass1[m.order.pass1, ]
} else{
m.order.pass1 = order(roc.list.pass1, decreasing=TRUE, na.last=TRUE)
# m.order.pass1 = order(p.val.list.pass1, decreasing=FALSE, na.last=TRUE)
m2.pass1 <- m[m.order.pass1, ]
s.order.pass1 <- order(m2.pass1[1,], decreasing = TRUE )
m2.pass1 <- m2.pass1[, s.order.pass1]
}
# m2.pass1 <- m2.pass1[m.order.pass1, ]
model.descs2.pass1 = model.descs2.pass1[m.order.pass1]
sample.names2.pass1 <- colnames(m2.pass1)
model.names.pass1 <- rownames(m2.pass1)
# browser()
if (is.vector(cls.labels)) {
cls.labels2.pass1 <- cls.labels.pass1[s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[s.order.pass1]
} else {
cls.labels2.pass1 <- cls.labels.pass1[, s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[, s.order.pass1]
}
#browser()
# browser()
m.score.pass1 <- m2.pass1[1,]
m.score.norm.pass1 <- (m.score.pass1 - min(m.score.pass1))/(max(m.score.pass1) - min(m.score.pass1))
roc.list.phen.pass1 = vector(mode="numeric", length=n.phen)
p.val.list.phen.pass1 = vector(mode="numeric", length=n.phen)
phen.descs.pass1 = vector(mode="character", length=n.phen)
   # Pass 1: score each phenotype row (gene mutation / copy-number track) against the
   # top model's normalized score using ROC AUC (verification::roc.area).
   # A track is binarized as WT -> 0, anything else (MUT/DEL/AMP/ALT) -> 1.
   # Tracks with only one class (all WT or all altered) get NA and a "-" label.
   for( i in 1:n.phen.pass1 ){
      bin.gene = ifelse( cls.list2.pass1[i,]=="WT", 0, 1)
      if (length(unique(bin.gene)) > 1) {
         perf.auc <- roc.area(bin.gene, m.score.norm.pass1)
#         if( perf.auc$A < 0.5 ){
##            browser()
#            roc = signif(1 - perf.auc$A, digits=3)
#            p.val = signif(1- perf.auc$p.val, digits=3)
#            abnormality = unique(cls.list2.pass1[i,])[which(unique(cls.list2.pass1[i,]) != "WT")]
##            cls.list2.pass1[i,] = ifelse( cls.list2.pass1[i,] == "WT", abnormality, "WT" )
#            phen.names[i] = paste(phen.names[i], "-opposite", sep="")
#            roc.list.phen.pass1[i] = 1 - perf.auc$A
#            p.val.list.phen.pass1[i] = perf.auc$p.val     # Don't want to use these "opposite" genomic aberrations in Pass 2
#                                                          # because they make PATHWAY.MUT+COPY.NUMBER too dense
#         } else{
            roc <- signif(perf.auc$A, digits=3)
            p.val <- signif(perf.auc$p.value, digits=3)
            roc.list.phen.pass1[i] = perf.auc$A
            # NOTE(review): roc.area() returns a component named "p.value"; "$p.val"
            # only works via R's `$` partial matching — confirm and prefer $p.value
            # (as used three lines above) to avoid silent NULL if the package changes.
            p.val.list.phen.pass1[i] = perf.auc$p.val
#         }
      } else {
         # Degenerate track: one class only, so AUC is undefined.
         roc <- "-"
         p.val <- "-"
         roc.list.phen.pass1[i] = NA
         p.val.list.phen.pass1[i] = NA
      }
#      print(paste("ROC=", roc, " p-val=", p.val))
#      p.val.list[i] = p.val
      # Human-readable "AUC (p)" label shown next to the phenotype row in the heatmap.
      phen.descs.pass1[i] <- paste(roc, " (", p.val, ")")
   }
# browser()
g.order.pass1 = c(1, order(roc.list.phen.pass1[2:n.phen.pass1], decreasing=TRUE, na.last=TRUE)+1) # keep PATHWAY.MUT and COPY.NUMBER as 1 and 2
roc.list.phen.pass1 = roc.list.phen.pass1[g.order.pass1]
p.val.list.phen.pass1 = p.val.list.phen.pass1[g.order.pass1]
phen.descs2.pass1 = phen.descs.pass1[g.order.pass1][1:n.phen.pass1]
cls.list2.pass1 = cls.list2.pass1[g.order.pass1,][1:n.phen.pass1,]
phen.names.pass1 = phen.pass1[g.order.pass1][1:n.phen.pass1]
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass1 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(cls.list2.pass1)
cls.phen2.pass1 <- classes
cls.labels2.pass1 <- match(cls.list2.pass1, cls.phen2.pass1)
} else {
for (kk in 1:length(cls.list2.pass1[, 1])) {
classes <- unique(cls.list2.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
cls.labels2.pass1[kk,] <- match(cls.list2.pass1[kk,], classes)
}
}
cls.labels2.pass1 = cls.labels2.pass1[1:n.phen.pass1,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass1 <- unlist(cls.phen2.pass1)
# colors.list <- ifelse(unlist(cls.phen2) == target.class,
# ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
# ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
colors.list.pass1 = rep( "gray", length(phen.list.pass1))
colors.list.pass1[phen.list.pass1=="MUT"] = cls.phen.colors[1]
colors.list.pass1[phen.list.pass1=="DEL"] = cls.phen.colors[3]
colors.list.pass1[phen.list.pass1=="AMP"] = cls.phen.colors[4]
colors.list.pass1[phen.list.pass1=="ALT"] = cls.phen.colors[5]
# print("cls.phen2:")
# print(unlist(cls.phen2))
#
# print("cls.phen:")
# print(unlist(cls.phen))
#
# print("colors.list:")
# print(colors.list)
# browser()
filename <- paste(results.dir, test.file.prefix, ".3Passes.REC_ks", sep="")
# pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 9)
pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 17 )
# windows(width=12, height=8)
MSIG.HeatMapPlot.9(V = m2.pass1, row.names = model.names.pass1,
row.names2 = model.descs2.pass1,
col.labels = cls.labels2.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list.pass1, phen.names = phen.names.pass1,
phen.names2 = phen.descs2.pass1,
col.names = sample.names2.pass1, main = paste(tissue, "- 1st Pass: Known KRAS Pathway Abnormalities (REC)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
### Begin Pass 2 ###
print( "--- Begin Pass 2 ---")
# browser()
   # Select which Pass-1 phenotype tracks survive into Pass 2: keep row 1
   # (PATHWAY.MUT summary) plus every other track whose Pass-1 p-value clears a
   # threshold. The threshold is relaxed stepwise (0.1 -> 0.15 -> 0.2 -> 0.25 -> 0.3)
   # until at least one track qualifies; as a last resort all non-NA tracks are kept.
   # The "+1" offsets indices because the p-value vector is sliced from position 2.
   p.val.threshold = 0.1
   ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
   if( length(ind.top.pval) > 0 ){
      ind.p.val.threshold = c(1, ind.top.pval)
   } else if( length(ind.top.pval) < 1 ) {
      p.val.threshold = 0.15
      ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
      ind.p.val.threshold = c(1, ind.top.pval) }
   # The remaining relaxation steps are plain `if`s (not `else if`), so each re-tests
   # the result of the previous step; only the first two levels form an if/else pair.
   if( length(ind.top.pval) < 1){
      p.val.threshold = 0.2
      ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
      ind.p.val.threshold = c(1, ind.top.pval)
   }
   if( length(ind.top.pval) < 1){
      p.val.threshold = 0.25
      ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
      ind.p.val.threshold = c(1, ind.top.pval)
   }
   if( length(ind.top.pval) < 1){
      p.val.threshold = 0.3
      ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
      ind.p.val.threshold = c(1, ind.top.pval)
   }
   # Fallback: nothing cleared any threshold — keep every track with a defined p-value.
   if( length( ind.top.pval) < 1 ) {
      ind.top = which(!is.na(p.val.list.phen.pass1[-1]))+1
      ind.p.val.threshold = c( 1, ind.top )
   }
n.phen.pass2 = length(ind.p.val.threshold)
# phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
cls.list2.pass2 = cls.list2.pass1[ind.p.val.threshold,]
phen.names.pass2 = phen.names.pass1[ind.p.val.threshold]
# phen.names.pass2[1] = "PATHWAY.MUT + COPY.NUMBER"
cls.labels2.pass2 = cls.labels2.pass1[ind.p.val.threshold,]
# browser()
cls.list2.pass2.2 = ifelse( cls.list2.pass2 == "WT", 0, 1)
cls.list2.pass2.3 = ifelse( cls.list2.pass2 == "DEL" | cls.list2.pass2 == "AMP", 1, 0)
if( n.phen.pass2 > 2 ){
pathway.mut.pass2 = apply(cls.list2.pass2.2[2:n.phen.pass2,], MARGIN=2, FUN=sum)
bin.class.pass2 = pathway.mut.pass2/length(pathway.mut.pass2)
bin.class.pass2 = ( bin.class.pass2 - min(bin.class.pass2))/(max(bin.class.pass2) - min(bin.class.pass2))
cls.list2.pass2[1,] = ifelse( bin.class.pass2 > 0, "MUT", "WT")
# copy.number.pass2 = ifelse( apply(cls.list2.pass2.3[3:n.phen.pass2,], MARGIN=2, FUN=sum) >= 1, "ALT", "WT")
} else{
pathway.mut.pass2 = ifelse( cls.list2.pass2.2[2,] == 1, "MUT", "WT")
bin.class.pass2 = ifelse( pathway.mut.pass2 == "MUT", 1, 0 )
cls.list2.pass2[1,] = pathway.mut.pass2
# copy.number.pass2 = ifelse( cls.list2.pass2.3[3,] == 1, "ALT", "WT")
}
# browser()
# bin.class.pass2 =
# copy.class.pass2 = ifelse( copy.number.pass2 == "ALT", 1, 0)
# if( length(unique(bin.class.pass2)) == 1) {
# bin.class.pass2 = copy.class.pass2
# print( "Calculating p-value with respect to copy number alterations")
# }
# cls.list2.pass2[2,] = copy.number.pass2
roc.list.pass2 = vector( length=n.models, mode="numeric" )
p.val.list.pass2 = vector( length=n.models, mode="numeric" )
# browser()
model.descs2.pass2 = vector(length = n.models, mode="character")
for (i in 1:n.models) {
m.score <- m2.pass1[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass2)) > 1) {
perf.auc <- rec.area(bin.class.pass2, m.score.norm)
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.pass2[i] = perf.auc$A
p.val.list.pass2[i] = perf.auc$p.value
} else {
roc <- p.val <- "-"
roc.list.pass2[i] = NA
p.val.list.pass2[i] = NA
}
print(paste("REC=", roc, " p-val=", p.val))
model.descs2.pass2[i] <- paste(roc, " (", p.val, ")")
}
# browser()
m.order.pass2 = order(roc.list.pass2, decreasing=TRUE, na.last=TRUE)
# m.order.pass2 = order(p.val.list.pass2, decreasing=FALSE, na.last=TRUE)
model.descs2.pass2 = model.descs2.pass2[m.order.pass2]
# loc.pass2 = m.order.pass2[1]
m2.pass2 <- m2.pass1[m.order.pass2, ]
model.names.pass2 <- rownames(m2.pass2)
# print(c("loc.pass2:", loc.pass2))
s.order.pass2 <- order(m2.pass2[1,], decreasing = TRUE)
m2.pass2 <- m2.pass2[, s.order.pass2]
sample.names2.pass2 <- colnames(m2.pass2)
if (is.vector(cls.labels)) {
cls.labels2.pass2 <- cls.labels2.pass2[s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[s.order.pass2]
} else {
cls.labels2.pass2 <- cls.labels2.pass2[, s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[, s.order.pass2]
}
# browser()
# browser()
m.score.pass2 <- m2.pass2[1,]
m.score.norm.pass2 <- (m.score.pass2 - min(m.score.pass2))/(max(m.score.pass2) - min(m.score.pass2))
roc.list.phen.pass2 = vector(mode="numeric", length=n.phen.pass2)
phen.descs.pass2 = vector(mode="character", length=n.phen.pass2)
for( i in 1:n.phen.pass2 ){
bin.gene = ifelse( cls.list2.pass2[i,]=="WT", 0, 1)
if (length(unique(bin.gene)) > 1) {
perf.auc <- roc.area(bin.gene, m.score.norm.pass2)
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1 - perf.auc$A, digits=3)
# abnormality = unique(cls.list2.pass2[i,])[which(unique(cls.list2.pass2[i,]) != "WT")]
# cls.list2.pass2 = ifelse( cls.list2.pass2[i,] == "WT", abnormality, "WT" )
# phen.names.pass2[i] = paste(phen.names.pass2[i], "-opposite")
# } else{
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.phen.pass2[i] = perf.auc$A
# }
} else {
roc <- "-"
p.val <- "-"
roc.list.phen.pass2[i] = NA
}
print(paste("ROC=", roc, " p-val=", p.val))
# p.val.list[i] = p.val
phen.descs.pass2[i] <- paste(roc, " (", p.val, ")")
}
# browser()
g.order.pass2 = c(1, order(roc.list.phen.pass2[2:n.phen.pass2], decreasing=TRUE, na.last=TRUE)+1) # skip PATHWAY.MUT
phen.descs2.pass2 = phen.descs.pass2[g.order.pass2]
cls.list2.pass2 = cls.list2.pass2[g.order.pass2,]
phen.names.pass2 = phen.names.pass2[g.order.pass2]
# browser()
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass2 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(as.vector(cls.list2.pass2))
cls.phen2.pass2 <- classes
cls.labels2.pass2 <- match(cls.list2.pass2, cls.phen2.pass2)
} else {
for (kk in 1:length(cls.list2.pass2[, 1])) {
classes <- unique(cls.list2.pass2[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass2 <- c(cls.phen2.pass2, classes)
cls.labels2.pass2[kk,] <- match(cls.list2.pass2[kk,], classes)
}
}
cls.labels2.pass2 = cls.labels2.pass2[1:n.phen.pass2,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass2 <- unlist(cls.phen2.pass2)
# colors.list <- ifelse(unlist(cls.phen2) == target.class,
# ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
# ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
colors.list.pass2 = rep( "gray", length(phen.list.pass2))
colors.list.pass2[phen.list.pass2=="MUT"] = cls.phen.colors[1]
colors.list.pass2[phen.list.pass2=="DEL"] = cls.phen.colors[3]
colors.list.pass2[phen.list.pass2=="AMP"] = cls.phen.colors[4]
colors.list.pass2[phen.list.pass2=="ALT"] = cls.phen.colors[5]
MSIG.HeatMapPlot.9(V = m2.pass2, row.names = model.names.pass2,
row.names2 = model.descs2.pass2,
col.labels = cls.labels2.pass2,
col.classes = cls.phen2.pass2,
phen.cmap = colors.list.pass2, phen.names = phen.names.pass2,
phen.names2 = phen.descs2.pass2,
col.names = sample.names2.pass2, main = paste(tissue, "- 2nd Pass: only p-values <=", p.val.threshold,"from 1st pass (REC)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
### 3rd Pass ###
print( "--- Begin Pass 3 ---")
# browser()
m2.pass3 = m2.pass2
model.names.pass3 = rownames(m2.pass3)
sample.names2.pass3 = colnames(m2.pass3)
# model.descs2.pass3 = model.descs2.pass2
n.phen.pass3 = 40
# phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
cls.list2.pass3 = cls.list[, s.order.pass1][, s.order.pass2]
cls.labels2.pass3 = cls.labels[, s.order.pass1][, s.order.pass2]
# browser()
phen.names.pass3 = phen.names
m.score.pass3 <- m2.pass3[1,]
m.score.norm.pass3 <- (m.score.pass3 - min(m.score.pass3))/(max(m.score.pass3) - min(m.score.pass3))
roc.list.phen.pass3 = vector(mode="numeric", length=n.phen)
phen.descs.pass3 = vector(mode="character", length=n.phen)
p.val.list.phen.pass3 = vector(mode="numeric", length=n.phen)
for( i in 1:n.phen ){
bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
if (length(unique(bin.gene)) > 1) {
perf.auc <- roc.area(bin.gene, m.score.norm.pass3)
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1- perf.auc$p.value, digits=3)
# abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
# cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
# phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
# roc.list.phen.pass3[i] = 1-perf.auc$A
# p.val.list.phen.pass3[i] = 1- perf.auc$p.value
# } else{
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.phen.pass3[i] = perf.auc$A
p.val.list.phen.pass3[i] = perf.auc$p.value
# }
} else {
roc <- NA
p.val <- NA
roc.list.phen.pass3[i] = NA
p.val.list.phen.pass3[i] = NA
}
# print(paste("ROC=", roc, " p-val=", p.val))
# p.val.list[i] = p.val
phen.descs.pass3[i] <- paste(roc, " (", p.val, ")")
}
# browser()
p.val.threshold = 0.1
len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
if( len == 2 ){
p.val.threshold = 0.15
len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
}
if( len == 2 ){
p.val.threshold = 0.2
len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
}
if( len>40 ) len = 40
# g.order.pass3.1 = c(1, 2, order(p.val.list.phen.pass3[3:n.phen], decreasing=FALSE, na.last=TRUE)+2 )
g.order.pass3 = c(1, 2, order(p.val.list.phen.pass3[-1:-2], decreasing=FALSE, na.last=TRUE)+2 )[1:len] # skip PATHWAY.MUT and COPY.NUMBER
phen.descs2.pass3 = phen.descs.pass3[g.order.pass3]
cls.list2.pass3 = cls.list2.pass3[g.order.pass3,]
cls.labels2.pass3 = cls.labels2.pass3[g.order.pass3,]
phen.names.pass3 = phen.names.pass3[g.order.pass3]
cls.list.mut = ifelse(cls.list2.pass3[-1:-2,] == "MUT", 1, 0)
cls.list.alt = ifelse(cls.list2.pass3[-1:-2,] == "DEL" | cls.list2.pass3[-1:-2,] == "AMP", 1, 0)
# browser()
if( !is.vector(cls.list.alt) ){
cls.list.mut.sum = apply(cls.list.mut, MARGIN=2, FUN=sum)
cls.list.alt.sum = apply(cls.list.alt, MARGIN=2, FUN=sum)
bin.class.pass3 = cls.list.mut.sum + cls.list.alt.sum
bin.class.pass3 = ( bin.class.pass3 - min(bin.class.pass3))/(max(bin.class.pass3) - min(bin.class.pass3))
cls.list.mut.sum = ifelse(cls.list.mut.sum + cls.list.alt.sum > 0, 1, 0)
cls.list2.pass3[1,] = ifelse( cls.list.mut.sum >= 1, "MUT", "WT")
cls.list2.pass3[2,] = ifelse( cls.list.alt.sum >= 1, "ALT", "WT")
} else{
cls.list2.pass3[2,] = ifelse(cls.list.alt == 1, "ALT", "WT")
bin.class.pass3 = cls.list.mut+cls.list.alt
bin.class.pass3 = ( bin.class.pass3 - min(bin.class.pass3))/(max(bin.class.pass3) - min(bin.class.pass3))
cls.list2.pass3[1,] = ifelse(bin.class.pass3 > 0 , "MUT", "WT")
}
# browser()
   # Pass 3: recompute the association statistic for the two synthetic summary rows
   # (row 1 = PATHWAY.MUT, row 2 = COPY.NUMBER), which were just rebuilt above from
   # the per-gene tracks, against the top signature's normalized score.
   # NOTE(review): this loop calls rec.area() while the per-gene loops in all three
   # passes call roc.area() — presumably intentional (REC vs ROC statistic for the
   # summary rows, matching the "(REC)" plot titles), but worth confirming.
   for( i in 1:2 ){ # Recalculate ROC and p-value for PATHWAY.MUT and COPY.NUMBER
      bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
      if (length(unique(bin.gene)) > 1) {
         perf.auc <- rec.area(bin.gene, m.score.norm.pass3)
#         if( perf.auc$A < 0.5 ){
##            browser()
#            roc = signif(1 - perf.auc$A, digits=3)
#            p.val = signif(1- perf.auc$p.value, digits=3)
#            abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
#            cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
#            phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
#            roc.list.phen.pass3[i] = 1-perf.auc$A
#            p.val.list.phen.pass3[i] = 1- perf.auc$p.value
#         } else{
            roc <- signif(perf.auc$A, digits=3)
            p.val <- signif(perf.auc$p.value, digits=3)
            roc.list.phen.pass3[i] = perf.auc$A
            p.val.list.phen.pass3[i] = perf.auc$p.value
#         }
      } else {
         # Degenerate summary row (all samples identical): statistic undefined.
         roc <- NA
         p.val <- NA
         roc.list.phen.pass3[i] = NA
         p.val.list.phen.pass3[i] = NA
      }
      print(paste("ROC=", roc, " p-val=", p.val))
#      p.val.list[i] = p.val
      # Overwrite the placeholder description for the summary row with "AUC (p)".
      phen.descs2.pass3[i] <- paste(roc, " (", p.val, ")")
   }
# browser()
   # Pass 3: score every model (signature) row against the combined
   # mutation+copy-number class vector (bin.class.pass3) using rec.area, and build
   # the "REC (p)" row-label strings for the heatmap.
   model.descs2.pass3 = vector(length = n.models, mode="character")
   for (i in 1:n.models) {
      m.score <- m2.pass3[i,]
      # Min-max normalize the model's scores to [0,1] before scoring.
      m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
#      browser()
      if (length(unique(bin.class.pass3)) > 1) {
         perf.auc <- rec.area(bin.class.pass3, m.score.norm)
         roc <- signif(perf.auc$A, digits=3)
         p.val <- signif(perf.auc$p.value, digits=3)
         # NOTE(review): results are written into the *pass2* vectors
         # (roc.list.pass2 / p.val.list.pass2), clobbering the Pass-2 results —
         # this looks like a copy-paste from the Pass-2 loop; pass3 vectors were
         # likely intended. Only model.descs2.pass3 is used downstream here.
         roc.list.pass2[i] = perf.auc$A
         p.val.list.pass2[i] = perf.auc$p.value
      } else {
         # No class contrast: statistic undefined for every model.
         roc <- p.val <- "-"
         roc.list.pass2[i] = NA
         p.val.list.pass2[i] = NA
      }
      print(paste("REC=", roc, " p-val=", p.val))
      model.descs2.pass3[i] <- paste(roc, " (", p.val, ")")
   }
cls.phen2.pass3 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(as.vector(cls.list2.pass3))
cls.phen2.pass3 <- classes
cls.labels2.pass3 <- match(cls.list2.pass3, cls.phen2.pass3)
} else {
# browser()
for (kk in 1:length(cls.list2.pass3[, 1])) {
# browser()
classes <- unique(cls.list2.pass3[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass3 <- c(cls.phen2.pass3, classes)
cls.labels2.pass3[kk,] <- match(cls.list2.pass3[kk,], classes)
}
}
# cls.labels2.pass3 = cls.labels2.pass3[1:n.phen.pass3,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass3 <- unlist(cls.phen2.pass3)
# colors.list <- ifelse(unlist(cls.phen2) == target.class,
# ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
# ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
colors.list.pass3 = rep( "gray", length(phen.list.pass3))
colors.list.pass3[phen.list.pass3=="MUT"] = cls.phen.colors[1]
colors.list.pass3[phen.list.pass3=="DEL"] = cls.phen.colors[3]
colors.list.pass3[phen.list.pass3=="AMP"] = cls.phen.colors[4]
colors.list.pass3[phen.list.pass3=="ALT"] = cls.phen.colors[5]
phen.names.pass3[1] = "PATHWAY.MUT+COPY.NUMBER"
# browser()
MSIG.HeatMapPlot.9(V = m2.pass3, row.names = model.names.pass3,
row.names2 = model.descs2.pass3,
col.labels = cls.labels2.pass3,
col.classes = cls.phen2.pass3,
phen.cmap = colors.list.pass3, phen.names = phen.names.pass3,
phen.names2 = phen.descs2.pass3,
col.names = sample.names2.pass3, main = paste(tissue, "- 3rd Pass: Top signature from 2nd pass with all genes ( p-value <=", p.val.threshold, ") (REC)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
dev.off()
   # Optionally write the sorted projection back out as a GCT file.
   # NOTE(review): `m2`, `sample.names2`, `model.names2` and `model.descs2` are not
   # defined anywhere in the visible pass-1/2/3 code above (which uses the
   # *.pass1/.pass2/.pass3 variants) — if output.dataset is ever non-NA this block
   # would fail with "object not found". Presumably m2.pass3 / sample.names2.pass3 /
   # model.names.pass3 / model.descs2.pass3 were intended; confirm against callers.
   if (!is.na(output.dataset)) {
      V.GCT <- m2
      colnames(V.GCT) <- sample.names2
      row.names(V.GCT) <- model.names2
      write.gct(gct.data.frame = V.GCT, descs = model.descs2, filename =output.dataset)
   }
}
OPAM.sort.projection.by.score.6 <- function(
input.ds,
input.cls,
tissue = "NA",
results.dir,
normalize.score = T,
normalization.type = "zero.one",
model = "NA",
target.phen = NA,
target.class = NA,
user.colors = NA,
decreasing.order = T,
output.dataset = NA,
char.rescale = 1,
cmap.type = 3,
row.norm = T,
u.gene.names.known = "NA"
)
# Calls MSIG.HeatMapPlot.9 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2 does.
# However, it *will* use "model" if it cannot calculate p-values on the gene signatures, which
# happens when every cell line has a genomic aberration.
#
# Runs 3 passes on the data:
# 1st pass: looks at the genes and copy number alterations specified by u.gene.names.known
# 2nd pass: looks at only the top abnormalities (using a p-value cutoff) from the 1st pass, and adjusts
# the PATHWAY.MUT vector accordingly (only according to the genes, not by copy number data)
# 3rd pass: Takes the winning signature from the 2nd pass and then looks all the genes available
#
# Very similar to OPAM.sort.projection.by.score.4, however this version uses mutual.inf instead of
# roc.area to calculate mutual information scores and p-values for PATHWAY.MUT, the vector of total genomic aberrations
# in all samples
{
library(gtools)
library(verification)
library(ROCR)
library(MASS)
library(RColorBrewer)
library(heatmap.plus)
dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
m <- data.matrix(dataset$ds)
model.names <- dataset$row.names
# model.descs <- dataset$descs
Ns <- length(m[1,])
dim(m)
sample.names <- dataset$names
n.models <- length(m[,1])
temp <- strsplit(input.ds, split="/") # Extract test file name
s <- length(temp[[1]])
test.file.name <- temp[[1]][s]
temp <- strsplit(test.file.name, split=".gct")
test.file.prefix <- temp[[1]][1]
char.res <- 0.013 * n.models + 0.65
# normalize scores
if (normalize.score == T) {
if (normalization.type == "zero.one") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
}
} else if (normalization.type == "z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
}
} else if (normalization.type == "r.z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
}
}
}
CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
cls.labels <- CLS$class.v
cls.phen <- CLS$phen
cls.list <- CLS$class.list
if (is.vector(cls.labels)) {
n.phen <- 1
} else {
n.phen <- length(cls.labels[,1])
}
if (!is.na(user.colors[1])) {
c.test <- user.colors
} else {
if (!is.null(CLS$col.phen)) {
c.test <- CLS$col.phen
} else {
c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"))
}
}
if (!is.null(CLS$phen.names)) {
phen.names <- CLS$phen.names
} else {
phen.names <- "NA"
}
cls.phen.index <- unlist(cls.phen)
cls.phen.colors <- c.test[1:length(cls.phen.index)]
# print("cls.phen.colors:")
# print(cls.phen.colors)
n.classes <- vector(length=n.phen, mode="numeric")
if (n.phen == 1) {
max.classes <- length(cls.phen)
n.classes[1] <- max.classes
} else {
max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
for (i in 1:n.phen) {
n.classes[i] <- length(cls.phen[[i]])
}
}
print("--- Begin Pass 1 ---")
# model.names.original = model.names
# m.original = m
phen.pass1 = c( "PATHWAY.MUT", u.gene.names.known)
n.phen.pass1 = length(u.gene.names.known)+1
ind.phen.pass1 = which( phen.names %in% phen.pass1 )
phen.pass1 = phen.names[ind.phen.pass1]
phen.pass1[1] = "SUMMARY"
   # Pass 1 setup (mutual-information variant): subset the phenotype tracks to the
   # user-specified known genes (ind.phen.pass1) and build the summary class vector.
   MI.list.pass1 = vector( length=n.models, mode="numeric" )
#   p.val.list.pass1 = vector( length=n.models, mode="numeric" )
   cls.list.pass1 = cls.list[ind.phen.pass1,]
   # Binarize tracks: WT -> 0, any aberration (MUT/DEL/AMP/ALT) -> 1.
   cls.list.pass1.2 = ifelse(cls.list.pass1 == "WT", 0, 1)
   cls.labels.pass1 = cls.labels[ind.phen.pass1,]
   # NOTE(review): bare browser() left in — this drops into the interactive debugger
   # on every run; almost certainly a leftover debugging artifact to remove.
   browser()
   if (!is.na(target.phen)) {
      # Summary row = per-sample count of aberrations across all known-gene tracks
      # (row 1, the PATHWAY.MUT placeholder, is excluded via [-1,]).
      bin.class.pass1 = apply( cls.list.pass1.2[-1,], MARGIN=2, FUN=sum)
      # Normalize bin.class.pass1
      if( length(unique(bin.class.pass1)) > 1){
         bin.class.pass1 = ( bin.class.pass1 - min(bin.class.pass1))/(max(bin.class.pass1) - min(bin.class.pass1))
      } else if ( length(unique(bin.class.pass1)) == 1){
         # NOTE(review): assigns `bin.class`, not `bin.class.pass1` — likely a typo;
         # as written the degenerate (constant) case leaves bin.class.pass1
         # unnormalized and `bin.class` is never read afterwards in this pass.
         bin.class = rep(1, length(cls.list[1,]))
      }
#      bin.class.pass1 <- ifelse(cls.list[target.phen,] == target.class, 1, 0)
#      bin.class.pass1 <- ifelse(apply( cls.list.pass1.2[2:n.phen.pass1,], MARGIN=2, FUN=sum) > 0, 1, 0)
#      cls.labels.pass1[1,] = bin.class.pass1
      # Rewrite row 1 as the SUMMARY track: MUT if any aberration count > 0.
      cls.list.pass1[1,] = ifelse(bin.class.pass1 > 0, "MUT", "WT")
#      if( length(unique(bin.class.pass1)) == 1) {
#         cls.list.3 = ifelse( cls.list == "DEL" | cls.list == "AMP", 1, 0)
#         copy.number.pass1 = ifelse( apply(cls.list.3[3:n.phen,], MARGIN=2, FUN=sum) >= 1, "ALT", "WT")
#         copy.class.pass1 = ifelse( copy.number.pass1 == "ALT", 1, 0)
#         bin.class.pass1 = copy.class.pass1
#         print( "Calculating p-value with respect to copy number alterations")
#      }
   } else {
      # NOTE(review): `cls.list2` has no visible definition at this point in the
      # function — if target.phen is NA this branch would fail with
      # "object 'cls.list2' not found"; confirm intended variable (cls.list.pass1?).
      bin.class.pass1 <- ifelse(cls.list[1,] == cls.list2[1,1], 1, 0)
   }
# browser()
# MI.ref.models.pass1 = mutual.inf.2(bin.class.pass1, bin.class.pass1)$MI
# print(paste("MI.ref.models.pass1 =", MI.ref.models.pass1))
# browser()
model.descs2.pass1 = vector(length = n.models, mode="character")
for (i in 1:n.models) {
m.score <- m[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass1)) > 1) {
# browser()
MI <- (mutual.inf.2(bin.class.pass1, m.score.norm)$MI)# /MI.ref.models.pass1
# roc <- signif(perf.auc$A, digits=3)
# p.val <- signif(perf.auc$p.value, digits=3)
MI.list.pass1[i] = MI
MI.signif <- signif(MI, digits=3)
# p.val.list.pass1[i] = perf.auc$p.value
} else {
MI.signif <- "-"
MI.list.pass1[i] = NA
# p.val.list.pass1[i] = NA
}
# browser()
# print(paste("REC=", roc, " p-val=", p.val))
print(paste( format(rownames(m)[i], width=30), "mutual.inf =", MI.signif))
# browser()
model.descs2.pass1[i] <- paste(MI.signif)
}
# browser()
if( is.na(MI.list.pass1[1]) ){
loc <- match(model, model.names)
s.order.pass1 <- order(m[loc,], decreasing = decreasing.order)
# loc = s.order.pass1[1]
# s.order.pass1 = 1:Ns
m2.pass1 <- m[, s.order.pass1]
correl <- cor(t(m2.pass1))[, loc]
m.order.pass1 <- order(correl, decreasing=T)
# m.order.pass1 = 1:n.models
m2.pass1 <- m2.pass1[m.order.pass1, ]
} else{
m.order.pass1 = order(MI.list.pass1, decreasing=TRUE, na.last=TRUE)
#m.order.pass1 = 1:n.models
m2.pass1 <- m[m.order.pass1, ]
s.order.pass1 <- order(m2.pass1[1,], decreasing = TRUE )
# s.order.pass1 = 1:Ns
m2.pass1 <- m2.pass1[, s.order.pass1]
}
bin.class.pass1 = bin.class.pass1[s.order.pass1]
m2.pass1 <- m2.pass1[m.order.pass1, ]
model.descs2.pass1 = model.descs2.pass1[m.order.pass1]
sample.names2.pass1 <- colnames(m2.pass1)
model.names.pass1 <- rownames(m2.pass1)
# browser()
if (is.vector(cls.labels)) {
cls.labels2.pass1 <- cls.labels.pass1[s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[s.order.pass1]
} else {
cls.labels2.pass1 <- cls.labels.pass1[, s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[, s.order.pass1]
}
#browser()
# pathway.name <- "KRAS_ALL_UP"
# pathway <- m[1,]
# pathway0 <- ifelse(pathway < median(pathway), 0, 1) # disctretized version
# MI.ref.genes.pass1 <- mutual.inf.2(m[1,], m[1,])$MI
# browser()
m.score.pass1 <- m2.pass1[1,]
m.score.norm.pass1 <- (m.score.pass1 - min(m.score.pass1))/(max(m.score.pass1) - min(m.score.pass1))
# m.score.pass1 = ifelse( m.score.pass1 < median(m.score.pass1), -1, 1) # discretized version
# MI.ref.genes.pass1 <- mutual.inf.2(m.score.norm.pass1, m.score.norm.pass1)$MI
# print(paste("MI.ref.genes.pass1 =", MI.ref.genes.pass1))
MI.list.phen.pass1 = vector(mode="numeric", length=n.phen.pass1)
# p.val.list.phen.pass1 = vector(mode="numeric", length=n.phen)
phen.descs.pass1 = vector(mode="character", length=n.phen.pass1)
if( length(unique(bin.class.pass1)) > 1){
MI <-(mutual.inf.2(bin.class.pass1, m.score.norm.pass1)$MI)#/MI.ref.genes.pass1
MI.signif <- signif(MI, digits=3)
MI.list.phen.pass1[1] = MI
} else{
MI.signif <- "-"
MI.list.phen.pass1[1] = NA
}
print(paste(format(phen.pass1[1], width=12), "mutual.inf =", MI.signif))
phen.descs.pass1[1] <- paste(MI.signif)
# print(m.score.pass1)
for( i in 2:n.phen.pass1 ){
# browser()
bin.gene = ifelse( cls.list2.pass1[i,]=="WT", 0, 1)
# add random noise so the quantile calculation in mutual.inf doesn't return 0
if (length(unique(bin.gene)) > 1) {
# print(bin.gene)
# browser()
MI <- (mutual.inf.2(bin.gene, m.score.norm.pass1)$MI)#/MI.ref.genes.pass1
MI.signif <- signif(MI, digits=3)
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1- perf.auc$p.val, digits=3)
# abnormality = unique(cls.list2.pass1[i,])[which(unique(cls.list2.pass1[i,]) != "WT")]
## cls.list2.pass1[i,] = ifelse( cls.list2.pass1[i,] == "WT", abnormality, "WT" )
# phen.names[i] = paste(phen.names[i], "-opposite", sep="")
# roc.list.phen.pass1[i] = 1 - perf.auc$A
# p.val.list.phen.pass1[i] = perf.auc$p.val # Don't want to use these "opposite" genomic aberrations in Pass 2
# # because they make PATHWAY.MUT+COPY.NUMBER too dense
# } else{
# roc <- signif(perf.auc$A, digits=3)
# p.val <- signif(perf.auc$p.value, digits=3)
MI.list.phen.pass1[i] = MI
# p.val.list.phen.pass1[i] = perf.auc$p.val
# }
} else {
MI.signif <- "-"
# p.val <- "-"
MI.list.phen.pass1[i] = NA
# p.val.list.phen.pass1[i] = NA
}
# browser()
# print(paste("ROC=", roc, " p-val=", p.val))
print(paste(format(phen.pass1[i], width=12), "mutual.inf =", MI.signif))
# p.val.list[i] = p.val
phen.descs.pass1[i] <- paste(MI.signif)
}
# browser()
# Re-order phenotype rows by decreasing MI, keeping the summary track fixed
# in slot 1; NA rows sink to the bottom (na.last=TRUE).
#g.order.pass1 = 1:n.phen.pass1
g.order.pass1 = c(1, order(MI.list.phen.pass1[2:n.phen.pass1], decreasing=TRUE, na.last=TRUE)+1) # keep PATHWAY.MUT as 1
# MI.list.phen.pass1 = MI.list.phen.pass1[g.order.pass1]
# p.val.list.phen.pass1 = p.val.list.phen.pass1[g.order.pass1]
MI.list.phen.pass1 = MI.list.phen.pass1[g.order.pass1]
phen.descs2.pass1 = phen.descs.pass1[g.order.pass1]#[1:n.phen.pass1]
cls.list2.pass1 = cls.list2.pass1[g.order.pass1,]#[1:n.phen.pass1,]
phen.names.pass1 = phen.pass1[g.order.pass1]#[1:n.phen.pass1]
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass1 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(cls.list2.pass1)
cls.phen2.pass1 <- classes
cls.labels2.pass1 <- match(cls.list2.pass1, cls.phen2.pass1)
} else {
# Rebuild the integer label matrix row-by-row against each row's own
# set of observed classes, accumulating the flat class list for colors.
for (kk in 1:length(cls.list2.pass1[, 1])) {
classes <- unique(cls.list2.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
cls.labels2.pass1[kk,] <- match(cls.list2.pass1[kk,], classes)
}
}
cls.labels2.pass1 = cls.labels2.pass1[1:n.phen.pass1,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
# Map each class label to a display color; anything not listed stays gray.
phen.list.pass1 <- unlist(cls.phen2.pass1)
colors.list.pass1 = rep( "gray", length(phen.list.pass1))
colors.list.pass1[phen.list.pass1=="MUT"] = cls.phen.colors[1]
colors.list.pass1[phen.list.pass1=="DEL"] = cls.phen.colors[3]
colors.list.pass1[phen.list.pass1=="AMP"] = cls.phen.colors[4]
colors.list.pass1[phen.list.pass1=="ALT"] = cls.phen.colors[5]
# browser()
# colors.list.pass1[1,] = grey(bin.class.pass1)
# print("cls.phen2:")
# print(unlist(cls.phen2))
#
# print("cls.phen:")
# print(unlist(cls.phen))
#
# print("colors.list:")
# print(colors.list)
# browser()  # debug breakpoint disabled: a live browser() halts every
# non-interactive (Rscript/batch) run of this pipeline at this point.
# Open the Phase 1-2 PDF report and draw the Phase 1 heatmap: signatures
# (rows) sorted by MI vs the summary track, samples (columns) sorted by the
# top signature's score. The device is closed later by dev.off().
filename <- paste(results.dir, test.file.prefix, ".Phase1-2.MI", sep="")
# pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 9)
pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 17 )
# windows(width=12, height=8)
MSIG.HeatMapPlot.10(V = m2.pass1,
pathway.mut = bin.class.pass1,
row.names = model.names.pass1,
row.names2 = model.descs2.pass1,
col.labels = cls.labels2.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list.pass1,
phen.names = phen.names.pass1,
phen.names2 = phen.descs2.pass1,
col.names = sample.names2.pass1,
main = paste(tissue, "- Phase 1: Known KRAS Pathway Abnormalities (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
### Begin Pass 2 ###
print( "--- Begin Phase 2 ---")
# browser()
# Relax the MI cutoff step by step until at least one phenotype (besides the
# summary track kept in slot 1) survives the threshold.
MI.thresholds = c(0.2, 0.1, 0.08, 0.05, 0.03, 0.025, 0.02, 0.015, 0.01, 0)
# MI.threshold = 0.03
ind.top.MI = vector(mode="integer")
MI.i = 1
# NOTE(review): MI.i is incremented before first use, so the first threshold
# actually tried is MI.thresholds[2] (0.1) and 0.2 is never applied --
# confirm this is intentional.
# BUG FIX: the loop is now bounded. Previously, if every MI was NA, MI.i ran
# past the end of MI.thresholds; comparing against the resulting NA made
# which() return integer(0) forever, i.e. an infinite loop.
while( length(ind.top.MI) < 1 && MI.i < length(MI.thresholds) )
{
MI.i = MI.i + 1
ind.top.MI = which( MI.list.phen.pass1[-1] >= MI.thresholds[MI.i] ) + 1
}
ind.MI.threshold = c(1, ind.top.MI)
# ind.top.MI = which(MI.list.phen.pass1[-1] >= MI.threshold.vector[1] )+1
# if( length(ind.top.MI) > 0 ){
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1 ){
# MI.threshold = 0.025
# ind.top.MI = which(MI.list.phen.pass1[-1] >= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1 ){
# MI.threshold = 0.02
# ind.top.MI = which(MI.list.phen.pass1[-1] >= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1 ){
# MI.threshold = 0.015
# ind.top.MI = which(MI.list.phen.pass1[-1] >= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1 ){
# MI.threshold = 0.01
# ind.top.MI = which(MI.list.phen.pass1[-1] >= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1 ) {
# MI.threshold = 0
# ind.top.MI = which(MI.list.phen.pass1[-1] > 0 )+1
# ind.MI.threshold = c(1, ind.top.MI) }
# if( length(ind.top.MI) < 1){
# MI.threshold = 0.2
# ind.top.MI = which(MI.list.phen.pass1[2:n.phen.pass1] <= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1){
# MI.threshold = 0.25
# ind.top.MI = which(MI.list.phen.pass1[2:n.phen.pass1] <= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1){
# MI.threshold = 0.3
# ind.top.MI = which(MI.list.phen.pass1[2:n.phen.pass1] <= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length( ind.top.MI) < 1 ) {
# ind.top = which(!is.na(MI.list.phen.pass1[-1]))+1
# ind.MI.threshold = c( 1, ind.top )
# }
# Pass 2 works only on the phenotypes that passed the MI threshold (plus the
# summary track in slot 1).
n.phen.pass2 = length(ind.MI.threshold)
# phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
cls.list2.pass2 = cls.list2.pass1[ind.MI.threshold,]
phen.names.pass2 = phen.names.pass1[ind.MI.threshold]
# phen.names.pass2[1] = "PATHWAY.MUT + COPY.NUMBER"
cls.labels2.pass2 = cls.labels2.pass1[ind.MI.threshold,]
# browser()
# 0/1 encodings: .2 = any aberration vs WT; .3 = copy-number events only.
cls.list2.pass2.2 = ifelse( cls.list2.pass2 == "WT", 0, 1)
cls.list2.pass2.3 = ifelse( cls.list2.pass2 == "DEL" | cls.list2.pass2 == "AMP", 1, 0)
# Rebuild the summary track from the surviving phenotypes only.
# NOTE(review): the (max - min) rescaling divides by zero (yielding NaN) if
# every sample has the same aberration count -- confirm upstream guarantees
# at least one WT sample here.
if( n.phen.pass2 > 2 ){
pathway.mut.pass2 = apply(cls.list2.pass2.2[2:n.phen.pass2,], MARGIN=2, FUN=sum)
bin.class.pass2 = pathway.mut.pass2/length(pathway.mut.pass2)
bin.class.pass2 = ( bin.class.pass2 - min(bin.class.pass2))/(max(bin.class.pass2) - min(bin.class.pass2))
bin.class.pass2.noisy = bin.class.pass2
cls.list2.pass2[1,] = ifelse( bin.class.pass2 > 0, "MUT", "WT")
# copy.number.pass2 = ifelse( apply(cls.list2.pass2.3[3:n.phen.pass2,], MARGIN=2, FUN=sum) >= 1, "ALT", "WT")
} else{
pathway.mut.pass2 = ifelse( cls.list2.pass2.2[2,] == 1, "MUT", "WT")
bin.class.pass2 = ifelse( pathway.mut.pass2 == "MUT", 1, 0 ) #+ runif(Ns, min=-.05, max=.05)
# bin.class.pass2.noisy = bin.class.pass2 + runif(Ns, min=-.05, max=.05)
# bin.class.pass2.noisy = ( bin.class.pass2.noisy - min(bin.class.pass2.noisy))/(max(bin.class.pass2.noisy) - min(bin.class.pass2.noisy))
cls.list2.pass2[1,] = pathway.mut.pass2
# copy.number.pass2 = ifelse( cls.list2.pass2.3[3,] == 1, "ALT", "WT")
}
# browser()
# bin.class.pass2 =
# copy.class.pass2 = ifelse( copy.number.pass2 == "ALT", 1, 0)
# if( length(unique(bin.class.pass2)) == 1) {
# bin.class.pass2 = copy.class.pass2
# print( "Calculating p-value with respect to copy number alterations")
# }
# cls.list2.pass2[2,] = copy.number.pass2
# Re-score every signature against the rebuilt Pass-2 summary track: each
# row of m2.pass1 is min-max normalized, then its MI vs bin.class.pass2 is
# stored (NA / "-" when the summary track is degenerate).
MI.list.pass2 = vector( length=n.models, mode="numeric" )
# MI.ref.models.pass2 = mutual.inf.2(bin.class.pass2, bin.class.pass2)$MI
# print(paste("MI.ref.models.pass2 =", MI.ref.models.pass2))
# p.val.list.pass2 = vector( length=n.models, mode="numeric" )
# browser()
model.descs2.pass2 = vector(length = n.models, mode="character")
for (i in 1:n.models) {
m.score <- m2.pass1[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass2)) > 1) {
MI <- (mutual.inf.2(bin.class.pass2, m.score.norm)$MI)#/MI.ref.models.pass2
MI.signif <- signif(MI, digits=3)
# p.val <- signif(perf.auc$p.value, digits=3)
MI.list.pass2[i] = MI
# p.val.list.pass2[i] = perf.auc$p.value
} else {
MI.signif <- "-"
MI.list.pass2[i] = NA
# p.val.list.pass2[i] = NA
}
print(paste(format(rownames(m2.pass1)[i], width=30),"mutual.inf =", MI.signif))
model.descs2.pass2[i] <- paste(MI.signif)
}
# browser()
# Sort signatures by their Pass-2 MI (NAs last), then sort samples by the
# new top signature's score; phenotype label matrices follow the same
# column permutation.
m.order.pass2 = order(MI.list.pass2, decreasing=TRUE, na.last=TRUE)
# m.order.pass2 = order(p.val.list.pass2, decreasing=FALSE, na.last=TRUE)
model.descs2.pass2 = model.descs2.pass2[m.order.pass2]
# loc.pass2 = m.order.pass2[1]
m2.pass2 <- m2.pass1[m.order.pass2, ]
model.names.pass2 <- rownames(m2.pass2)
# print(c("loc.pass2:", loc.pass2))
s.order.pass2 <- order(m2.pass2[1,], decreasing = TRUE)
m2.pass2 <- m2.pass2[, s.order.pass2]
bin.class.pass2 = bin.class.pass2[s.order.pass2]
# bin.class.pass2.noisy = bin.class.pass2.noisy[s.order.pass2]
sample.names2.pass2 <- colnames(m2.pass2)
if (is.vector(cls.labels)) {
cls.labels2.pass2 <- cls.labels2.pass2[s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[s.order.pass2]
} else {
cls.labels2.pass2 <- cls.labels2.pass2[, s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[, s.order.pass2]
}
# browser()
# browser()
# Normalized score of the new top signature, used for the per-phenotype MI
# recomputation below.
m.score.pass2 <- m2.pass2[1,]
m.score.norm.pass2 <- (m.score.pass2 - min(m.score.pass2))/(max(m.score.pass2) - min(m.score.pass2))
# MI.ref.genes.pass2 = mutual.inf.2(m.score.norm.pass2, m.score.norm.pass2)$MI
# print(paste("MI.ref.genes.pass2 =", MI.ref.genes.pass2))
# Pass 2: MI between the rebuilt summary track and the new top signature.
# Slot 1 of MI.list.phen.pass2 holds the summary track's MI; it is reused
# below when n.phen.pass2 == 2 and sorted by g.order.pass2.
MI.list.phen.pass2 = vector(mode="numeric", length=n.phen.pass2)
phen.descs.pass2 = vector(mode="character", length=n.phen.pass2)
if( length(unique(bin.class.pass2)) > 1){
MI <- (mutual.inf.2(bin.class.pass2, m.score.norm.pass2)$MI)#/MI.ref.genes.pass2
MI.signif <- signif(MI, digits=3)
# BUG FIX: this previously wrote to MI.list.phen.pass1[1], leaving the
# Pass-2 vector's slot 1 at 0 and corrupting the Pass-1 results.
MI.list.phen.pass2[1] = MI
} else{
MI.signif <- "-"
MI <- NA
MI.list.phen.pass2[1] = MI
}
print(paste(format(phen.names.pass2[1], width=12), "mutual.inf =", MI.signif))
phen.descs.pass2[1] <- paste(MI.signif)
if( n.phen.pass2 == 2 ){
# Only one phenotype besides the summary track: it IS the summary track's
# sole source, so reuse its MI and keep the original order.
phen.descs.pass2[2] <- paste(MI.signif)
MI.list.phen.pass2[2] = MI
g.order.pass2 = c(1,2)
} else{
# Recompute each surviving phenotype's MI against the new top signature.
for( i in 2:n.phen.pass2 ){
bin.gene = ifelse( cls.list2.pass2[i,]=="WT", 0, 1)
if (length(unique(bin.gene)) > 1) {
MI <- (mutual.inf.2(bin.gene, m.score.norm.pass2)$MI)#/MI.ref.genes.pass2
MI.signif <- signif(MI, digits=3)
MI.list.phen.pass2[i] = MI
} else {
# BUG FIX: the display value must be reset here. Previously MI was set
# to "-" while MI.signif kept its value from the previous iteration,
# so degenerate phenotypes printed and recorded a stale MI.
MI.signif <- "-"
MI <- NA
MI.list.phen.pass2[i] = NA
}
print(paste(format(phen.names.pass2[i], width=12),"mutual.inf =", MI.signif))
phen.descs.pass2[i] <- paste(MI.signif)
}
# Order phenotypes by decreasing MI (NAs last), summary track fixed first.
g.order.pass2 = c(1, order(MI.list.phen.pass2[2:n.phen.pass2], decreasing=TRUE, na.last=TRUE)+1) # skip PATHWAY.MUT
}
# Apply the Pass-2 phenotype ordering, then rebuild the integer label
# matrix and the flat class list used for the heatmap color map.
phen.descs2.pass2 = phen.descs.pass2[g.order.pass2]
cls.list2.pass2 = cls.list2.pass2[g.order.pass2,]
phen.names.pass2 = phen.names.pass2[g.order.pass2]
# browser()
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass2 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(as.vector(cls.list2.pass2))
cls.phen2.pass2 <- classes
cls.labels2.pass2 <- match(cls.list2.pass2, cls.phen2.pass2)
} else {
for (kk in 1:length(cls.list2.pass2[, 1])) {
classes <- unique(cls.list2.pass2[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass2 <- c(cls.phen2.pass2, classes)
cls.labels2.pass2[kk,] <- match(cls.list2.pass2[kk,], classes)
}
}
cls.labels2.pass2 = cls.labels2.pass2[1:n.phen.pass2,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass2 <- unlist(cls.phen2.pass2)
# colors.list <- ifelse(unlist(cls.phen2) == target.class,
# ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
# ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
# Color map: known aberration labels get fixed colors, all else gray.
colors.list.pass2 = rep( "gray", length(phen.list.pass2))
colors.list.pass2[phen.list.pass2=="MUT"] = cls.phen.colors[1]
colors.list.pass2[phen.list.pass2=="DEL"] = cls.phen.colors[3]
colors.list.pass2[phen.list.pass2=="AMP"] = cls.phen.colors[4]
colors.list.pass2[phen.list.pass2=="ALT"] = cls.phen.colors[5]
# Phase 2 heatmap page (same open PDF device as Phase 1): signatures and
# samples re-sorted against the threshold-filtered summary track.
MSIG.HeatMapPlot.10(V = m2.pass2,
pathway.mut = bin.class.pass2,
row.names = model.names.pass2,
row.names2 = model.descs2.pass2,
col.labels = cls.labels2.pass2,
col.classes = cls.phen2.pass2,
phen.cmap = colors.list.pass2, phen.names = phen.names.pass2,
phen.names2 = phen.descs2.pass2,
col.names = sample.names2.pass2, main = paste(tissue, "- Phase 2: only mutual information >=", MI.thresholds[MI.i],"from Phase 1 (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
### 3rd Pass ###
# print( "--- Begin Pass 3 ---")
## browser()
# m2.pass3 = m2.pass2
# model.names.pass3 = rownames(m2.pass3)
# sample.names2.pass3 = colnames(m2.pass3)
## model.descs2.pass3 = model.descs2.pass2
# n.phen.pass3 = 40
## phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
# cls.list2.pass3 = cls.list[, s.order.pass1][, s.order.pass2]
# cls.labels2.pass3 = cls.labels[, s.order.pass1][, s.order.pass2]
#
## browser()
# phen.names.pass3 = phen.names
# m.score.pass3 <- m2.pass3[1,]
# m.score.norm.pass3 <- (m.score.pass3 - min(m.score.pass3))/(max(m.score.pass3) - min(m.score.pass3))
# MI.list.phen.pass3 = vector(mode="numeric", length=n.phen)
# phen.descs.pass3 = vector(mode="character", length=n.phen)
## p.val.list.phen.pass3 = vector(mode="numeric", length=n.phen)
# for( i in 3:n.phen ){
# bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
# if (length(unique(bin.gene)) > 1) {
# (MI <- mutual.inf.2(bin.gene #+ runif(Ns, min=-.01, max=.01)
# , m.score.norm.pass3)$MI)/MI.ref
## if( perf.auc$A < 0.5 ){
# ## browser()
## roc = signif(1 - perf.auc$A, digits=3)
## p.val = signif(1- perf.auc$p.value, digits=3)
## abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
## cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
## phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
## roc.list.phen.pass3[i] = 1-perf.auc$A
## p.val.list.phen.pass3[i] = 1- perf.auc$p.value
## } else{
# MI.signif <- signif(MI, digits=3)
## p.val <- signif(perf.auc$p.value, digits=3)
# MI.list.phen.pass3[i] = MI
## p.val.list.phen.pass3[i] = perf.auc$p.value
## }
# } else {
# MI.signif <- NA
## p.val <- NA
# MI.list.phen.pass3[i] = NA
## p.val.list.phen.pass3[i] = NA
# }
## print(paste("ROC=", roc, " p-val=", p.val))
#
## p.val.list[i] = p.val
# phen.descs.pass3[i] <- paste(MI.signif)
# }
## browser()
## MI.threshold = 0.20
## len = length(which(MI.list.phen.pass3[-1:-2] >= MI.threshold))+2
## if( len>40 )
# len=40
# ind.u = match(order(unique(MI.list.phen.pass3[-1:-2]), decreasing=FALSE, na.last=TRUE), MI.list.phen.pass3[-1:-2])
## if( len == 2 ){
## MI.threshold = 0.15
## len = length(which(MI.list.phen.pass3[-1:-2] >= MI.threshold))+2
## }
## if( len == 2 ){
## MI.threshold = 0.2
## len = length(which(MI.list.phen.pass3[-1:-2] >= MI.threshold))+2
## }
## g.order.pass3.1 = c(1, 2, order(p.val.list.phen.pass3[3:n.phen], decreasing=FALSE, na.last=TRUE)+2 )
# g.order.pass3 = c(1, 2, order(MI.list.phen.pass3[-1:-2], decreasing=FALSE, na.last=TRUE)+2 )[1:len] # skip PATHWAY.MUT and COPY.NUMBER
# phen.descs2.pass3 = phen.descs.pass3[g.order.pass3]
# cls.list2.pass3 = cls.list2.pass3[g.order.pass3,]
# cls.labels2.pass3 = cls.labels2.pass3[g.order.pass3,]
# phen.names.pass3 = phen.names.pass3[g.order.pass3]
#
#
# cls.list.mut = ifelse(cls.list2.pass3[-1:-2,] == "MUT", 1, 0)
# cls.list.alt = ifelse(cls.list2.pass3[-1:-2,] == "DEL" | cls.list2.pass3[-1:-2,] == "AMP", 1, 0)
#
## browser()
# if( !is.vector(cls.list.alt) ){
# cls.list.mut.sum = apply(cls.list.mut, MARGIN=2, FUN=sum)
# cls.list.alt.sum = apply(cls.list.alt, MARGIN=2, FUN=sum)
# bin.class.pass3 = cls.list.mut.sum + cls.list.alt.sum
# bin.class.pass3 = ( bin.class.pass3 - min(bin.class.pass3))/(max(bin.class.pass3) - min(bin.class.pass3))
# cls.list.mut.sum = ifelse(cls.list.mut.sum + cls.list.alt.sum > 0, 1, 0)
# cls.list2.pass3[1,] = ifelse( cls.list.mut.sum >= 1, "MUT", "WT")
# cls.list2.pass3[2,] = ifelse( cls.list.alt.sum >= 1, "ALT", "WT")
#
#
# } else{
#
# cls.list2.pass3[2,] = ifelse(cls.list.alt == 1, "ALT", "WT")
# bin.class.pass3 = cls.list.mut+cls.list.alt #+ runif(Ns, min=-.1, max=.1)
# bin.class.pass3 = ( bin.class.pass3 - min(bin.class.pass3))/(max(bin.class.pass3) - min(bin.class.pass3))
# cls.list2.pass3[1,] = ifelse(bin.class.pass3 > 0 , "MUT", "WT")
# }
#
## browser()
#
# if( length(unique(bin.class.pass3)) > 1){
# MI <- mutual.inf.2(bin.class.pass3, m.score.norm.pass3)$MI
# MI.signif <- signif(MI, digits=3)
# MI.list.phen.pass3[1] = MI
# } else{
# MI.signif <- "-"
# MI.list.phen.pass3[1] = NA
# }
# print(paste(format(phen.names.pass3[1], width=12), "mutual.inf =", MI.signif))
# phen.descs2.pass3[1] <- paste(MI.signif)
# for( i in 2 ){ # Recalculate MI for PATHWAY.MUT and COPY.NUMBER
# bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
# if (length(unique(bin.gene)) > 1) {
# MI <- (mutual.inf.2(bin.gene #+ runif(Ns, min=-.01, max=.01)
# , m.score.norm.pass3)$MI)/MI.ref
## if( perf.auc$A < 0.5 ){
# ## browser()
## roc = signif(1 - perf.auc$A, digits=3)
## p.val = signif(1- perf.auc$p.value, digits=3)
## abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
## cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
## phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
## roc.list.phen.pass3[i] = 1-perf.auc$A
## p.val.list.phen.pass3[i] = 1- perf.auc$p.value
## } else{
# MI.signif <- signif(MI, digits=3)
## p.val <- signif(perf.auc$p.value, digits=3)
# MI.list.phen.pass3[i] = MI
## p.val.list.phen.pass3[i] = perf.auc$p.value
## }
# } else {
# MI <- NA
## p.val <- NA
# MI.list.phen.pass3[i] = NA
## p.val.list.phen.pass3[i] = NA
# }
# print(paste(format(phen.names.pass3[i], width=12), "mutual.inf =", MI.signif))
#
## p.val.list[i] = p.val
# phen.descs2.pass3[i] <- paste(MI.signif)
# }
#
## browser()
# model.descs2.pass3 = vector(length = n.models, mode="character")
# MI.list.pass3 = vector( length=n.models, mode="character")
# for (i in 1:n.models) {
# m.score <- m2.pass3[i,]
# m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
## browser()
# if (length(unique(bin.class.pass3)) > 1) {
# MI <- mutual.inf.2(bin.class.pass3, m.score.norm)$MI
# MI.signif <- signif(MI, digits=3)
## p.val <- signif(perf.auc$p.value, digits=3)
# MI.list.pass3[i] = MI
## p.val.list.pass2[i] = perf.auc$p.value
# } else {
# MI.signif <- "-"
# MI.list.pass3[i] = NA
## p.val.list.pass2[i] = NA
# }
# print(paste(format(rownames(m2.pass3)[i], width=30), "mutual.inf =", MI.signif))
#
# model.descs2.pass3[i] <- paste(MI.signif)
# }
# m.order.pass3 = order(MI.list.pass3, na.last=TRUE, decreasing=TRUE)
# m2.pass3 = m2.pass3[m.order.pass3,]
# model.descs2.pass3 = model.descs2.pass3[m.order.pass3]
# s.order.pass3 = order(m2.pass3, decreasing=TRUE)
# m2.pass3 = m2.pass3[,s.order.pass3]
# bin.class.pass3 = bin.class.pass1[s.order.pass3]
# model.names.pass3 = rownames(m2.pass3)
# sample.names2.pass3 = colnames(m2.pass3)
#
# cls.phen2.pass3 <- NULL
# if (is.vector(cls.labels)) {
# classes <- unique(as.vector(cls.list2.pass3))
# cls.phen2.pass3 <- classes
# cls.labels2.pass3 <- match(cls.list2.pass3, cls.phen2.pass3)
# } else {
## browser()
# for (kk in 1:length(cls.list2.pass3[, 1])) {
## browser()
# classes <- unique(cls.list2.pass3[kk,])
## cls.phen2[[kk]] <- classes
# cls.phen2.pass3 <- c(cls.phen2.pass3, classes)
# cls.labels2.pass3[kk,] <- match(cls.list2.pass3[kk,], classes)
# }
# }
## cls.labels2.pass3 = cls.labels2.pass3[1:n.phen.pass3,]
#
#
## browser()
## correl <- cor(t(m2))[, loc]
## m.order <- order(correl, decreasing=decreasing.order)
## correl2 <- correl[m.order]
#
## model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
# phen.list.pass3 <- unlist(cls.phen2.pass3)
#
## colors.list <- ifelse(unlist(cls.phen2) == target.class,
## ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
## ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
# colors.list.pass3 = rep( "gray", length(phen.list.pass3))
# colors.list.pass3[phen.list.pass3=="MUT"] = cls.phen.colors[1]
# colors.list.pass3[phen.list.pass3=="DEL"] = cls.phen.colors[3]
# colors.list.pass3[phen.list.pass3=="AMP"] = cls.phen.colors[4]
# colors.list.pass3[phen.list.pass3=="ALT"] = cls.phen.colors[5]
# phen.names.pass3[1] = "PATHWAY.MUT+COPY.NUMBER"
## browser()
# MSIG.HeatMapPlot.10(V = m2.pass3,
# pathway.mut = bin.class.pass3,
# row.names = model.names.pass3,
# row.names2 = model.descs2.pass3,
# col.labels = cls.labels2.pass3,
# col.classes = cls.phen2.pass3,
# phen.cmap = colors.list.pass3, phen.names = phen.names.pass3,
# phen.names2 = phen.descs2.pass3,
# col.names = sample.names2.pass3, main = paste(tissue, "- Phase 3: Top signature from Phase 2 with all genes (Top 40 genes) (MI)"),
# xlab=" ", ylab=" ", row.norm = row.norm,
# cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
# Close the Phase 1-2 PDF and optionally dump the projection matrix as GCT.
# NOTE(review): m2, sample.names2, model.names2 and model.descs2 are not
# visibly assigned in this MI-based variant (the pass variables are named
# *.pass1/*.pass2) -- verify these names still exist before relying on the
# output.dataset branch.
dev.off()
if (!is.na(output.dataset)) {
V.GCT <- m2
colnames(V.GCT) <- sample.names2
row.names(V.GCT) <- model.names2
write.gct(gct.data.frame = V.GCT, descs = model.descs2, filename =output.dataset)
}
}
OPAM.sort.projection.by.score.7 <- function(
# input.ds,
signatures = "NA",
input.all.pathways.ds,
input.cls,
tissue = "NA",
results.dir,
normalize.score = T,
normalization.type = "zero.one",
model = "NA",
target.phen = NA,
target.class = NA,
user.colors = NA,
decreasing.order = T,
output.dataset = NA,
char.rescale = 1,
cmap.type = 3,
row.norm = T,
u.gene.names.known = "NA",
add.amp.del = FALSE,
#n.random.signatures = 10,
multiple.tissues = FALSE,
cls.has.chrom.locs = FALSE,
file.suffix = "",
skip.iterative = FALSE,
add.mut = FALSE,
n.iter = 5,
pdf.height = 11,
pdf.width = 17,
do.mRMR = F,
skip.step2 = FALSE,
todd.version = FALSE
)
# Calls MSIG.HeatMapPlot.9 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2 does.
# However, it *will* use "model" if it cannot calculate p-values on the gene signatures, which
# happens when every cell line has a genomic aberration.
#
# Runs 3 passes on the data:
# 1st pass: looks at the genes and copy number alterations specified by u.gene.names.known
# 2nd pass: looks at only the top abnormalities (using a p-value cutoff) from the 1st pass, and adjusts
# the PATHWAY.MUT vector accordingly (only according to the genes, not by copy number data)
# 3rd pass: Takes the winning signature from the 2nd pass and then looks all the genes available
#
# Very similar to OPAM.sort.projection.by.score.4, however this version uses mutual.inf instead of
# roc.area to calculate mutual information scores and p-values for PATHWAY.MUT, the vector of total genomic aberrations
# in all samples
#
# Differs from OPAM.sort.projection.by.score.6 by requiring the gct file of expression in
# all pathways by the input tissue ("input.all.pathways.ds")
{
library(gtools)
library(verification)
library(ROCR)
library(MASS)
library(RColorBrewer)
library(heatmap.plus)
dataset.all <- MSIG.Gct2Frame( filename = input.all.pathways.ds)
m.all <- data.matrix(dataset.all$ds)
model.names.all <- dataset.all$row.names
Ns = length(m.all[1,])
sample.names = dataset.all$names
if( multiple.tissues ){
tissue.type <- vector(length=Ns, mode="character")
# temp = strsplit(sample.names, split="_")
for (k in 1:Ns) {
temp <- strsplit(sample.names[k], split="_")
tissue.type[k] <- paste(temp[[1]][2:length(temp[[1]])], collapse="_")
}
tissue.names = unique(tissue.type)
tissue.labels = match(tissue.type, tissue.names)
} else{
tissue.names = tissue
tissue.labels = rep(1, Ns)
}
if( is.na(signatures[1]) ){
stop("Must provide a vector of signature names to evaluate, or specify 'ALL'")
}
## Remove "Commented out" signatures (with # at beginning of name)
if( length(grep("^#", signatures)) > 0){
signatures = signatures[-grep("^#", signatures)]
}
if( signatures[1] == "ALL"){
model.names = model.names.all
m = m.all
model.descs = dataset.all$descs
} else{
model.names = signatures
model.ind = match(signatures, model.names.all)
m = m.all[model.ind,]
model.descs = dataset.all$descs[model.ind]
if( length(model.ind) == 1 ){
m = t(as.matrix(m))
rownames(m) = model.names
}
rm(list=c("m.all", "dataset.all"))
# browser()
}
n.models <- length(m[,1])
temp <- strsplit(input.all.pathways.ds, split="/") # Extract test file name
s <- length(temp[[1]])
test.file.name <- temp[[1]][s]
temp <- strsplit(test.file.name, split=".gct")
test.file.prefix <- temp[[1]][1]
char.res <- 0.013 * n.models + 0.65
# normalize scores
if (normalize.score == T) {
if (normalization.type == "zero.one") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
}
} else if (normalization.type == "z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
}
} else if (normalization.type == "r.z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
}
}
}
CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
cls.labels <- CLS$class.v
cls.phen <- CLS$phen
cls.list <- CLS$class.list
if(cls.list[1,1] == "0" | cls.list[1,1] == "1"){
cls.list = ifelse(cls.list=="1", "MUT", "WT")
}
#browser()
if (is.vector(cls.labels)) {
n.phen <- 1
} else {
n.phen <- length(cls.labels[,1])
}
if (!is.na(user.colors[1])) {
c.test <- user.colors
} else {
if (!is.null(CLS$col.phen)) {
c.test <- CLS$col.phen
} else {
c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"))
}
}
if (!is.null(CLS$phen.names)) {
phen.names <- CLS$phen.names
} else {
phen.names <- "NA"
}
cls.phen.index <- unlist(cls.phen)
cls.phen.colors <- c.test[1:length(cls.phen.index)]
# print("cls.phen.colors:")
# print(cls.phen.colors)
n.classes <- vector(length=n.phen, mode="numeric")
if (n.phen == 1) {
max.classes <- length(cls.phen)
n.classes[1] <- max.classes
} else {
max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
n.classes = unlist(lapply(cls.phen, length))
# for (i in 1:n.phen) {
# n.classes[i] <- length(cls.phen[[i]])
# }
}
pdf.options(height=pdf.height, width=pdf.width, colormodel="rgb", bg="transparent")
phen.names = c("SUMMARY", phen.names)
cls.list = rbind(rep("WT", length=length(cls.list[1,])),
#rep("WT", length=length(cls.list[1,])),
cls.list)
cls.labels = rbind(rep(1, length=length(cls.labels[1,])),
#rep(1, length=length(cls.labels[1,])),
cls.labels)
if( !todd.version ){
print("--- Begin Pass 1 ---")
# browser()
## Remove "Commented out" gene names (with # at beginning of name)
if( length(grep("^#", u.gene.names.known)) > 0){
u.gene.names.known = u.gene.names.known[-grep("^#", u.gene.names.known)]
}
if ( add.amp.del ){
u.gene.names.known = c( u.gene.names.known, paste(u.gene.names.known, "_AMP", sep=""),
paste(u.gene.names.known, "_DEL", sep="") )
}
if(add.mut){
u.gene.names.known = paste(u.gene.names.known, "_MUT", sep="")
}
phen.pass1 = c( u.gene.names.known )
## Find chromosomal locations of genes specified
## See "if( find.chromosomal.locations)" for more transparent code
if( cls.has.chrom.locs ){
library(org.Hs.eg.db)
phen.pass1.split = strsplit(phen.pass1, split="_")
phen.pass1.noampdel = unlist( lapply(phen.pass1.split, function(x) x[1]))
phen.pass1.egIDs = mget(phen.pass1.noampdel, org.Hs.egALIAS2EG, ifnotfound=NA)
# phen.pass1.no.chrom.loc = which(lapply(lapply(phen.pass1.egIDs, is.na), sum) > 0)
phen.pass1.locs.list = lapply(phen.pass1.egIDs, mget, org.Hs.egMAP)
phen.pass1.locs = vector(mode="character", length=length(phen.pass1))
# phen.pass1.locs[phen.pass1.no.chrom.loc] = "NA"
phen.pass1.locs = unlist(lapply(phen.pass1.locs.list, function(x) paste(unlist(x), collapse="_")))
phen.pass1.w.locs = paste(phen.pass1.noampdel, ".", phen.pass1.locs, sep="")
phen.pass1.ampdel.suffix = unlist(lapply(phen.pass1.split, function(x) x[2]))
#phen.pass1.no.suffix = which(is.na(phen.pass1.ampdel.suffix))
#phen.pass1[phen.pass1.no.suffix] = phen.pass1.w.locs[phen.pass1.no.suffix]
#phen.pass1[-phen.pass1.no.suffix] = paste(phen.pass1.w.locs[-phen.pass1.no.suffix], "_", phen.pass1.ampdel.suffix[-phen.pass1.no.suffix], sep="")
phen.pass1 = paste(phen.pass1.w.locs, "_", phen.pass1.ampdel.suffix, sep="")
}
## Was originally immediately after "ind.phen.pass1 = ..." but since now want to find the chromosomal
## locations of the genes, have to first find the indices of the genes specified at the onset of the
## program, THEN find all the chromosomal locations
#browser()
phen.pass1 = c("SUMMARY", phen.pass1)
ind.phen.pass1 = which( phen.names %in% phen.pass1 )
# phen.names[1] = "SUMMARY"
phen.pass1 = phen.names[ind.phen.pass1]
n.phen.pass1 = length(phen.pass1)
MI.list.pass1 = vector( length=n.models, mode="numeric" )
cls.list.pass1 = cls.list[ind.phen.pass1,]
cls.list.pass1.2 = ifelse(cls.list.pass1 == "WT", 0, 1)
cls.labels.pass1 = cls.labels[ind.phen.pass1,]
# browser()
if (!is.na(target.phen)) {
# SUMMARY track = per-sample count of aberrations across the selected
# phenotype rows (2..n); with a single phenotype just use its 0/1 row.
if( length( phen.pass1) > 2 ){
bin.class.pass1 = apply( cls.list.pass1.2[-1,], MARGIN=2, FUN=sum)
} else{ bin.class.pass1 = cls.list.pass1.2[2,] }
# Normalize bin.class.pass1 to [0, 1]
if( length(unique(bin.class.pass1)) > 1){
bin.class.pass1 = normalize(bin.class.pass1) #( bin.class.pass1 - min(bin.class.pass1))/(max(bin.class.pass1) - min(bin.class.pass1))
} else if ( length(unique(bin.class.pass1)) == 1){
# BUG FIX: this previously assigned to an unused variable `bin.class`,
# leaving bin.class.pass1 untouched in the degenerate (constant) case.
bin.class.pass1 = rep(1, length(cls.list[1,]))
}
cls.list.pass1[1,] = ifelse(bin.class.pass1 > 0, "MUT", "WT")
} else {
# NOTE(review): cls.list2 is not defined at this point in the function --
# this branch likely predates a rename; verify before relying on it.
bin.class.pass1 <- ifelse(cls.list[1,] == cls.list2[1,1], 1, 0)
}
#browser()
### Make initial heatmap ###
cls.phen2.pass1 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(cls.list.pass1)
cls.phen2.pass1 <- classes
cls.labels.pass1 <- match(cls.list.pass1, cls.phen2.pass1)
} else {
for (kk in 1:length(cls.list.pass1[, 1])) {
classes <- unique(cls.list.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
cls.labels.pass1[kk,] <- match(cls.list.pass1[kk,], classes)
}
}
cls.labels.pass1 = cls.labels.pass1[1:length(phen.pass1),]
phen.list.pass1 <- unlist(cls.phen2.pass1)
colors.list = rep( "gray", length(phen.list.pass1))
colors.list[phen.list.pass1=="MUT"] = cls.phen.colors[1]
filename <- paste(results.dir, test.file.prefix, file.suffix, ".Step0", sep="")
pdf(file=paste(filename, ".pdf", sep=""), height = pdf.height, width = pdf.width )
#quartz(height = 11, width = 17)
if( multiple.tissues ){
#browser()
#quartz(height = 11, width = 17)
MSIG.HeatMapPlot.10.multiple.tissues(V = m,
pathway.mut = bin.class.pass1,
row.names = model.names,
col.labels = cls.labels.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list,
phen.names = phen.pass1,
col.names = sample.names,
main = paste(tissue, "- Initial Heatmap ('Step 0')"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F,
tissue.names = tissue.names,
tissue.labels = tissue.labels)
} else{
MSIG.HeatMapPlot.10(V = m,
pathway.mut = bin.class.pass1,
row.names = model.names,
col.labels = cls.labels.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list,
phen.names = phen.pass1,
col.names = sample.names,
main = paste(tissue, "- Initial Heatmap ('Step 0')"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
}
dev.off()
# Rank all signatures by MI against the Pass-1 summary track, then sort
# samples by the top signature's score. Falls back to correlation ordering
# around `model` when the summary track is degenerate (MI undefined).
model.descs2.pass1 = vector(length = n.models, mode="character")
if( length(unique(bin.class.pass1)) > 1 ){
if( n.models > 1 ){
MI.results = mutual.inf.3.v2(bin.class.pass1, m,
target.vector.name="SUMMARY",
tissue=tissue)
MI.list.pass1 = MI.results$MI
model.descs2.pass1 <- sapply(MI.results$MI, FUN=signif, 3)
m.order.pass1 = order(MI.list.pass1, decreasing=TRUE, na.last=TRUE)
m2.pass1 <- m[m.order.pass1, ]
s.order.pass1 <- order(m2.pass1[1,], decreasing = TRUE )
m2.pass1 <- m2.pass1[, s.order.pass1]
} else{
#browser()
# NOTE(review): elsewhere in this file the $MI component is extracted
# from mutual.inf.2()'s return value before use; here the raw return
# values are divided directly -- confirm mutual.inf.2 returns a plain
# numeric in this code path or add $MI on both calls.
MI.ref = mutual.inf.2(bin.class.pass1, bin.class.pass1)
MI.list.pass1 = MI.results =
mutual.inf.2(bin.class.pass1, m[1,])/MI.ref
model.descs2.pass1 <- signif(MI.results, digits=3)
m2.pass1 <- m ; m.order.pass1 = 1
s.order.pass1 <- order(m2.pass1[1,], decreasing = TRUE )
m2.pass1 <- t(as.matrix(m[, s.order.pass1]))
rownames(m2.pass1) = model.names
}
} else{
MI.list.pass1 = rep(NA, n.models)
FDR.list.pass1 = rep(NA, n.models)
model.descs2.pass1 = rep(" - (FDR = - )", n.models)
if( n.models > 1 ){
loc <- match(model, model.names)
s.order.pass1 <- order(m[loc,], decreasing = decreasing.order)
m2.pass1 <- m[, s.order.pass1]
correl <- cor(t(m2.pass1))[, loc]
m.order.pass1 <- order(correl, decreasing=T)
m2.pass1 <- m2.pass1[m.order.pass1, ]
} else{
# NOTE(review): s.order.pass1 is used here before being assigned in
# this branch (n.models == 1 with a degenerate summary track), which
# would raise an "object not found" error -- this path appears never
# to have been exercised; verify and define s.order.pass1 first.
m2.pass1 <- t(as.matrix(m[, s.order.pass1]))
rownames(m2.pass1) = model.names
m.order.pass1 = 1 }
}
MI.list.pass1 = MI.list.pass1[m.order.pass1]
bin.class.pass1 = bin.class.pass1[s.order.pass1]
model.descs2.pass1.all = model.descs2.pass1
model.descs2.pass1 = model.descs2.pass1[m.order.pass1]
sample.names2.pass1 <- colnames(m2.pass1)
model.names.pass1 <- rownames(m2.pass1)
print(matrix(c(model.names.pass1, model.descs2.pass1), ncol=2), quote=F)
# Reorder the class labels/lists to the new pass-1 column order, then compute
# per-phenotype mutual information against the winning model's score vector.
if (is.vector(cls.labels)) {
cls.labels2.pass1 <- cls.labels.pass1[s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[s.order.pass1]
} else {
cls.labels2.pass1 <- cls.labels.pass1[, s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[, s.order.pass1]
}
tissue.labels.pass1 = tissue.labels[s.order.pass1]
sample.names2 <- colnames(m2.pass1)
# Index of the top-ranked model within the reordered matrix (row 1 by
# construction; computed defensively via name lookup).
winning.model.ind.pass1 = which(model.names.pass1[1] == rownames(m2.pass1))
MI.list.phen.pass1 = vector(mode="numeric", length=n.phen.pass1)
phen.descs.pass1 = vector(mode="character", length=n.phen.pass1)
# Phenotype row 1 is the pathway summary; it inherits the top model's MI.
if( length(unique(bin.class.pass1)) > 1){
MI.signif <- signif(MI.list.pass1[1], digits=3)
MI.list.phen.pass1[1] = MI.list.pass1[1]
phen.descs.pass1[1] = model.descs2.pass1[1]
} else{
MI.signif <- "-"
MI.list.phen.pass1[1] = NA
}
print(paste(format(phen.pass1[1], width=12), "mutual.inf =", MI.signif
))
print(proc.time()-t1)
print(date())
phen.descs.pass1[1] <- paste(MI.signif,
sep="")
# browser()
if( n.phen.pass1 > 2 ){
# Binarize the remaining phenotype rows (non-"WT" -> 1) and score each
# against the winning model; then order phenotypes by decreasing MI,
# keeping the summary row fixed in position 1.
bin.gene.matrix = ifelse(cls.list2.pass1[-1,]=="WT", 0, 1)
MI.results = mutual.inf.3.v2(
m2.pass1[winning.model.ind.pass1,],
bin.gene.matrix)
MI.list.phen.pass1[-1] = MI.results$MI
phen.descs.pass1[-1] = sapply(MI.results$MI, FUN=signif, 3)
g.order.pass1 = c(1, order(MI.list.phen.pass1[-1], decreasing=TRUE, na.last=TRUE)+1)
MI.list.phen.pass1 = MI.list.phen.pass1[g.order.pass1]
phen.descs2.pass1 = phen.descs.pass1[g.order.pass1]
cls.list2.pass1 = cls.list2.pass1[g.order.pass1,]
phen.names.pass1 = phen.pass1[g.order.pass1]
} else{
# Exactly two phenotype rows: nothing to reorder; reuse the summary's
# description for the single remaining phenotype.
# bin.gene.matrix = ifelse(cls.list2.pass1[-1,]=="WT", 0, 1)
# MI.ref = mutual.inf.2(m2.pass1[winning.model.ind.pass1,],
# m2.pass1[winning.model.ind.pass1,])
# MI.results = mutual.inf.2(
# m2.pass1[winning.model.ind.pass1,],
# bin.gene.matrix)/MI.ref
# MI.list.phen.pass1[-1] = MI.results
phen.descs.pass1[-1] = phen.descs.pass1[1]
g.order.pass1 = 1:2
# MI.list.phen.pass1 = MI.list.phen.pass1[g.order.pass1]
phen.descs2.pass1 = phen.descs.pass1
# cls.list2.pass1 = cls.list2.pass1[g.order.pass1,]
phen.names.pass1 = phen.pass1
}
print(matrix(c(phen.names.pass1, phen.descs2.pass1), ncol=2), quote=F)
print(proc.time()-t1)
print(date())
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
# Rebuild the class vocabulary / integer labels after the pass-1 reordering,
# then render the "Step 1" heatmap (models ranked by MI).
cls.phen2.pass1 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(cls.list2.pass1)
cls.phen2.pass1 <- classes
cls.labels2.pass1 <- match(cls.list2.pass1, cls.phen2.pass1)
} else {
for (kk in 1:length(cls.list2.pass1[, 1])) {
classes <- unique(cls.list2.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
cls.labels2.pass1[kk,] <- match(cls.list2.pass1[kk,], classes)
}
}
cls.labels2.pass1 = cls.labels2.pass1[1:n.phen.pass1,]
phen.list.pass1 <- unlist(cls.phen2.pass1)
# Color the phenotype legend by aberration type; unmatched entries stay gray.
colors.list.pass1 = rep( "gray", length(phen.list.pass1))
colors.list.pass1[phen.list.pass1=="MUT"] = cls.phen.colors[1]
colors.list.pass1[phen.list.pass1=="DEL"] = cls.phen.colors[3]
colors.list.pass1[phen.list.pass1=="AMP"] = cls.phen.colors[4]
colors.list.pass1[phen.list.pass1=="ALT"] = cls.phen.colors[5]
filename <- paste(results.dir, test.file.prefix, file.suffix, ".Step1.MI-HXY", sep="")
# pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 9)
pdf(file=paste(filename, ".pdf", sep=""), height = pdf.height, width = pdf.width )
#browser()
# windows(width=12, height=8)
if( multiple.tissues ){
MSIG.HeatMapPlot.10.multiple.tissues(V = m2.pass1,
pathway.mut = bin.class.pass1,
row.names = model.names.pass1,
row.names2 = model.descs2.pass1,
col.labels = cls.labels2.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list.pass1,
phen.names = phen.names.pass1,
phen.names2 = c(" ", phen.descs2.pass1),
col.names = sample.names2.pass1,
main = paste(tissue, "- Step 1: Known KRAS Pathway Abnormalities (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F,
tissue.names = tissue.names,
tissue.labels = tissue.labels)
} else{
MSIG.HeatMapPlot.10(V = m2.pass1,
pathway.mut = bin.class.pass1,
row.names = model.names.pass1,
row.names2 = model.descs2.pass1,
col.labels = cls.labels2.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list.pass1,
phen.names = phen.names.pass1,
phen.names2 = phen.descs2.pass1,
col.names = sample.names2.pass1,
main = paste(tissue, "- Step 1: Known KRAS Pathway Abnormalities (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
}
dev.off()
# stop("Don't do Step 2!")
### Begin Pass 2 ###
# Pass 2: keep only phenotypes whose pass-1 MI clears a (progressively
# relaxed) threshold, rebuild the pathway summary from the survivors, and
# plot the intermediate "Step 1.5" heatmap.
# NOTE(review): this unbraced if() only gates the print() on the next line,
# which reads oddly given the skip.step2 flag name — it looks like it may
# have been intended to gate (or skip) the whole Step 2 section; confirm.
if( n.phen.pass1 == 2 || skip.step2 == T )
print( "--- Begin Step 2 ---")
# Bail out (closing the open PDF device) if MI could not be computed.
if( is.na(MI.list.pass1[1]) || is.na(MI.list.phen.pass1[1]) ){
dev.off()
return()
}
# Try thresholds from strictest to loosest until at least one phenotype
# (beyond the summary row) passes.
MI.thresholds = c(0.2, 0.1, 0.08, 0.05, 0.03, 0.025, 0.02, 0.015, 0.01, 0)
# MI.threshold = 0.03
ind.top.MI = vector(mode="integer")
MI.i = 0
# FDR.i = 0
# browser()
while( length(ind.top.MI) < 1)
{
MI.i = MI.i + 1
# FDR.i = FDR.i + 1
if( MI.i > length(MI.thresholds)){
dev.off()
print("Selected genomic aberrations do not have
positive mutual information with a low enough false
discovery rate with the selected pathways")
return()
}
ind.top.MI = which( MI.list.phen.pass1[-1] >= MI.thresholds[MI.i]) +1 #& MI.list.phen.pass1[-1] > 0
# ) + 1
}
# Surviving phenotype rows: summary (row 1) plus those above threshold.
ind.MI.threshold = c(1, ind.top.MI)
n.phen.pass2 = length(ind.MI.threshold)
cls.list2.pass2 = cls.list2.pass1[ind.MI.threshold,]
phen.names.pass2 = phen.names.pass1[ind.MI.threshold]
cls.labels2.pass2 = cls.labels2.pass1[ind.MI.threshold,]
# browser()
# Binary encodings: any non-WT (.2) and copy-number-only (.3) aberrations.
cls.list2.pass2.2 = ifelse( cls.list2.pass2 == "WT", 0, 1)
cls.list2.pass2.3 = ifelse( cls.list2.pass2 == "DEL" | cls.list2.pass2 == "AMP", 1, 0)
if( n.phen.pass2 > 2 ){
# Rebuild the summary row: per-sample aberration count, rescaled to [0,1];
# any sample with >=1 aberration is relabeled "MUT".
pathway.mut.pass2 = apply(cls.list2.pass2.2[2:n.phen.pass2,], MARGIN=2, FUN=sum)
bin.class.pass2 = pathway.mut.pass2/length(pathway.mut.pass2)
bin.class.pass2 = ( bin.class.pass2 - min(bin.class.pass2))/(max(bin.class.pass2) - min(bin.class.pass2))
cls.list2.pass2[1,] = ifelse( bin.class.pass2 > 0, "MUT", "WT")
} else{
pathway.mut.pass2 = ifelse( cls.list2.pass2.2[2,] == 1, "MUT", "WT")
bin.class.pass2 = ifelse( pathway.mut.pass2 == "MUT", 1, 0 ) #+ runif(Ns, min=-.05, max=.05)
cls.list2.pass2[1,] = pathway.mut.pass2
}
MI.list.pass2 = vector( length=n.models, mode="double" )
#browser()
#### Print Step 2's initial heatmap ###
# Rebuild labels for the intermediate ("Step 1.5") plot.
cls.phen2.pass1.5 <- NULL
cls.labels2.pass1.5 = cls.labels2.pass2
if (is.vector(cls.labels)) {
classes <- unique(cls.list2.pass1)
cls.phen2.pass1.5 <- classes
cls.labels2.pass1.5 <- match(cls.list2.pass1, cls.phen2.pass1.5)
} else {
for (kk in 1:length(cls.list2.pass2[, 1])) {
classes <- unique(cls.list2.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1.5 <- c(cls.phen2.pass1.5, classes)
cls.labels2.pass1.5[kk,] <- match(cls.list2.pass2[kk,], classes)
}
}
# cls.labels2.pass1.5 = cls.labels2.pass1.5[1:n.phen.pass2,]
phen.list.pass1.5 <- unlist(cls.phen2.pass1.5)
# NOTE(review): this color vector is sized by n.phen.pass1 but indexed by
# phen.list.pass1.5 (the unlisted class vocabulary) below — the lengths may
# differ; the analogous code elsewhere uses length(phen.list...). Confirm.
colors.list.pass1.5 = rep( "gray", n.phen.pass1)
colors.list.pass1.5[phen.list.pass1.5=="MUT"] = cls.phen.colors[1]
filename <- paste(results.dir, test.file.prefix, file.suffix, ".Step1.5.Heatmap", sep="")
pdf(file=paste(filename, ".pdf", sep=""), height = pdf.height, width = pdf.width )
if( multiple.tissues ){
MSIG.HeatMapPlot.10.multiple.tissues(V = m2.pass1,
pathway.mut = bin.class.pass2,
row.names = model.names.pass1,
col.labels = cls.labels2.pass1.5,
col.classes = cls.phen2.pass1.5,
phen.cmap = colors.list.pass1.5,
phen.names = phen.names.pass1[ind.MI.threshold],
col.names = sample.names2.pass1,
main = paste(tissue, "- Post-Step 1, Pre-Step 2 (Step 1.5)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F,
tissue.names = tissue.names,
tissue.labels = tissue.labels)
} else{
MSIG.HeatMapPlot.10(V = m2.pass1,
pathway.mut = bin.class.pass2,
row.names = model.names.pass1,
col.labels = cls.labels2.pass1.5,
col.classes = cls.phen2.pass1.5,
phen.cmap = colors.list.pass1.5,
phen.names = phen.names.pass1[ind.MI.threshold],
col.names = sample.names2.pass1,
main = paste(tissue, "- Post-Step 1, Pre-Step 2 (Step 1.5)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F) }
dev.off()
# browser()
#pdf(file=paste(tissue, n.randomizations, "randomizations.Step2", "pdf", sep="."))
# Pass 2 scoring: same structure as pass 1 — MI of each model against the
# rebuilt summary vector, reorder rows/columns, then per-phenotype MI and
# the "Step 2" heatmap.
model.descs2.pass2 = vector(length = n.models, mode="character")
if( length(unique(bin.class.pass2)) > 1 ){
if( n.models > 1 ){
MI.results = mutual.inf.3.v2(bin.class.pass2, m2.pass1,
target.vector.name="SUMMARY",
)
MI.list.pass2 = MI.results$MI
model.descs2.pass2 = sapply(MI.results$MI, signif, 3)
m.order.pass2 = order(MI.list.pass2, decreasing=TRUE, na.last=TRUE)
m2.pass2 <- m2.pass1[m.order.pass2, ]
s.order.pass2 <- order(m2.pass2[1,], decreasing = TRUE )
m2.pass2 <- m2.pass2[, s.order.pass2]
} else{
# Single-model case: MI normalized by the summary's self-information.
MI.ref = mutual.inf.2(bin.class.pass2, bin.class.pass2)
MI.list.pass2 = MI.results =
mutual.inf.2(bin.class.pass2,
m2.pass1[1,])/MI.ref
#MI.list.pass2 = MI.results$MI
model.descs2.pass2 = signif(MI.list.pass2, digits=3)
m.order.pass2 = 1 #order(MI.list.pass2, decreasing=TRUE, na.last=TRUE)
m2.pass2 <- m2.pass1#[m.order.pass2, ]
s.order.pass2 <- order(m2.pass2[1,], decreasing = TRUE )
m2.pass2 <- t(as.matrix(m2.pass2[, s.order.pass2]))
rownames(m2.pass2) = model.names.pass1
}
} else{
# Degenerate summary: no MI; order by the target model's score instead.
MI.list.pass2 = rep(NA, n.models)
model.descs2.pass2 = rep(" - ", n.models)
if( n.models > 1 ){
loc <- match(model, model.names)
s.order.pass2 <- order(m2.pass1[loc,], decreasing = decreasing.order)
m2.pass2 <- m2.pass1[, s.order.pass2]
correl <- cor(t(m2.pass2))[, loc]
m.order.pass2 <- order(correl, decreasing=T)
m2.pass2 <- m2.pass2[m.order.pass2, ]
} else{
#loc <- match(model, model.names)
s.order.pass2 <- order(m2.pass1[1,], decreasing = decreasing.order)
m2.pass2 <- t(as.matrix(m2.pass1[, s.order.pass2]))
rownames(m2.pass2) = model.names.pass1
m.order.pass2 = 1
# correl <- cor(t(m2.pass2))[, loc]
# m.order.pass2 <- order(correl, decreasing=T)
# m2.pass2 <- m2.pass2[m.order.pass2, ]
}
}
# Apply pass-2 orders to bookkeeping vectors.
MI.list.pass2 = MI.list.pass2[m.order.pass2]
bin.class.pass2 = bin.class.pass2[s.order.pass2]
tissue.labels = tissue.labels[s.order.pass2]
model.descs2.pass2 = model.descs2.pass2[m.order.pass2]
sample.names2.pass2 <- colnames(m2.pass2)
model.names.pass2 <- rownames(m2.pass2)
print(matrix(c(model.names.pass2, model.descs2.pass2), ncol=2), quote=F)
# browser()
if (is.vector(cls.labels)) {
cls.labels2.pass2 <- cls.labels2.pass2[s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[s.order.pass2]
} else {
cls.labels2.pass2 <- cls.labels2.pass2[, s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[, s.order.pass2]
}
tissue.labels.pass2 = tissue.labels.pass1[s.order.pass2]
sample.names2 <- colnames(m2.pass2)
winning.model.ind.pass2 = which(model.names.pass2[1] == rownames(m2.pass2))
MI.list.phen.pass2 = vector(mode="numeric", length=n.phen.pass2)
phen.descs.pass2 = vector(mode="character", length=n.phen.pass2)
# Summary phenotype (row 1) inherits the top model's MI, as in pass 1.
if( length(unique(bin.class.pass2)) > 1){
MI.signif <- signif(MI.list.pass2[1], digits=3)
MI.list.phen.pass2[1] = MI.list.pass2[1]
} else{
MI.signif <- "-"
MI.list.phen.pass2[1] = NA
}
print(paste(format(phen.names.pass2[1], width=12), "mutual.inf =", MI.signif#, " FDR =", FDR.signif
))
print(proc.time()-t1)
print(date())
phen.descs.pass2[1] <- paste(MI.signif) #, " (FDR = ", FDR.signif, ")", sep="")
if( n.phen.pass2 == 2 ){
# Only one phenotype beyond the summary: copy the summary's stats.
phen.descs.pass2[2] <- phen.descs.pass2[1]
# FDR.list.phen.pass2[2] = FDR.list.phen.pass2[1]
MI.list.phen.pass2[2] = MI.list.phen.pass2[1]
g.order.pass2 = c(1,2)
print(paste(format(phen.names.pass2[2], width=12), "mutual.inf =", MI.signif)) #, " FDR =", FDR.signif))
} else{
# Score every surviving phenotype against the winning model; phenotypes
# with zero aberrations get NA / " - " so they sort last.
bin.gene.matrix = ifelse(cls.list2.pass2[-1,]=="WT", 0, 1)
n.aberrations = apply(bin.gene.matrix, MARGIN=1, FUN=sum)
MI.results = mutual.inf.3.v2(
m2.pass2[winning.model.ind.pass2,],
bin.gene.matrix,
# NOTE(review): phen.pass2 is not defined anywhere visible in this
# section (other code uses phen.names.pass2) — verify this name.
target.vector.name=phen.pass2,
# n.randomizations = n.randomizations
)
MI.list.phen.pass2[-1] = MI.results$MI
phen.descs.pass2[-1] = sapply(MI.results$MI, signif, 3)
ind.zeros = which(n.aberrations==0) + 1
MI.list.phen.pass2[ind.zeros] = NA
# FDR.list.phen.pass2[ind.zeros] = NA
phen.descs.pass2[ind.zeros] = " - "
g.order.pass2 = c(1, order(MI.list.phen.pass2[-1], decreasing=TRUE, na.last=TRUE)+1) # skip PATHWAY.MUT
}
#dev.off()
phen.descs2.pass2 = phen.descs.pass2[g.order.pass2]
cls.list2.pass2 = cls.list2.pass2[g.order.pass2,]
phen.names.pass2 = phen.names.pass2[g.order.pass2]
# browser()
# Recompute cls.list2 as some mutations or copy numbers may have been removed
print(matrix(c(phen.names.pass2, phen.descs2.pass2), ncol=2), quote=F)
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass2 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(as.vector(cls.list2.pass2))
cls.phen2.pass2 <- classes
cls.labels2.pass2 <- match(cls.list2.pass2, cls.phen2.pass2)
} else {
for (kk in 1:length(cls.list2.pass2[, 1])) {
classes <- unique(cls.list2.pass2[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass2 <- c(cls.phen2.pass2, classes)
cls.labels2.pass2[kk,] <- match(cls.list2.pass2[kk,], classes)
}
}
cls.labels2.pass2 = cls.labels2.pass2[1:n.phen.pass2,]
phen.list.pass2 <- unlist(cls.phen2.pass2)
# Legend colors by aberration type, as in Step 1.
colors.list.pass2 = rep( "gray", length(phen.list.pass2))
colors.list.pass2[phen.list.pass2=="MUT"] = cls.phen.colors[1]
colors.list.pass2[phen.list.pass2=="DEL"] = cls.phen.colors[3]
colors.list.pass2[phen.list.pass2=="AMP"] = cls.phen.colors[4]
colors.list.pass2[phen.list.pass2=="ALT"] = cls.phen.colors[5]
filename <- paste(results.dir, test.file.prefix, file.suffix, ".Step2.MI-HXY", sep="")
pdf(file=paste(filename, ".pdf", sep=""), height = pdf.height, width = pdf.width )
if( multiple.tissues ){
MSIG.HeatMapPlot.10.multiple.tissues(V = m2.pass2,
pathway.mut = bin.class.pass2,
row.names = model.names.pass2,
row.names2 = model.descs2.pass2,
col.labels = cls.labels2.pass2,
col.classes = cls.phen2.pass2,
phen.cmap = colors.list.pass2, phen.names = phen.names.pass2,
phen.names2 = c(" ", phen.descs2.pass2),
col.names = sample.names2.pass2, main = paste(tissue,
"- Step 2: only MI >=", MI.thresholds[MI.i],"from Step 1 (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F,
tissue.names = tissue.names,
tissue.labels = tissue.labels)
} else{
MSIG.HeatMapPlot.10(V = m2.pass2,
pathway.mut = bin.class.pass2,
row.names = model.names.pass2,
row.names2 = model.descs2.pass2,
col.labels = cls.labels2.pass2,
col.classes = cls.phen2.pass2,
phen.cmap = colors.list.pass2, phen.names = phen.names.pass2,
phen.names2 = phen.descs2.pass2,
col.names = sample.names2.pass2, main = paste(tissue,
"- Step 2: only MI >=", MI.thresholds[MI.i],"from Step 1 (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
}
dev.off()
} else{ print("'todd.version' on -- skipping Steps 1 and 2 and simply 'filling in' from scratch")}
### 3rd Pass ###
# Pass 3 setup: either reuse the raw inputs ("todd.version" mode skips the
# Step 1/2 filtering) or carry forward the pass-2 ordering. Also prepares
# the diverging red/blue color ramp and the initial "explained" vector
# (samples whose summary row is non-WT).
print( "--- Begin Pass 3 (Iterative Method)---")
print("2 in explained vector = previous explained vector 1 = new additions")
if( todd.version ){
model.names.pass2 = rownames(m)
m2.pass3 = m2.pass2 = m
cls.list2.pass3 = cls.list2.pass2 = cls.list
cls.labels2.pass3 = cls.labels2.pass2 = cls.labels
phen.names.pass2 = phen.names
file.suffix = paste(file.suffix, "_todd.version", sep="")
} else{
m2.pass3 = m2.pass2
# Apply both pass-1 and pass-2 column permutations to the original labels.
cls.list2.pass3 = cls.list[, s.order.pass1][, s.order.pass2]
cls.labels2.pass3 = cls.labels[,s.order.pass1][,s.order.pass2]
}
model.names.pass3 = rownames(m2.pass3)
sample.names2.pass3 = colnames(m2.pass3)
n.phen.pass3 = 40
# Accumulators for the iterative search below.
top.genes.ind = NULL
top.genes.names = NULL
top.genes.vectors = NULL
top.genes.MI = NULL
top.diffs = NULL
explained.vectors = NULL
bin.gene.matrix.3 = ifelse(cls.list2.pass3[-1,]=="WT", 0, 1)
# Column index of the sample closest to the median of the top model's score;
# used to split the color ramp into red (above) and blue (below) halves.
mid.point <- which.min(abs(m2.pass2[1,] - quantile(m2.pass2[1,], 0.5)))
grey.and.black = c("#C0C0C0", "#000000")
pathway.name = model.names.pass2[1]
MI.ref = mutual.inf.2(m2.pass2[1,], m2.pass2[1,])
# Build a 512-entry blue-white-red palette.
mycol <- vector(length=512, mode = "numeric")
for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
mycol <- rev(mycol)
explained.initial = ifelse(
cls.list2.pass2[1,] =="WT", 0, 1)
explained.vectors = rbind(explained.vectors, explained.initial)
explained = explained.initial
# Baseline: MI of the initial explained vector vs. the top model's scores.
explained.MI.initial = mutual.inf.2(explained, m2.pass2[1,])/MI.ref
print(paste("explained.MI.initial =", explained.MI.initial))
print(explained)
cex.axis = 1
ncolors <- length(mycol)
# Step 3 (iterative): greedily add, one per iteration, the aberration with
# the highest MI against the top model's scores among still-unexplained
# samples; track the cumulative "explained" vector and plot each iteration.
if(!skip.iterative){
samples.without.mut = ifelse(
cls.list2.pass2[2,] =="WT", 1, 0)
#browser()
# Masks: unexplained samples, or those in the "blue" (below-median) half.
wo.mut.or.blue = ifelse(c(samples.without.mut[1:mid.point],
rep(1, length=(Ns - mid.point)) )==1, TRUE, FALSE)
wo.mut.and.red = ifelse(c(samples.without.mut[1:mid.point],
rep(0, length=(Ns - mid.point)) )==1, TRUE, FALSE)
pdf(file=paste(results.dir, test.file.prefix, file.suffix, ".Step3_iterative.pdf", sep=""),
height=8.5, width=11)
#par(mar = c(1, 15, 1, 5))
## If we had naively searched the space without removing the explained cell lines
MI.results = mutual.inf.3.v2(m2.pass2[1,], bin.gene.matrix.3)
#browser()
# +1 / -1 offsets throughout translate between bin.gene.matrix.3 row
# indices and phenotype indices (matrix excludes the summary row).
MI.order = order(MI.results$MI, decreasing=TRUE, na.last=TRUE)+1
top10.names = c( #paste(c(phen.names.pass2[-1], "(from Step 2)"), collapse=" " ),
phen.names[MI.order[1:40]])
top10.MI = c( #signif(explained.MI.initial, digits=4),
signif(MI.results$MI[MI.order[1:40]-1], digits=4))
top10.labels = rbind(#explained+1,
cls.labels2.pass3[MI.order[1:40],])
par(mar = c(1, 15, 1, 5))
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(1, 8), FALSE)
max.v <- max(max(m2.pass2[1,]), -min(m2.pass2[1,]))
# Score strip: red half-scale above mid.point, blue half-scale below.
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2,
(ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))
image(1:length(m2.pass2[1,]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE,
main="Naive Step 3 without exclusion of Step 2 aberrations", sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 <- top10.labels
V1 <- apply(V1, MARGIN=2, FUN=rev)
# max.v <- max(max(V1), -min(V1))
# V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
image(1:length(m2.pass2[1,]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black, axes=FALSE,
main="", #paste("step:", i),
sub = "", xlab= "", ylab="")
axis(2, at=1:dim(V1)[1], labels=rev(top10.names), adj= 0.5, tick=FALSE, las = 1, cex.axis=1,
font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev(top10.MI), adj= 0.5, tick=FALSE, las = 1, cex.axis=1,
font.axis=1, line=-1)
if( todd.version ){
print(top10.labels[1:5,])
browser()
explained.prev = top10.labels[1,]-1
explained.MI.prev = top10.MI[1]
} else{
explained.prev = explained
explained.MI.prev = explained.MI.initial
}
# Greedy loop: each iteration rescans only samples in wo.mut.or.blue.
for( i in 1:n.iter){
print(paste("iteration:", i))
MI.results = mutual.inf.3.v2(
m2.pass2[1,wo.mut.or.blue],
bin.gene.matrix.3[,wo.mut.or.blue] )
MI.order = order(MI.results$MI, decreasing=TRUE, na.last=TRUE)+1
top10.names = phen.names[MI.order[1:10]]
top10.MI = MI.results$MI[MI.order[1:10]-1]
# NOTE(review): cls.labels.pass3 is not defined in this visible section
# (elsewhere this is cls.labels2.pass3) — likely a typo; verify.
top10.labels = cls.labels.pass3[MI.order[1:10],wo.mut.or.blue]
top.genes.ind = c(top.genes.ind, MI.order[1] )
# Count aberrations tied with the winner at the same MI value.
num.redundant = length(which(MI.results$MI == MI.results$MI[MI.order[1]-1]))-1
top.genes.names = c( top.genes.names, paste(phen.names[MI.order[1]], "+", num.redundant,
ifelse(num.redundant==1, "other", "others")))
mut = bin.gene.matrix.3[(MI.order[1]-1),]
explained = ifelse(mut+explained.prev>0, 1, 0)
explained.MI = mutual.inf.2( m2.pass2[1,wo.mut.or.blue], explained[wo.mut.or.blue])/MI.ref
MI.diff = explained.MI - explained.MI.prev
print(paste("Explained.MI = ", explained.MI,
" MI.diff = ", ifelse(MI.diff<0, "-", "+"),
signif(abs(MI.diff), digits=4), sep=""))
explained.vectors = rbind(explained.vectors, explained)
# Encoding: 2 = previously explained, 1 = newly added this iteration.
print(2*explained.prev + mut)
top.diffs = c(top.diffs, MI.diff)
top.genes.vectors = rbind(top.genes.vectors, mut)
top.genes.MI = c(top.genes.MI, paste(signif(MI.results$MI[MI.order[1]-1], digits=4),
sep="" ))
# Per-iteration plot: score strip on top, explained + top-10 rows below.
par(mar = c(1, 12, 1, 12))
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(1, 8), FALSE)
max.v <- max(max(m2.pass2[1,wo.mut.or.blue]), -min(m2.pass2[1,wo.mut.or.blue]))
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2,
(ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))[wo.mut.or.blue]
image(1:length(m2.pass2[1,wo.mut.or.blue]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 <- rbind( explained[wo.mut.or.blue]+1, top10.labels)
V1 <- apply(V1, MARGIN=2, FUN=rev)
image(1:length(m2.pass2[1,wo.mut.or.blue]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black,
axes=FALSE, main=paste("iteration:", i), sub = "", xlab= "", ylab="")
axis(2, at=1:dim(V1)[1], labels=rev(c("explained with top result", top10.names)),
adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev(c( paste(signif(explained.MI, digits=4),
sep="" ),
signif(top10.MI,digits=4))),
adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
# Remove newly-explained samples from the search mask for next iteration.
samples.without.mut[wo.mut.or.blue] = samples.without.mut[wo.mut.or.blue] - mut[wo.mut.or.blue]
wo.mut.or.blue = (samples.without.mut | m2.pass2[1,] <= median(m2.pass2[1,]))
wo.mut.and.red = (samples.without.mut & m2.pass2[1,] > median(m2.pass2[1,]))
explained.MI.prev = explained.MI
explained.prev = ifelse(mut+explained.prev>0, 1, 0)
print(proc.time()-t1)
print(date())
}
# Final summary plot 1: initial summary row + each selected aberration +
# the union "explained" vector, with their MI values on the right axis.
explained = ifelse(apply(rbind(ifelse(cls.list2.pass2[1,]=="WT", 0,1),
top.genes.vectors), MARGIN=2, FUN=sum)>=1, 1, 0)
explained.MI = mutual.inf.2(m2.pass2[1,], explained)/MI.ref
top.genes.MI = signif(mutual.inf.3.v2(m2.pass2[1,], top.genes.vectors)$MI, digits=4)
par(mar = c(1, 12, 1, 12))
nf <- nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(1, 8), respect = FALSE)
max.v <- max(max(m2.pass2[1,]), -min(m2.pass2[1,]))
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2,
(ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))
image(1:length(m2.pass2[1,]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE,
main="Final results from iterations (removing cell lines)", sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 <- rbind(ifelse(cls.list2.pass2[1,]=="WT", 0,1), top.genes.vectors, explained)+1
V1 <- apply(V1, MARGIN=2, FUN=rev)
image(1:length(m2.pass2[1,]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black, axes=FALSE,
sub = "", xlab= "", ylab="")
axis(2, at=1:dim(V1)[1], labels=rev(c(paste(phen.names.pass2[-1], collapse=" "),
top.genes.names, "explained")), adj= 0.5, tick=FALSE,
las = 1, cex.axis=1, font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev( c(signif(explained.MI.initial, digits=4),
paste(top.genes.MI, sep=""),
signif(explained.MI,digits=4))), adj= 0.5, tick=FALSE,
las = 1, cex.axis=1, font.axis=1, line=-1)
# Final summary plot 2: cumulative explained vector per iteration with the
# per-step MI deltas.
explained.vectors.MI = mutual.inf.3.v2(m2.pass2[1,], explained.vectors)$MI
MI.diffs = explained.vectors.MI[-1] - explained.vectors.MI[1:n.iter]
par(mar = c(1, 12, 1, 12))
nf <- nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(1, 8), respect = FALSE)
max.v <- max(max(m2.pass2[1,]), -min(m2.pass2[1,]))
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2,
(ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))
image(1:length(m2.pass2[1,]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE, main="Final results from iterations (removing cell lines)", sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 = apply(explained.vectors+1, MARGIN=2, FUN=rev)
image(1:length(m2.pass2[1,]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black, axes=FALSE,
# main=paste("step:", i),
sub = "", xlab= "", ylab="")
left.labels = c("INITIAL cumulative", paste("cumulative, iter: ", 1:(n.iter-1), " ", sep=""),
paste("FINAL cumulative, iter: ", n.iter, " ",sep=""))
right.labels = c( paste(" ", signif(explained.vectors.MI[1],digits=4), sep=""),
paste( " ", signif(explained.vectors.MI[2:(n.iter+1)], digits=4),
" (",ifelse(MI.diffs < 0, "-", "+"),
signif(abs(MI.diffs), digits=4),")", sep="")
)
axis(2, at=1:dim(V1)[1], labels=rev(left.labels), adj= 0.5, tick=FALSE, las = 1, cex.axis=1,
font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev(right.labels), adj= 0.5, tick=FALSE,
las = 1, cex.axis=1, font.axis=1, line=-1)
# Rebuild pass-3 labels/lists from the selected aberrations, and score all
# models against the final explained vector.
cls.labels2.pass3 = rbind(cls.labels2.pass2[1,], top.genes.vectors+1)
cls.list2.pass3 = rbind(cls.list2.pass2[1,], ifelse(top.genes.vectors==0, "WT", "MUT"))
MI.results = mutual.inf.3.v2( explained,
m2.pass2) ## must subtract 1 from the indices because bin.gene.matrix.3
## doesn't include SUMMARY
#target.vector.name=phen.pass1[ind.master],
# n.randomizations = n.randomizations)
# g.order.pass3.top40
phen.descs.pass3 = ifelse( is.nan(MI.results$MI), " - ", signif(MI.results$MI, digits=3))
# phen.descs2.pass3 = phen.descs.pass3[g.order.pass3]
print(proc.time()-t1)
print(date())
dev.off()
}else{ print("skipping iterative method!")}
# Step 3 (mRMR): rank aberrations by minimum-redundancy / maximum-relevance,
# using both the MI difference (relevance - redundancy) and MI quotient
# (relevance / redundancy) criteria, and plot one page per criterion.
if( do.mRMR == T){
print("--- Begin Step 3 (min redundancy Max Relevance) ---")
explained.MI.initial = mutual.inf.2(explained.initial, m2.pass2[1,])/MI.ref
print(paste("explained.MI.initial =", explained.MI.initial))
print(explained.initial)
# relevance: MI of each aberration with the top model's scores.
# redundancy: MI of each aberration with the already-explained vector.
relevance = mutual.inf.3.v2( m2.pass2[1,], bin.gene.matrix.3, pos.and.neg=T)$MI
redundancy = mutual.inf.3.v2( explained.initial, bin.gene.matrix.3, pos.and.neg=F)$MI
print(proc.time()-t1)
print(date())
#browser()
MI.D = relevance - redundancy # Mutual Information Difference
MI.D.string = "MI.D=rel-red"
MI.D.order = order(MI.D, decreasing=TRUE, na.last=TRUE)
top.MI.D = MI.D[MI.D.order[1]]
top.ind.MI.D = which(MI.D == top.MI.D)
# The +2 offset maps bin.gene.matrix.3 row indices into phen.names —
# presumably skipping two header rows; TODO confirm against phen.names.
top.gene.MI.D = paste(phen.names[ top.ind.MI.D[1]+2 ], "+", length(top.ind.MI.D)-1, "others")
## Plot and iterate with MI.D first
print("Plot and iterate with MI.D first")
#quartz(height=8.5, width=11)
pdf(file=paste(results.dir, test.file.prefix, file.suffix,
".Step3_", MI.D.string, ".pdf", sep=""), height=8.5, width=11)
explained.with.top.MI.D = ifelse(explained.initial + bin.gene.matrix.3[MI.D.order[1],]>=1, 1, 0)
explained.with.top.MI.D.MI = mutual.inf.2(explained.with.top.MI.D, m2.pass2[1,])/MI.ref
MI.diff = explained.with.top.MI.D.MI - explained.MI.initial
# top.diffs = c(top.diffs, MI.diff)
print(paste("Explained.MI = ", explained.with.top.MI.D.MI,
" MI.diff = ", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), sep=""))
print(ifelse(cls.list2.pass2[1,]=="WT", 0,2) + bin.gene.matrix.3[MI.D.order[1],])
top10.names = c("explained with top result ", paste(phen.names[ MI.D.order[1:10]+2 ], " "))
top10.MI = c( paste(" MI = ",
signif(explained.with.top.MI.D.MI, digits=4),
" diff:", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), sep=""),
paste(" MI:", signif(relevance[MI.D.order[1:10]], digits=4),
" MI.D:", signif(MI.D[MI.D.order[1:10]], digits=4), sep=""))
# top.genes.MI = c(top.genes.MI, paste(" ", signif(top.MI.D, digits=4),
# #" (", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), ")",
# sep="") )
# top.genes.names = c(top.genes.names,
# paste(phen.names[ top.ind.MI.D[1]+2 ], "+", length(top.ind.MI.D)-1, "others "))
# top.genes.vectors = rbind(bin.gene.matrix.3[MI.D.order[1],])
top10.labels = rbind( ifelse(cls.list2.pass2[1,]=="WT", 0,1),
explained.with.top.MI.D, bin.gene.matrix.3[MI.D.order[1:10],]) + 1
par(mar = c(1, 12, 1, 12))
#par(mar = c(1, 10, 1, 5))
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(1, 5), FALSE)
max.v <- max(max(m2.pass2[1,]), -min(m2.pass2[1,]))
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2,
(ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))
image(1:length(m2.pass2[1,]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE, main=MI.D.string, sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 <- top10.labels
V1 <- apply(V1, MARGIN=2, FUN=rev)
image(1:length(m2.pass2[1,]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black, axes=FALSE,
main=paste("iteration:", 0), sub = "", xlab= "", ylab="")
axis(2, at=1:dim(V1)[1], labels=rev(c(paste(phen.names.pass2[-1], collapse=" "), top10.names)),
adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev(c(phen.descs.pass2[1], top10.MI)),
adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
dev.off()
#browser()
## Plot MI.Q next
# browser()
print("Plot MI.Q next")
MI.Q = (relevance)/(redundancy) # Mutual Information Quotient
# NOTE(review): this string is embedded in the output PDF filename below,
# and contains '|' which is invalid in filenames on some platforms
# (e.g. Windows) — consider a safer separator.
MI.Q.string = "MI.Q=(rel)|(red)"
MI.Q.order = order(MI.Q, decreasing=TRUE, na.last=TRUE)
top.MI.Q = MI.Q[MI.Q.order[1]]
top.ind.MI.Q = which(MI.Q == top.MI.Q)
top.gene.MI.Q = paste(phen.names[ top.ind.MI.Q[1]+2 ], "+", length(top.ind.MI.Q)-1, "others")
pdf(file=paste(results.dir, test.file.prefix,
file.suffix, ".Step3", MI.Q.string, ".pdf", sep=""), height=8.5, width=11)
explained.with.top.MI.Q = ifelse(explained.initial + bin.gene.matrix.3[MI.Q.order[1],]>=1, 1, 0)
explained.with.top.MI.Q.MI = mutual.inf.2(explained.with.top.MI.Q, m2.pass2[1,])/MI.ref
MI.diff = explained.with.top.MI.Q.MI - explained.MI.initial
#top.diffs = c(top.diffs, MI.diff)
print(paste("Explained.MI =", explained.with.top.MI.Q.MI,
" MI.diff =", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), sep=""))
print(ifelse(cls.list2.pass2[1,]=="WT", 0,2) + bin.gene.matrix.3[MI.Q.order[1],])
top10.names = c("explained with top result", paste(phen.names[ MI.Q.order[1:10]+2 ], " "))
top10.MI = c( paste(" MI:",
signif(explained.with.top.MI.Q.MI, digits=4),
" diff:", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), sep=""),
paste(" MI:", signif(relevance[MI.Q.order[1:10]], digits=4),
" MI.Q:", signif(MI.Q[MI.Q.order[1:10]], digits=4), sep=""))
top10.labels = rbind( ifelse(cls.list2.pass2[1,]=="WT", 0,1),
explained.with.top.MI.Q, bin.gene.matrix.3[MI.Q.order[1:10],]) +1
#top.genes.MI = c(top.genes.MI, paste(" ", signif(top.MI.Q, digits=4),
#" (", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), ")",
# sep=""))
#top.genes.names = c(top.genes.names,
# paste(phen.names[ top.ind.MI.Q[1]+2 ], "+", length(top.ind.MI.Q)-1, "others "))
#top.genes.vectors = rbind(bin.gene.matrix.3[MI.Q.order[1],])
#quartz(height=8.5, width=11)
par(mar = c(1, 12, 1, 12))
#par(mar = c(1, 10, 1, 5))
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(1, 5), FALSE)
max.v <- max(max(m2.pass2[1,]), -min(m2.pass2[1,]))
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2, (ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))
image(1:length(m2.pass2[1,]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE, main=MI.Q.string, sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 <- top10.labels
V1 <- apply(V1, MARGIN=2, FUN=rev)
image(1:length(m2.pass2[1,]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black, axes=FALSE, main=paste("iteration:", 0), sub = "", xlab= "", ylab="")
axis(2, at=1:dim(V1)[1], labels=rev(c(paste(phen.names.pass2[-1], collapse=" "), top10.names)), adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev(c(phen.descs.pass2[1], top10.MI)), adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
dev.off()
} else{ print("Skipped min redundancy Max Relevance!") }
if (!is.na(output.dataset)) {
# V.GCT <- m.all
print("Figure out why model.descs2.pass1 does not correspond to the rows of m.all")
browser()
# colnames(V.GCT) <- sample.names2
# row.names(V.GCT) <- model.names.pass1
write.gct(gct.data.frame = m.all, descs = model.descs2.pass1, filename =output.dataset)
}
}
OPAM.sort.projection.by.score.8 <- function(
# input.ds,
# signatures = "NA",
input.all.pathways.ds,      # GCT file: projections of ALL pathways for the input tissue
input.cls,                  # CLS phenotype file
tissue = "NA",              # tissue label used in titles / Phase1 PDF name
results.dir,                # directory where output PDFs are written
normalize.score = T,
# normalization.type = "zero.one",
model = "NA",               # fallback signature to sort by when MI cannot be computed
target.phen = NA,
target.class = NA,
user.colors = NA,
decreasing.order = T,
output.dataset = NA,        # if not NA, basename for .gct/.cls output
char.rescale = 1,
cmap.type = 3,
row.norm = T,
u.gene.names.known = "NA",  # phenotype (gene/CN) names to evaluate in pass 1
n.randomizations = 10
)
# Calls MSIG.HeatMapPlot.9 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2 does.
# However, it *will* use "model" if it cannot calculate p-values on the gene signatures, which
# happens when every cell line has a genomic aberration.
#
# Runs 3 passes on the data:
# 1st pass: looks at the genes and copy number alterations specified by u.gene.names.known
# 2nd pass: looks at only the top abnormalities (using a p-value cutoff) from the 1st pass, and adjusts
# the PATHWAY.MUT vector accordingly (only according to the genes, not by copy number data)
# 3rd pass: Takes the winning signature from the 2nd pass and then looks all the genes available
#
# Very similar to OPAM.sort.projection.by.score.4, however this version uses mutual.inf instead of
# roc.area to calculate mutual information scores and p-values for PATHWAY.MUT, the vector of total genomic aberrations
# in all samples
#
# Differs from OPAM.sort.projection.by.score.6 by requiring the gct file of expression in
# all pathways by the input tissue ("input.all.pathways.ds")
#
# Differs from OPAM.sort.projection.by.score.7 by finding the top enriched pathways that differentiate according to phenotype
# from testing all the pathways in "input.all.pathways.ds." Does not require signatures to be known a priori.
{
library(gtools)
library(verification)
library(ROCR)
library(MASS)
library(RColorBrewer)
library(heatmap.plus)
# --- Read the all-pathways projection dataset (GCT) and row-normalize it ---
# dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
# m <- data.matrix(dataset$ds)
# model.names <- dataset$row.names
## model.descs <- dataset$descs
# Ns <- length(m[1,])
# dim(m)
# sample.names <- dataset$names
dataset.all <- MSIG.Gct2Frame( filename = input.all.pathways.ds)
m.all <- data.matrix(dataset.all$ds)#[1:30,]
#model.names <- dataset.all$row.names#[1:30]
model.names <- make.unique(dataset.all$descs)
m.all <- na.omit(t(apply(m.all, MARGIN=1, FUN=normalize)))
Ns = length(m.all[1,])
sample.names = dataset.all$names
# if( signatures == "NA" ){
# stop("Must provide a vector of signature names to evaluate, or specify 'ALL'")
# }
# if( signatures == "ALL"){
# model.names = model.names.all
# m = m.all
# model.descs = dataset.all$descs
# } else{
# model.names = signatures
# model.ind = match(signatures, model.names.all)
# m = m.all[model.ind,]
# model.descs = dataset.all$descs[model.ind]
## browser()
# }
# model.names = model.names.all
# m = m.all
model.descs = dataset.all$descs#[1:30]
n.models <- length(m.all[,1])
temp <- strsplit(input.all.pathways.ds, split="/") # Extract test file name
s <- length(temp[[1]])
test.file.name <- temp[[1]][s]
temp <- strsplit(test.file.name, split=".gct")
test.file.prefix <- temp[[1]][1]
char.res <- 0.013 * n.models + 0.65
# normalize scores
# if (normalize.score == T) {
# if (normalization.type == "zero.one") {
# for (i in 1:n.models) {
# m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
# }
# } else if (normalization.type == "z.score") {
# for (i in 1:n.models) {
# m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
# }
# } else if (normalization.type == "r.z.score") {
# for (i in 1:n.models) {
# m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
# }
# }
# }
# --- Read the phenotype (CLS) file and set up class labels / colors ---
CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
cls.labels <- CLS$class.v
cls.phen <- CLS$phen
cls.list <- CLS$class.list
# browser()
if (is.vector(cls.labels)) {
n.phen <- 1
} else {
n.phen <- length(cls.labels[,1])
}
if (!is.na(user.colors[1])) {
c.test <- user.colors
} else {
if (!is.null(CLS$col.phen)) {
c.test <- CLS$col.phen
} else {
# Default palette: repeated brewer palettes so there are always enough colors
c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"))
}
}
# browser()
if (!is.null(CLS$phen.names)) {
phen.names <- CLS$phen.names
} else if( !is.null(CLS$phen.list)){
phen.names = CLS$phen.list
} else {
phen.names <- "NA"
}
cls.phen.index <- unlist(cls.phen)
cls.phen.colors <- c.test[1:length(cls.phen.index)]
# print("cls.phen.colors:")
# print(cls.phen.colors)
n.classes <- vector(length=n.phen, mode="numeric")
if (n.phen == 1) {
max.classes <- length(cls.phen)
n.classes[1] <- max.classes
} else {
max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
for (i in 1:n.phen) {
n.classes[i] <- length(cls.phen[[i]])
}
}
# --- Pass 1: score all pathways against the combined aberration vector ---
print("--- Begin Pass 1 ---")
# model.names.original = model.names
# m.original = m
phen.pass1 = u.gene.names.known
n.phen.pass1 = length(u.gene.names.known)
ind.phen.pass1 = which( phen.names %in% phen.pass1 )
phen.pass1 = phen.names[ind.phen.pass1]
# phen.pass1 = c( "SUMMARY", phen.pass1)
# browser()
MI.list.pass1 = vector( length=n.models, mode="numeric" )
FDR.list.pass1 = vector( length=n.models, mode="numeric" )
if( !is.vector(cls.labels)){
cls.list.pass1 = cls.list[ind.phen.pass1,]
cls.labels.pass1 = cls.labels[ind.phen.pass1,]
} else{
cls.list.pass1 = cls.list
cls.labels.pass1 = cls.labels
}
# 0/1 matrix of aberrations (anything that is not "WT" counts as altered)
cls.list.pass1.2 = t(as.matrix(ifelse(cls.list.pass1 == "WT", 0, 1)))
# browser()
if (!is.na(target.phen)) {
if( is.vector(cls.list.pass1.2)){ bin.class.pass1 = cls.list.pass1.2
} else { bin.class.pass1 = apply( cls.list.pass1.2, MARGIN=2, FUN=sum) }
# Normalize bin.class.pass1
if( length(unique(bin.class.pass1)) > 1){
bin.class.pass1 = (
bin.class.pass1 - min(bin.class.pass1))/(max(bin.class.pass1) - min(bin.class.pass1))
} else if ( length(unique(bin.class.pass1)) == 1){
# NOTE(review): this assigns `bin.class`, not `bin.class.pass1` — the result is
# never used and bin.class.pass1 stays constant. Looks like a typo; confirm
# intent before changing (an all-zero vector would flip to all "MUT" below).
bin.class = rep(1, length(cls.list[1,]))
}
if( is.vector( cls.list.pass1) ){
cls.list.pass1 = ifelse(bin.class.pass1 > 0, "MUT", "WT")
} else{ cls.list.pass1[1,] = ifelse(bin.class.pass1 > 0, "MUT", "WT") }
} else {
# browser()
bin.class.pass1 <- ifelse(cls.list == cls.phen[1], 0, 1)
}
# browser()
# MI.ref.models.pass1 = mutual.inf.2(bin.class.pass1, bin.class.pass1)$MI
# print(paste("MI.ref.models.pass1 =", MI.ref.models.pass1))
# browser()
model.descs2.pass1 = vector(length = n.models, mode="character")
# NOTE(review): written to the working directory, not results.dir; the "."
# separator next to ".Phase1" yields a doubled dot in the name — confirm intent.
pdf(file=paste(tissue, test.file.name, ".Phase1", "pdf", sep="."))
#browser()
# Keep only the top 20 pathways for display; the rest are dropped below
skipped.indices = 21:(n.models)
# browser()
if( length(unique(bin.class.pass1)) > 1 ){
# MI of every pathway projection vs the combined aberration vector
MI.results = mutual.inf.3.v2(bin.class.pass1, m.all) #signature.indices = 1:n.models, )
MI.list.pass1 = MI.results$MI
# FDR.list.pass1 = MI.results$FDR
# browser()
for (i in 1:n.models) {
MI.signif <- signif(MI.list.pass1[i], digits=3)
# FDR.signif <- signif(FDR.list.pass1[i], digits=3)
model.descs2.pass1[i] <- paste(MI.signif, sep="")
}
# browser()
m.order.pass1 = order(MI.list.pass1, decreasing=FALSE, na.last=TRUE)
m.order.pass1.top10 = m.order.pass1[-skipped.indices]
# m.order.pass1 = 1:n.models
m2.pass1 <- m.all[m.order.pass1, ]
s.order.pass1 <- order(m2.pass1[1,], decreasing = FALSE )
# s.order.pass1 = 1:Ns
m2.pass1 <- m2.pass1[-skipped.indices, s.order.pass1]
# m2.pass1.top10 = m2.pass1[-skipped.indices,]
} else{
# MI undefined (single class): fall back to sorting by the user-supplied "model"
MI.list.pass1 = rep(NA, n.models)
# FDR.list.pass1 = rep(NA, n.models)
model.descs2.pass1 = rep(" - ", n.models)
loc <- match(model, model.names)
s.order.pass1 <- order(m.all[loc,], decreasing = decreasing.order)
# loc = s.order.pass1[1]
# s.order.pass1 = 1:Ns
m2.pass1 <- m.all[, s.order.pass1]
correl <- cor(t(m2.pass1))[, loc]
m.order.pass1 <- order(correl, decreasing=T)
# m.order.pass1 = 1:n.models
# NOTE(review): m.order.pass1.top10 is not defined in this branch but is used
# right after the if/else — this path would fail unless a stale global exists.
m2.pass1 <- m2.pass1[m.order.pass1, ]
}
# browser()
dev.off()
# browser()
# Re-order all bookkeeping vectors to match the sorted matrix
MI.list.pass1.top10 = MI.list.pass1[m.order.pass1.top10]
MI.list.pass1 = MI.list.pass1[m.order.pass1]
# FDR.list.pass1.top10 = FDR.list.pass1[m.order.pass1.top10]
# FDR.list.pass1 = FDR.list.pass1[m.order.pass1]
bin.class.pass1 = bin.class.pass1[s.order.pass1]
model.descs2.pass1.top10 = model.descs2.pass1[m.order.pass1.top10]
model.descs2.pass1 = model.descs2.pass1[m.order.pass1]
sample.names2.pass1 <- colnames(m2.pass1)
model.names.pass1.top10 <- rownames(m2.pass1)
print(matrix(c(model.names.pass1.top10, model.descs2.pass1.top10), ncol=2), quote=F)
# browser()
if (is.vector(cls.labels)) {
cls.labels2.pass1 <- cls.labels.pass1[s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[s.order.pass1]
} else {
cls.labels2.pass1 <- cls.labels.pass1[, s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[, s.order.pass1]
}
m.all = m.all[, s.order.pass1]
# browser()
winning.model.ind.pass1 = which(model.names.pass1.top10[1] == rownames(m.all))
# --- Per-phenotype MI annotations for the heatmap row labels ---
MI.list.phen.pass1 = vector(mode="numeric", length=n.phen.pass1)
# FDR.list.phen.pass1 = vector(mode="numeric", length=n.phen.pass1)
phen.descs2.pass1 = vector(mode="character", length=n.phen.pass1)
if( length(unique(bin.class.pass1)) > 1){
MI.signif <- signif(MI.list.pass1[1], digits=3)
MI.list.phen.pass1[1] = MI.list.pass1[1]
# FDR.signif <- signif(FDR.list.pass1[1], digits=3)
# FDR.list.phen.pass1[1] = FDR.list.pass1[1]
} else{
MI.signif <- "-"
MI.list.phen.pass1[1] = NA
# FDR.signif <- "- "
# FDR.list.phen.pass1[1] = NA
}
print(paste(format(phen.pass1[1], width=12), "mutual.inf =", MI.signif))
phen.descs2.pass1[1] <- paste(MI.signif)
# browser()
if( n.phen >= 2 ){
bin.gene.matrix = ifelse(cls.list2.pass1[-1,]=="WT", 0, 1)
n.aberrations = apply(bin.gene.matrix, MARGIN=1, FUN=sum)
u.n.aberrations = unique(n.aberrations[n.aberrations != 0])
# Group phenotypes by their aberration count so mutual.inf.3.v2 is called
# once per group instead of once per phenotype
for( i in 1:length(u.n.aberrations)){
ind.without.SUMMARY = which(n.aberrations == u.n.aberrations[i])
ind.master = ind.without.SUMMARY + 1
# browser()
MI.results = mutual.inf.3.v2(bin.gene.matrix[ind.without.SUMMARY,],
m.all, winning.model.ind.pass1, gene.target.name=phen.pass1[ind.master],
n.randomizations = n.randomizations)
MI.list.phen.pass1[ind.master] = MI.results$MI
# FDR.list.phen.pass1[ind.master] = MI.results$FDR
for( j in 1:length(ind.master)){
# FIX: was "phen.descs.pass1" (undeclared) — the MI annotations never
# reached phen.descs2.pass1, which is what is passed to the heatmap below.
phen.descs2.pass1[ind.master[j]] =
paste( signif(MI.results$MI[j], digits=3),
sep="")
}
}
ind.zeros = which(n.aberrations==0) + 1
MI.list.phen.pass1[ind.zeros] = NA
# FDR.list.phen.pass1[ind.zeros] = NA
# FIX: same typo as above ("phen.descs.pass1" -> "phen.descs2.pass1")
phen.descs2.pass1[ind.zeros] = " - "
}
phen.names.pass1 = phen.pass1#[g.order.pass1]#[1:n.phen.pass1]
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass1 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(cls.list2.pass1)
cls.phen2.pass1 <- classes
cls.labels2.pass1 <- match(cls.list2.pass1, cls.phen2.pass1)
} else {
for (kk in 1:length(cls.list2.pass1[, 1])) {
classes <- unique(cls.list2.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
cls.labels2.pass1[kk,] <- match(cls.list2.pass1[kk,], classes)
}
}
# --- Map class labels to colors (unknown labels fall back to gray) ---
phen.list.pass1 <- unlist(cls.phen2.pass1)
colors.list.pass1 = rep( "gray", length(phen.list.pass1))
colors.list.pass1[phen.list.pass1=="MUT"] = cls.phen.colors[1]
colors.list.pass1[phen.list.pass1=="DEL"] = cls.phen.colors[3]
colors.list.pass1[phen.list.pass1=="AMP"] = cls.phen.colors[4]
colors.list.pass1[phen.list.pass1=="ALT"] = cls.phen.colors[5]
# browser()
# NOTE(review): "|" in a file name is invalid on Windows and awkward on most
# shells — consider replacing with "_" (left unchanged here).
filename <- paste(results.dir, test.file.prefix, ".Phase1.MI|HXY", sep="")
pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 17 )
# browser()
MSIG.HeatMapPlot.9(V = m2.pass1,
row.names = model.names.pass1.top10,
row.names2 = model.descs2.pass1.top10,
col.labels = cls.labels2.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list.pass1,
phen.names = phen.names.pass1,
phen.names2 = phen.descs2.pass1,
col.names = sample.names2.pass1,
main = paste(tissue, test.file.prefix),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
dev.off()
# browser()
if (!is.na(output.dataset)) {
# Write the (column-reordered) projection matrix and matching CLS file
write.gct(gct.data.frame = m.all, descs = model.descs2.pass1, filename = paste(output.dataset, ".gct", sep=""))
write.cls.2( class.v = cls.labels2.pass1, phen = cls.phen, filename = paste(output.dataset, ".cls", sep=""))
}
}
MSIG.HeatMapPlot.11 <- function(
## For Plotting expression heatmap only!! (No phenotypes)
V,                      # numeric matrix: rows = genes/models, cols = samples
row.names = "NA",       # left-side row labels ("NA" = none)
row.names2 = "NA",      # right-side row labels ("NA" = none)
col.labels = "NA",      # phenotype label vector/matrix per column ("NA" = none)
col.labels2 = "NA",     # secondary grouping used to draw separator lines
col.classes = "NA",     # class names per phenotype row (vector or list)
phen.cmap = NULL,       # colors for phenotype classes
col.names = "NA",       # column (sample) labels
phen.names = "NA",      # names for the phenotype rows
phen.names2 = "NA",     # right-side annotations for the phenotype rows
main = " ",
sub = " ",
xlab=" ",
ylab=" ",
row.norm = TRUE,        # z-score rows (scaled by 0.333*sd, clipped to +/-4)
char.rescale = 0.85,
cmap.type = 1, # 1 = vintage pinkogram, 2 = scale of blues, 3 = high-resolution pinkogram for scores or probabilities [0, 1], 4 = high-resolution pinkogram for general values, 5 = color map for normalized enrichment scores, 6 = scale of red purples, 7 = scale of Oranges, 8 = scale of Greens, 9 = scale of Blues
max.v = "NA",           # symmetric color-scale limit (cmap.type 5 only)
legend = T)
{
#
# Plots a heatmap "pinkogram" of a gene expression matrix including phenotype vector and gene, sample and phenotype labels
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
n.rows <- length(V[,1])
n.cols <- length(V[1,])
V1 <- matrix(0, nrow=n.rows, ncol=n.cols)
# if ((cmap.type == 5) | (cmap.type == 3)) {
if (cmap.type == 5) {
row.norm <- F
}
# Row normalization: center each row and scale by 1/3 sd, clip to [-4, 4]
if (row.norm == TRUE) {
row.mean <- apply(V, MARGIN=1, FUN=mean)
row.sd <- apply(V, MARGIN=1, FUN=sd)
row.n <- length(V[,1])
for (i in 1:n.rows) {
if (row.sd[i] == 0) {
V1[i,] <- 0
} else {
V1[i,] <- (V[i,] - row.mean[i])/(0.333 * row.sd[i])
}
V1[i,] <- ifelse(V1[i,] < -4, -4, V1[i,])
V1[i,] <- ifelse(V1[i,] > 4, 4, V1[i,])
}
} else {
V1 <- V
}
# Select the color map
if (cmap.type == 1) {
mycol <- c("#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA",
"#FF9DB0", "#FF7080",
"#FF5A5A", "#FF4040", "#FF0D1D") # blue-pinkogram colors. This is the 1998-vintage,
# pre-gene cluster, original pinkogram color map
} else if (cmap.type == 2) {
violet.palette <- colorRampPalette(c("#400030", "white"), space = "rgb")
mycol <- rev(violet.palette(20))
} else if (cmap.type == 6) {
mycol <- c("#FFF7F3", "#FDE0DD", "#FCC5C0", "#FA9FB5", "#F768A1", "#DD3497", "#AE017E",
"#7A0177", "#49006A")
} else if (cmap.type == 7) {
mycol <- c("#FFF5EB", "#FEE6CE", "#FDD0A2", "#FDAE6B", "#FD8D3C", "#F16913", "#D94801",
"#A63603", "#7F2704")
} else if (cmap.type == 8) {
mycol <- c("#F7FCF5", "#E5F5E0", "#C7E9C0", "#A1D99B", "#74C476", "#41AB5D", "#238B45",
"#006D2C", "#00441B")
} else if (cmap.type == 9) {
mycol <- c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5",
"#08519C", "#08306B")
} else if ((cmap.type == 3) | (cmap.type == 4) | (cmap.type == 5)) {
# 512-step high-resolution blue-white-red ramp
mycol <- vector(length=512, mode = "numeric")
for (k in 1:256) {
mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
}
for (k in 257:512) {
mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
}
mycol <- rev(mycol)
}
ncolors <- length(mycol)
# Map values to color indices (symmetric about 0 for cmap.type 5)
if (cmap.type == 5) {
if (max.v == "NA") {
max.v <- max(max(V1), -min(V1))
}
V2 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
} else {
V2 <- ceiling(ncolors * (V1 - min(V1))/(1.001*(max(V1) - min(V1))))
}
if (col.labels[1] == "NA") {
# No phenotype rows: plot the expression matrix alone
heatm <- matrix(0, nrow = n.rows, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
tot.cols <- ncolors
# (a stray debugging browser() call was removed here)
if (legend == T) {
nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(5, 1), heights = c(10, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(8, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap[1:length(col.classes)])
image(1:n.cols, 1:n.rows, t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
n.rows.phen <- 0
} else {
# Append one extra row per phenotype on top of the expression rows;
# phenotype cells use color indices above tot.cols so they pick up phen.cmap
tot.cols <- ncolors
if (is.vector(col.labels)) {
heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
n.rows.phen <- 1
heatm[n.rows + 1,] <- tot.cols + col.labels
cols.row <- length(unique(col.labels))
tot.cols <- tot.cols + cols.row
phen.cmap <- phen.cmap[1:cols.row]
} else {
n.rows.phen <- length(col.labels[,1])
cols.row <- vector(length=n.rows.phen, mode = "numeric")
heatm <- matrix(0, nrow = n.rows + n.rows.phen, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
for (k in seq(n.rows + n.rows.phen, n.rows + 1, -1)) {
heatm[k,] <- tot.cols + col.labels[n.rows + n.rows.phen - k + 1,]
cols.row[n.rows + n.rows.phen - k + 1] <- length(unique(col.labels[n.rows + n.rows.phen - k + 1,]))
tot.cols <- tot.cols + cols.row[n.rows + n.rows.phen - k + 1]
}
phen.cmap <- phen.cmap[1:sum(unlist(lapply(col.classes, length)))]
}
if (legend == T) {
nf <- layout(matrix(c(1, 2, 3), 3, 1, byrow=T), heights = c(8, 4, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(5, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap)
image(1:n.cols, 1:(n.rows + n.rows.phen), t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
}
# Add lines to separate phenotypes or subgroups
if (col.labels2[1] != "NA") {
groups <- split(col.labels2, col.labels2)
len.vec <- lapply(groups, length)
plot.div <- c(0.51, cumsum(len.vec) + 0.5)
for (i in plot.div) {
lines(c(i, i), c(0, n.rows + n.rows.phen + 0.48), lwd = 2, cex = 0.9, col = "black")
}
lines(c(0.51, n.cols + 0.49), c(0.51, 0.51), lwd = 2, cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + n.rows.phen + 0.48, n.rows + n.rows.phen + 0.48), lwd = 2,
cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + 0.50, n.rows + 0.50), lwd = 2,
cex = 0.9, col = "black")
}
# Left-side row labels (truncated to 40 chars)
if (row.names[1] != "NA") {
numC <- nchar(row.names)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names[i] <- substr(row.names[i], 1, 40)
row.names[i] <- paste(row.names[i], " ", sep="")
}
if (phen.names[1] == "NA") {
head.names <- paste("Class", seq(n.rows.phen, 1, -1))
} else {
head.names <- as.character(rev(phen.names))
}
row.names <- c(row.names[seq(n.rows, 1, -1)], head.names)
axis(2, at=1:(n.rows + n.rows.phen), labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char,
font.axis=2, line=-1)
}
# Right-side row annotations
if (row.names2[1] != "NA") {
numC <- nchar(row.names2)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names2[i] <- substr(row.names2[i], 1, 40)
row.names2[i] <- paste(" ", row.names2[i], sep="")
}
for( i in 1:n.rows.phen ){
phen.names2[i] <- substr(phen.names2[i], 1, 40)
phen.names2[i] <- paste( " ", phen.names2[i], sep="")
}
row.names2 <- rev(row.names2)
phen.names2 <- rev(phen.names2)
axis(4, at=1:(n.rows + n.rows.phen), labels=c(row.names2, phen.names2), adj= 0.5, tick=FALSE, las = 1,
cex.axis=size.row.char, font.axis=2, line=-1)
}
# Bottom column (sample) labels
if (col.names[1] != "NA") {
size.col.char <- char.rescale*20/(n.cols + 25)
axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
}
# Phenotype Legend
if (legend == T) {
leg.txt <- NULL
p.vec <- NULL
c.vec <- NULL
c2.vec <- NULL
ind <- 1
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
# FIX: seq_len() instead of 1:n.rows.phen so that n.rows.phen == 0
# (no phenotype rows) skips the loop instead of iterating over 1:0.
# NOTE(review): head.names is only defined when row.names was supplied —
# the legend still assumes row.names != "NA"; confirm callers.
for (i in seq_len(n.rows.phen)) {
if (is.vector(col.labels)) {
phen.v <- as.character(col.classes)
} else {
phen.v <- as.character(col.classes[[i]])
}
p.name <- paste(as.character(rev(head.names)[i]), ": ", sep="")
leg.txt <- c(p.name, phen.v)
p.vec <- rep(22, cols.row[i] + 1)
c.vec <- c("#FFFFFF", phen.cmap[ind:(ind + cols.row[i] - 1)])
c2.vec <- c("#FFFFFF", rep("black", cols.row[i]))
ind <- ind + cols.row[i]
offset <- 0.07
legend(x=0, y= 1 - offset*i,
horiz = T, x.intersp = 0.5, legend=leg.txt, bty="n", xjust=0, yjust= 1, pch = p.vec,
pt.bg = c.vec, col = c2.vec, cex = 1.20, pt.cex=1.75)
}
}
# Color map legend (value -> color strip along the bottom panel)
par(mar = c(2, 12, 2, 12))
num.v <- 20
range.v <- range(V2)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v <- matrix(rev(seq(range.v[2], range.v[1], incr)), nrow=num.v, ncol=1)
image(1:num.v, 1:1, heatm.v, zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=" ", sub = " ", xlab= ylab, ylab=xlab)
range.v <- range(V1)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v2 <- matrix(signif(rev(seq(range.v[2], range.v[1], incr)), digits=2), nrow=num.v, ncol=1)
axis(3, at=1:num.v, labels=heatm.v2, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.5*char.rescale, font.axis=1)
return()
}
MSIG.HeatMapPlot.9 <- function(
V,
row.names = "NA",
row.names2 = "NA",
col.labels = "NA",
col.labels2 = "NA",
col.classes = "NA",
phen.cmap = "NA",
col.names = "NA",
phen.names = "NA",
phen.names2 = "NA",
main = " ",
sub = " ",
xlab=" ",
ylab=" ",
row.norm = TRUE,
char.rescale = 0.85,
cmap.type = 1, # 1 = vintage pinkogram, 2 = scale of blues, 3 = high-resolution pinkogram for scores or probabilities [0, 1], 4 = high-resolution pinkogram for general values, 5 = color map for normalized enrichment scores, 6 = scale of red purples, 7 = scale of Oranges, 8 = scale of Greens, 9 = scale of Blues
max.v = "NA",
legend = F)
{
#
# Plots a heatmap "pinkogram" of a gene expression matrix including phenotype vector and gene, sample and phenotype labels
# Doesn't plot the spectrum on the bottom
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
n.rows <- length(V[,1])
n.cols <- length(V[1,])
V1 <- matrix(0, nrow=n.rows, ncol=n.cols)
# if ((cmap.type == 5) | (cmap.type == 3)) {
if (cmap.type == 5) {
row.norm <- F
}
if (row.norm == TRUE) {
row.mean <- apply(V, MARGIN=1, FUN=mean)
row.sd <- apply(V, MARGIN=1, FUN=sd)
row.n <- length(V[,1])
for (i in 1:n.rows) {
if (row.sd[i] == 0) {
V1[i,] <- 0
} else {
V1[i,] <- (V[i,] - row.mean[i])/(0.333 * row.sd[i])
}
V1[i,] <- ifelse(V1[i,] < -4, -4, V1[i,])
V1[i,] <- ifelse(V1[i,] > 4, 4, V1[i,])
}
} else {
V1 <- V
}
if (cmap.type == 1) {
mycol <- c("#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA",
"#FF9DB0", "#FF7080",
"#FF5A5A", "#FF4040", "#FF0D1D") # blue-pinkogram colors. This is the 1998-vintage,
# pre-gene cluster, original pinkogram color map
} else if (cmap.type == 2) {
violet.palette <- colorRampPalette(c("#400030", "white"), space = "rgb")
mycol <- rev(violet.palette(20))
# mycol <- c("#FCFBFD","#F4F2F8","#F8F7FB","#EFEDF5","#E1E1EF","#E8E7F2","#DADAEB","#C6C7E1","#D0D1E6",
# "#BCBDDC","#A8A6CF",
# "#B2B2D6","#9E9AC8","#8A87BF","#9491C4","#807DBA","#7260AB","#796FB3","#6A51A3","#5C3596",
# "#63439D","#54278F","#460D83","#4D1A89","#3F007D")
} else if (cmap.type == 6) {
mycol <- c("#FFF7F3", "#FDE0DD", "#FCC5C0", "#FA9FB5", "#F768A1", "#DD3497", "#AE017E",
"#7A0177", "#49006A")
} else if (cmap.type == 7) {
mycol <- c("#FFF5EB", "#FEE6CE", "#FDD0A2", "#FDAE6B", "#FD8D3C", "#F16913", "#D94801",
"#A63603", "#7F2704")
} else if (cmap.type == 8) {
mycol <- c("#F7FCF5", "#E5F5E0", "#C7E9C0", "#A1D99B", "#74C476", "#41AB5D", "#238B45",
"#006D2C", "#00441B")
} else if (cmap.type == 9) {
mycol <- c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5",
"#08519C", "#08306B")
} else if ((cmap.type == 3) | (cmap.type == 4) | (cmap.type == 5)) {
mycol <- vector(length=512, mode = "numeric")
for (k in 1:256) {
mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
}
for (k in 257:512) {
mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
}
mycol <- rev(mycol)
}
ncolors <- length(mycol)
if (cmap.type == 5) {
if (max.v == "NA") {
max.v <- max(max(V1), -min(V1))
}
V2 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
} else {
V2 <- ceiling(ncolors * (V1 - min(V1))/(1.001*(max(V1) - min(V1))))
}
if (col.labels[1] == "NA") {
heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
tot.cols <- ncolors
if (legend == T) {
nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(5, 1), heights = c(10, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(8, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap[1:length(col.classes)])
image(1:n.cols, 1:n.rows, t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
n.rows.phen <- 0
} else {
tot.cols <- ncolors
if (is.vector(col.labels)) {
heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
n.rows.phen <- 1
heatm[n.rows + 1,] <- tot.cols + col.labels
cols.row <- length(unique(col.labels))
tot.cols <- tot.cols + cols.row
phen.cmap <- phen.cmap[1:cols.row]
} else {
n.rows.phen <- length(col.labels[,1])
cols.row <- vector(length=n.rows.phen, mode = "numeric")
heatm <- matrix(0, nrow = n.rows + n.rows.phen, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
for (k in seq(n.rows + n.rows.phen, n.rows + 1, -1)) {
heatm[k,] <- tot.cols + col.labels[n.rows + n.rows.phen - k + 1,]
cols.row[n.rows + n.rows.phen - k + 1] <- length(unique(col.labels[n.rows + n.rows.phen - k + 1,]))
tot.cols <- tot.cols + cols.row[n.rows + n.rows.phen - k + 1]
# print(c("col:", k, ":", tot.cols + col.labels[n.rows + n.rows.phen - k + 1,], "tot.cols:", tot.cols))
}
phen.cmap <- phen.cmap[1:sum(unlist(lapply(col.classes, length)))]
}
if (legend == T) {
# nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(10, 2), heights = c(6, 1), respect = FALSE)
nf <- layout(matrix(c(1, 2, 3), 3, 1, byrow=T), heights = c(8, 4, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(5, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap)
image(1:n.cols, 1:(n.rows + n.rows.phen), t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
}
# Add lines to separate phenotypes or subgroups
if (col.labels2[1] != "NA") {
groups <- split(col.labels2, col.labels2)
len.vec <- lapply(groups, length)
plot.div <- c(0.51, cumsum(len.vec) + 0.5)
for (i in plot.div) {
lines(c(i, i), c(0, n.rows + n.rows.phen + 0.48), lwd = 2, cex = 0.9, col = "black")
}
lines(c(0.51, n.cols + 0.49), c(0.51, 0.51), lwd = 2, cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + n.rows.phen + 0.48, n.rows + n.rows.phen + 0.48), lwd = 2,
cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + 0.50, n.rows + 0.50), lwd = 2,
cex = 0.9, col = "black")
}
if (row.names[1] != "NA") {
# browser()
numC <- nchar(row.names)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names[i] <- substr(row.names[i], 1, 40)
row.names[i] <- paste(row.names[i], " ", sep="")
}
if (phen.names[1] == "NA") {
head.names <- paste("Class", seq(n.rows.phen, 1, -1))
} else {
head.names <- as.character(rev(phen.names))
}
row.names <- c(row.names[seq(n.rows, 1, -1)], head.names)
# print(paste("n.rows:", n.rows))
# print(paste("Phen names:", phen.names))
# print(paste("Head names:", head.names))
# print(paste("Row names:", row.names))
axis(2, at=1:(n.rows + n.rows.phen), labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char,
font.axis=2, line=-1)
}
if (row.names2[1] != "NA") {
# browser()
numC <- nchar(row.names2)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names2[i] <- substr(row.names2[i], 1, 40)
row.names2[i] <- paste(" ", row.names2[i], sep="")
}
for( i in 1:n.rows.phen ){
phen.names2[i] <- substr(phen.names2[i], 1, 40)
phen.names2[i] <- paste( " ", phen.names2[i], sep="")
}
row.names2 <- rev(row.names2)
phen.names2 <- rev(phen.names2)
axis(4, at=1:(n.rows + n.rows.phen), labels=c(row.names2, phen.names2), adj= 0.5, tick=FALSE, las = 1,
cex.axis=size.row.char, font.axis=2, line=-1)
}
if (col.names[1] != "NA") {
size.col.char <- char.rescale*20/(n.cols + 25)
axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
}
# Phenotype Legend
# print("--------------------------------------------------------------------------------------------")
if (legend == T) {
leg.txt <- NULL
p.vec <- NULL
c.vec <- NULL
c2.vec <- NULL
ind <- 1
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
for (i in 1:n.rows.phen) {
if (is.vector(col.labels)) {
phen.v <- as.character(col.classes)
} else {
phen.v <- as.character(col.classes[[i]])
}
p.name <- paste(as.character(rev(head.names)[i]), ": ", sep="")
leg.txt <- c(p.name, phen.v)
p.vec <- rep(22, cols.row[i] + 1)
c.vec <- c("#FFFFFF", phen.cmap[ind:(ind + cols.row[i] - 1)])
c2.vec <- c("#FFFFFF", rep("black", cols.row[i]))
ind <- ind + cols.row[i]
offset <- 0.07
legend(x=0, y= 1 - offset*i,
horiz = T, x.intersp = 0.5, legend=leg.txt, bty="n", xjust=0, yjust= 1, pch = p.vec,
pt.bg = c.vec, col = c2.vec, cex = 1.20, pt.cex=1.75)
}
}
# Color map legend
# print(c("range V=", range(V)))
# print(c("range V1=", range(V1)))
# print(c("range V2=", range(V2)))
# par(mar = c(2, 12, 2, 12))
# num.v <- 20
# range.v <- range(V2)
# incr <- (range.v[1] - range.v[2])/(num.v - 1)
# heatm.v <- matrix(rev(seq(range.v[2], range.v[1], incr)), nrow=num.v, ncol=1)
# image(1:num.v, 1:1, heatm.v, zlim = c(0, tot.cols), col=mycol, axes=FALSE,
# main=" ", sub = " ", xlab= ylab, ylab=xlab)
# range.v <- range(V1)
# incr <- (range.v[1] - range.v[2])/(num.v - 1)
# heatm.v2 <- matrix(signif(rev(seq(range.v[2], range.v[1], incr)), digits=2), nrow=num.v, ncol=1)
## print(c("heatm.v2=", heatm.v2))
# axis(3, at=1:num.v, labels=heatm.v2, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.5*char.rescale, font.axis=1)
return()
}
MSIG.HeatMapPlot.10<- function(
V,
pathway.mut,
row.names = "NA",
row.names2 = "NA",
col.labels = "NA",
col.labels2 = "NA",
col.classes = "NA",
phen.cmap = "NA",
col.names = "NA",
phen.names = "NA",
phen.names2 = "NA",
main = " ",
sub = " ",
xlab=" ",
ylab=" ",
row.norm = TRUE,
char.rescale = 0.85,
cmap.type = 1, # 1 = vintage pinkogram, 2 = scale of blues, 3 = high-resolution pinkogram for scores or probabilities [0, 1], 4 = high-resolution pinkogram for general values, 5 = color map for normalized enrichment scores, 6 = scale of red purples, 7 = scale of Oranges, 8 = scale of Greens, 9 = scale of Blues
max.v = "NA",
legend = T,
tissue.names = "NA",
tissue.labels = NA)
{
#
# Plots a heatmap "pinkogram" of a gene expression matrix including phenotype vector and gene, sample and phenotype labels
# Doesn't plot the spectrum on the bottom
#
# Plots PATHWAY.MUT as a continuous vector in a greyscale spectrum
#
# Arguments:
#   V             genes (rows) x samples (columns) expression matrix
#   pathway.mut   continuous per-sample vector; rescaled below and drawn as a greyscale strip
#   row.names / row.names2    left / right row labels; "NA" = omit
#   col.labels / col.labels2  per-sample class codes (vector, or matrix with one phenotype per row); "NA" = omit
#   col.classes   class names (vector, or list of vectors when col.labels is a matrix)
#   phen.cmap     colors assigned to the phenotype classes
#   col.names     sample labels for the bottom axis; "NA" = omit
#   phen.names / phen.names2  names for the phenotype rows
#   row.norm      if TRUE, rows of V are standardized ((x - mean) / (0.333 * sd), clipped to [-4, 4])
#   cmap.type     color map selector (see argument comment above)
#   max.v         symmetric color range for cmap.type 5; "NA" = derived from the data
#   legend        if TRUE, draw a phenotype legend panel below the heatmap
#   tissue.names / tissue.labels  accepted for interface compatibility; not used by this function
#
# Called for its plotting side effects only; returns NULL.
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
n.rows <- length(V[,1])
n.cols <- length(V[1,])
V1 <- matrix(0, nrow=n.rows, ncol=n.cols)
# NOTE(fix): removed two unused locals (set3 = brewer.pal(12, "Set3"),
# accent = brewer.pal(8, "Accent")). Their values were never read, and this
# function does not attach RColorBrewer, so the calls could fail at runtime.
# if ((cmap.type == 5) | (cmap.type == 3)) {
if (cmap.type == 5) {
row.norm <- F
}
# Row standardization: z-score scaled by 0.333, clipped to [-4, 4]; rows with
# zero variance are mapped to all-zero to avoid division by zero.
if (row.norm == TRUE) {
row.mean <- apply(V, MARGIN=1, FUN=mean)
row.sd <- apply(V, MARGIN=1, FUN=sd)
row.n <- length(V[,1])
for (i in 1:n.rows) {
if (row.sd[i] == 0) {
V1[i,] <- 0
} else {
V1[i,] <- (V[i,] - row.mean[i])/(0.333 * row.sd[i])
}
V1[i,] <- ifelse(V1[i,] < -4, -4, V1[i,])
V1[i,] <- ifelse(V1[i,] > 4, 4, V1[i,])
}
} else {
V1 <- V
}
# Select the expression color map according to cmap.type.
if (cmap.type == 1) {
mycol <- c("#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA",
"#FF9DB0", "#FF7080",
"#FF5A5A", "#FF4040", "#FF0D1D") # blue-pinkogram colors. This is the 1998-vintage,
# pre-gene cluster, original pinkogram color map
} else if (cmap.type == 2) {
violet.palette <- colorRampPalette(c("#400030", "white"), space = "rgb")
mycol <- rev(violet.palette(20))
# mycol <- c("#FCFBFD","#F4F2F8","#F8F7FB","#EFEDF5","#E1E1EF","#E8E7F2","#DADAEB","#C6C7E1","#D0D1E6",
# "#BCBDDC","#A8A6CF",
# "#B2B2D6","#9E9AC8","#8A87BF","#9491C4","#807DBA","#7260AB","#796FB3","#6A51A3","#5C3596",
# "#63439D","#54278F","#460D83","#4D1A89","#3F007D")
} else if (cmap.type == 6) {
mycol <- c("#FFF7F3", "#FDE0DD", "#FCC5C0", "#FA9FB5", "#F768A1", "#DD3497", "#AE017E",
"#7A0177", "#49006A")
} else if (cmap.type == 7) {
mycol <- c("#FFF5EB", "#FEE6CE", "#FDD0A2", "#FDAE6B", "#FD8D3C", "#F16913", "#D94801",
"#A63603", "#7F2704")
} else if (cmap.type == 8) {
mycol <- c("#F7FCF5", "#E5F5E0", "#C7E9C0", "#A1D99B", "#74C476", "#41AB5D", "#238B45",
"#006D2C", "#00441B")
} else if (cmap.type == 9) {
mycol <- c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5",
"#08519C", "#08306B")
} else if ((cmap.type == 3) | (cmap.type == 4) | (cmap.type == 5)) {
# high-resolution 512-step red-white-blue ramp built point by point
mycol <- vector(length=512, mode = "numeric")
for (k in 1:256) {
mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
}
for (k in 257:512) {
mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
}
mycol <- rev(mycol)
}
# browser()
ncolors <- length(mycol)
# Affine rescale + inversion of pathway.mut into (0, ~0.75] so that larger
# input values map to darker greys when passed through grey() below.
pathway.mut = (-(pathway.mut*.749 + 0.251 - 1))
# image(1:n.cols, 1, as.matrix(pathway.mut), col=gray(n.cols:0/n.cols))
# Map V1 onto color indices 1..ncolors (symmetric about 0 for cmap.type 5).
if (cmap.type == 5) {
if (max.v == "NA") {
max.v <- max(max(V1), -min(V1))
}
V2 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
} else {
V2 <- ceiling(ncolors * (V1 - min(V1))/(1.001*(max(V1) - min(V1))))
}
# Assemble the plotted matrix: expression rows (flipped top-to-bottom) plus
# phenotype row(s); phenotype cells use color indices above ncolors.
if (col.labels[1] == "NA") {
heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
tot.cols <- ncolors
if (legend == T) {
nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(5, 1), heights = c(10, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(8, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap[1:length(col.classes)])
image(1:n.cols, 1:n.rows, t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
n.rows.phen <- 0
} else {
tot.cols <- ncolors
if (is.vector(col.labels)) {
heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
n.rows.phen <- 1
heatm[n.rows + 1,] <- tot.cols + col.labels
cols.row <- length(unique(col.labels))
tot.cols <- tot.cols + cols.row
phen.cmap <- phen.cmap[1:cols.row]
# NOTE(review): the greyscale pathway strip below overwrites the class
# strip just assigned to the same row, and tot.cols is not advanced by
# the number of grey levels here — confirm this is the intended display
# for the vector col.labels path.
pathway.mut.grey = grey(pathway.mut)
u.pathway.mut.grey = unique(pathway.mut.grey)
heatm[n.rows + n.rows.phen,] = match(pathway.mut.grey, u.pathway.mut.grey) + tot.cols
} else {
n.rows.phen <- length(col.labels[,1])
cols.row <- vector(length=n.rows.phen, mode = "numeric")
heatm <- matrix(0, nrow = n.rows + n.rows.phen, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
# heatm[n.rows+n.rows.phen,] = t(as.matrix(gray(pathway.mut)))
# fill phenotype rows from the top down; row 1 of col.labels plots highest
for (k in seq(n.rows + n.rows.phen, n.rows + 1, -1)) {
heatm[k,] <- tot.cols + col.labels[n.rows + n.rows.phen - k + 1,]
cols.row[n.rows + n.rows.phen - k + 1] <- length(unique(col.labels[n.rows + n.rows.phen - k + 1,]))
tot.cols <- tot.cols + cols.row[n.rows + n.rows.phen - k + 1]
# print(c("col:", k, ":", tot.cols + col.labels[n.rows + n.rows.phen - k + 1,], "tot.cols:", tot.cols))
}
# browser()
# the top phenotype row is replaced by the greyscale pathway.mut strip
pathway.mut.grey = grey(pathway.mut)
u.pathway.mut.grey = unique(pathway.mut.grey)
heatm[n.rows + n.rows.phen,] = match(pathway.mut.grey, u.pathway.mut.grey) + tot.cols
tot.cols = tot.cols + length(u.pathway.mut.grey)
phen.cmap <- phen.cmap[1:sum(unlist(lapply(col.classes, length)))]
}
# image(as.matrix(grey(pathway.mut)))
if (legend == T) {
# nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(10, 2), heights = c(6, 1), respect = FALSE)
nf <- layout(matrix(c(1, 2, 3), 3, 1, byrow=T), heights = c(8, 4, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(5, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
# browser()
# append phenotype colors and grey levels so phenotype indices resolve
mycol <- c(mycol, phen.cmap, u.pathway.mut.grey)
# browser()
image(1:n.cols, 1:(n.rows + n.rows.phen), t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
}
# Add lines to separate phenotypes or subgroups
if (col.labels2[1] != "NA") {
groups <- split(col.labels2, col.labels2)
len.vec <- lapply(groups, length)
plot.div <- c(0.51, cumsum(len.vec) + 0.5)
for (i in plot.div) {
lines(c(i, i), c(0, n.rows + n.rows.phen + 0.48), lwd = 2, cex = 0.9, col = "black")
}
lines(c(0.51, n.cols + 0.49), c(0.51, 0.51), lwd = 2, cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + n.rows.phen + 0.48, n.rows + n.rows.phen + 0.48), lwd = 2,
cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + 0.50, n.rows + 0.50), lwd = 2,
cex = 0.9, col = "black")
}
# Left-hand row labels (gene names truncated to 40 chars + phenotype headers)
if (row.names[1] != "NA") {
# browser()
numC <- nchar(row.names)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names[i] <- substr(row.names[i], 1, 40)
row.names[i] <- paste(row.names[i], " ", sep="")
}
if (phen.names[1] == "NA") {
head.names <- paste("Class", seq(n.rows.phen, 1, -1))
} else {
head.names <- as.character(rev(phen.names))
}
row.names <- c(row.names[seq(n.rows, 1, -1)], head.names)
# print(paste("n.rows:", n.rows))
# print(paste("Phen names:", phen.names))
# print(paste("Head names:", head.names))
# print(paste("Row names:", row.names))
# browser()
axis(2, at=1:(n.rows + n.rows.phen), labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char,
font.axis=2, line=-1)
}
# Right-hand row labels
if (row.names2[1] != "NA") {
# browser()
numC <- nchar(row.names2)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names2[i] <- substr(row.names2[i], 1, 40)
row.names2[i] <- paste(" ", row.names2[i], sep="")
}
for( i in 1:n.rows.phen ){
phen.names2[i] <- substr(phen.names2[i], 1, 40)
phen.names2[i] <- paste( " ", phen.names2[i], sep="")
}
row.names2 <- rev(row.names2)
phen.names2 <- rev(phen.names2)
axis(4, at=1:(n.rows + n.rows.phen), labels=c(row.names2, phen.names2), adj= 0.5, tick=FALSE, las = 1,
cex.axis=size.row.char, font.axis=2, line=-1)
}
# Bottom sample labels
if (col.names[1] != "NA") {
size.col.char <- char.rescale*20/(n.cols + 25)
axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
}
# Phenotype Legend
# NOTE(review): head.names is only defined inside the row.names block above,
# so legend = T assumes row.names != "NA" — confirm with callers.
# print("--------------------------------------------------------------------------------------------")
if (legend == T) {
leg.txt <- NULL
p.vec <- NULL
c.vec <- NULL
c2.vec <- NULL
ind <- 1
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
for (i in 1:n.rows.phen) {
# browser()
if (is.vector(col.labels)) {
phen.v <- as.character(col.classes)
} else {
phen.v <- as.character(col.classes[[i]])
}
p.name <- paste(as.character(rev(head.names)[i]), ": ", sep="")
leg.txt <- c(p.name, phen.v)
p.vec <- rep(22, cols.row[i] + 1)
c.vec <- c("#FFFFFF", phen.cmap[ind:(ind + cols.row[i] - 1)])
c2.vec <- c("#FFFFFF", rep("black", cols.row[i]))
ind <- ind + cols.row[i]
offset <- 0.07
legend(x=0, y= 1 - offset*i,
horiz = T, x.intersp = 0.5, legend=leg.txt, bty="n", xjust=0, yjust= 1, pch = p.vec,
pt.bg = c.vec, col = c2.vec, cex = 1.20, pt.cex=1.75)
}
}
# Color map legend: a strip of the color indices (V2 range) annotated with the
# corresponding data values (V1 range)
# print(c("range V=", range(V)))
# print(c("range V1=", range(V1)))
# print(c("range V2=", range(V2)))
par(mar = c(2, 12, 2, 12))
num.v <- 20
range.v <- range(V2)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v <- matrix(rev(seq(range.v[2], range.v[1], incr)), nrow=num.v, ncol=1)
image(1:num.v, 1:1, heatm.v, zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=" ", sub = " ", xlab= ylab, ylab=xlab)
range.v <- range(V1)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v2 <- matrix(signif(rev(seq(range.v[2], range.v[1], incr)), digits=2), nrow=num.v, ncol=1)
# print(c("heatm.v2=", heatm.v2))
axis(3, at=1:num.v, labels=heatm.v2, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.5*char.rescale, font.axis=1)
return()
}
MSIG.HeatMapPlot.10.multiple.tissues <- function(
V,
pathway.mut,
row.names = "NA",
row.names2 = "NA",
col.labels = "NA",
col.labels2 = "NA",
col.classes = "NA",
phen.cmap = "NA",
col.names = "NA",
phen.names = "NA",
phen.names2 = "NA",
main = " ",
sub = " ",
xlab=" ",
ylab=" ",
row.norm = TRUE,
char.rescale = 0.85,
cmap.type = 1, # 1 = vintage pinkogram, 2 = scale of blues, 3 = high-resolution pinkogram for scores or probabilities [0, 1], 4 = high-resolution pinkogram for general values, 5 = color map for normalized enrichment scores, 6 = scale of red purples, 7 = scale of Oranges, 8 = scale of Greens, 9 = scale of Blues
max.v = "NA",
legend = T,
tissue.names = "NA",
tissue.labels = NA)
{
#
# Plots a heatmap "pinkogram" of a gene expression matrix including phenotype vector and gene, sample and phenotype labels
# Doesn't plot the spectrum on the bottom
#
# Plots PATHWAY.MUT as a continuous vector in a greyscale spectrum
#
# Variant of MSIG.HeatMapPlot.10 that additionally draws a tissue-type strip
# (tissue.labels, colored per tissue.names) and a tissue legend when more than
# one tissue name is supplied.
#
# Called for its plotting side effects only; returns NULL.
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
library(RColorBrewer)
n.tissues = length(tissue.names)
n.rows <- length(V[,1])
n.cols <- length(V[1,])
V1 <- matrix(0, nrow=n.rows, ncol=n.cols)
# if ((cmap.type == 5) | (cmap.type == 3)) {
if (cmap.type == 5) {
row.norm <- F
}
# Row standardization: z-score scaled by 0.333, clipped to [-4, 4].
if (row.norm == TRUE) {
row.mean <- apply(V, MARGIN=1, FUN=mean)
row.sd <- apply(V, MARGIN=1, FUN=sd)
row.n <- length(V[,1])
for (i in 1:n.rows) {
if (row.sd[i] == 0) {
V1[i,] <- 0
} else {
V1[i,] <- (V[i,] - row.mean[i])/(0.333 * row.sd[i])
}
V1[i,] <- ifelse(V1[i,] < -4, -4, V1[i,])
V1[i,] <- ifelse(V1[i,] > 4, 4, V1[i,])
}
} else {
V1 <- V
}
# Select the expression color map according to cmap.type.
if (cmap.type == 1) {
mycol <- c("#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA",
"#FF9DB0", "#FF7080",
"#FF5A5A", "#FF4040", "#FF0D1D") # blue-pinkogram colors. This is the 1998-vintage,
# pre-gene cluster, original pinkogram color map
} else if (cmap.type == 2) {
violet.palette <- colorRampPalette(c("#400030", "white"), space = "rgb")
mycol <- rev(violet.palette(20))
# mycol <- c("#FCFBFD","#F4F2F8","#F8F7FB","#EFEDF5","#E1E1EF","#E8E7F2","#DADAEB","#C6C7E1","#D0D1E6",
# "#BCBDDC","#A8A6CF",
# "#B2B2D6","#9E9AC8","#8A87BF","#9491C4","#807DBA","#7260AB","#796FB3","#6A51A3","#5C3596",
# "#63439D","#54278F","#460D83","#4D1A89","#3F007D")
} else if (cmap.type == 6) {
mycol <- c("#FFF7F3", "#FDE0DD", "#FCC5C0", "#FA9FB5", "#F768A1", "#DD3497", "#AE017E",
"#7A0177", "#49006A")
} else if (cmap.type == 7) {
mycol <- c("#FFF5EB", "#FEE6CE", "#FDD0A2", "#FDAE6B", "#FD8D3C", "#F16913", "#D94801",
"#A63603", "#7F2704")
} else if (cmap.type == 8) {
mycol <- c("#F7FCF5", "#E5F5E0", "#C7E9C0", "#A1D99B", "#74C476", "#41AB5D", "#238B45",
"#006D2C", "#00441B")
} else if (cmap.type == 9) {
mycol <- c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5",
"#08519C", "#08306B")
} else if ((cmap.type == 3) | (cmap.type == 4) | (cmap.type == 5)) {
# high-resolution 512-step red-white-blue ramp
mycol <- vector(length=512, mode = "numeric")
for (k in 1:256) {
mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
}
for (k in 257:512) {
mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
}
mycol <- rev(mycol)
}
# browser()
ncolors <- length(mycol)
# Affine rescale + inversion of pathway.mut so larger values plot as darker grey.
pathway.mut = (-(pathway.mut*.749 + 0.251 - 1))
# image(1:n.cols, 1, as.matrix(pathway.mut), col=gray(n.cols:0/n.cols))
# Map V1 onto color indices 1..ncolors (symmetric about 0 for cmap.type 5).
if (cmap.type == 5) {
if (max.v == "NA") {
max.v <- max(max(V1), -min(V1))
}
V2 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
} else {
V2 <- ceiling(ncolors * (V1 - min(V1))/(1.001*(max(V1) - min(V1))))
}
# Assemble the plotted matrix: expression rows (flipped) + phenotype row(s)
# + greyscale pathway strip (+ tissue strip when multiple tissues).
if (col.labels[1] == "NA") {
heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
tot.cols <- ncolors
if (legend == T) {
nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(5, 1), heights = c(10, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(8, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap[1:length(col.classes)])
image(1:n.cols, 1:n.rows, t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
n.rows.phen <- 0
} else {
tot.cols <- ncolors
if (is.vector(col.labels)) {
# NOTE(review): this branch allocates n.rows + 2 rows while n.rows.phen is 1,
# which does not match the image() dimensions below — it appears untested for
# the vector col.labels case; confirm before relying on it.
heatm <- matrix(0, nrow = n.rows + 2, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
n.rows.phen <- 1
heatm[n.rows + 1,] <- tot.cols + col.labels
cols.row <- length(unique(col.labels))
tot.cols <- tot.cols + cols.row
phen.cmap <- phen.cmap[1:cols.row]
pathway.mut.grey = grey(pathway.mut)
u.pathway.mut.grey = unique(pathway.mut.grey)
heatm[n.rows + n.rows.phen,] = match(pathway.mut.grey, u.pathway.mut.grey) + tot.cols
} else {
n.rows.phen <- length(col.labels[,1])
cols.row <- vector(length=n.rows.phen, mode = "numeric")
heatm <- matrix(0, nrow = n.rows + n.rows.phen, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
# heatm[n.rows+n.rows.phen,] = t(as.matrix(gray(pathway.mut)))
# fill phenotype rows from the top down; row 1 of col.labels plots highest
for (k in seq(n.rows + n.rows.phen, n.rows + 1, -1)) {
heatm[k,] <- tot.cols + col.labels[n.rows + n.rows.phen - k + 1,]
cols.row[n.rows + n.rows.phen - k + 1] <- length(unique(col.labels[n.rows + n.rows.phen - k + 1,]))
tot.cols <- tot.cols + cols.row[n.rows + n.rows.phen - k + 1]
# print(c("col:", k, ":", tot.cols + col.labels[n.rows + n.rows.phen - k + 1,], "tot.cols:", tot.cols))
}
# browser()
# the top phenotype row is replaced by the greyscale pathway.mut strip
pathway.mut.grey = grey(pathway.mut)
u.pathway.mut.grey = unique(pathway.mut.grey)
heatm[n.rows + n.rows.phen,] = match(pathway.mut.grey, u.pathway.mut.grey) + tot.cols
tot.cols = tot.cols + length(u.pathway.mut.grey)
phen.cmap <- phen.cmap[1:sum(unlist(lapply(col.classes, length)))]
}
# image(as.matrix(grey(pathway.mut)))
if (legend == T) {
# nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(10, 2), heights = c(6, 1), respect = FALSE)
nf <- layout(matrix(c(1, 2, 3), 3, 1, byrow=T), heights = c(8, 4, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(5, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
#browser()
mycol <- c(mycol, phen.cmap, u.pathway.mut.grey)
# When several tissues are plotted, append a tissue-type strip as an extra
# phenotype row, colored from the Set3/Paired Brewer palettes.
if( length(tissue.names) > 1 ){
#browser()
tissue.colors = c(brewer.pal(12, "Set3"), brewer.pal(12,"Paired"))[1:length(tissue.names)]
#row.names = c(row.names, "Tissue Types")
mycol <- c(mycol, tissue.colors)
n.rows.phen = n.rows.phen + 1
heatm = rbind(heatm, (tissue.labels + tot.cols))
tot.cols = tot.cols + length(tissue.colors)
}
#browser()
image(1:n.cols, 1:(n.rows + n.rows.phen), t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
}
# Add lines to separate phenotypes or subgroups
if (col.labels2[1] != "NA") {
groups <- split(col.labels2, col.labels2)
len.vec <- lapply(groups, length)
plot.div <- c(0.51, cumsum(len.vec) + 0.5)
for (i in plot.div) {
lines(c(i, i), c(0, n.rows + n.rows.phen + 0.48), lwd = 2, cex = 0.9, col = "black")
}
lines(c(0.51, n.cols + 0.49), c(0.51, 0.51), lwd = 2, cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + n.rows.phen + 0.48, n.rows + n.rows.phen + 0.48), lwd = 2,
cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + 0.50, n.rows + 0.50), lwd = 2,
cex = 0.9, col = "black")
}
# Left-hand row labels (gene names truncated to 40 chars + phenotype headers)
if (row.names[1] != "NA") {
# browser()
numC <- nchar(row.names)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names[i] <- substr(row.names[i], 1, 40)
row.names[i] <- paste(row.names[i], " ", sep="")
}
if (phen.names[1] == "NA") {
head.names <- paste("Class", seq(n.rows.phen, 1, -1))
} else {
head.names <- as.character(rev(phen.names))
}
row.names <- c(row.names[seq(n.rows, 1, -1)], head.names)
if( length(tissue.names) > 1){ row.names = c(row.names, "Tissue Types")}
# print(paste("n.rows:", n.rows))
# print(paste("Phen names:", phen.names))
# print(paste("Head names:", head.names))
# print(paste("Row names:", row.names))
# browser()
axis(2, at=1:(n.rows + n.rows.phen), labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char,
font.axis=2, line=-1)
}
# Right-hand row labels
if (row.names2[1] != "NA") {
# browser()
numC <- nchar(row.names2)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names2[i] <- substr(row.names2[i], 1, 40)
row.names2[i] <- paste(" ", row.names2[i], sep="")
}
for( i in 1:n.rows.phen ){
phen.names2[i] <- substr(phen.names2[i], 1, 40)
phen.names2[i] <- paste( " ", phen.names2[i], sep="")
}
row.names2 <- rev(row.names2)
phen.names2 <- rev(phen.names2)
axis(4, at=1:(n.rows + n.rows.phen), labels=c(row.names2, phen.names2), adj= 0.5, tick=FALSE, las = 1,
cex.axis=size.row.char, font.axis=2, line=-1)
}
# Bottom sample labels
if (col.names[1] != "NA") {
size.col.char <- char.rescale*20/(n.cols + 25)
axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
}
# Phenotype Legend
# NOTE(review): head.names is only defined inside the row.names block above,
# so legend = T assumes row.names != "NA" — confirm with callers.
# print("--------------------------------------------------------------------------------------------")
if (legend == T) {
leg.txt <- NULL
p.vec <- NULL
c.vec <- NULL
c2.vec <- NULL
ind <- 1
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
# NOTE(fix): when multiple tissues were added, the last "phenotype" row is the
# tissue strip (legended separately below), so it is skipped here. Previously
# the loop bound was unconditionally n.rows.phen - 1, which dropped a genuine
# phenotype row (and iterated i = 0) in the single-tissue case.
n.phen.legend <- if (length(tissue.names) > 1) n.rows.phen - 1 else n.rows.phen
for (i in 1:n.phen.legend) {
# browser()
if (is.vector(col.labels)) {
phen.v <- as.character(col.classes)
} else {
phen.v <- as.character(col.classes[[i]])
}
p.name <- paste(as.character(rev(head.names)[i]), ": ", sep="")
leg.txt <- c(p.name, phen.v)
p.vec <- rep(22, cols.row[i] + 1)
c.vec <- c("#FFFFFF", phen.cmap[ind:(ind + cols.row[i] - 1)])
c2.vec <- c("#FFFFFF", rep("black", cols.row[i]))
ind <- ind + cols.row[i]
offset <- 0.07
legend(x=0, y= 1 - offset*i,
horiz = T, x.intersp = 0.5, legend=leg.txt, bty="n", xjust=0, yjust= 1, pch = p.vec,
pt.bg = c.vec, col = c2.vec, cex = 1.20, pt.cex=1.75)
}
# NOTE(fix): tissue.colors only exists when more than one tissue was supplied;
# this guard prevents an "object 'tissue.colors' not found" error when
# legend = T and tissue.names = "NA" (the default).
if (length(tissue.names) > 1) {
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
legend(x=0, y= 10, horiz = T, x.intersp = 0.5, legend=tissue.names, bty="n", xjust=0, yjust= 1,
fill = tissue.colors, cex = 1.20, pt.cex=1.75, ncol=1)
}
}
#browser()
## Tissue Legend
if(length(tissue.names)>1){
#browser()
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
legend(x=0, y= 1, #horiz = T, x.intersp = 0.5, y.intersp=.25,
legend=tissue.names, bty="n", xjust=0, yjust= 1,
fill = tissue.colors, #cex = 1.20, pt.cex=1.75,
ncol=4)
}
# Color map legend: a strip of color indices (V2 range) annotated with the
# corresponding data values (V1 range)
# print(c("range V=", range(V)))
# print(c("range V1=", range(V1)))
# print(c("range V2=", range(V2)))
if(legend==TRUE){
par(mar = c(2, 12, 2, 12))
num.v <- 20
range.v <- range(V2)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v <- matrix(rev(seq(range.v[2], range.v[1], incr)), nrow=num.v, ncol=1)
image(1:num.v, 1:1, heatm.v, zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=" ", sub = " ", xlab= ylab, ylab=xlab)
range.v <- range(V1)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v2 <- matrix(signif(rev(seq(range.v[2], range.v[1], incr)), digits=2), nrow=num.v, ncol=1)
# print(c("heatm.v2=", heatm.v2))
axis(3, at=1:num.v, labels=heatm.v2, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.5*char.rescale, font.axis=1)
}
return()
}
MSIG.Gct2Frame <- function(filename = "NULL") {
  #
  # Reads a gene expression dataset in GCT format and converts it into an R data frame
  #
  # The Broad Institute
  # SOFTWARE COPYRIGHT NOTICE AGREEMENT
  # This software and its documentation are copyright 2003 by the
  # Broad Institute/Massachusetts Institute of Technology.
  # All rights are reserved.
  #
  # This software is supplied without any warranty or guaranteed support
  # whatsoever. Neither the Broad Institute nor MIT can be responsible for
  # its use, misuse, or functionality.
  #
  # The first two GCT lines (version tag and dimensions) are skipped; the first
  # data column becomes row names and the "Description" column is split out.
  # Returns a list: ds (genes x samples data frame), row.names (gene ids),
  # descs (gene descriptions), names (sample names).
  gct <- read.delim(filename, header = TRUE, sep = "\t", skip = 2,
                    row.names = 1, blank.lines.skip = TRUE,
                    comment.char = "", as.is = TRUE, na.strings = "")
  gene.descs <- gct[, 1]   # "Description" column
  expr <- gct[-1]          # drop it; keep only the sample columns
  return(list(ds = expr,
              row.names = row.names(expr),
              descs = gene.descs,
              names = names(expr)))
}
Read.GeneSets.db <- function(
gs.db,
thres.min = 2,
thres.max = 2000,
gene.names = NULL)
{
  # Reads a gene set database in GMT-like format: one set per line, tab
  # separated as <name> <description> <gene> <gene> ...
  # Sets are optionally restricted to `gene.names`; sets whose (restricted)
  # size falls outside [thres.min, thres.max] are dropped.
  # Returns a list: N.gs (number of kept sets), gs (max.N.gs x max-size matrix
  # of gene names, padded with "null"), gs.names, gs.desc, size.G (per kept
  # set), max.N.gs (number of lines read).
  raw.lines <- readLines(gs.db)
  max.Ng <- length(raw.lines)
  tokens <- lapply(raw.lines, function(ln) unlist(strsplit(ln, "\t")))
  max.size.G <- max(vapply(tokens, length, integer(1)) - 2)
  gs <- matrix("null", nrow = max.Ng, ncol = max.size.G)
  kept.names <- character(max.Ng)
  kept.desc <- character(max.Ng)
  kept.size <- numeric(max.Ng)
  n.kept <- 0
  for (line.tokens in tokens) {
    genes <- line.tokens[-(1:2)]
    # keep every gene when no filter list was given
    if (is.null(gene.names)) {
      keep <- rep(TRUE, length(genes))
    } else {
      keep <- is.element(genes, gene.names)
    }
    n.genes <- sum(keep)
    if (n.genes < thres.min || n.genes > thres.max) next
    n.kept <- n.kept + 1
    gs[n.kept, ] <- c(genes[keep], rep("null", max.size.G - n.genes))
    kept.names[n.kept] <- line.tokens[1]
    kept.desc[n.kept] <- line.tokens[2]
    kept.size[n.kept] <- n.genes
  }
  return(list(N.gs = n.kept,
              gs = gs,
              gs.names = kept.names[1:n.kept],
              gs.desc = kept.desc[1:n.kept],
              size.G = kept.size[1:n.kept],
              max.N.gs = max.Ng))
}
write.cls.2 <- function (class.v, phen, filename)
{
  # Writes class assignments to a CLS file.
  #   class.v:  vector (single phenotype) or matrix (one phenotype per row) of
  #             class indices into `phen`
  #   phen:     class names — a vector, or a list of vectors matching the rows
  #             of a matrix class.v
  #   filename: output path
  # Each cat() with sep = " " emits a trailing space before the newline, which
  # is preserved here for byte-compatibility with existing readers.
  con <- file(filename, "w")
  n.phen <- length(phen)
  n.labels <- length(class.v)
  # header: <#labels> <#phenotype groups> 1
  cat(n.labels, n.phen, "1", "\n", file = con, append = TRUE, sep = " ")
  cat("#", unlist(phen), "\n", file = con, append = TRUE, sep = " ")
  if (is.vector(class.v)) {
    # single phenotype line: map indices to class names
    cat(phen[class.v], "\n", file = con, append = TRUE, sep = " ")
  } else {
    # one line per phenotype row, each mapped through its own name set
    for (row.idx in 1:length(class.v[, 1])) {
      row.labels <- unlist(phen[[row.idx]])[class.v[row.idx, ]]
      cat(row.labels, "\n", file = con, append = TRUE, sep = " ")
    }
  }
  close(con)
}
write.gct <- function(gct.data.frame, descs = "", filename)
{
  # Writes a data frame as a GCT file:
  #   line 1: "#1.2"; line 2: <#rows>\t<#cols>;
  #   line 3: Name\tDescription\t<sample names...>;
  #   then one row per gene: <row name>\t<desc>\t<values...>.
  # descs: optional per-row descriptions; when absent (length <= 1) the row
  # names are repeated in the Description column.
  f <- file(filename, "w")
  on.exit(close(f), add = TRUE)
  cat("#1.2", "\n", file = f, append = TRUE, sep = "")
  cat(dim(gct.data.frame)[1], "\t", dim(gct.data.frame)[2], "\n", file = f, append = TRUE, sep = "")
  cat("Name", "\t", "Description", file = f, append = TRUE, sep = "")
  for (sample.name in names(gct.data.frame)) {
    cat("\t", sample.name, file = f, append = TRUE, sep = "")
  }
  cat("\n", file = f, append = TRUE, sep = "")
  # Silence coercion warnings while assembling the character matrix, and
  # restore the caller's previous `warn` setting afterwards.
  # NOTE(fix): previously the function ended with options(warn = 0), which
  # clobbered whatever `warn` value the caller had set; the saved value is
  # now restored via on.exit.
  oldWarn <- options(warn = -1)
  on.exit(options(oldWarn), add = TRUE)
  m <- matrix(nrow = dim(gct.data.frame)[1], ncol = dim(gct.data.frame)[2] + 2)
  m[, 1] <- row.names(gct.data.frame)
  if (length(descs) > 1) {
    m[, 2] <- descs
  } else {
    m[, 2] <- row.names(gct.data.frame)
  }
  for (i in seq_len(dim(gct.data.frame)[2])) {
    m[, i + 2] <- gct.data.frame[, i]
  }
  write.table(m, file = f, append = TRUE, quote = FALSE, sep = "\t", eol = "\n", col.names = FALSE, row.names = FALSE)
}
MSIG.ReadPhenFile <- function(file = "NULL") {
  #
  # Reads a matrix of class vectors from a CLS file and defines phenotype and class labels vectors
  # (numeric and character) for the samples in a gene expression file (RES or GCT format)
  #
  # The Broad Institute
  # SOFTWARE COPYRIGHT NOTICE AGREEMENT
  # This software and its documentation are copyright 2003 by the
  # Broad Institute/Massachusetts Institute of Technology.
  # All rights are reserved.
  #
  # This software is supplied without any warranty or guaranteed support
  # whatsoever. Neither the Broad Institute nor MIT can be responsible for
  # its use, misuse, or functionality.
  #
  # Returns a list: phen.list (tokens after "#" on line 2), phen (unique class
  # names per class line; a plain vector when there is only one class line),
  # phen.names / col.phen (parsed from an extended header containing
  # "phen.names:" and "col.phen:" markers, else NULL), class.v (numeric class
  # codes), class.list (raw class tokens).
  cls.lines <- readLines(file)
  n.lines <- length(cls.lines)
  header.tokens <- unlist(strsplit(cls.lines[[1]], " "))
  if (length(header.tokens) == 3) {
    # plain CLS header: no phenotype names or colors present
    phen.names <- NULL
    col.phen <- NULL
  } else {
    # extended header: "... phen.names: <names...> col.phen: <colors...>"
    pos.names <- match("phen.names:", header.tokens)
    pos.colors <- match("col.phen:", header.tokens)
    phen.names <- header.tokens[(pos.names + 1):(pos.colors - 1)]
    col.phen <- header.tokens[(pos.colors + 1):length(header.tokens)]
  }
  second.tokens <- unlist(strsplit(cls.lines[[2]], " "))
  phen.list <- second.tokens[2:length(second.tokens)]
  for (row.idx in 1:(n.lines - 2)) {
    row.tokens <- unlist(strsplit(cls.lines[[row.idx + 2]], " "))
    if (row.idx == 1) {
      # allocate once the sample count is known from the first class line
      n.samples <- length(row.tokens)
      class.list <- matrix(0, nrow = n.lines - 2, ncol = n.samples)
      class.v <- matrix(0, nrow = n.lines - 2, ncol = n.samples)
      phen <- list(NULL)
    }
    class.list[row.idx, ] <- row.tokens
    row.classes <- unique(row.tokens)
    class.v[row.idx, ] <- match(row.tokens, row.classes)
    phen[[row.idx]] <- row.classes
  }
  if (n.lines == 3) {
    # a single class line collapses to plain vectors
    class.list <- as.vector(class.list)
    class.v <- as.vector(class.v)
    phen <- unlist(phen)
  }
  return(list(phen.list = phen.list, phen = phen, phen.names = phen.names, col.phen = col.phen,
              class.v = class.v, class.list = class.list))
}
MSIG.ReadPhenFile.2 <- function(file = "NULL") {
  #
  # Reads a matrix of class vectors from a CLS file and defines phenotype and class labels vectors
  # (numeric and character) for the samples in a gene expression file (RES or GCT format)
  #
  # The Broad Institute
  # SOFTWARE COPYRIGHT NOTICE AGREEMENT
  # This software and its documentation are copyright 2003 by the
  # Broad Institute/Massachusetts Institute of Technology.
  # All rights are reserved.
  #
  # This software is supplied without any warranty or guaranteed support
  # whatsoever. Neither the Broad Institute nor MIT can be responsible for
  # its use, misuse, or functionality.
  #
  # Variant of MSIG.ReadPhenFile that returns `phen` as a single flat vector
  # (class names from every class line concatenated in order) instead of a
  # per-line list. All other return components are identical.
  cls.lines <- readLines(file)
  n.lines <- length(cls.lines)
  header.tokens <- unlist(strsplit(cls.lines[[1]], " "))
  if (length(header.tokens) == 3) {
    # plain CLS header: no phenotype names or colors present
    phen.names <- NULL
    col.phen <- NULL
  } else {
    # extended header: "... phen.names: <names...> col.phen: <colors...>"
    pos.names <- match("phen.names:", header.tokens)
    pos.colors <- match("col.phen:", header.tokens)
    phen.names <- header.tokens[(pos.names + 1):(pos.colors - 1)]
    col.phen <- header.tokens[(pos.colors + 1):length(header.tokens)]
  }
  second.tokens <- unlist(strsplit(cls.lines[[2]], " "))
  phen.list <- second.tokens[2:length(second.tokens)]
  phen <- NULL
  for (row.idx in 1:(n.lines - 2)) {
    row.tokens <- unlist(strsplit(cls.lines[[row.idx + 2]], " "))
    if (row.idx == 1) {
      # allocate once the sample count is known from the first class line
      n.samples <- length(row.tokens)
      class.list <- matrix(0, nrow = n.lines - 2, ncol = n.samples)
      class.v <- matrix(0, nrow = n.lines - 2, ncol = n.samples)
    }
    class.list[row.idx, ] <- row.tokens
    row.classes <- unique(row.tokens)
    class.v[row.idx, ] <- match(row.tokens, row.classes)
    phen <- c(phen, row.classes)
  }
  if (n.lines == 3) {
    # a single class line collapses to plain vectors
    class.list <- as.vector(class.list)
    class.v <- as.vector(class.v)
  }
  return(list(phen.list = phen.list, phen = phen, phen.names = phen.names, col.phen = col.phen,
              class.v = class.v, class.list = class.list))
}
MSIG.Subset.Dataset.2 <- function(
input.ds,
input.cls = NULL,
column.subset = "ALL", # subset of column numbers or names (or phenotypes)
column.sel.type = "samples", # "samples" or "phenotype"
row.subset = "ALL", # subset of row numbers or names
output.ds,
output.cls = NULL) {
# Extracts a subset of columns (samples, or whole phenotype classes) and/or
# rows (genes / gene sets) from a GCT dataset, writing the result to a new
# GCT file and, when a CLS file is supplied, a matching subset CLS file.
#
# input.ds / input.cls: paths of the source GCT and (optional) CLS files.
# column.subset: "ALL", numeric column indices, sample names, or (with
#   column.sel.type = "phenotype") phenotype class names to keep.
# row.subset: "ALL", or row names to keep.
# output.ds / output.cls: paths of the files written at the end.
# Called for its file-writing side effects; no useful return value.
# start of methodology
print(c("Running MSIG.Subset.Dataset... on GCT file:", input.ds))
print(c("Running MSIG.Subset.Dataset... on CLS file:", input.cls))
# Read input datasets
dataset <- MSIG.Gct2Frame(filename = input.ds)
m <- data.matrix(dataset$ds)
gs.names <- dataset$row.names
gs.descs <- dataset$descs
sample.names <- dataset$names
# Read CLS file
if (!is.null(input.cls)) {
CLS <- MSIG.ReadPhenFile.2(file=input.cls)
class.labels <- CLS$class.v
class.phen <- CLS$phen
class.list <- CLS$class.list
}
# Select desired column subset
if (column.sel.type == "samples") {
if (column.subset[1] == "ALL") {
m2 <- m
sample.names2 <- sample.names
if (!is.null(input.cls)) {
class.labels2 <- class.labels
}
} else {
# Numeric subsets index columns directly; character subsets match by name.
if (is.numeric(column.subset[1])) {
m2 <- m[,column.subset]
sample.names2 <- sample.names[column.subset]
if (!is.null(input.cls)) {
# class.labels is a vector for one phenotype row, a matrix for several.
if (is.vector(class.labels)) {
class.labels2 <- class.labels[column.subset]
} else {
class.labels2 <- class.labels[, column.subset]
}
}
} else {
locations <- !is.na(match(sample.names, column.subset))
sample.names2 <- sample.names[locations]
m2 <- m[, locations]
if (!is.null(input.cls)) {
if (is.vector(class.labels)) {
class.labels2 <- class.labels[locations]
} else {
class.labels2 <- class.labels[, locations]
}
}
}
}
} else if (column.sel.type == "phenotype") {
# Keep every sample whose phenotype label occurs in column.subset.
# NOTE(review): class.list is only defined when input.cls was supplied, so
# this branch implicitly requires a CLS file -- confirm against callers.
locations <- !is.na(match(class.list, column.subset))
sample.names2 <- sample.names[locations]
m2 <- m[,locations]
if (!is.null(input.cls)) {
if (is.vector(class.labels)) {
class.labels2 <- class.labels[locations]
} else {
class.labels2 <- class.labels[, locations]
}
}
}
# Row (gene / gene-set) subset, matched by name.
if (row.subset[1] == "ALL") {
m3 <- m2
gs.names2 <- gs.names
gs.descs2 <- gs.descs
} else {
locations <- !is.na(match(gs.names, row.subset))
m3 <- m2[locations,]
gs.names2 <- gs.names[locations]
gs.descs2 <- gs.descs[locations]
}
# Save datasets
V <- data.frame(m3)
names(V) <- sample.names2
row.names(V) <- gs.names2
write.gct(gct.data.frame = V, descs = gs.descs2, filename = output.ds)
if (!is.null(input.cls)) {
write.cls.2(class.v = class.labels2, phen = class.phen, filename = output.cls)
}
}
OPAM.match.projection.to.pathway <- function(
  input.ds,
  input.cls = NA,
  results.dir,
  normalize.score = F,
  normalization.type = "zero.one",
  pathway,
  max.n = 10,
  user.colors = NA,
  decreasing.order = T,
  sort.columns = F,
  char.rescale = 1.25,
  cmap.type = 3,
  row.norm = T,
  output.dataset = NA)
{
  # Ranks the rows of a pathway-projection dataset by correlation with one
  # target pathway, renders the top max.n rows as a heat map (PDF in
  # results.dir), and optionally writes the reduced matrix as a GCT file.
  #
  # Args:
  #   input.ds: GCT file of projection scores (rows = pathways).
  #   input.cls: optional CLS phenotype file; NA (or "NA") for none.
  #   results.dir: directory prefix for the output PDF.
  #   normalize.score / normalization.type: optional per-row rescaling --
  #     "zero.one" (min-max), "z.score" (mean/SD) or "r.z.score" (median/MAD).
  #   pathway: name of the target pathway (must match a GCT row name).
  #   max.n: number of top-correlated pathways to display.
  #   user.colors: optional phenotype color override.
  #   decreasing.order / sort.columns: sample-ordering controls.
  #   char.rescale, cmap.type, row.norm: heat-map rendering options.
  #   output.dataset: optional GCT path for the reduced matrix (NA = skip).
  #
  # Side effects: writes <results.dir><prefix>.SORT.PROJ.TO.<pathway>.pdf
  # and optionally output.dataset.
  library(gtools)
  library(verification)
  library(ROCR)
  library(MASS)
  library(RColorBrewer)
  library(heatmap.plus)
  dataset <- MSIG.Gct2Frame(filename = input.ds) # Read projection dataset (GCT format)
  m <- data.matrix(dataset$ds)
  pathway.names <- dataset$row.names
  pathway.descs <- dataset$descs
  sample.names <- dataset$names
  n.pathways <- length(m[,1])
  # Derive the output file prefix from the input file name.
  temp <- strsplit(input.ds, split="/")
  s <- length(temp[[1]])
  test.file.name <- temp[[1]][s]
  temp <- strsplit(test.file.name, split=".gct")
  test.file.prefix <- temp[[1]][1]
  # Optional per-row score normalization.
  if (normalize.score == TRUE) {
    if (normalization.type == "zero.one") {
      for (i in 1:n.pathways) {
        m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
      }
    } else if (normalization.type == "z.score") {
      for (i in 1:n.pathways) {
        m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
      }
    } else if (normalization.type == "r.z.score") {
      # Robust z-score: median/MAD instead of mean/SD.
      for (i in 1:n.pathways) {
        m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
      }
    }
  }
  loc <- match(pathway, pathway.names)
  print(c("loc:", loc))
  if (sort.columns == TRUE) {
    # Order samples by the target pathway's score.
    s.order <- order(m[loc,], decreasing = decreasing.order)
    m2 <- m[, s.order]
    sample.names2 <- sample.names[s.order]
  } else {
    m2 <- m
    sample.names2 <- sample.names
  }
  # Rank all pathways by correlation with the target and keep the top max.n.
  correl <- cor(t(m2))[, loc]
  m.order <- order(correl, decreasing=TRUE)
  correl2 <- correl[m.order]
  m2 <- m2[m.order[1:max.n],]
  pathway.names2 <- pathway.names[m.order]
  pathway.descs2 <- signif(correl2, digits=3)
  # BUG FIX: the original tested `input.cls == "NA"`, which evaluates to NA
  # (and errors inside `if`) when input.cls is left at its default of NA.
  if (is.na(input.cls[1]) || input.cls[1] == "NA") {
    # No phenotype file: dummy two-class split, blank legend, white track.
    cls.labels2 <- c(rep(0, 10), rep(1, length(sample.names2) - 10))
    cls.phen2 <- c(" ")
    colors.list <- c("white")
    phen.names2 <- " "
  } else {
    CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
    cls.labels <- CLS$class.v
    cls.phen <- CLS$phen
    cls.list <- CLS$class.list
    if (!is.null(CLS$phen.names)) {
      phen.names <- CLS$phen.names
    } else {
      phen.names <- " "
    }
    # Reorder phenotype labels to follow the (possibly sorted) samples.
    if (is.vector(cls.labels)) {
      if (sort.columns == TRUE) {
        cls.labels2 <- cls.labels[s.order]
        cls.list2 <- cls.list[s.order]
      } else {
        cls.labels2 <- cls.labels
        cls.list2 <- cls.list
      }
    } else {
      if (sort.columns == TRUE) {
        cls.labels2 <- cls.labels[, s.order]
        cls.list2 <- cls.list[, s.order]
      } else {
        cls.labels2 <- cls.labels
        cls.list2 <- cls.list
      }
    }
    # Recompute class indices so they are consistent with the new order.
    cls.phen2 <- list(NULL)
    if (is.vector(cls.labels2)) {
      classes <- unique(cls.list2)
      cls.phen2 <- classes
      cls.labels2 <- match(cls.list2, cls.phen2)
    } else {
      for (kk in 1:length(cls.list2[, 1])) {
        classes <- unique(cls.list2[kk,])
        cls.phen2[[kk]] <- classes
        cls.labels2[kk,] <- match(cls.list2[kk,], cls.phen2[[kk]])
      }
    }
    phen.names2 <- phen.names
    # Phenotype palette precedence: user override > CLS-embedded > default.
    if (!is.na(user.colors[1])) {
      c.test <- user.colors
    } else {
      if (!is.null(CLS$col.phen)) {
        c.test <- CLS$col.phen
      } else {
        c.test <- c(brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Set1"),
                    brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
                    brewer.pal(n=8, name="BuGn"),
                    brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                    brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                    brewer.pal(n=8, name="BuGn"),
                    brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                    brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                    brewer.pal(n=8, name="BuGn"))
      }
    }
    # BUG FIX: these two lines ran unconditionally in the original, but
    # `c.test` is only defined on this branch -- the no-CLS path crashed.
    cls.phen.index <- unlist(cls.phen2)
    colors.list <- c.test[1:length(cls.phen.index)]
  }
  filename <- paste(results.dir, test.file.prefix, ".SORT.PROJ.TO.", pathway, sep="")
  pdf(file=paste(filename, ".pdf", sep=""), height = 8.5, width = 10.5)
  # BUG FIX: pass phen.names2 (defined on both branches); the original passed
  # `phen.names`, which is undefined when no CLS file is given.
  MSIG.HeatMapPlot.7(V = m2, row.names = pathway.names2[1:max.n],
                     row.names2 = pathway.descs2[1:max.n], col.labels = cls.labels2,
                     col.classes = cls.phen2, phen.cmap = colors.list, phen.names = phen.names2,
                     col.names = sample.names2, main = " ", xlab=" ", ylab=" ", row.norm = row.norm,
                     cmap.type = cmap.type, char.rescale = char.rescale, legend=TRUE)
  dev.off()
  if (!is.na(output.dataset)) {
    # BUG FIX: m2 holds only the top max.n rows, so the names/descs must be
    # subset to match before writing the reduced GCT.
    V.GCT <- m2
    colnames(V.GCT) <- sample.names2
    row.names(V.GCT) <- pathway.names2[1:max.n]
    write.gct(gct.data.frame = V.GCT, descs = pathway.descs2[1:max.n], filename = output.dataset)
  }
}
MSIG.Define.Dataset.from.Table2 <- function(
input.gct,
table.txt,
output.gct,
output.txt = NULL, # optional version of table with overlap (GCT & TAB) samples
output.cls,
prefix_entries = F)
{
# Builds a multi-phenotype CLS file (and a matching GCT subset) from a
# tab-delimited sample-annotation table: samples present in both the GCT
# and the table are kept, and each table column becomes one phenotype row
# of the extended CLS output (header carries phen.names: and col.phen:).
#
# input.gct: expression dataset (GCT) whose samples are matched.
# table.txt: tab-delimited file, rows = samples, columns = phenotypes.
# output.gct: path for the overlap-subset GCT (written when non-NULL).
# output.txt: optional path for the overlap-subset annotation table.
# output.cls: path for the extended CLS file.
# prefix_entries: when TRUE, prefixes each class label with its column name.
# Called for its file-writing side effects.
# Read input dataset
library(RColorBrewer)
dataset1 <- MSIG.Gct2Frame(filename = input.gct)
m <- data.matrix(dataset1$ds)
gene.names <- dataset1$row.names
gene.decs <- dataset1$descs
sample.names.gct <- dataset1$names
Ns <- length(sample.names.gct)
# Read Table
tab <- read.delim(table.txt, header=T, row.names = 1, sep="\t", skip=0, blank.lines.skip=T, comment.char="", as.is=T)
sample.names.tab <- row.names(tab)
phen.names <- names(tab)
# Samples present in both the annotation table and the GCT dataset.
overlap <- intersect(sample.names.tab, sample.names.gct)
print("sample names GCT")
print(sample.names.gct)
print("sample names TAB")
print(sample.names.tab)
locs.gct <- match(overlap, sample.names.gct)
print(match(sample.names.tab, sample.names.gct))
print(match(sample.names.gct, sample.names.tab))
locs.tab <- match(overlap, sample.names.tab)
print(locs.tab)
print(c("GCT matching set (", length(locs.gct), " samples):", sample.names.gct[locs.gct]))
print(c("TAB matching set (", length(overlap), " samples):", sample.names.tab[locs.tab]))
print(c("overlap set (", length(overlap), " samples):", overlap))
# Restrict the matrix and both name vectors to the overlapping samples.
m2 <- m[, locs.gct]
sample.names.gct <- sample.names.gct[locs.gct]
sample.names.tab <- sample.names.tab[locs.tab]
if (!is.null(output.txt)) {
# Write the annotation table restricted to the overlapping samples.
tab2 <- tab[locs.tab,]
# NOTE(review): sample.names.tab was already subset above, so indexing it
# by locs.tab again looks wrong; sample.names.tab2 is never used, though.
sample.names.tab2 <- sample.names.tab[locs.tab]
col.names <- paste(colnames(tab2), collapse = "\t")
col.names <- paste("SAMPLE", col.names, sep= "\t")
write(noquote(col.names), file = output.txt, append = F, ncolumns = length(col.names))
write.table(tab2, file=output.txt, quote=F, col.names = F, row.names = T, append = T, sep="\t")
}
# Phenotype table: one row per annotation column, one column per sample.
cls.table <- t(tab[locs.tab,])
if (prefix_entries == TRUE) {
for (i in 1:length(cls.table[,1])) {
# cls.table[i,] <- paste(row.names(cls.table)[i], cls.table[i,], sep=".")
# NOTE(review): tab[,i] is the full (un-subset) column -- this assumes the
# table rows already coincide with the overlap; confirm against callers.
cls.table[i,] <- paste(colnames(tab)[i], tab[,i], sep=".")
}
}
if (!is.null(output.gct)) {
V <- data.frame(m2)
names(V) <- sample.names.gct
row.names(V) <- gene.names
write.gct(gct.data.frame = V, descs = gene.decs, filename = output.gct)
}
class.phen <- unique(cls.table)
n <- length(class.phen)
l <- length(cls.table[1,])
# Large repeated palette so there are enough colors for many classes.
col.list <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"))
# Count distinct classes per phenotype row and record their order.
num <- 0
class.order.list <- NULL
for (i in 1:length(cls.table[,1])) {
num <- num + length(unique(cls.table[i,]))
class.order.list <- c(class.order.list, unique(cls.table[i,]))
}
# Extended CLS header: "<samples> <classes> <rows> phen.names: ... col.phen: ..."
phen.names.string <- paste("phen.names:", paste(phen.names, collapse=" "), sep=" ")
sig.col <- col.list[1:num]
col.phen.string <- paste("col.phen:", paste(sig.col, collapse=" "), sep=" ")
cat(paste(l, num, length(cls.table[, 1]), phen.names.string, col.phen.string, sep=" "), "\n",
file = output.cls, append = FALSE, sep = "")
cat("# ", paste(class.order.list, collapse=" "), "\n", file = output.cls, append = TRUE, sep = "")
for (i in 1:length(cls.table[,1])) {
cat(paste(cls.table[i,], collapse=" "), "\n", file = output.cls, append = TRUE, sep = "")
}
}
MSIG.Define.Dataset.from.Table.2 <- function(
  input.gct,
  table.txt,
  output.gct,
  output.cls,
  prefix_entries = F)
{
  # Builds a multi-phenotype CLS file (and a matching GCT subset) from a
  # tab-delimited sample-annotation table. Samples present in both the GCT
  # and the table are kept; each table column becomes one phenotype row of
  # the extended CLS output (header carries phen.names: and col.phen:).
  # Quiet variant of MSIG.Define.Dataset.from.Table2 (no output.txt option,
  # vectorized class bookkeeping).
  #
  # Args:
  #   input.gct: expression dataset (GCT) whose samples are matched.
  #   table.txt: tab-delimited file, rows = samples, columns = phenotypes.
  #   output.gct: path for the overlap-subset GCT (written when non-NULL).
  #   output.cls: path for the extended CLS file.
  #   prefix_entries: when TRUE, prefixes each class label with its column name.
  #
  # Returns NULL early when there is no sample overlap; otherwise called for
  # its file-writing side effects.
  library(RColorBrewer)
  dataset1 <- MSIG.Gct2Frame(filename = input.gct)
  m <- data.matrix(dataset1$ds)
  gene.names <- dataset1$row.names
  gene.decs <- dataset1$descs
  sample.names.gct <- dataset1$names
  Ns <- length(sample.names.gct)
  # Read the annotation table (rows = samples, columns = phenotypes).
  tab <- read.delim(table.txt, header=T, row.names = 1, sep="\t", skip=0, blank.lines.skip=T, comment.char="", as.is=T)
  sample.names.tab <- row.names(tab)
  phen.names <- names(tab)
  # Nothing to do when the table and the dataset share no samples.
  # (FIX: the original performed this identical check twice.)
  overlap <- intersect(sample.names.tab, sample.names.gct)
  if (length(overlap) == 0) { return(NULL) }
  locs.gct <- match(overlap, sample.names.gct)
  print(match(sample.names.tab, sample.names.gct))
  print(match(sample.names.gct, sample.names.tab))
  locs.tab <- match(overlap, sample.names.tab)
  # Restrict the matrix and both name vectors to the overlapping samples.
  m2 <- m[, locs.gct]
  sample.names.gct <- sample.names.gct[locs.gct]
  sample.names.tab <- sample.names.tab[locs.tab]
  # Phenotype table: one row per annotation column, one column per sample.
  cls.table <- t(tab[locs.tab,])
  if (prefix_entries == TRUE) {
    for (i in 1:length(cls.table[,1])) {
      # NOTE(review): tab[,i] is the full (un-subset) column -- this assumes
      # the table rows already coincide with the overlap; confirm callers.
      cls.table[i,] <- paste(colnames(tab)[i], tab[,i], sep=".")
    }
  }
  if (!is.null(output.gct)) {
    V <- data.frame(m2)
    names(V) <- sample.names.gct
    row.names(V) <- gene.names
    write.gct(gct.data.frame = V, descs = gene.decs, filename = output.gct)
  }
  class.phen <- unique(cls.table)
  n <- length(class.phen)
  l <- length(cls.table[1,])
  # Large repeated palette so there are enough colors for many classes.
  col.list <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                brewer.pal(n=8, name="BuGn"),
                brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                brewer.pal(n=8, name="BuGn"),
                brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                brewer.pal(n=8, name="BuGn"))
  # Distinct classes per phenotype row, preserving first-appearance order.
  class.order.list = apply(cls.table, 1, unique)
  num = sum(unlist(lapply(class.order.list, length)))
  class.order.list = unlist(class.order.list)
  # Extended CLS header: "<samples> <classes> <rows> phen.names: ... col.phen: ..."
  phen.names.string <- paste("phen.names:", paste(phen.names, collapse=" "), sep=" ")
  sig.col <- col.list[1:num]
  col.phen.string <- paste("col.phen:", paste(sig.col, collapse=" "), sep=" ")
  cat(paste(l, num, length(cls.table[, 1]), phen.names.string, col.phen.string, sep=" "), "\n",
      file = output.cls, append = FALSE, sep = "")
  cat("# ", paste(class.order.list, collapse=" "), "\n", file = output.cls, append = TRUE, sep = "")
  for (i in 1:length(cls.table[,1])) {
    cat(paste(cls.table[i,], collapse=" "), "\n", file = output.cls, append = TRUE, sep = "")
  }
}
rec.area <- function(
obs,
pred,
metric = "absolute.deviation", # Either "squared.error" or "absolute.deviation"
# null.distribution = "gaussian", # Either "gaussian" [null.model = mean(obs)] or "laplacian" [null.model = median(obs)]
interval = 0.01
){
# Area under the Regression Error Characteristic (REC) curve for a set of
# observations and predictions, plus a Cramer-von-Mises-style p-value.
#
# obs, pred: numeric vectors of equal length (stops otherwise).
# metric: per-sample error definition ("squared.error" or
#   "absolute.deviation", delegated to the helpers of the same names).
# interval: spacing of the error-tolerance windows; the curve is evaluated
#   on seq(0, 1, by = interval), so errors are assumed to lie in [0, 1].
#
# Returns list(A = trapezoidal area under the REC curve,
#              p.value = cvmts.pval result (verification package),
#              T2 = the CvM-type test statistic).
# browser()
error.windows = seq(0, 1, by=interval)
n.errors = length(error.windows)
intervals = rep( interval, n.errors )
n.obs = length(obs)
n.pred = length(pred)
if( n.obs != n.pred ){ stop( "The number of observations does not equal the number of predictions." ) }
# if( null.distribution == "gaussian" ){ null.model = mean(obs)
# } else if( null.distribution == "laplacian" ){ null.model = median(obs) }
# accuracy[k]: fraction of samples whose error is within error.windows[k].
if( metric == "squared.error" ){
difference = (obs-pred)^2
accuracy = unlist(lapply(error.windows, FUN=squared.error, difference, n.obs))
} else if( metric == "absolute.deviation" ){
difference = abs(obs-pred)
accuracy = unlist(lapply(error.windows, FUN=absolute.deviation, difference, n.obs))
}
# plot(accuracy, type="l"); par(new=TRUE); plot(error.windows, type="l")
# Trapezoidal integration of the accuracy curve: each panel is a rectangle
# (previous height) plus a triangle (height increment).
triangle.heights = accuracy - c(0, accuracy[1:(n.errors-1)])
triangles = triangle.heights*intervals/2
rectangle.heights = c(0, accuracy[1:(n.errors-1)])
rectangles = rectangle.heights*intervals
# A = (cumsum(accuracy)*intervals)[n.errors]
A = sum( rectangles + triangles)
# Calculate p-value using Kolmogorov-Smirnov Test
# Dn = max(accuracy-error.windows)
# i = 1:100
# x = sqrt( n.obs*n.pred/(n.obs+n.pred) )*Dn
# p.value = 1 - (sqrt(2*pi)/x)*sum( exp(-(2*i - 1)^2 * pi^2/ (8*x^2)) )
# browser()
# pred.scrambled = sample(pred)
# difference.scrambled = abs(pred.scrambled - obs)
# accuracy.scrambled = unlist(lapply(error.windows, FUN=squared.error, difference.scrambled, n.obs))
# triangle.heights = accuracy.scrambled - c(0, accuracy.scrambled[1:(n.errors-1)])
# triangles = triangle.heights*intervals/2
# rectangle.heights = c(0, accuracy.scrambled[1:(n.errors-1)])
# rectangles = rectangle.heights*intervals
# A.scrambled = sum( rectangles + triangles)
# T2.scrambled = .5*(sum((accuracy.scrambled-error.windows)^2))
# p.value.scrambled = cvmts.pval(T2.scrambled, n.errors, n.errors)
# Calculate p-value using Cramer-Von-Mises Criterion
# NOTE(review): the 0.25 scale factor differs from the 0.5 used in the
# commented-out scrambled variant above -- confirm which is intended.
T2 = .25*(sum((accuracy-error.windows)^2)) # accuracy-error.windows = integral difference between null model and REC
# browser()
p.value = cvmts.pval(T2, n.errors, n.errors)
# T2.norm = (T2- min(T2))/(max(T2)-min(T2))
# wilcox.test(T2.norm, error.)
#browser()
# U = 2*n.errors^2*(T2 + (4*n.errors^2-1)/12*n.errors)
# p.value.u = cvmts.pval(U, n.errors, n.errors)
# print('calculating REC...')
# browser()
# rec.list.ccle[master.ind] <<- A
# p.value.list.ccle[master.ind] <<- p.value
# T2.list.ccle[master.ind] <<- T2
#
#
#
# rec.list.scrambled[master.ind] <<- A.scrambled
# p.value.list.scrambled[master.ind] <<- p.value.scrambled
# T2.list.scrambled[master.ind] <<- T2.scrambled
# master.ind <<- master.ind+1
# browser()
# stat = ks.test(accuracy, error.windows, exact=TRUE)
return( list(A = A, p.value = p.value, T2=T2) )
}
squared.error <- function( error, squared.difference, n ){
  # Fraction of the n samples whose squared prediction error is within the
  # tolerance `error` (one point of the REC accuracy curve).
  sum(squared.difference <= error, na.rm = TRUE) / n
}
absolute.deviation <- function( error, absolute.difference, n ) {
  # Fraction of the n samples whose absolute prediction error is within the
  # tolerance `error` (one point of the REC accuracy curve).
  sum(absolute.difference <= error, na.rm = TRUE) / n
}
mutual.inf <- function(x, y, n.grid=100) {
# Mutual information (in bits) between two numeric vectors, estimated from
# a 2-D kernel density evaluated on an n.grid x n.grid lattice.
# Relies on MASS::kde2d and MASS::width.SJ (Sheather-Jones direct plug-in
# bandwidths); MASS must be attached by the caller.
# NOTE(review): width.SJ can fail on very short or constant vectors -- there
# is no guard here (compare mutual.inf.2, which returns NA in that case).
kde2d.xy <- kde2d(x, y, n = n.grid, h = c(width.SJ(x, method="dpi"), width.SJ(y, method="dpi")))
# X <- kde2d.xy$x
# Y <- kde2d.xy$y
# Joint density normalized into a discrete probability table.
PXY <- kde2d.xy$z/sum(kde2d.xy$z)
# Marginal of x, replicated across columns for the elementwise ratio below.
PX <- apply(PXY, MARGIN=1, sum)
PX <- PX/sum(PX)
PX <- matrix(PX, nrow=n.grid, ncol=n.grid)
# Marginal of y, replicated across rows.
PY <- apply(PXY, MARGIN=2, sum)
PY <- PY/sum(PY)
PY <- matrix(PY, byrow = TRUE, nrow=n.grid, ncol=n.grid)
# MI = sum over the grid of PXY * log2( PXY / (PX * PY) ).
MI <- sum(PXY * log2(PXY/(PX*PY)))
# browser()
return(MI)
}
mutual.inf.2 <- function(x, y, n.grid=100, normalize.by ="HXY", # Whether to normalize by HXY, HX, or HY
pos.and.neg = T
) {
# Signed, normalized mutual information between two numeric vectors based
# on a kernel density estimate: returns (HX + HY)/HXY - 1, multiplied by
# the sign of cor(x, y) when pos.and.neg is TRUE. Returns NA when either
# input is constant. Relies on MASS::kde2d and MASS::bcv (caller attaches
# MASS).
# NOTE(review): despite the `normalize.by` argument, the active code always
# normalizes by the joint entropy HXY -- the parameter is currently unused.
# x and y can be binary or continuous
# If there is not sufficient variation in x and y,
# will take the standard deviation as the bandwidth
# (IQR finds the inter-quartile range of the data vector)
if( length(unique(x)) == 1 || length(unique(y)) == 1 ){
# browser()
return( NA )
}
# bandwidth.x = ifelse(IQR(x) == 0, bcv(x, n.grid), width.SJ(x, method="dpi"))
# bandwidth.y = ifelse(IQR(y) == 0, bcv(y, n.grid), width.SJ(y, method="dpi"))
# print("---")
# print(x)
# print(y)
# Biased cross-validation bandwidths; warnings suppressed for small samples.
kde2d.xy <- kde2d(x, y, n = n.grid, h = c(suppressWarnings(bcv(x)), suppressWarnings(bcv(y))) )
# X <- kde2d.xy$x
# Y <- kde2d.xy$y
# Z = kde2d.xy$z
# Joint density normalized into a discrete probability table.
PXY <- kde2d.xy$z/sum(kde2d.xy$z)
# Marginal of x and its entropy HX.
PX <- apply(PXY, MARGIN=1, sum)
PX <- PX/sum(PX)
HX = -sum(PX * log2(PX))
PX <- matrix(PX, nrow=n.grid, ncol=n.grid)
# Marginal of y and its entropy HY.
PY <- apply(PXY, MARGIN=2, sum)
PY <- PY/sum(PY)
HY = -sum( PY * log2(PY))
PY <- matrix(PY, byrow = TRUE, nrow=n.grid, ncol=n.grid)
# browser()
# MIXY = PXY * log2(PXY/(PX*PY))
#
# if( pos.and.neg ){
# q1 = MIXY[1:(n.grid/2), 1:(n.grid/2)]
# q2 = MIXY[1:(n.grid/2), (n.grid/2 + 1):n.grid]
# q3 = MIXY[(n.grid/2+1):n.grid, 1:(n.grid/2)]
# q4 = MIXY[(n.grid/2+1):n.grid, (n.grid/2+1):n.grid]
#
# # q's divide MIXY into quarters. If the sum of q2 and q3 is greater than the sum of q1 and q4, then
# # x and y are negatively correlated.
# # on heatmap: q2 q4
# # q1 q3
#
## Ignore NaN's that are a result of underflow (experimentally derived)
# MI <- ifelse( sum(q1+q4, na.rm=TRUE) < sum(q2+q3, na.rm=TRUE),
# -sum(MIXY, na.rm=TRUE), sum(MIXY, na.rm=TRUE))
#} else{ MI = sum(MIXY, na.rm=TRUE)}
# MI <- ifelse( sum(q1+q4, na.rm=TRUE) < sum(q2+q3, na.rm=TRUE), -sum(q2+q3-q1-q4, na.rm=TRUE), sum(q1+q4-q2-q3, na.rm=TRUE))
# Joint entropy; NaN cells (0 * log2(0) underflow) are ignored.
HXY <- - sum(PXY * log2(PXY), na.rm=TRUE)
# HX = -sum( PX * log2(PX) )
# HY = -sum( PY * log2(PY) )
# MI.norm = (HX+HY)/HXY
#browser()
# normalization.factor = 1 #ifelse(normalize.by=="HXY", HXY, ifelse(normalize.by=="HX", HX, HY))
## browser()
# (HX + HY)/HXY - 1 is the entropy-normalized MI; the sign of the Pearson
# correlation gives it a direction when pos.and.neg is TRUE.
MI.norm = ifelse(pos.and.neg, sign(cor(x, y)), 1) * ((HX + HY)/HXY - 1) #MI/normalization.factor
# browser()
return( MI.norm )#list(MI=MI, HXY=HXY))
}
#mutual.inf.2.single.gene.target <- function( signature, gene.target){
# return(mutual.inf.2(gene.target, signature))
#}
mutual.inf.2.multiple.gene.targets <- function( signature, gene.targets ){
  # Normalized mutual information of each row of `gene.targets` against the
  # single `signature` vector (delegates row-wise to mutual.inf.2).
  apply(gene.targets, MARGIN = 1, FUN = mutual.inf.2, signature)
}
mutual.inf.3 <- function(gene.target, signature.matrix, signature.indices, n.grid=100, gene.target.name = "",
                         n.randomizations = 100, tissue = "NA") {
  # Normalized mutual information of one gene target against every signature
  # row, plus an empirical FDR obtained by comparing each selected
  # signature's MI to the MI distribution of permuted copies of the target.
  #
  # Args:
  #   gene.target: numeric (or binary) vector, one value per sample.
  #   signature.matrix: signatures x samples matrix.
  #   signature.indices: row indices of the signatures to report MI/FDR for.
  #   n.grid: unused here; kept for interface compatibility with siblings.
  #   gene.target.name, tissue: labels used in the diagnostic plot title.
  #   n.randomizations: number of permutations of gene.target for the null.
  #
  # Side effects: opens a quartz() device and draws the observed-vs-null MI
  # density comparison. NOTE(review): quartz() is macOS-only -- consider
  # dev.new() for portability.
  #
  # Returns list(MI = observed MI per selected signature,
  #              FDR = empirical false discovery rate per selected signature).
  #
  # Fix vs. original: leftover browser() debugging stops (an unconditional
  # one after the plot and an EGFR_AMP-specific trap) were removed so the
  # function can run unattended in interactive sessions.
  n.signatures = length(signature.matrix[,1])
  # Null model: each row of gene.target.rand is a permutation of the target.
  gene.target.rand = t(replicate(n.randomizations, sample(gene.target)))
  # Observed MI of every signature vs. the target ...
  MI.vector = apply(signature.matrix, MARGIN=1, FUN=mutual.inf.2, gene.target)
  # ... and vs. each permuted target (n.randomizations x n.signatures).
  MI.matrix.rand = apply(signature.matrix,
                         MARGIN=1, FUN=mutual.inf.2.multiple.gene.targets,
                         gene.target.rand)
  # Diagnostic plot: observed MI density (black) vs. randomized null (red).
  quartz()
  temp <- density(MI.vector, adjust=1, n = 512, from=min(MI.vector), to=max(MI.vector))
  x <- temp$x
  y <- temp$y/sum(temp$y)
  temp.rand <- density(MI.matrix.rand, adjust=1, n = 512, from=min(MI.matrix.rand), to=max(MI.matrix.rand))
  x.rand <- temp.rand$x
  y.rand <- temp.rand$y/sum(temp.rand$y)
  plot(x.rand, y.rand, type="l", lwd=2, xlab="MI",
       col="red",
       ylab = "P(MI)", main=paste(tissue, gene.target.name, n.randomizations, sep=" "))
  points(x, y, type="l", lwd=2, col = "black")
  legend("topright", c("actual gene target vs all gene sets", "randomized gene target vs all gene sets"), col=c("black", "red"), lwd=c(2,2))
  MI = MI.vector[signature.indices]
  FDR = vector(length=length(MI))
  ranked.MI.vector = rank(-MI.vector) # take negative so rank 1 corresponds to highest value
  median.MI.rand = median(MI.matrix.rand)
  for( i in 1:length(MI)){
    # FDR: fraction of null MIs more extreme than the observed MI (per
    # randomization), divided by the observed MI's rank among signatures.
    if( MI[i] > median.MI.rand ){
      rank.observed = ranked.MI.vector[signature.indices[i]]
      rank.randomized = sum(MI[i] < MI.matrix.rand)
    } else{
      # Below the null median: measure extremeness in the other direction.
      rank.observed = n.signatures - ranked.MI.vector[signature.indices[i]] + 1
      rank.randomized = sum(MI[i] > MI.matrix.rand)
    }
    FDR[i] = (rank.randomized/n.randomizations)/rank.observed
  }
  return(list(MI=MI, FDR=FDR))
}
mutual.inf.3.v2 <- function(target.vector, comparison.matrix, n.grid=100, target.vector.name = "",
                            tissue = "NA", normalize.by = "HXY", pos.and.neg=T) {
  # Normalized MI of every row of comparison.matrix against target.vector,
  # scaled by the target's self-MI (MI.ref). No permutation null here: the
  # FDR slot is a placeholder vector of 1s, keeping the return shape of the
  # mutual.inf.3 family.
  #
  # n.grid, target.vector.name and tissue are accepted for interface
  # compatibility with mutual.inf.3 but are not used by this variant.
  self.mi <- mutual.inf.2(target.vector, target.vector, normalize.by = normalize.by)
  print(paste("MI.ref =", self.mi))
  row.mi <- apply(comparison.matrix, MARGIN = 1, FUN = mutual.inf.2, target.vector,
                  normalize.by = normalize.by, pos.and.neg = pos.and.neg)
  scaled.mi <- row.mi / self.mi
  return(list(MI = scaled.mi, FDR = rep(1, length = length(scaled.mi))))
}
mutual.inf.4 <- function(gene.targets, signature.matrix, signature.index, n.grid=100, gene.target.name = "",
n.randomizations = 100) {
# Mutual information of one chosen ("winning") signature against one or
# several gene targets, with an empirical FDR from permutations of the
# first gene target. Complement of mutual.inf.3, which fixes the target
# and scans signatures; here the signature (signature.index) is fixed.
#
# gene.targets: vector or targets x samples matrix (a vector is promoted
#   to a 1-row matrix).
# signature.matrix: signatures x samples matrix.
# signature.index: row of signature.matrix to report MI/FDR for.
# n.grid: unused here; kept for interface compatibility with siblings.
# gene.target.name: label(s) used in the diagnostic plot title.
# n.randomizations: number of permutations for the null distribution.
#
# Side effects: opens a quartz() device (macOS-only) and draws the
# observed-vs-null MI densities, then stops at an unconditional browser()
# call. NOTE(review): leftover debugging -- this function cannot run
# unattended as written.
#
# Returns list(MI, FDR), scalars or vectors matching the number of targets.
## How this is different from mutual.inf.2:
## x is a target pathway and y is a matrix
## calculates a false discovery rate for the mutual information
## of the pathway with a randomized version of x
## y.ind indicates the row index of the target gene set / signature
# x and the elements of y can be binary or continuous
# If there is not sufficient variation in x and y,
# will take the standard deviation as the bandwidth
# (IQR finds the inter-quartile range of the data vector)
# "signature.indices" is the index of the "winning" signature that is used to compare to
# all the genomic aberrations.
# browser()
if( is.vector(gene.targets)){
gene.targets = t(as.matrix(gene.targets))
}
n.gene.targets = length(gene.targets[,1])
n.signatures = length(signature.matrix[,1])
n.samples = length(gene.targets[1,])
MI.matrix = matrix(ncol = n.signatures, nrow = n.gene.targets)
MI.matrix.rand = matrix(ncol = n.signatures, nrow = n.randomizations)
# Null model: permutations of the FIRST gene target only.
gene.target.rand = t(replicate(n.randomizations, sample(gene.targets[1,])))
# temp.MI.rand = vector(length=n.iter)
# browser()
# for( i in 1:length(signature.matrix[,1]) ){
# browser()
# Observed MI of every signature vs. every target, and vs. the null.
MI.matrix = apply(signature.matrix, MARGIN=1,
FUN=mutual.inf.2.multiple.gene.targets,
gene.targets)
MI.matrix.rand = apply(signature.matrix,
MARGIN=1, FUN=mutual.inf.2.multiple.gene.targets,
gene.target.rand)
# MI.matrix.rand[i,] = apply(gene.target.rand,
# MARGIN=1, FUN=mutual.inf.2,
# signature.matrix[i,])
# browser()
# MI.vector.rand[i] = mean(temp.MI.rand)
# }
# x.rand = sample(x)
# browser()
# Diagnostic plot: observed MI density (black) vs. randomized null (red).
quartz()
temp <- density(MI.matrix, adjust=1, n = 512, from=min(MI.matrix), to=max(MI.matrix))
x <- temp$x
y <- temp$y/sum(temp$y)
temp.rand <- density(MI.matrix.rand, adjust=1, n = 512, from=min(MI.matrix), to=max(MI.matrix))
x.rand <- temp.rand$x
y.rand <- temp.rand$y/sum(temp.rand$y)
# pdf(file=paste(tissue, paste(gene.target.name, collapse="-"), rownames(signature.matrix)[signature.index], n.randomizations, "pdf", sep="."))
# quartz(file=paste(tissue, paste(gene.target.name, collapse="-"), rownames(signature.matrix)[signature.index], n.randomizations, "pdf", sep="."))
#if( gene.gene.target.name =="SUMMARY"){
# NOTE(review): `tissue` is referenced in the title but is not a parameter
# of this function -- it must exist in a calling environment to avoid error.
plot(x.rand, y.rand, type="l", lwd=2, xlab="MI", #xlim = c(max(min(x), 10^-5), max(x)), ylim = range(c(y, y.rand)),
col="red",
ylab = "P(MI)", main=paste(tissue, paste(gene.target.name, collapse=" "), rownames(signature.matrix)[signature.index], n.randomizations, sep=" "))
points(x, y, type="l", lwd=2, col = "black")
legend("topright", c("actual gene target(s) vs all gene sets", "randomized gene target vs all gene sets"), col=c("black", "red"), lwd=c(2,2))
# if( gene.target.name[1] =="KRAS_AMP") {browser()}
browser()
# dev.off()
#}
#
# browser()
# MI = ifelse(is.matrix(MI.matrix), MI.matrix[,signature.index], MI.matrix[signature.index])
# ranked.MI.matrix = ifelse( is.matrix(MI.matrix), apply(-MI.matrix, MARGIN=1, rank), rank(-MI.matrix))
# MI.vector = ifelse(is.matrix(MI.matrix))
# MI.matrix is a matrix when there are several targets, a vector otherwise;
# the FDR bookkeeping differs accordingly.
if(is.matrix(MI.matrix)){
MI = MI.matrix[,signature.index]
ranked.MI.matrix = apply(-MI.matrix, MARGIN=1, rank)
FDR = vector(length=n.gene.targets, mode="numeric")
# browser()
for( i in 1:n.gene.targets){
# FDR: fraction of null MIs more extreme than observed, per
# randomization, divided by the observed MI's rank among signatures.
if( MI[i] > median(MI.matrix.rand) ){
rank.observed = ranked.MI.matrix[signature.index, i]
rank.randomized = sum(MI[i] < MI.matrix.rand)
} else{
# browser()
rank.observed = n.signatures - ranked.MI.matrix[signature.index, i]
rank.randomized = sum(MI[i] > MI.matrix.rand)
}
FDR[i] = (rank.randomized/n.randomizations)/rank.observed
# browser()
}
} else{
MI = MI.matrix[signature.index]
ranked.MI.matrix = rank(-MI.matrix)
if( MI > median(MI.matrix.rand)){
rank.observed = ranked.MI.matrix[signature.index]
rank.randomized = sum(MI <= MI.matrix.rand)
} else{
rank.observed = n.signatures - ranked.MI.matrix[signature.index]
rank.randomized = sum(MI >= MI.matrix.rand)
}
FDR = (rank.randomized/n.randomizations)/rank.observed
}
# if( gene.target.name == "EGFR_AMP" #|| gene.target.name =="TP53"
# ){ browser() }
# if( MI > 0 ){
# MI.ind = which(x >= MI)
# } else{ MI.ind = which( x <= MI) }
# MI.rand.ind = which(x.rand >= MI)
# MI.integral = sum(x[MI.ind]*y[MI.ind], na.rm=T)
# MI.rand.integral = sum(x.rand[MI.ind]*y.rand[MI.ind], na.rm=T)
# FDR = MI.rand.integral/MI.integral
# browser()
return(list(MI=MI, FDR=FDR))
}
mutual.inf.4.v2 <- function(gene.targets, signature.matrix, signature.index, n.grid=100, gene.target.name = "",
n.randomizations = 100) {
# Non-plotting variant of mutual.inf.4: mutual information of one chosen
# signature against one or several gene targets, with an optional
# permutation FDR (skipped when n.randomizations <= 0, in which case FDR
# is a vector of 1s). MI values are passed through normalize() --
# NOTE(review): normalize() is defined elsewhere in this file/project;
# its exact scaling cannot be confirmed from here.
#
# gene.targets: vector or targets x samples matrix (a vector is promoted
#   to a 1-row matrix).
# signature.matrix: signatures x samples matrix.
# signature.index: row of signature.matrix to report MI/FDR for.
# n.grid: unused here; kept for interface compatibility with siblings.
# gene.target.name: unused in the active code path (plotting is disabled).
# n.randomizations: number of permutations; <= 0 disables the FDR.
#
# Returns list(MI, FDR), scalars or vectors matching the number of targets.
## How this is different from mutual.inf.2:
## x is a target pathway and y is a matrix
## calculates a false discovery rate for the mutual information
## of the pathway with a randomized version of x
## y.ind indicates the row index of the target gene set / signature
# x and the elements of y can be binary or continuous
# If there is not sufficient variation in x and y,
# will take the standard deviation as the bandwidth
# (IQR finds the inter-quartile range of the data vector)
# "signature.indices" is the index of the "winning" signature that is used to compare to
# all the genomic aberrations.
# browser()
if( is.vector(gene.targets)){
gene.targets = t(as.matrix(gene.targets))
}
n.gene.targets = length(gene.targets[,1])
n.signatures = length(signature.matrix[,1])
n.samples = length(gene.targets[1,])
MI.matrix = matrix(ncol = n.signatures, nrow = n.gene.targets)
# Null model (only when requested): permutations of the FIRST gene target.
if( n.randomizations > 0 ){
MI.matrix.rand = matrix(ncol = n.signatures, nrow = n.randomizations)
gene.target.rand = t(replicate(n.randomizations, sample(gene.targets[1,])))
MI.matrix.rand = apply(signature.matrix,
MARGIN=1, FUN=mutual.inf.2.multiple.gene.targets,
gene.target.rand)
MI.matrix.rand = normalize(MI.matrix.rand)
}
# temp.MI.rand = vector(length=n.iter)
# browser()
# for( i in 1:length(signature.matrix[,1]) ){
# browser()
# Observed MI of every signature vs. every target, then normalized.
MI.matrix = apply(signature.matrix, MARGIN=1,
FUN=mutual.inf.2.multiple.gene.targets,
gene.targets)
MI.matrix = normalize(MI.matrix)
# MI.matrix.rand[i,] = apply(gene.target.rand,
# MARGIN=1, FUN=mutual.inf.2,
# signature.matrix[i,])
# browser()
# MI.vector.rand[i] = mean(temp.MI.rand)
# }
# x.rand = sample(x)
# browser()
# quartz()
# temp <- density(MI.matrix, adjust=1, n = 512, from=min(MI.matrix), to=max(MI.matrix))
# x <- temp$x
# y <- temp$y/sum(temp$y)
#
# temp.rand <- density(MI.matrix.rand, adjust=1, n = 512, from=min(MI.matrix), to=max(MI.matrix))
# x.rand <- temp.rand$x
# y.rand <- temp.rand$y/sum(temp.rand$y)
#
## pdf(file=paste(tissue, paste(gene.target.name, collapse="-"), rownames(signature.matrix)[signature.index], n.randomizations, "pdf", sep="."))
## quartz(file=paste(tissue, paste(gene.target.name, collapse="-"), rownames(signature.matrix)[signature.index], n.randomizations, "pdf", sep="."))
##if( gene.gene.target.name =="SUMMARY"){
# plot(x.rand, y.rand, type="l", lwd=2, xlab="MI", #xlim = c(max(min(x), 10^-5), max(x)), ylim = range(c(y, y.rand)),
# col="red",
# ylab = "P(MI)", main=paste(tissue, paste(gene.target.name, collapse=" "), rownames(signature.matrix)[signature.index], n.randomizations, sep=" "))
# points(x, y, type="l", lwd=2, col = "black")
# legend("topright", c("actual gene target(s) vs all gene sets", "randomized gene target vs all gene sets"), col=c("black", "red"), lwd=c(2,2))
## if( gene.target.name[1] =="KRAS_AMP") {browser()}
# browser()
# dev.off()
#}
#
# browser()
# MI = ifelse(is.matrix(MI.matrix), MI.matrix[,signature.index], MI.matrix[signature.index])
# ranked.MI.matrix = ifelse( is.matrix(MI.matrix), apply(-MI.matrix, MARGIN=1, rank), rank(-MI.matrix))
# MI.vector = ifelse(is.matrix(MI.matrix))
# browser()
#MI.ref = mutual.inf.2( signature.matrix )
# MI.matrix is a matrix when there are several targets, a vector otherwise;
# the FDR bookkeeping differs accordingly.
if(is.matrix(MI.matrix)){
MI = MI.matrix[,signature.index]
if( n.randomizations > 0 ){
ranked.MI.matrix = apply(-MI.matrix, MARGIN=1, rank)
FDR = vector(length=n.gene.targets, mode="numeric")
# browser()
for( i in 1:n.gene.targets){
# FDR: fraction of null MIs more extreme than observed, per
# randomization, divided by the observed MI's rank among signatures.
if( MI[i] > median(MI.matrix.rand) ){
rank.observed = ranked.MI.matrix[signature.index, i]
rank.randomized = sum(MI[i] < MI.matrix.rand)
} else{
# browser()
rank.observed = n.signatures - ranked.MI.matrix[signature.index, i]
rank.randomized = sum(MI[i] > MI.matrix.rand)
}
FDR[i] = (rank.randomized/n.randomizations)/rank.observed
# browser()
}
} else{ FDR = rep(1, length=n.gene.targets) }
} else{
MI = MI.matrix[signature.index]
if( n.randomizations > 0 ){
ranked.MI.matrix = rank(-MI.matrix)
if( MI > median(MI.matrix.rand)){
rank.observed = ranked.MI.matrix[signature.index]
rank.randomized = sum(MI <= MI.matrix.rand)
} else{
rank.observed = n.signatures - ranked.MI.matrix[signature.index]
rank.randomized = sum(MI >= MI.matrix.rand)
}
FDR = (rank.randomized/n.randomizations)/rank.observed
} else{ FDR = rep(1, length=n.gene.targets) }
}
# if( gene.target.name == "EGFR_AMP" #|| gene.target.name =="TP53"
# ){ browser() }
# if( MI > 0 ){
# MI.ind = which(x >= MI)
# } else{ MI.ind = which( x <= MI) }
# MI.rand.ind = which(x.rand >= MI)
# MI.integral = sum(x[MI.ind]*y[MI.ind], na.rm=T)
# MI.rand.integral = sum(x.rand[MI.ind]*y.rand[MI.ind], na.rm=T)
# FDR = MI.rand.integral/MI.integral
# browser()
return(list(MI=MI, FDR=FDR))
}
# Mean of the all-pairs squared differences of a numeric vector:
# (1/n) * sum_{i,j} (x[j] - x[i])^2.
#
# @param x numeric vector.
# @return a single numeric value; NaN for empty input. (The original
#   computed the same quantity but returned it invisibly via a trailing
#   assignment and carried an unused `r = seq(2,10)` local.)
mise <- function( x ) {
  n.x <- length(x)
  if (n.x == 0) return(NaN)  # guard: the original 1:n.x loop misbehaved on empty input
  # Vectorized replacement for the original accumulation loop:
  # per.target[i] = sum_j (x[j] - x[i])^2
  per.target <- vapply(seq_len(n.x), function(i) sum((x - x[i])^2), numeric(1))
  sum(per.target) / n.x
}
# Asymptotic Mean Integrated Squared Error (AMISE) optimal-bandwidth
# selector for univariate kernel density estimation.
#
# @param v numeric sample vector.
# @return result of newtonraphson() applied to the fixed-point equation
#   for the optimal bandwidth h. newtonraphson() is defined elsewhere in
#   this file -- presumably a scalar root finder; confirm its signature.
#
# NOTE(review): several lines look inconsistent with the cited reference;
# they are flagged inline but left unchanged.
amise <- function( v ){
# Reference:
# "Very fast optimal bandwith selection for univariate kernel density estimation"
# Vikas Chandrakant Raykar and Ramani Duraiswami
# [CS-TR-4774/UMIACS-TR-2005-73] June 28, 2006
# Hermite polynomials H4 and H6 used by the density-functional estimators.
H4 <- function( x ){ x^4 - 6*x^2 + 3 }
H6 <- function( x ){ x^6 - 15*x^4 + 45*x^2 - 15 }
N = length(v)
# Step 1 on page 11 of reference
# NOTE(review): the reference's step 1 estimates the scale (std. dev.) of
# the sample; mean(v) looks suspicious here -- confirm against the paper.
sigma = mean(v)
# Step 2 on p. 11
# Plug-in estimates of the density functionals Phi6 and Phi8 under a
# normal reference rule with scale `sigma`.
Phi6 = sigma^(-7)*(-15/(16*sqrt(pi)))
Phi8 = sigma^(-9)*(-105/(32*sqrt(pi)))
# Step 3 on p. 11
# Pilot bandwidths g1 and g2 derived from Phi6 and Phi8.
g1 = ( -6/(sqrt(2*pi) * Phi6 * N))^(1/7)
g2 = ( 30/(sqrt(2*pi) * Phi8 * N))^(1/9)
# Make a matrix Z where Z(i,j) = x_i - x_j
Z = matrix(v, ncol = N, nrow = N, byrow=TRUE) - matrix(v, ncol = N, nrow = N, byrow=FALSE)
# Kernel estimate of the functional Phi4 at bandwidth g.
Phi4 <- function( g ) 1/(N*(N-1)*sqrt(2*pi)*g^5) * sum( H4(Z/g) * exp( -(Z^2)/(2*g^2)) )
Phi4.g1 = Phi4(g1)
# NOTE(review): the next line duplicates Phi4(g1) (H4 at bandwidth g1);
# the reference computes Phi6 with H6 at bandwidth g2 here, and H6/g2
# above are otherwise never used -- suspected bug, left unchanged.
Phi6.g2 = 1/(N*(N-1)*sqrt(2*pi)*g1^5) * sum( H4(Z/g1) * exp( -(Z^2)/(2*g1^2)) )
# Step 4
# Pilot-bandwidth map of the fixed-point iteration: h -> bandwidth used
# to re-estimate Phi4.
Y <- function( h ){
( (-6*sqrt(2)*Phi4.g1)/Phi6.g2)^(1/7)*h^(5/7)
}
# The AMISE-optimal bandwidth is the root of fxn(h) = 0.
fxn <- function( h ){
h - ( 1/ (sqrt(2) * Phi4(Y(h)) * N))^(1/5)
}
# Solve the fixed-point equation, starting the search at mean(v).
newtonraphson(fxn, mean(v))
}
#parzen.window <- function(z, h){
# Sigma = cov(z,z)
#
#
#}
# Write a multi-phenotype CLS file (one row of class labels per phenotype).
#
# @param output.cls path of the CLS file to create (overwritten).
# @param cls.labels matrix of class labels: one row per phenotype, one
#   column per sample.
# @param phen.names character vector of phenotype names (one per row).
#
# Output format:
#   line 1: "<n.samples> <n.distinct.labels> <n.phenotypes> phen.names: ... col.names:"
#   line 2: "# " followed by the distinct labels of every phenotype
#   then one line of space-separated labels per phenotype row.
#
# Fix: the original body referenced an undefined `cls.list`; it now uses
# the `cls.labels` argument throughout. Unused locals (class.phen, n)
# were removed.
write.cls.with.locs <- function( output.cls,
cls.labels,
phen.names){
  # Distinct labels per phenotype row, in order of first appearance.
  class.order.list = apply(cls.labels, 1, unique)
  num = sum(unlist(lapply(class.order.list, length)))
  class.order.list = unlist(class.order.list)
  l <- length(cls.labels[1,])
  phen.names.string <- paste("phen.names:", paste(phen.names, collapse=" "), "col.names:", sep=" ")
  # Header line: sample count, distinct-label count, phenotype count, names.
  cat(paste(l, num, length(cls.labels[, 1]), phen.names.string, sep=" "), "\n",
      file = output.cls, append = FALSE, sep = "")
  cat("# ", paste(class.order.list, collapse=" "), "\n", file = output.cls, append = TRUE, sep = "")
  # One line of labels per phenotype.
  for (i in 1:length(cls.labels[,1])) {
    cat(paste(cls.labels[i,], collapse=" "), "\n", file = output.cls, append = TRUE, sep = "")
  }
}
# Linearly rescale a numeric vector onto the unit interval [0, 1].
normalize <- function( v ){
  rng <- range(v)
  (v - rng[1]) / (rng[2] - rng[1])
}
# Kernel-density estimate of the mutual information between two numeric
# vectors, plus a signed normalized variant (NMI).
#
# @param x,y numeric vectors of equal length.
# @param n.grid grid resolution for the 2-D kernel density estimate.
# @return list(MI, HXY, HX, HY, NMI); NMI carries the sign of cor(x, y).
#
# For definitions of mutual information and the universal metric (NMI)
# see "Mutual Information" (Wikipedia) and Cover & Thomas.
# Fix: removed a stray bare `MI` expression before the HXY line (a no-op
# inside a function body, leftover from interactive debugging).
mutual.inf.P <- function(x, y, n.grid=100) {
  # bcv(): biased cross-validation bandwidth selector (MASS).
  kde2d.xy <- kde2d(x, y, n = n.grid, h = c(bcv(x), bcv(y)))
  X <- kde2d.xy$x
  Y <- kde2d.xy$y
  # Joint density -> probability table; eps keeps log2() finite.
  # NOTE(review): eps is added after normalization here, so PXY sums to
  # slightly more than 1 (the sibling NMI() renormalizes after the eps) --
  # preserved as-is to keep results unchanged.
  PXY <- kde2d.xy$z/sum(kde2d.xy$z) + .Machine$double.eps
  # Marginal of X and its entropy (bits).
  PX <- rowSums(PXY)
  PX <- PX/sum(PX)
  HX <- -sum(PX * log2(PX))
  PX <- matrix(PX, nrow=n.grid, ncol=n.grid)
  # Marginal of Y and its entropy (bits).
  PY <- colSums(PXY)
  PY <- PY/sum(PY)
  HY <- -sum(PY * log2(PY))
  PY <- matrix(PY, byrow = TRUE, nrow=n.grid, ncol=n.grid)
  MI <- sum(PXY * log2(PXY/(PX*PY)))
  HXY <- - sum(PXY * log2(PXY))
  NMI <- sign(cor(x, y)) * ((HX + HY)/HXY - 1) # use pearson correlation to get the sign (directionality)
  return(list(MI=MI, HXY=HXY, HX=HX, HY=HY, NMI=NMI))
}
# Evaluate projected pathway scores (GCT) against a phenotype (CLS):
# per row computes NMI, signed ROC AUC and a t-test, renders a two-panel
# heatmap PDF and writes a tab-delimited results table.
#
# @param input.ds GCT dataset path; @param input.cls CLS phenotype path.
# @param phenotype phenotype (row) name in the CLS file; NULL = first row.
# @param target.class label treated as the positive class (discrete mode).
# @param target.type "discrete" or "continuous".
# @param output.txt,output.pdf output file paths.
#
# Requires project helpers MSIG.Gct2Frame / MSIG.ReadPhenFile.2, the
# sibling mutual.inf.P(), and the `verification` package (roc.area).
#
# Fix: `sample.names[ind]` was computed but its value discarded, leaving
# the x-axis sample labels in pre-sort order; the reordered names are now
# assigned back (as the .2 variant already did).
OPAM.Evaluate.Results <- function(
input.ds,
input.cls,
phenotype = NULL,
target.class = NULL,
target.type = "discrete",
output.txt,
output.pdf) {
  pdf(file=output.pdf, height=8.5, width=11)
  dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  model.names <- dataset$row.names
  model.descs <- dataset$descs
  Ns <- length(m[1,])
  for (i in 1:length(m[,1])) {
    if (sd(m[i,]) == 0) {
      val <- m[i, 1]
      m[i,] <- m[i,] + runif(n=Ns, min= val - 0.001, max=val + 0.001) # add small noise to flat profiles
    }
  }
  dim(m)
  sample.names <- dataset$names
  CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
  cls.labels <- CLS$class.v
  cls.phen <- CLS$phen
  cls.list <- CLS$class.list
  library(verification)
  # Locate the requested phenotype row (default: first).
  if (is.null(phenotype)) {
    phen.loc <- 1
  } else {
    phen.loc <- match(phenotype, CLS$phen.names)
  }
  if (is.vector(CLS$class.list)) {
    target.vec <- CLS$class.list
  } else {
    target.vec <- CLS$class.list[phen.loc,]
  }
  if (target.type == "continuous") {
    target <- target.vec
  } else if (target.type == "discrete") {
    target <- ifelse(target.vec == target.class, 1, 0)  # 1 = target class
  }
  # Sort samples by target so classes are contiguous in the heatmap.
  ind <- order(target)
  target <- target[ind]
  target.vec <- target.vec[ind]
  m <- m[, ind]
  sample.names <- sample.names[ind]  # FIX: result was previously discarded
  class.v <- CLS$class.v
  if (is.vector(class.v)) {
    class.v <- class.v[ind]
  } else {
    class.v <- class.v[, ind]
  }
  annot <- MI <- AUC <- AUC.pval <- t.stat <- t.pval <- vector(length=dim(m)[1], mode="numeric")
  # Self-NMI of the target, used to normalize each row's NMI.
  NMI.ref <- mutual.inf.P(x = target, y = target, n.grid=100)$NMI
  for (i in 1:dim(m)[1]) {
    feature <- m[i,]
    MI[i] <- signif(mutual.inf.P(target, feature, n.grid=100)$NMI/NMI.ref, 4)
    if (target.type == "continuous") {
      AUC[i] <- AUC.pval[i] <- t.stat[i] <- t.pval[i] <- "-"
    } else if (target.type == "discrete") {
      feature.norm <- (feature - min(feature))/(max(feature) - min(feature))
      perf.auc <- roc.area(target, feature.norm)
      AUC[i] <- ifelse(perf.auc$A < 0.5, -(1 - perf.auc$A), perf.auc$A)  # signed AUC
      AUC[i] <- signif(AUC[i], digits=4)
      p.val <- perf.auc$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)  # fold p-values over 0.5
      AUC.pval[i] <- signif(p.val, digits=4)
      temp <- split(feature, target)
      x <- temp$'1'
      y <- temp$'0'
      t.stat[i] <- signif(t.test(x=x, y=y)$statistic, digits=4)
      p.val <- t.test(x=x, y=y)$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      t.pval[i] <- signif(p.val, digits=4)
    }
    annot[i] <- paste(MI[i], " ", AUC[i], " (", AUC.pval[i], ") ", t.stat[i], " (", t.pval[i], ")", sep="")
  }
  # Red-to-blue palette, 512 levels.
  mycol <- vector(length=512, mode = "numeric")
  for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
  for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
  mycol <- rev(mycol)
  cex.axis = 1
  ncolors <- length(mycol)
  # Top panel: target strip; bottom panel: row-standardized heatmap.
  nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(4, 10), FALSE)
  par(mar = c(1, 15, 5, 15))
  max.v <- max(max(target), -min(target))
  V1 <- target
  image(1:length(target), 1:1, as.matrix(V1), zlim = c(0, 1), col=c("yellow", "purple"), axes=FALSE, main="", sub = "", xlab= "", ylab="")
  axis(2, at=1:1, labels=paste(phenotype, target.class), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
  axis(4, at=1:1, labels=paste("NMI AUC (p-val) t-test (p-val)", sep=""), adj= 0.5, tick=FALSE,
       las = 1, cex.axis=0.80, font.axis=1, line=-1)
  par(mar = c(5, 15, 1, 15))
  V1 <- m
  for (i in 1:dim(V1)[1]) V1[i,] <- (V1[i,] - mean(V1[i,]))/sd(V1[i,])  # z-score each row
  max.v <- max(max(V1), -min(V1))
  V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))  # map to palette indices
  V1 <- apply(V1, MARGIN=2, FUN=rev)
  image(1:dim(V1)[2], 1:dim(V1)[1], t(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
  axis(2, at=1:dim(V1)[1], labels=row.names(V1), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
  axis(4, at=1:dim(V1)[1], labels=rev(annot), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
  axis(1, at=1:dim(V1)[2], labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  dev.off()
  # Tab-delimited results table (one row per gene set).
  annot2 <- data.frame(cbind(MI, AUC, AUC.pval, t.stat, t.pval))
  row.names(annot2) <- row.names(m)
  write(paste(c("gene set ", noquote(colnames(annot2))), collapse="\t"), file = output.txt, append = F, ncolumns = length(colnames(annot2)))
  write.table(annot2, file=output.txt, append=T, quote=F, sep="\t", eol="\n", col.names=F, row.names=T)
}
# Variant of OPAM.Evaluate.Results supporting single-row datasets,
# optional sorting of rows by decreasing NMI, and display of only the top
# `display.top.n` rows in the heatmap. Per row computes NMI, signed ROC
# AUC and a t-test; renders a PDF and writes a tab-delimited table.
#
# Requires project helpers MSIG.Gct2Frame / MSIG.ReadPhenFile.2, the
# sibling mutual.inf.P(), and the `verification` package (roc.area).
#
# Fix: `t.pval <= t.pval[MI.order]` was a discarded comparison, leaving
# t-test p-values misaligned after sorting; it is now the intended
# assignment.
OPAM.Evaluate.Results.2 <- function(
input.ds,
input.cls,
phenotype = NULL,
target.class = NULL,
target.type = "discrete",
sort.results = T,
display.top.n = 20,
output.txt,
output.pdf) {
  pdf(file=output.pdf, height=8.5, width=11)
  dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  N <- dim(m)[1]
  model.names <- dataset$row.names
  model.descs <- dataset$descs
  Ns <- length(as.matrix(m)[1,])
  for (i in 1:N) {
    if (sd(as.matrix(m)[i,]) == 0) {
      val <- as.matrix(m)[i, 1]
      m[i,] <- as.matrix(m)[i,] + runif(n=Ns, min= val - 0.001, max=val + 0.001) # add small noise to flat profiles
    }
  }
  dim(m)
  sample.names <- dataset$names
  CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
  cls.labels <- CLS$class.v
  cls.phen <- CLS$phen
  cls.list <- CLS$class.list
  library(verification)
  # Locate the requested phenotype row (default: first).
  if (is.null(phenotype)) {
    phen.loc <- 1
  } else {
    phen.loc <- match(phenotype, CLS$phen.names)
  }
  if (is.vector(CLS$class.list)) {
    target.vec <- CLS$class.list
  } else {
    target.vec <- CLS$class.list[phen.loc,]
  }
  if (target.type == "continuous") {
    target <- target.vec
  } else if (target.type == "discrete") {
    target <- ifelse(target.vec == target.class, 1, 0)  # 1 = target class
  }
  # Sort samples by target so classes are contiguous in the heatmap.
  ind <- order(target)
  target <- target[ind]
  target.vec <- target.vec[ind]
  m <- as.matrix(m)[, ind]
  sample.names <- sample.names[ind]
  class.v <- CLS$class.v
  if (is.vector(class.v)) {
    class.v <- class.v[ind]
  } else {
    class.v <- class.v[, ind]
  }
  annot <- MI <- AUC <- AUC.pval <- t.stat <- t.pval <- vector(length=N, mode="numeric")
  # Self-NMI of the target, used to normalize each row's NMI.
  NMI.ref <- mutual.inf.P(x = target, y = target, n.grid=100)$NMI
  for (i in 1:N) {
    if (N == 1) {
      feature <- m
    } else {
      feature <- m[i,]
    }
    MI[i] <- signif(mutual.inf.P(target, feature, n.grid=100)$NMI/NMI.ref, 4)
    if (target.type == "continuous") {
      AUC[i] <- AUC.pval[i] <- t.stat[i] <- t.pval[i] <- "-"
    } else if (target.type == "discrete") {
      feature.norm <- (feature - min(feature))/(max(feature) - min(feature))
      perf.auc <- roc.area(target, feature.norm)
      AUC[i] <- ifelse(perf.auc$A < 0.5, -(1 - perf.auc$A), perf.auc$A)  # signed AUC
      AUC[i] <- signif(AUC[i], digits=4)
      p.val <- perf.auc$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)  # fold p-values over 0.5
      AUC.pval[i] <- signif(p.val, digits=4)
      temp <- split(feature, target)
      x <- temp$'1'
      y <- temp$'0'
      t.stat[i] <- signif(t.test(x=x, y=y)$statistic, digits=4)
      p.val <- t.test(x=x, y=y)$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      t.pval[i] <- signif(p.val, digits=4)
    }
    annot[i] <- paste(MI[i], " ", AUC[i], " (", AUC.pval[i], ") ", t.stat[i], " (", t.pval[i], ")", sep="")
  }
  if ((N > 1) & (sort.results == T)) {
    # Reorder every per-row statistic by decreasing NMI.
    MI.order <- order(MI, decreasing=T)
    MI <- MI[MI.order]
    AUC <- AUC[MI.order]
    AUC.pval <- AUC.pval[MI.order]
    t.stat <- t.stat[MI.order]
    t.pval <- t.pval[MI.order]  # FIX: was `t.pval <= ...` (no-op comparison)
    m <- as.matrix(m)[MI.order,]
    annot <- annot[MI.order]
  }
  # Red-to-blue palette, 512 levels.
  mycol <- vector(length=512, mode = "numeric")
  for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
  for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
  mycol <- rev(mycol)
  cex.axis = 1
  ncolors <- length(mycol)
  # Top panel: target strip; bottom panel: row-standardized heatmap.
  nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(4, 10), FALSE)
  par(mar = c(1, 15, 5, 15))
  max.v <- max(max(target), -min(target))
  V1 <- target
  image(1:length(target), 1:1, as.matrix(V1), zlim = c(0, 1), col=c("yellow", "purple"), axes=FALSE, main="", sub = "", xlab= "", ylab="")
  axis(2, at=1:1, labels=paste(phenotype, target.class), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
  axis(4, at=1:1, labels=paste("NMI AUC (p-val) t-test (p-val)", sep=""), adj= 0.5, tick=FALSE,
       las = 1, cex.axis=0.80, font.axis=1, line=-1)
  par(mar = c(5, 15, 1, 15))
  if (display.top.n > N) display.top.n <- N
  if (N == 1) {
    V1 <- m
    V1 <- (V1 - mean(V1))/sd(V1)
  } else {
    V1 <- m[1:display.top.n, ]
    for (i in 1:display.top.n) V1[i,] <- (V1[i,] - mean(V1[i,]))/sd(V1[i,])  # z-score rows
  }
  max.v <- max(max(V1), -min(V1))
  V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))  # map to palette indices
  if (N > 1) {
    V1 <- apply(V1, MARGIN=2, FUN=rev)
    image(1:Ns, 1:display.top.n, t(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:display.top.n, labels=row.names(V1), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(4, at=1:display.top.n, labels=rev(annot[1:display.top.n]), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  } else {
    image(1:Ns, 1:1, as.matrix(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:1, labels=model.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(4, at=1:1, labels=annot[1], adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  }
  dev.off()
  # Tab-delimited results table (one row per gene set).
  annot2 <- data.frame(cbind(MI, AUC, AUC.pval, t.stat, t.pval))
  row.names(annot2) <- row.names(m)
  write(paste(c("gene set ", noquote(colnames(annot2))), collapse="\t"), file = output.txt, append = F,
        ncolumns = length(colnames(annot2)))
  write.table(annot2, file=output.txt, append=T, quote=F, sep="\t", eol="\n", col.names=F, row.names=T)
}
# TIFF-output variant of OPAM.Evaluate.Results.2: same per-row statistics
# (NMI, signed ROC AUC, t-test; reported to 2 significant digits) with a
# grayscale target strip and TIFF heatmap output.
#
# Requires project helpers MSIG.Gct2Frame / MSIG.ReadPhenFile.2, the
# sibling mutual.inf.P(), and the `verification` package (roc.area).
#
# Fix: `t.pval <= t.pval[MI.order]` was a discarded comparison, leaving
# t-test p-values misaligned after sorting; it is now the intended
# assignment.
OPAM.Evaluate.Results.2.1 <- function(
input.ds,
input.cls,
phenotype = NULL,
target.class = NULL,
target.type = "discrete",
sort.results = T,
display.top.n = 20,
output.txt,
output.tiff) {
  tiff(file=output.tiff, width = 1200, height = 1000, units = "px", pointsize = 17)
  dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  N <- dim(m)[1]
  model.names <- dataset$row.names
  model.descs <- dataset$descs
  Ns <- length(as.matrix(m)[1,])
  for (i in 1:N) {
    if (sd(as.matrix(m)[i,]) == 0) {
      val <- as.matrix(m)[i, 1]
      m[i,] <- as.matrix(m)[i,] + runif(n=Ns, min= val - 0.001, max=val + 0.001) # add small noise to flat profiles
    }
  }
  dim(m)
  sample.names <- dataset$names
  CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
  cls.labels <- CLS$class.v
  cls.phen <- CLS$phen
  cls.list <- CLS$class.list
  library(verification)
  # Locate the requested phenotype row (default: first).
  if (is.null(phenotype)) {
    phen.loc <- 1
  } else {
    phen.loc <- match(phenotype, CLS$phen.names)
  }
  if (is.vector(CLS$class.list)) {
    target.vec <- CLS$class.list
  } else {
    target.vec <- CLS$class.list[phen.loc,]
  }
  if (target.type == "continuous") {
    target <- target.vec
  } else if (target.type == "discrete") {
    target <- ifelse(target.vec == target.class, 1, 0)  # 1 = target class
  }
  # Sort samples by target so classes are contiguous in the heatmap.
  ind <- order(target)
  target <- target[ind]
  target.vec <- target.vec[ind]
  m <- as.matrix(m)[, ind]
  sample.names <- sample.names[ind]
  class.v <- CLS$class.v
  if (is.vector(class.v)) {
    class.v <- class.v[ind]
  } else {
    class.v <- class.v[, ind]
  }
  annot <- MI <- AUC <- AUC.pval <- t.stat <- t.pval <- vector(length=N, mode="numeric")
  # Self-NMI of the target, used to normalize each row's NMI.
  NMI.ref <- mutual.inf.P(x = target, y = target, n.grid=100)$NMI
  for (i in 1:N) {
    if (N == 1) {
      feature <- m
    } else {
      feature <- m[i,]
    }
    MI[i] <- signif(mutual.inf.P(target, feature, n.grid=100)$NMI/NMI.ref, 2)
    if (target.type == "continuous") {
      AUC[i] <- AUC.pval[i] <- t.stat[i] <- t.pval[i] <- "-"
    } else if (target.type == "discrete") {
      feature.norm <- (feature - min(feature))/(max(feature) - min(feature))
      perf.auc <- roc.area(target, feature.norm)
      AUC[i] <- ifelse(perf.auc$A < 0.5, -(1 - perf.auc$A), perf.auc$A)  # signed AUC
      AUC[i] <- signif(AUC[i], digits=2)
      p.val <- perf.auc$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)  # fold p-values over 0.5
      AUC.pval[i] <- signif(p.val, digits=2)
      temp <- split(feature, target)
      x <- temp$'1'
      y <- temp$'0'
      t.stat[i] <- signif(t.test(x=x, y=y)$statistic, digits=2)
      p.val <- t.test(x=x, y=y)$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      t.pval[i] <- signif(p.val, digits=2)
    }
    annot[i] <- paste(MI[i], " ", AUC[i], " (", AUC.pval[i], ") ", t.stat[i], " (", t.pval[i], ")", sep="")
  }
  if ((N > 1) & (sort.results == T)) {
    # Reorder every per-row statistic by decreasing NMI.
    MI.order <- order(MI, decreasing=T)
    MI <- MI[MI.order]
    AUC <- AUC[MI.order]
    AUC.pval <- AUC.pval[MI.order]
    t.stat <- t.stat[MI.order]
    t.pval <- t.pval[MI.order]  # FIX: was `t.pval <= ...` (no-op comparison)
    m <- as.matrix(m)[MI.order,]
    annot <- annot[MI.order]
  }
  # Red-to-blue palette, 512 levels.
  mycol <- vector(length=512, mode = "numeric")
  for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
  for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
  mycol <- rev(mycol)
  cex.axis = 1
  ncolors <- length(mycol)
  # Top panel: target strip; bottom panel: row-standardized heatmap.
  nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(2, 10), FALSE)
  par(mar = c(1, 19, 4, 11))
  max.v <- max(max(target), -min(target))
  V1 <- target
  image(1:length(target), 1:1, as.matrix(V1), zlim = c(0, 1), col=c("lightgray", "black"), axes=FALSE, main="", sub = "", xlab= "", ylab="")
  axis(2, at=1:1, labels=paste(phenotype, target.class), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
  axis(4, at=1:1, labels=paste("NMI AUC (p-val) t-test (p-val)", sep=""), adj= 0.5, tick=FALSE,
       las = 1, cex.axis=0.80, font.axis=1, line=-1)
  par(mar = c(4, 19, 1, 11))
  if (display.top.n > N) display.top.n <- N
  if (N == 1) {
    V1 <- m
    V1 <- (V1 - mean(V1))/sd(V1)
  } else {
    V1 <- m[1:display.top.n, ]
    for (i in 1:display.top.n) V1[i,] <- (V1[i,] - mean(V1[i,]))/sd(V1[i,])  # z-score rows
  }
  max.v <- max(max(V1), -min(V1))
  V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))  # map to palette indices
  if (N > 1) {
    V1 <- apply(V1, MARGIN=2, FUN=rev)
    image(1:Ns, 1:display.top.n, t(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:display.top.n, labels=row.names(V1), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(4, at=1:display.top.n, labels=rev(annot[1:display.top.n]), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  } else {
    image(1:Ns, 1:1, as.matrix(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:1, labels=model.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(4, at=1:1, labels=annot[1], adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  }
  dev.off()
  # Tab-delimited results table (one row per gene set).
  annot2 <- data.frame(cbind(MI, AUC, AUC.pval, t.stat, t.pval))
  row.names(annot2) <- row.names(m)
  write(paste(c("gene set ", noquote(colnames(annot2))), collapse="\t"), file = output.txt, append = F,
        ncolumns = length(colnames(annot2)))
  write.table(annot2, file=output.txt, append=T, quote=F, sep="\t", eol="\n", col.names=F, row.names=T)
}
# Variant of OPAM.Evaluate.Results.2 with optional phenotype sorting, a
# configurable axis-label size, and a continuous-target mode built from
# the numeric class vector (jittered to break ties for the density-based
# NMI estimate).
#
# Requires project helpers MSIG.Gct2Frame / MSIG.ReadPhenFile.2, the
# sibling mutual.inf.P(), and the `verification` package (roc.area).
#
# Fix: `t.pval <= t.pval[MI.order]` was a discarded comparison, leaving
# t-test p-values misaligned after sorting; it is now the intended
# assignment.
OPAM.Evaluate.Results.4 <- function(
input.ds,
input.cls,
phenotype = NULL,
target.class = NULL,
target.type = "discrete",
sort.phenotype = T,
sort.results = T,
display.top.n = 20,
output.txt,
output.pdf,
cex.axis=0.7) {
  pdf(file=output.pdf, height=8.5, width=11)
  dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  N <- dim(m)[1]
  model.names <- dataset$row.names
  model.descs <- dataset$descs
  Ns <- length(as.matrix(m)[1,])
  for (i in 1:N) {
    if (sd(as.matrix(m)[i,]) == 0) {
      val <- as.matrix(m)[i, 1]
      m[i,] <- as.matrix(m)[i,] + runif(n=Ns, min= val - 0.001, max=val + 0.001) # add small noise to flat profiles
    }
  }
  dim(m)
  sample.names <- dataset$names
  CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
  cls.labels <- CLS$class.v
  cls.phen <- CLS$phen
  cls.list <- CLS$class.list
  library(verification)
  # Locate the requested phenotype row (default: first).
  if (is.null(phenotype)) {
    phen.loc <- 1
  } else {
    phen.loc <- match(phenotype, CLS$phen.names)
  }
  # Build the target: binary indicator in discrete mode; in continuous
  # mode the numeric class vector plus a tiny runif() jitter (breaks ties
  # for the kernel-density NMI estimate; makes results non-deterministic).
  if (is.vector(CLS$class.list)) {
    if (target.type == "discrete") {
      target <- ifelse(CLS$class.list == target.class, 1, 0)
    } else {
      target <- as.numeric(CLS$class.v) + runif(length(CLS$class.v), min=0, max=10*.Machine$double.eps)
    }
  } else {
    if (target.type == "discrete") {
      target <- ifelse(CLS$class.list[phen.loc,] == target.class, 1, 0)
    } else {
      target <- as.numeric(CLS$class.v[phen.loc,]) + runif(length(CLS$class.v[phen.loc,]), min=0, max=10*.Machine$double.eps)
    }
  }
  print(paste("target:", target))
  class.v <- CLS$class.v
  if (sort.phenotype == T) {
    # Sort samples by target so classes are contiguous in the heatmap.
    ind <- order(target)
    target <- target[ind]
    m <- as.matrix(m)[, ind]
    sample.names <- sample.names[ind]
    if (is.vector(class.v)) {
      class.v <- class.v[ind]
    } else {
      class.v <- class.v[, ind]
    }
  }
  annot <- MI <- AUC <- AUC.pval <- t.stat <- t.pval <- vector(length=N, mode="numeric")
  # Self-NMI of the target, used to normalize each row's NMI.
  NMI.ref <- mutual.inf.P(x = target, y = target, n.grid=100)$NMI
  for (i in 1:N) {
    if (N == 1) {
      feature <- m
    } else {
      feature <- m[i,]
    }
    MI[i] <- signif(mutual.inf.P(target, feature, n.grid=100)$NMI/NMI.ref, 4)
    if (target.type == "continuous") {
      AUC[i] <- AUC.pval[i] <- t.stat[i] <- t.pval[i] <- "-"
    } else if (target.type == "discrete") {
      feature.norm <- (feature - min(feature))/(max(feature) - min(feature))
      perf.auc <- roc.area(target, feature.norm)
      AUC[i] <- ifelse(perf.auc$A < 0.5, -(1 - perf.auc$A), perf.auc$A)  # signed AUC
      AUC[i] <- signif(AUC[i], digits=4)
      p.val <- perf.auc$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)  # fold p-values over 0.5
      AUC.pval[i] <- signif(p.val, digits=4)
      temp <- split(feature, target)
      x <- temp$'1'
      y <- temp$'0'
      t.stat[i] <- signif(t.test(x=x, y=y)$statistic, digits=4)
      p.val <- t.test(x=x, y=y)$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      t.pval[i] <- signif(p.val, digits=4)
    }
    annot[i] <- paste(MI[i], " ", AUC[i], " (", AUC.pval[i], ") ", t.stat[i], " (", t.pval[i], ")", sep="")
  }
  if ((N > 1) & (sort.results == T)) {
    # Reorder every per-row statistic by decreasing NMI.
    MI.order <- order(MI, decreasing=T)
    MI <- MI[MI.order]
    AUC <- AUC[MI.order]
    AUC.pval <- AUC.pval[MI.order]
    t.stat <- t.stat[MI.order]
    t.pval <- t.pval[MI.order]  # FIX: was `t.pval <= ...` (no-op comparison)
    m <- as.matrix(m)[MI.order,]
    annot <- annot[MI.order]
  }
  # Red-to-blue palette, 512 levels.
  mycol <- vector(length=512, mode = "numeric")
  for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
  for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
  mycol <- rev(mycol)
  ncolors <- length(mycol)
  # Top panel: target strip; bottom panel: row-standardized heatmap.
  nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(4, 10), FALSE)
  par(mar = c(1, 18, 3, 11))
  max.v <- max(max(target), -min(target))
  V1 <- target
  image(1:length(target), 1:1, as.matrix(V1), zlim = c(0, 1), col=c("yellow", "purple"), axes=FALSE, main="", sub = "", xlab= "", ylab="")
  axis(2, at=1:1, labels=paste(phenotype, target.class), adj= 0.5, tick=FALSE, las = 1, cex.axis=cex.axis, font.axis=1, line=-1)
  axis(4, at=1:1, labels=paste("NMI AUC (p-val) t-test (p-val)", sep=""), adj= 0.5, tick=FALSE,
       las = 1, cex.axis=cex.axis, font.axis=1, line=-1)
  par(mar = c(5, 18, 1, 11))
  if (display.top.n > N) display.top.n <- N
  if (N == 1) {
    V1 <- m
    V1 <- (V1 - mean(V1))/sd(V1)
  } else {
    V1 <- m[1:display.top.n, ]
    for (i in 1:display.top.n) V1[i,] <- (V1[i,] - mean(V1[i,]))/sd(V1[i,])  # z-score rows
  }
  max.v <- max(max(V1), -min(V1))
  V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))  # map to palette indices
  if (N > 1) {
    V1 <- apply(V1, MARGIN=2, FUN=rev)
    image(1:Ns, 1:display.top.n, t(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:display.top.n, labels=row.names(V1), adj= 0.5, tick=FALSE, las = 1, cex.axis=cex.axis, font.axis=1, line=-1)
    axis(4, at=1:display.top.n, labels=rev(annot[1:display.top.n]), adj= 0.5, tick=FALSE, las = 1, cex.axis=cex.axis, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=cex.axis, font.axis=1, line=-1)
  } else {
    image(1:Ns, 1:1, as.matrix(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:1, labels=model.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=cex.axis, font.axis=1, line=-1)
    axis(4, at=1:1, labels=annot[1], adj= 0.5, tick=FALSE, las = 1, cex.axis=cex.axis, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=cex.axis, font.axis=1, line=-1)
  }
  dev.off()
  # Tab-delimited results table (one row per gene set).
  annot2 <- data.frame(cbind(MI, AUC, AUC.pval, t.stat, t.pval))
  row.names(annot2) <- row.names(m)
  write(paste(c("gene set ", noquote(colnames(annot2))), collapse="\t"), file = output.txt, append = F,
        ncolumns = length(colnames(annot2)))
  write.table(annot2, file=output.txt, append=T, quote=F, sep="\t", eol="\n", col.names=F, row.names=T)
}
# Variant of OPAM.Evaluate.Results.2 that can score association with one
# of three mutual-information estimators:
#   "kernel_I"   - the sibling NMI() kernel estimator (default);
#   "kernel_Adj" - NMI() with correlation-adjusted bandwidths;
#   otherwise    - k-nearest-neighbor MI (knnmi, parmigene package),
#                  signed by the pearson correlation and scaled by the
#                  kernel joint entropy.
#
# Requires project helpers MSIG.Gct2Frame / MSIG.ReadPhenFile.2, the
# siblings NMI() (and MASS for bcv), parmigene (knnmi) and the
# `verification` package (roc.area).
#
# Fix: `t.pval <= t.pval[MI.order]` was a discarded comparison, leaving
# t-test p-values misaligned after sorting; it is now the intended
# assignment.
OPAM.Evaluate.Results.Yan <- function(
input.ds,
input.cls,
phenotype = NULL,
target.class = NULL,
target.type = "discrete",
MI.method = "kernel_I", # kernel_I, kernel_Adj or knn
sort.results = T,
display.top.n = 20,
output.txt,
output.pdf) {
  library(parmigene)
  pdf(file=output.pdf, height=8.5, width=11)
  dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  N <- dim(m)[1]
  model.names <- dataset$row.names
  model.descs <- dataset$descs
  Ns <- length(as.matrix(m)[1,])
  for (i in 1:N) {
    if (sd(as.matrix(m)[i,]) == 0) {
      val <- as.matrix(m)[i, 1]
      m[i,] <- as.matrix(m)[i,] + runif(n=Ns, min= val - 0.001, max=val + 0.001) # add small noise to flat profiles
    }
  }
  dim(m)
  sample.names <- dataset$names
  CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
  cls.labels <- CLS$class.v
  cls.phen <- CLS$phen
  cls.list <- CLS$class.list
  library(verification)
  # Locate the requested phenotype row (default: first).
  if (is.null(phenotype)) {
    phen.loc <- 1
  } else {
    phen.loc <- match(phenotype, CLS$phen.names)
  }
  if (is.vector(CLS$class.list)) {
    target.vec <- CLS$class.list
  } else {
    target.vec <- CLS$class.list[phen.loc,]
  }
  if (target.type == "continuous") {
    target <- target.vec
  } else if (target.type == "discrete") {
    target <- ifelse(target.vec == target.class, 1, 0)  # 1 = target class
  }
  # Sort samples by target so classes are contiguous in the heatmap.
  ind <- order(target)
  target <- target[ind]
  target.vec <- target.vec[ind]
  m <- as.matrix(m)[, ind]
  sample.names <- sample.names[ind]
  class.v <- CLS$class.v
  if (is.vector(class.v)) {
    class.v <- class.v[ind]
  } else {
    class.v <- class.v[, ind]
  }
  annot <- MI <- AUC <- AUC.pval <- t.stat <- t.pval <- vector(length=N, mode="numeric")
  # Self-NMI of the target (per selected estimator), used as normalizer.
  if (MI.method == "kernel_I") {
    NMI.ref <- NMI(x=target, y=target, n.grid=50, make.plot=F)$NMI
  } else if (MI.method == "kernel_Adj") {
    # Bandwidth adjustment shrinking with |correlation|.
    rho <- cor(target, target)
    adj <- log(1/(abs(rho) + 0.25)) + 0.75
    delta.param <- 1
    delta <- c(delta.param * adj * bcv(target), delta.param * adj * bcv(target))
    NMI.ref <- NMI(x=target, y=target, n.grid=50, delta = delta, make.plot=F)$NMI
  } else { # knn method
    NMI.ref <- cor(target, target) * knnmi(target, target, k=3, noise=1e-12)/NMI(x=target, y=target, n.grid=50, make.plot=F)$HXY
  }
  for (i in 1:N) {
    if (N == 1) {
      feature <- m
    } else {
      feature <- m[i,]
    }
    if (MI.method == "kernel_I") {
      NMI.val <- NMI(x=target, y=feature, n.grid=50, make.plot=F)$NMI # MI according to our kernel method
    } else if (MI.method == "kernel_Adj") {
      rho <- cor(target, feature)
      adj <- log(1/(abs(rho) + 0.25)) + 0.75
      delta.param <- 1
      delta <- c(delta.param * adj * bcv(target), delta.param * adj * bcv(feature))
      NMI.val <- NMI(x=target, y=feature, n.grid=50, delta = delta, make.plot=F)$NMI # MI according to our kernel method
    } else { # knn method
      NMI.val <- cor(target, feature) * knnmi(target, feature, k=3, noise=1e-12)/NMI(x=target, y=feature, n.grid=50, make.plot=F)$HXY
    }
    MI[i] <- signif(NMI.val/NMI.ref, 4)
    if (target.type == "continuous") {
      AUC[i] <- AUC.pval[i] <- t.stat[i] <- t.pval[i] <- "-"
    } else if (target.type == "discrete") {
      feature.norm <- (feature - min(feature))/(max(feature) - min(feature))
      perf.auc <- roc.area(target, feature.norm)
      AUC[i] <- ifelse(perf.auc$A < 0.5, -(1 - perf.auc$A), perf.auc$A)  # signed AUC
      AUC[i] <- signif(AUC[i], digits=4)
      p.val <- perf.auc$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)  # fold p-values over 0.5
      AUC.pval[i] <- signif(p.val, digits=4)
      temp <- split(feature, target)
      x <- temp$'1'
      y <- temp$'0'
      t.stat[i] <- signif(t.test(x=x, y=y)$statistic, digits=4)
      p.val <- t.test(x=x, y=y)$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      t.pval[i] <- signif(p.val, digits=4)
    }
    annot[i] <- paste(MI[i], " ", AUC[i], " (", AUC.pval[i], ") ", t.stat[i], " (", t.pval[i], ")", sep="")
  }
  if ((N > 1) & (sort.results == T)) {
    # Reorder every per-row statistic by decreasing NMI.
    MI.order <- order(MI, decreasing=T)
    MI <- MI[MI.order]
    AUC <- AUC[MI.order]
    AUC.pval <- AUC.pval[MI.order]
    t.stat <- t.stat[MI.order]
    t.pval <- t.pval[MI.order]  # FIX: was `t.pval <= ...` (no-op comparison)
    m <- as.matrix(m)[MI.order,]
    annot <- annot[MI.order]
  }
  # Red-to-blue palette, 512 levels.
  mycol <- vector(length=512, mode = "numeric")
  for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
  for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
  mycol <- rev(mycol)
  cex.axis = 1
  ncolors <- length(mycol)
  # Top panel: target strip; bottom panel: row-standardized heatmap.
  nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(4, 10), FALSE)
  par(mar = c(1, 15, 5, 15))
  max.v <- max(max(target), -min(target))
  V1 <- target
  image(1:length(target), 1:1, as.matrix(V1), zlim = c(0, 1), col=c("yellow", "purple"), axes=FALSE, main="", sub = "", xlab= "", ylab="")
  axis(2, at=1:1, labels=paste(phenotype, target.class), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
  axis(4, at=1:1, labels=paste("NMI AUC (p-val) t-test (p-val)", sep=""), adj= 0.5, tick=FALSE,
       las = 1, cex.axis=0.80, font.axis=1, line=-1)
  par(mar = c(5, 15, 1, 15))
  if (display.top.n > N) display.top.n <- N
  if (N == 1) {
    V1 <- m
    V1 <- (V1 - mean(V1))/sd(V1)
  } else {
    V1 <- m[1:display.top.n, ]
    for (i in 1:display.top.n) V1[i,] <- (V1[i,] - mean(V1[i,]))/sd(V1[i,])  # z-score rows
  }
  max.v <- max(max(V1), -min(V1))
  V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))  # map to palette indices
  if (N > 1) {
    V1 <- apply(V1, MARGIN=2, FUN=rev)
    image(1:Ns, 1:display.top.n, t(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:display.top.n, labels=row.names(V1), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(4, at=1:display.top.n, labels=rev(annot[1:display.top.n]), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  } else {
    image(1:Ns, 1:1, as.matrix(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:1, labels=model.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(4, at=1:1, labels=annot[1], adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  }
  dev.off()
  # Tab-delimited results table (one row per gene set).
  annot2 <- data.frame(cbind(MI, AUC, AUC.pval, t.stat, t.pval))
  row.names(annot2) <- row.names(m)
  write(paste(c("gene set ", noquote(colnames(annot2))), collapse="\t"), file = output.txt, append = F,
        ncolumns = length(colnames(annot2)))
  write.table(annot2, file=output.txt, append=T, quote=F, sep="\t", eol="\n", col.names=F, row.names=T)
}
# Kernel-density-based mutual information and signed normalized MI (NMI)
# between two numeric vectors, with optional diagnostic plots.
#
# @param x,y numeric vectors of equal length.
# @param n.grid grid resolution of the 2-D kernel density estimate.
# @param make.plot FALSE for no plot; TRUE or a subtitle string to draw
#   diagnostic panels (opens a quartz() device -- macOS only).
# @param delta kernel bandwidths for x and y (default: bcv() of each).
# @return list(NMI, MI, HXY, HX, HY); NMI carries the sign of cor(x, y).
#
# For definitions of mutual information and the universal metric (NMI)
# see "Mutual Information" (Wikipedia) and Cover & Thomas.
NMI <- function(x, y, n.grid=100, make.plot=F, delta = c(bcv(x), bcv(y))) {
  dens <- kde2d(x, y, n = n.grid, h = delta)
  X <- dens$x
  Y <- dens$y
  # Joint density -> probability table; eps keeps log2() finite, then
  # renormalize so the table sums to one.
  PXY <- dens$z + .Machine$double.eps
  PXY <- PXY/sum(PXY)
  # Marginals and their entropies (bits).
  PX <- rowSums(PXY)
  PX <- PX/sum(PX)
  PY <- colSums(PXY)
  PY <- PY/sum(PY)
  HX <- -sum(PX * log2(PX))
  HY <- -sum(PY * log2(PY))
  HXY <- -sum(PXY * log2(PXY))  # joint entropy
  # Redundancy-style normalized MI, signed by the pearson correlation to
  # carry directionality.
  NMI <- sign(cor(x, y)) * ((HX + HY)/HXY - 1)
  # Expand the marginals to full grids so PXY/(PX*PY) is elementwise.
  PX <- matrix(PX, nrow=n.grid, ncol=n.grid)
  PY <- matrix(PY, byrow = TRUE, nrow=n.grid, ncol=n.grid)
  MI <- sum(PXY * log2(PXY/(PX*PY)))
  if (make.plot != F) {
    # Diagnostic panels: both marginals, the joint density and the
    # pointwise MI contribution.
    mycol <- vector(length=512, mode = "numeric")
    for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
    for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
    mycol <- rev(mycol)
    quartz(width=12, height=8)  # macOS-only graphics device
    nf <- layout(matrix(c(1,2,3,4), 2, 2, byrow=T), c(1,1), c(1,1), TRUE)
    plot(X, PX[,1], type="l", main="X")
    plot(Y, PY[1,], type="l", main="Y")
    MIXY <- PXY * log2(PXY/(PX*PY))
    image(PXY, main = paste("P(x, y)", " pearson cor=", signif(cor(x, y), 3)), col=mycol)
    sub <- ifelse(make.plot != T, make.plot, " ")
    image(MIXY, main=paste("MI:", signif(MI, 3)), col=mycol, sub=sub)
  }
  return(list(NMI=NMI, MI=MI, HXY=HXY, HX=HX, HY=HY))
}
# ---------------------------------------------------------------------------
# Provenance (dataset-join residue converted to comments so the file parses):
#   path:    /project_scripts/pablo_NMF_scripts/OPAM.library.v7.R
#   license: no_license
#   repo:    lhogstrom/jailbird
#   lang:    R (359,323 bytes)
# ---------------------------------------------------------------------------
library(CvM2SL2Test)
library(MASS)
library(verification)
OPAM.project.dataset.RNAi <- function(
  input.ds,
  output.ds,
  gene.set.databases,
  gene.set.selection = "ALL",     # "ALL" or list with names of gene sets
  sample.norm.type = "rank",      # "rank", "log" or "log.rank"
  weight = 0.25,
  statistic = "area.under.RES",
  output.score.type = "ES",       # "ES" or "NES"
  nperm = 200,                    # number of random permutations for NES case
  combine.mode = "combine.off",   # "combine.off": keep *_UP / *_DN gene sets as-is.
                                  # "combine.replace": replace each *_UP / *_DN pair with a
                                  # single combined (UP - DN) score.
                                  # "combine.add": add the combined score but also keep the
                                  # individual *_UP and *_DN entries.
  min.overlap = 1,                # minimum gene-set/dataset overlap required to score a set
  correl.type = "rank")           # "rank", "z.score", "symm.rank"
{ #----------------------------------------------------------------------------------------
  # Single-sample GSEA projection for RNAi (ATARiS/hairpin) GCT datasets: reads
  # the dataset, normalizes each sample, scores every selected gene set in every
  # sample with OPAM.Projection.RNAi, optionally combines *_UP/*_DN pairs, and
  # writes the resulting gene-set x sample score matrix to output.ds (GCT).
  # No return value; output is written to disk.
  library(gtools)
  library(verification)
  library(RColorBrewer)
  # --- Read input dataset ---------------------------------------------------
  dataset <- MSIG.Gct2Frame(filename = input.ds)  # GCT format
  m <- data.matrix(dataset$ds)
  # In ATARiS or hairpin gct files the gene symbols are in the descs column
  gene.names <- dataset$descs
  gene.descs <- dataset$descs
  sample.names <- dataset$names
  Ns <- length(m[1,])   # number of samples
  Ng <- length(m[,1])   # number of genes
  temp <- strsplit(input.ds, split="/")   # extract input file name
  s <- length(temp[[1]])
  input.file.name <- temp[[1]][s]
  temp <- strsplit(input.file.name, split=".gct")
  input.file.prefix <- temp[[1]][1]
  # --- Per-sample normalization ---------------------------------------------
  if (sample.norm.type == "rank") {
    for (j in 1:Ns) {   # column rank normalization
      m[,j] <- rank(m[,j], ties.method = "average")
    }
    m <- 10000*m/Ng
  } else if (sample.norm.type == "log.rank") {
    for (j in 1:Ns) {   # column rank normalization, then log
      m[,j] <- rank(m[,j], ties.method = "average")
    }
    m <- log(10000*m/Ng + exp(1))
  } else if (sample.norm.type == "log") {
    m[m < 1] <- 1       # floor at 1 so the log transform is well-behaved
    m <- log(m + exp(1))
  }
  # --- Read gene set databases (first pass sizes the containers) ------------
  max.G <- 0
  max.N <- 0
  for (gsdb in gene.set.databases) {
    GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
    max.G <- max(max.G, max(GSDB$size.G))
    max.N <- max.N + GSDB$N.gs
  }
  gs <- matrix("null", nrow=max.N, ncol=max.G)
  gs.names <- vector(length=max.N, mode="character")
  gs.descs <- vector(length=max.N, mode="character")
  size.G <- vector(length=max.N, mode="numeric")
  start <- 1
  for (gsdb in gene.set.databases) {
    GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
    N.gs <- GSDB$N.gs
    gs.names[start:(start + N.gs - 1)] <- GSDB$gs.names
    gs.descs[start:(start + N.gs - 1)] <- GSDB$gs.desc
    size.G[start:(start + N.gs - 1)] <- GSDB$size.G
    gs[start:(start + N.gs - 1), 1:max(GSDB$size.G)] <- GSDB$gs[1:N.gs, 1:max(GSDB$size.G)]
    start <- start + N.gs
  }
  N.gs <- max.N
  # --- Select desired gene sets ---------------------------------------------
  if (gene.set.selection[1] != "ALL") {
    locs <- match(gene.set.selection, gs.names)
    # BUGFIX: drop selections not found in the databases; NA indices previously
    # produced NA rows/names and a mismatched N.gs
    locs <- locs[!is.na(locs)]
    N.gs <- length(locs)
    if (N.gs == 0) stop("None of the selected gene sets were found in the databases")
    gs <- gs[locs, , drop = FALSE]   # drop = FALSE keeps a 1-row matrix when N.gs == 1
    gs.names <- gs.names[locs]
    gs.descs <- gs.descs[locs]
    size.G <- size.G[locs]
  }
  # --- Score every gene set in every sample ---------------------------------
  score.matrix <- matrix(0, nrow=N.gs, ncol=Ns)
  for (gs.i in 1:N.gs) {
    gene.set <- gs[gs.i, 1:size.G[gs.i]]
    gene.overlap <- intersect(gene.set, gene.names)
    print(paste(gs.i, "gene set:", gs.names[gs.i], " overlap=", length(gene.overlap)))
    if (length(gene.overlap) < min.overlap) {
      score.matrix[gs.i, ] <- rep(NA, Ns)   # flagged for pruning below
      next
    }
    if (output.score.type == "ES") {
      OPAM <- OPAM.Projection.RNAi(data.array = m, gene.names = gene.names, n.cols = Ns,
                                   n.rows = Ng, weight = weight, statistic = statistic,
                                   gene.set = gene.overlap, nperm = 1, correl.type = correl.type)
      score.matrix[gs.i,] <- OPAM$ES.vector
    } else if (output.score.type == "NES") {
      OPAM <- OPAM.Projection.RNAi(data.array = m, gene.names = gene.names, n.cols = Ns,
                                   n.rows = Ng, weight = weight, statistic = statistic,
                                   gene.set = gene.overlap, nperm = nperm, correl.type = correl.type)
      score.matrix[gs.i,] <- OPAM$NES.vector
    }
  }
  # --- Prune gene sets with insufficient overlap ----------------------------
  locs <- !is.na(score.matrix[,1])
  print(paste("N.gs before overlap prunning:", N.gs))
  N.gs <- sum(locs)
  print(paste("N.gs after overlap prunning:", N.gs))
  # BUGFIX: drop = FALSE keeps the matrix shape when only one gene set survives
  # (previously collapsed to a vector, breaking row indexing and row.names below)
  score.matrix <- score.matrix[locs, , drop = FALSE]
  gs.names <- gs.names[locs]
  gs.descs <- gs.descs[locs]
  # --- Optionally combine *_UP / *_DN pairs ---------------------------------
  initial.up.entries <- 0
  final.up.entries <- 0
  initial.dn.entries <- 0
  final.dn.entries <- 0
  combined.entries <- 0
  other.entries <- 0
  if (combine.mode == "combine.off") {
    score.matrix.2 <- score.matrix
    gs.names.2 <- gs.names
    gs.descs.2 <- gs.descs
  } else if ((combine.mode == "combine.replace") || (combine.mode == "combine.add")) {
    score.matrix.2 <- NULL
    gs.names.2 <- NULL
    gs.descs.2 <- NULL
    for (i in 1:N.gs) {
      temp <- strsplit(gs.names[i], split="_")
      body <- paste(temp[[1]][seq(1, length(temp[[1]]) - 1)], collapse="_")
      suffix <- tail(temp[[1]], 1)
      print(paste("i:", i, "gene set:", gs.names[i], "body:", body, "suffix:", suffix))
      if (suffix == "UP") {            # "UP" gene set
        initial.up.entries <- initial.up.entries + 1
        loc <- match(paste(body, "DN", sep="_"), gs.names)
        if (!is.na(loc)) {             # matching "DN" set: emit combined UP - DN score
          score <- score.matrix[i,] - score.matrix[loc,]
          score.matrix.2 <- rbind(score.matrix.2, score)
          gs.names.2 <- c(gs.names.2, body)
          gs.descs.2 <- c(gs.descs.2, paste(gs.descs[i], "combined UP & DN"))
          combined.entries <- combined.entries + 1
          if (combine.mode == "combine.add") {   # also keep the "UP" entry
            score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
            gs.names.2 <- c(gs.names.2, gs.names[i])
            gs.descs.2 <- c(gs.descs.2, gs.descs[i])
            final.up.entries <- final.up.entries + 1
          }
        } else {                       # no matching "DN" set: keep the "UP" entry
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.up.entries <- final.up.entries + 1
        }
      } else if (suffix == "DN") {     # "DN" gene set
        initial.dn.entries <- initial.dn.entries + 1
        loc <- match(paste(body, "UP", sep="_"), gs.names)
        if (is.na(loc)) {              # no matching "UP" set: keep the "DN" entry
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.dn.entries <- final.dn.entries + 1
        } else if (combine.mode == "combine.add") {
          # pair already combined at the "UP" entry; keep "DN" only in add mode
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.dn.entries <- final.dn.entries + 1
        }
      } else {                         # neither "UP" nor "DN": keep individual entry
        score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
        gs.names.2 <- c(gs.names.2, gs.names[i])
        gs.descs.2 <- c(gs.descs.2, gs.descs[i])
        other.entries <- other.entries + 1
      }
    } # end for loop over gene sets
    print(paste("initial.up.entries:", initial.up.entries))
    print(paste("final.up.entries:", final.up.entries))
    print(paste("initial.dn.entries:", initial.dn.entries))
    print(paste("final.dn.entries:", final.dn.entries))
    print(paste("other.entries:", other.entries))
    print(paste("combined.entries:", combined.entries))
    print(paste("total entries:", length(score.matrix.2[,1])))
  } else {
    # BUGFIX: previously an invalid mode failed later with an obscure
    # "object 'score.matrix.2' not found"
    stop(paste("Unknown combine.mode:", combine.mode))
  }
  # --- Write projected dataset ----------------------------------------------
  V.GCT <- data.frame(score.matrix.2)
  names(V.GCT) <- sample.names
  row.names(V.GCT) <- gs.names.2
  write.gct(gct.data.frame = V.GCT, descs = gs.descs.2, filename = output.ds)
} # end of OPAM.project.RNAi.dataset
OPAM.project.dataset.4 <- function(
  input.ds,
  output.ds,
  gene.set.databases,
  gene.set.selection = "ALL",     # "ALL" or list with names of gene sets
  sample.norm.type = "rank",      # "rank", "log" or "log.rank"
  weight = 0.25,
  statistic = "area.under.RES",
  output.score.type = "ES",       # "ES" or "NES"
  nperm = 200,                    # number of random permutations for NES case
  combine.mode = "combine.off",   # "combine.off": keep *_UP / *_DN gene sets as-is.
                                  # "combine.replace": replace each *_UP / *_DN pair with a
                                  # single combined (UP - DN) score.
                                  # "combine.add": add the combined score but also keep the
                                  # individual *_UP and *_DN entries.
  min.overlap = 1,                # minimum gene-set/dataset overlap required to score a set
  correl.type = "rank")           # "rank", "z.score", "symm.rank"
{ #----------------------------------------------------------------------------------------
  # Single-sample GSEA projection: reads a GCT expression dataset (gene symbols
  # in row.names), normalizes each sample, scores every selected gene set in
  # every sample with OPAM.Projection.3, optionally combines *_UP/*_DN pairs,
  # and writes the gene-set x sample score matrix to output.ds (GCT format).
  # No return value; output is written to disk.
  library(gtools)
  library(verification)
  library(RColorBrewer)
  # --- Read input dataset ---------------------------------------------------
  dataset <- MSIG.Gct2Frame(filename = input.ds)  # GCT format
  m <- data.matrix(dataset$ds)
  gene.names <- dataset$row.names
  gene.descs <- dataset$descs
  sample.names <- dataset$names
  Ns <- length(m[1,])   # number of samples
  Ng <- length(m[,1])   # number of genes
  temp <- strsplit(input.ds, split="/")   # extract input file name
  s <- length(temp[[1]])
  input.file.name <- temp[[1]][s]
  temp <- strsplit(input.file.name, split=".gct")
  input.file.prefix <- temp[[1]][1]
  # --- Per-sample normalization ---------------------------------------------
  if (sample.norm.type == "rank") {
    for (j in 1:Ns) {   # column rank normalization
      m[,j] <- rank(m[,j], ties.method = "average")
    }
    m <- 10000*m/Ng
  } else if (sample.norm.type == "log.rank") {
    for (j in 1:Ns) {   # column rank normalization, then log
      m[,j] <- rank(m[,j], ties.method = "average")
    }
    m <- log(10000*m/Ng + exp(1))
  } else if (sample.norm.type == "log") {
    m[m < 1] <- 1       # floor at 1 so the log transform is well-behaved
    m <- log(m + exp(1))
  }
  # --- Read gene set databases (first pass sizes the containers) ------------
  max.G <- 0
  max.N <- 0
  for (gsdb in gene.set.databases) {
    GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
    max.G <- max(max.G, max(GSDB$size.G))
    max.N <- max.N + GSDB$N.gs
  }
  gs <- matrix("null", nrow=max.N, ncol=max.G)
  gs.names <- vector(length=max.N, mode="character")
  gs.descs <- vector(length=max.N, mode="character")
  size.G <- vector(length=max.N, mode="numeric")
  start <- 1
  for (gsdb in gene.set.databases) {
    GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
    N.gs <- GSDB$N.gs
    gs.names[start:(start + N.gs - 1)] <- GSDB$gs.names
    gs.descs[start:(start + N.gs - 1)] <- GSDB$gs.desc
    size.G[start:(start + N.gs - 1)] <- GSDB$size.G
    gs[start:(start + N.gs - 1), 1:max(GSDB$size.G)] <- GSDB$gs[1:N.gs, 1:max(GSDB$size.G)]
    start <- start + N.gs
  }
  N.gs <- max.N
  # --- Select desired gene sets ---------------------------------------------
  if (gene.set.selection[1] != "ALL") {
    locs <- match(gene.set.selection, gs.names)
    # BUGFIX: drop selections not found in the databases; NA indices previously
    # produced NA rows/names and a mismatched N.gs
    locs <- locs[!is.na(locs)]
    N.gs <- length(locs)
    if (N.gs == 0) stop("None of the selected gene sets were found in the databases")
    gs <- gs[locs, , drop = FALSE]   # drop = FALSE keeps a 1-row matrix when N.gs == 1
    gs.names <- gs.names[locs]
    gs.descs <- gs.descs[locs]
    size.G <- size.G[locs]
  }
  # --- Score every gene set in every sample ---------------------------------
  score.matrix <- matrix(0, nrow=N.gs, ncol=Ns)
  for (gs.i in 1:N.gs) {
    gene.set <- gs[gs.i, 1:size.G[gs.i]]
    gene.overlap <- intersect(gene.set, gene.names)
    print(paste(gs.i, "gene set:", gs.names[gs.i], " overlap=", length(gene.overlap)))
    if (length(gene.overlap) < min.overlap) {
      score.matrix[gs.i, ] <- rep(NA, Ns)   # flagged for pruning below
      next
    }
    if (output.score.type == "ES") {
      OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
                                n.rows = Ng, weight = weight, statistic = statistic,
                                gene.set = gene.overlap, nperm = 1, correl.type = correl.type)
      score.matrix[gs.i,] <- OPAM$ES.vector
    } else if (output.score.type == "NES") {
      OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
                                n.rows = Ng, weight = weight, statistic = statistic,
                                gene.set = gene.overlap, nperm = nperm, correl.type = correl.type)
      score.matrix[gs.i,] <- OPAM$NES.vector
    }
  }
  # --- Prune gene sets with insufficient overlap ----------------------------
  locs <- !is.na(score.matrix[,1])
  print(paste("N.gs before overlap prunning:", N.gs))
  N.gs <- sum(locs)
  print(paste("N.gs after overlap prunning:", N.gs))
  # BUGFIX: drop = FALSE keeps the matrix shape when only one gene set survives
  # (previously collapsed to a vector, breaking row indexing and row.names below)
  score.matrix <- score.matrix[locs, , drop = FALSE]
  gs.names <- gs.names[locs]
  gs.descs <- gs.descs[locs]
  # --- Optionally combine *_UP / *_DN pairs ---------------------------------
  initial.up.entries <- 0
  final.up.entries <- 0
  initial.dn.entries <- 0
  final.dn.entries <- 0
  combined.entries <- 0
  other.entries <- 0
  if (combine.mode == "combine.off") {
    score.matrix.2 <- score.matrix
    gs.names.2 <- gs.names
    gs.descs.2 <- gs.descs
  } else if ((combine.mode == "combine.replace") || (combine.mode == "combine.add")) {
    score.matrix.2 <- NULL
    gs.names.2 <- NULL
    gs.descs.2 <- NULL
    for (i in 1:N.gs) {
      temp <- strsplit(gs.names[i], split="_")
      body <- paste(temp[[1]][seq(1, length(temp[[1]]) - 1)], collapse="_")
      suffix <- tail(temp[[1]], 1)
      print(paste("i:", i, "gene set:", gs.names[i], "body:", body, "suffix:", suffix))
      if (suffix == "UP") {            # "UP" gene set
        initial.up.entries <- initial.up.entries + 1
        loc <- match(paste(body, "DN", sep="_"), gs.names)
        if (!is.na(loc)) {             # matching "DN" set: emit combined UP - DN score
          score <- score.matrix[i,] - score.matrix[loc,]
          score.matrix.2 <- rbind(score.matrix.2, score)
          gs.names.2 <- c(gs.names.2, body)
          gs.descs.2 <- c(gs.descs.2, paste(gs.descs[i], "combined UP & DN"))
          combined.entries <- combined.entries + 1
          if (combine.mode == "combine.add") {   # also keep the "UP" entry
            score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
            gs.names.2 <- c(gs.names.2, gs.names[i])
            gs.descs.2 <- c(gs.descs.2, gs.descs[i])
            final.up.entries <- final.up.entries + 1
          }
        } else {                       # no matching "DN" set: keep the "UP" entry
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.up.entries <- final.up.entries + 1
        }
      } else if (suffix == "DN") {     # "DN" gene set
        initial.dn.entries <- initial.dn.entries + 1
        loc <- match(paste(body, "UP", sep="_"), gs.names)
        if (is.na(loc)) {              # no matching "UP" set: keep the "DN" entry
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.dn.entries <- final.dn.entries + 1
        } else if (combine.mode == "combine.add") {
          # pair already combined at the "UP" entry; keep "DN" only in add mode
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.dn.entries <- final.dn.entries + 1
        }
      } else {                         # neither "UP" nor "DN": keep individual entry
        score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
        gs.names.2 <- c(gs.names.2, gs.names[i])
        gs.descs.2 <- c(gs.descs.2, gs.descs[i])
        other.entries <- other.entries + 1
      }
    } # end for loop over gene sets
    print(paste("initial.up.entries:", initial.up.entries))
    print(paste("final.up.entries:", final.up.entries))
    print(paste("initial.dn.entries:", initial.dn.entries))
    print(paste("final.dn.entries:", final.dn.entries))
    print(paste("other.entries:", other.entries))
    print(paste("combined.entries:", combined.entries))
    print(paste("total entries:", length(score.matrix.2[,1])))
  } else {
    # BUGFIX: previously an invalid mode failed later with an obscure
    # "object 'score.matrix.2' not found"
    stop(paste("Unknown combine.mode:", combine.mode))
  }
  # --- Write projected dataset ----------------------------------------------
  V.GCT <- data.frame(score.matrix.2)
  names(V.GCT) <- sample.names
  row.names(V.GCT) <- gs.names.2
  write.gct(gct.data.frame = V.GCT, descs = gs.descs.2, filename = output.ds)
} # end of OPAM.project.dataset.4
OPAM.project.dataset.5 <- function(
  input.ds,
  output.ds,
  gene.set.databases,
  gene.set.selection = "ALL",     # "ALL" or list with names of gene sets
  sample.norm.type = "rank",      # "rank", "log" or "log.rank"
  weight = 0.25,
  statistic = "area.under.RES",
  output.score.type = "ES",       # "ES" or "NES"
  nperm = 200,                    # number of random permutations for NES case
  combine.mode = "combine.off",   # "combine.off": keep *_UP / *_DN gene sets as-is.
                                  # "combine.replace": replace each *_UP / *_DN pair with a
                                  # single combined (UP - DN) score.
                                  # "combine.add": add the combined score but also keep the
                                  # individual *_UP and *_DN entries.
  min.overlap = 1,                # minimum gene-set/dataset overlap required to score a set
  correl.type = "rank")           # "rank", "z.score", "symm.rank"
{ #----------------------------------------------------------------------------------------
  # Single-sample GSEA projection (variant of OPAM.project.dataset.4 that also
  # reports redundant/duplicated gene-set names before and after combining):
  # reads a GCT expression dataset, normalizes each sample, scores every
  # selected gene set in every sample with OPAM.Projection.3, optionally
  # combines *_UP/*_DN pairs, and writes the gene-set x sample score matrix to
  # output.ds (GCT format). No return value; output is written to disk.
  library(gtools)
  library(verification)
  library(RColorBrewer)
  # --- Read input dataset ---------------------------------------------------
  dataset <- MSIG.Gct2Frame(filename = input.ds)  # GCT format
  m <- data.matrix(dataset$ds)
  gene.names <- dataset$row.names
  gene.descs <- dataset$descs
  sample.names <- dataset$names
  Ns <- length(m[1,])   # number of samples
  Ng <- length(m[,1])   # number of genes
  temp <- strsplit(input.ds, split="/")   # extract input file name
  s <- length(temp[[1]])
  input.file.name <- temp[[1]][s]
  temp <- strsplit(input.file.name, split=".gct")
  input.file.prefix <- temp[[1]][1]
  # --- Per-sample normalization ---------------------------------------------
  if (sample.norm.type == "rank") {
    for (j in 1:Ns) {   # column rank normalization
      m[,j] <- rank(m[,j], ties.method = "average")
    }
    m <- 10000*m/Ng
  } else if (sample.norm.type == "log.rank") {
    for (j in 1:Ns) {   # column rank normalization, then log
      m[,j] <- rank(m[,j], ties.method = "average")
    }
    m <- log(10000*m/Ng + exp(1))
  } else if (sample.norm.type == "log") {
    m[m < 1] <- 1       # floor at 1 so the log transform is well-behaved
    m <- log(m + exp(1))
  }
  # --- Read gene set databases (first pass sizes the containers) ------------
  max.G <- 0
  max.N <- 0
  for (gsdb in gene.set.databases) {
    GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
    max.G <- max(max.G, max(GSDB$size.G))
    max.N <- max.N + GSDB$N.gs
  }
  gs <- matrix("null", nrow=max.N, ncol=max.G)
  gs.names <- vector(length=max.N, mode="character")
  gs.descs <- vector(length=max.N, mode="character")
  size.G <- vector(length=max.N, mode="numeric")
  start <- 1
  for (gsdb in gene.set.databases) {
    GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
    N.gs <- GSDB$N.gs
    gs.names[start:(start + N.gs - 1)] <- GSDB$gs.names
    gs.descs[start:(start + N.gs - 1)] <- GSDB$gs.desc
    size.G[start:(start + N.gs - 1)] <- GSDB$size.G
    gs[start:(start + N.gs - 1), 1:max(GSDB$size.G)] <- GSDB$gs[1:N.gs, 1:max(GSDB$size.G)]
    start <- start + N.gs
  }
  N.gs <- max.N
  # --- Select desired gene sets ---------------------------------------------
  if (gene.set.selection[1] != "ALL") {
    locs <- match(gene.set.selection, gs.names)
    print(rbind(gene.set.selection, locs))   # diagnostic: show unmatched selections
    # BUGFIX: drop selections not found in the databases; NA indices previously
    # produced NA rows/names and a mismatched N.gs
    locs <- locs[!is.na(locs)]
    N.gs <- length(locs)
    if (N.gs == 0) stop("None of the selected gene sets were found in the databases")
    gs <- gs[locs, , drop = FALSE]   # drop = FALSE keeps a 1-row matrix when N.gs == 1
    gs.names <- gs.names[locs]
    gs.descs <- gs.descs[locs]
    size.G <- size.G[locs]
  }
  # --- Check for redundant gene sets ----------------------------------------
  tab <- as.data.frame(table(gs.names))
  ind <- order(tab[, "Freq"], decreasing=TRUE)
  tab <- tab[ind,]
  print(tab[1:10,])   # ten most frequent names (duplicates surface here)
  print(paste("Total gene sets:", length(gs.names)))
  print(paste("Unique gene sets:", length(unique(gs.names))))
  # --- Score every gene set in every sample ---------------------------------
  score.matrix <- matrix(0, nrow=N.gs, ncol=Ns)
  for (gs.i in 1:N.gs) {
    gene.set <- gs[gs.i, 1:size.G[gs.i]]
    gene.overlap <- intersect(gene.set, gene.names)
    print(paste(gs.i, "gene set:", gs.names[gs.i], " overlap=", length(gene.overlap)))
    if (length(gene.overlap) < min.overlap) {
      score.matrix[gs.i, ] <- rep(NA, Ns)   # flagged for pruning below
      next
    }
    if (output.score.type == "ES") {
      OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
                                n.rows = Ng, weight = weight, statistic = statistic,
                                gene.set = gene.overlap, nperm = 1, correl.type = correl.type)
      score.matrix[gs.i,] <- OPAM$ES.vector
    } else if (output.score.type == "NES") {
      OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
                                n.rows = Ng, weight = weight, statistic = statistic,
                                gene.set = gene.overlap, nperm = nperm, correl.type = correl.type)
      score.matrix[gs.i,] <- OPAM$NES.vector
    }
  }
  # --- Prune gene sets with insufficient overlap ----------------------------
  locs <- !is.na(score.matrix[,1])
  print(paste("N.gs before overlap prunning:", N.gs))
  N.gs <- sum(locs)
  print(paste("N.gs after overlap prunning:", N.gs))
  # BUGFIX: drop = FALSE keeps the matrix shape when only one gene set survives
  # (previously collapsed to a vector, breaking row indexing and row.names below)
  score.matrix <- score.matrix[locs, , drop = FALSE]
  gs.names <- gs.names[locs]
  gs.descs <- gs.descs[locs]
  # --- Optionally combine *_UP / *_DN pairs ---------------------------------
  initial.up.entries <- 0
  final.up.entries <- 0
  initial.dn.entries <- 0
  final.dn.entries <- 0
  combined.entries <- 0
  other.entries <- 0
  if (combine.mode == "combine.off") {
    score.matrix.2 <- score.matrix
    gs.names.2 <- gs.names
    gs.descs.2 <- gs.descs
  } else if ((combine.mode == "combine.replace") || (combine.mode == "combine.add")) {
    score.matrix.2 <- NULL
    gs.names.2 <- NULL
    gs.descs.2 <- NULL
    for (i in 1:N.gs) {
      temp <- strsplit(gs.names[i], split="_")
      body <- paste(temp[[1]][seq(1, length(temp[[1]]) - 1)], collapse="_")
      suffix <- tail(temp[[1]], 1)
      print(paste("i:", i, "gene set:", gs.names[i], "body:", body, "suffix:", suffix))
      if (suffix == "UP") {            # "UP" gene set
        initial.up.entries <- initial.up.entries + 1
        loc <- match(paste(body, "DN", sep="_"), gs.names)
        if (!is.na(loc)) {             # matching "DN" set: emit combined UP - DN score
          score <- score.matrix[i,] - score.matrix[loc,]
          score.matrix.2 <- rbind(score.matrix.2, score)
          gs.names.2 <- c(gs.names.2, body)
          gs.descs.2 <- c(gs.descs.2, paste(gs.descs[i], "combined UP & DN"))
          combined.entries <- combined.entries + 1
          if (combine.mode == "combine.add") {   # also keep the "UP" entry
            score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
            gs.names.2 <- c(gs.names.2, gs.names[i])
            gs.descs.2 <- c(gs.descs.2, gs.descs[i])
            final.up.entries <- final.up.entries + 1
          }
        } else {                       # no matching "DN" set: keep the "UP" entry
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.up.entries <- final.up.entries + 1
        }
      } else if (suffix == "DN") {     # "DN" gene set
        initial.dn.entries <- initial.dn.entries + 1
        loc <- match(paste(body, "UP", sep="_"), gs.names)
        if (is.na(loc)) {              # no matching "UP" set: keep the "DN" entry
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.dn.entries <- final.dn.entries + 1
        } else if (combine.mode == "combine.add") {
          # pair already combined at the "UP" entry; keep "DN" only in add mode
          score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
          gs.names.2 <- c(gs.names.2, gs.names[i])
          gs.descs.2 <- c(gs.descs.2, gs.descs[i])
          final.dn.entries <- final.dn.entries + 1
        }
      } else {                         # neither "UP" nor "DN": keep individual entry
        score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
        gs.names.2 <- c(gs.names.2, gs.names[i])
        gs.descs.2 <- c(gs.descs.2, gs.descs[i])
        other.entries <- other.entries + 1
      }
    } # end for loop over gene sets
    print(paste("initial.up.entries:", initial.up.entries))
    print(paste("final.up.entries:", final.up.entries))
    print(paste("initial.dn.entries:", initial.dn.entries))
    print(paste("final.dn.entries:", final.dn.entries))
    print(paste("other.entries:", other.entries))
    print(paste("combined.entries:", combined.entries))
    print(paste("total entries:", length(score.matrix.2[,1])))
  } else {
    # BUGFIX: previously an invalid mode failed later with an obscure
    # "object 'score.matrix.2' not found"
    stop(paste("Unknown combine.mode:", combine.mode))
  }
  # --- Check for redundant gene sets after combining ------------------------
  tab <- as.data.frame(table(gs.names.2))
  ind <- order(tab[, "Freq"], decreasing=TRUE)
  tab <- tab[ind,]
  print(tab[1:20,])
  print(paste("Total gene sets:", length(gs.names.2)))
  print(paste("Unique gene sets:", length(unique(gs.names.2))))
  # --- Write projected dataset ----------------------------------------------
  V.GCT <- data.frame(score.matrix.2)
  names(V.GCT) <- sample.names
  row.names(V.GCT) <- gs.names.2
  write.gct(gct.data.frame = V.GCT, descs = gs.descs.2, filename = output.ds)
} # end of OPAM.project.dataset.5
OPAM.Projection.2 <- function(
data.array,
gene.names,
n.cols,
n.rows,
weight = 0,
statistic = "Kolmogorov-Smirnov", # "Kolmogorov-Smirnov", # "Kolmogorov-Smirnov", "Cramer-von-Mises",
# "Anderson-Darling", "Zhang_A", "Zhang_C", "Zhang_K",
# "area.under.RES", or "Wilcoxon"
gene.set,
nperm = 200,
correl.type = "rank") # "rank", "z.score", "symm.rank"
{
# Single-sample GSEA projection of one gene set across all samples (columns) of
# data.array. For each sample the genes are ordered by decreasing value, an
# enrichment score (ES) is computed with GSEA.EnrichmentScore5, and -- when
# nperm > 0 -- a normalized score (NES) and permutation p-value are derived from
# nperm random gene-label shuffles, normalizing positive and negative scores
# separately against the positive/negative halves of the null distribution.
#   data.array  : genes x samples numeric matrix (normalization done by caller)
#   gene.names  : gene identifiers parallel to the rows of data.array
#   n.cols      : number of samples (columns of data.array)
#   n.rows      : number of genes (rows of data.array)
#   weight      : GSEA weighting exponent; 0 means unweighted (all-ones weights)
#   statistic   : enrichment statistic, passed through to GSEA.EnrichmentScore5
#   gene.set    : vector of gene identifiers defining the set to score
#   nperm       : number of permutations; 0 skips the null (NES <- ES, p <- 1)
#   correl.type : transform of the sorted expression used as GSEA weights
# Returns list(ES.vector, NES.vector, p.val.vector), one entry per sample.
ES.vector <- vector(length=n.cols)
NES.vector <- vector(length=n.cols)
p.val.vector <- vector(length=n.cols)
correl.vector <- vector(length=n.rows, mode="numeric")
# Compute ES score for signatures in each sample
# print("Computing GSEA.....")
# phi holds the permutation (null) ES values: one row per sample
phi <- array(0, c(n.cols, nperm))
for (sample.index in 1:n.cols) {
gene.list <- order(data.array[, sample.index], decreasing=T)
# print(paste("Computing observed enrichment for UP signature in sample:", sample.index, sep=" "))
# Positions of the gene set within gene.names (NA if absent)
gene.set2 <- match(gene.set, gene.names)
if (weight == 0) {
# Unweighted: every gene contributes equally
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
if (correl.type == "rank") {
# Sorted (decreasing) expression values serve directly as weights
correl.vector <- data.array[gene.list, sample.index]
} else if (correl.type == "symm.rank") {
correl.vector <- data.array[gene.list, sample.index]
# Reflect the lower half around the middle value to symmetrize the weights
correl.vector <- ifelse(correl.vector > correl.vector[ceiling(n.rows/2)],
correl.vector,
correl.vector + correl.vector - correl.vector[ceiling(n.rows/2)])
} else if (correl.type == "z.score") {
x <- data.array[gene.list, sample.index]
correl.vector <- (x - mean(x))/sd(x)
}
}
GSEA.results <- GSEA.EnrichmentScore5(gene.list=gene.list, gene.set=gene.set2,
statistic = statistic, alpha = weight, correl.vector = correl.vector)
ES.vector[sample.index] <- GSEA.results$ES
if (nperm == 0) {
# No permutations requested: report the raw ES and a non-significant p-value
NES.vector[sample.index] <- ES.vector[sample.index]
p.val.vector[sample.index] <- 1
} else {
# Build the null distribution by shuffling gene labels
for (r in 1:nperm) {
reshuffled.gene.labels <- sample(1:n.rows)
if (weight == 0) {
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
# NOTE(review): the permutation weights always use the raw shuffled values,
# ignoring correl.type -- presumably intentional (for speed); confirm.
correl.vector <- data.array[reshuffled.gene.labels, sample.index]
}
GSEA.results <- GSEA.EnrichmentScore5(gene.list=reshuffled.gene.labels, gene.set=gene.set2,
statistic = statistic, alpha = weight, correl.vector = correl.vector)
phi[sample.index, r] <- GSEA.results$ES
}
# Normalize positive and negative observed scores against the matching-signed
# half of the null; p-values are one-sided and floored at 1/nperm
if (ES.vector[sample.index] >= 0) {
pos.phi <- phi[sample.index, phi[sample.index, ] >= 0]
if (length(pos.phi) == 0) pos.phi <- 0.5
pos.m <- mean(pos.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/pos.m
s <- sum(pos.phi >= ES.vector[sample.index])/length(pos.phi)
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
} else {
neg.phi <- phi[sample.index, phi[sample.index, ] < 0]
if (length(neg.phi) == 0) neg.phi <- 0.5
neg.m <- mean(neg.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/abs(neg.m)
s <- sum(neg.phi <= ES.vector[sample.index])/length(neg.phi)
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
}
}
}
return(list(ES.vector = ES.vector, NES.vector = NES.vector, p.val.vector = p.val.vector))
} # end of OPAM.Projection.2
OPAM.Projection.3 <- function(
data.array,
gene.names,
n.cols,
n.rows,
weight = 0,
statistic = "Kolmogorov-Smirnov", # "Kolmogorov-Smirnov", # "Kolmogorov-Smirnov", "Cramer-von-Mises",
# "Anderson-Darling", "Zhang_A", "Zhang_C", "Zhang_K",
# "area.under.RES", or "Wilcoxon"
gene.set,
nperm = 200,
correl.type = "rank") # "rank", "z.score", "symm.rank"
# Runs a 2-3x faster (2-2.5x for ES statistic and 2.5-3x faster for area.under.ES statsitic)
# version of GSEA.EnrichmentScore.5 internally that avoids overhead from the function call.
{
ES.vector <- vector(length=n.cols)
NES.vector <- vector(length=n.cols)
p.val.vector <- vector(length=n.cols)
correl.vector <- vector(length=n.rows, mode="numeric")
# Compute ES score for signatures in each sample
# print("Computing GSEA.....")
phi <- array(0, c(n.cols, nperm))
for (sample.index in 1:n.cols) {
gene.list <- order(data.array[, sample.index], decreasing=T)
# print(paste("Computing observed enrichment for UP signature in sample:", sample.index, sep=" "))
gene.set2 <- match(gene.set, gene.names)
if (weight == 0) {
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
if (correl.type == "rank") {
correl.vector <- data.array[gene.list, sample.index]
} else if (correl.type == "symm.rank") {
correl.vector <- data.array[gene.list, sample.index]
correl.vector <- ifelse(correl.vector > correl.vector[ceiling(n.rows/2)],
correl.vector,
correl.vector + correl.vector - correl.vector[ceiling(n.rows/2)])
} else if (correl.type == "z.score") {
x <- data.array[gene.list, sample.index]
correl.vector <- (x - mean(x))/sd(x)
}
}
### Olga's Additions ###
# ptm.new = proc.time()
tag.indicator <- sign(match(gene.list, gene.set2, nomatch=0)) # notice that the sign is 0 (no tag) or 1 (tag)
no.tag.indicator <- 1 - tag.indicator
N <- length(gene.list)
Nh <- length(gene.set2)
Nm <- N - Nh
orig.correl.vector <- correl.vector
if (weight == 0) correl.vector <- rep(1, N) # unweighted case
ind = which(tag.indicator==1)
correl.vector <- abs(correl.vector[ind])^weight
sum.correl = sum(correl.vector)
up = correl.vector/sum.correl # "up" represents the peaks in the mountain plot
gaps = (c(ind-1, N) - c(0, ind)) # gaps between ranked pathway genes
down = gaps/Nm
RES = cumsum(c(up,up[Nh])-down)
valleys = RES[1:Nh]-up
max.ES = max(RES)
min.ES = min(valleys)
if( statistic == "Kolmogorov-Smirnov" ){
if( max.ES > -min.ES ){
ES <- signif(max.ES, digits=5)
arg.ES <- which.max(RES)
} else{
ES <- signif(min.ES, digits=5)
arg.ES <- which.min(RES)
}
}
if( statistic == "area.under.RES"){
if( max.ES > -min.ES ){
arg.ES <- which.max(RES)
} else{
arg.ES <- which.min(RES)
}
gaps = gaps+1
RES = c(valleys,0) * (gaps) + 0.5*( c(0,RES[1:Nh]) - c(valleys,0) ) * (gaps)
ES = sum(RES)
}
GSEA.results = list(ES = ES, arg.ES = arg.ES, RES = RES, indicator = tag.indicator)
# new.time <<- new.time + (proc.time() - ptm.new)
### End Olga's Additions ###
#GSEA.results <- GSEA.EnrichmentScore5(gene.list=gene.list, gene.set=gene.set2,
# statistic = statistic, alpha = weight, correl.vector = correl.vector)
ES.vector[sample.index] <- GSEA.results$ES
if (nperm == 0) {
NES.vector[sample.index] <- ES.vector[sample.index]
p.val.vector[sample.index] <- 1
} else {
for (r in 1:nperm) {
reshuffled.gene.labels <- sample(1:n.rows)
if (weight == 0) {
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
correl.vector <- data.array[reshuffled.gene.labels, sample.index]
}
# GSEA.results <- GSEA.EnrichmentScore5(gene.list=reshuffled.gene.labels, gene.set=gene.set2,
# statistic = statistic, alpha = weight, correl.vector = correl.vector)
### Olga's Additions ###
tag.indicator <- sign(match(reshuffled.gene.labels, gene.set2, nomatch=0)) # notice that the sign is 0 (no tag) or 1 (tag)
no.tag.indicator <- 1 - tag.indicator
N <- length(reshuffled.gene.labels)
Nh <- length(gene.set2)
Nm <- N - Nh
# orig.correl.vector <- correl.vector
if (weight == 0) correl.vector <- rep(1, N) # unweighted case
ind <- which(tag.indicator==1)
correl.vector <- abs(correl.vector[ind])^weight
sum.correl <- sum(correl.vector)
up = correl.vector/sum.correl
gaps = (c(ind-1, N) - c(0, ind))
down = gaps/Nm
RES = cumsum(c(up,up[Nh])-down)
valleys = RES[1:Nh]-up
max.ES = max(RES)
min.ES = min(valleys)
if( statistic == "Kolmogorov-Smirnov" ){
if( max.ES > -min.ES ){
ES <- signif(max.ES, digits=5)
arg.ES <- which.max(RES)
} else{
ES <- signif(min.ES, digits=5)
arg.ES <- which.min(RES)
}
}
if( statistic == "area.under.RES"){
if( max.ES > -min.ES ){
arg.ES <- which.max(RES)
} else{
arg.ES <- which.min(RES)
}
gaps = gaps+1
RES = c(valleys,0) * (gaps) + 0.5*( c(0,RES[1:Nh]) - c(valleys,0) ) * (gaps)
ES = sum(RES)
}
GSEA.results = list(ES = ES, arg.ES = arg.ES, RES = RES, indicator = tag.indicator)
### End Olga's Additions ###
phi[sample.index, r] <- GSEA.results$ES
}
if (ES.vector[sample.index] >= 0) {
pos.phi <- phi[sample.index, phi[sample.index, ] >= 0]
if (length(pos.phi) == 0) pos.phi <- 0.5
pos.m <- mean(pos.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/pos.m
s <- sum(pos.phi >= ES.vector[sample.index])/length(pos.phi)
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
} else {
neg.phi <- phi[sample.index, phi[sample.index, ] < 0]
if (length(neg.phi) == 0) neg.phi <- 0.5
neg.m <- mean(neg.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/abs(neg.m)
s <- sum(neg.phi <= ES.vector[sample.index])/length(neg.phi)
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
}
}
}
return(list(ES.vector = ES.vector, NES.vector = NES.vector, p.val.vector = p.val.vector))
} # end of OPAM.Projection.3
# OPAM.Projection.RNAi
#
# Single-sample GSEA-style projection, variant used for RNAi screen data.
# For each column (sample) of `data.array` it computes an enrichment score
# (ES) of `gene.set` against the sample's gene ranking, and optionally a
# normalized score (NES) and permutation p-value.
#
# Args:
#   data.array  - numeric matrix, genes (rows) x samples (columns)
#   gene.names  - character vector of row (gene) identifiers, length n.rows
#   n.cols      - number of samples (columns of data.array)
#   n.rows      - number of genes (rows of data.array)
#   weight      - exponent applied to |correlation| when weighting hits;
#                 0 = unweighted (classic Kolmogorov-Smirnov walk)
#   statistic   - "Kolmogorov-Smirnov" (max deviation of the running sum)
#                 or "area.under.RES" (signed area under the running sum)
#   gene.set    - character vector of gene identifiers forming the signature
#   nperm       - number of gene-label permutations for the null (NES/p-value);
#                 nperm == 0 skips the null and reports NES = ES, p = 1
#   correl.type - "rank", "symm.rank" or "z.score": transform of the ranking
#                 values used as weights when weight > 0
#
# Returns: list(ES.vector, NES.vector, p.val.vector), one entry per sample.
OPAM.Projection.RNAi <- function(
data.array,
gene.names,
n.cols,
n.rows,
weight = 0,
statistic = "Kolmogorov-Smirnov", # "Kolmogorov-Smirnov" or "area.under.RES" are handled below
gene.set,
nperm = 200,
correl.type = "rank") # "rank", "z.score", "symm.rank"
# Runs a 2-3x faster (2-2.5x for ES statistic and 2.5-3x faster for area.under.ES statistic)
# version of GSEA.EnrichmentScore.5 inlined here to avoid function-call overhead.
{
ES.vector <- vector(length=n.cols)
NES.vector <- vector(length=n.cols)
p.val.vector <- vector(length=n.cols)
correl.vector <- vector(length=n.rows, mode="numeric")
# Compute ES score for the signature in each sample
# print("Computing GSEA.....")
phi <- array(0, c(n.cols, nperm)) # permutation null scores, one row per sample
for (sample.index in 1:n.cols) {
gene.list <- order(data.array[, sample.index], decreasing=T) # row indices, highest value first
# print(paste("Computing observed enrichment for UP signature in sample:", sample.index, sep=" "))
# gene.set2 <- match(gene.set, gene.names)
# NOTE(review): unlike OPAM.Projection.3 (which uses match(gene.set, gene.names)),
# this keeps ALL positions in gene.names that belong to the set, so duplicated
# gene identifiers (common in RNAi reagent-level data) each count as a hit.
gene.set2 <- seq(1:length(gene.names))[!is.na(match(gene.names, gene.set))]
if (weight == 0) {
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
# Weighting values, in ranked order (gene.list order)
if (correl.type == "rank") {
correl.vector <- data.array[gene.list, sample.index]
} else if (correl.type == "symm.rank") {
# Symmetrize around the median-ranked value
correl.vector <- ifelse(correl.vector > correl.vector[ceiling(n.rows/2)],
correl.vector,
correl.vector + correl.vector - correl.vector[ceiling(n.rows/2)])
} else if (correl.type == "z.score") {
x <- data.array[gene.list, sample.index]
correl.vector <- (x - mean(x))/sd(x)
}
}
### Olga's Additions ### (inlined GSEA.EnrichmentScore5)
# ptm.new = proc.time()
tag.indicator <- sign(match(gene.list, gene.set2, nomatch=0)) # 1 = ranked gene is in the set, 0 = not
no.tag.indicator <- 1 - tag.indicator
N <- length(gene.list) # total genes
Nh <- length(gene.set2) # hits (set size)
Nm <- N - Nh # misses
orig.correl.vector <- correl.vector
if (weight == 0) correl.vector <- rep(1, N) # unweighted case
ind = which(tag.indicator==1) # ranked positions of the hits
correl.vector <- abs(correl.vector[ind])^weight
sum.correl = sum(correl.vector)
up = correl.vector/sum.correl # "up" represents the peaks in the mountain plot
gaps = (c(ind-1, N) - c(0, ind)) # gaps between ranked pathway genes
down = gaps/Nm # per-gap decrement of the running sum
RES = cumsum(c(up,up[Nh])-down) # running enrichment sum sampled at the hits
valleys = RES[1:Nh]-up # value just before each hit's upward step
max.ES = max(RES)
min.ES = min(valleys)
if( statistic == "Kolmogorov-Smirnov" ){
# ES = the deviation (positive or negative) of larger magnitude
if( max.ES > -min.ES ){
ES <- signif(max.ES, digits=5)
arg.ES <- which.max(RES)
} else{
ES <- signif(min.ES, digits=5)
arg.ES <- which.min(RES)
}
}
if( statistic == "area.under.RES"){
if( max.ES > -min.ES ){
arg.ES <- which.max(RES)
} else{
arg.ES <- which.min(RES)
}
# Trapezoidal area under the piecewise-linear running sum
gaps = gaps+1
RES = c(valleys,0) * (gaps) + 0.5*( c(0,RES[1:Nh]) - c(valleys,0) ) * (gaps)
ES = sum(RES)
}
GSEA.results = list(ES = ES, arg.ES = arg.ES, RES = RES, indicator = tag.indicator)
# new.time <<- new.time + (proc.time() - ptm.new)
### End Olga's Additions ###
#GSEA.results <- GSEA.EnrichmentScore5(gene.list=gene.list, gene.set=gene.set2,
# statistic = statistic, alpha = weight, correl.vector = correl.vector)
ES.vector[sample.index] <- GSEA.results$ES
if (nperm == 0) {
# No permutation null requested: report raw ES, p-value is uninformative
NES.vector[sample.index] <- ES.vector[sample.index]
p.val.vector[sample.index] <- 1
} else {
# Build the permutation null by reshuffling gene labels
for (r in 1:nperm) {
reshuffled.gene.labels <- sample(1:n.rows)
if (weight == 0) {
correl.vector <- rep(1, n.rows)
} else if (weight > 0) {
correl.vector <- data.array[reshuffled.gene.labels, sample.index]
}
# GSEA.results <- GSEA.EnrichmentScore5(gene.list=reshuffled.gene.labels, gene.set=gene.set2,
# statistic = statistic, alpha = weight, correl.vector = correl.vector)
### Olga's Additions ### (same inlined score, on the shuffled ranking)
tag.indicator <- sign(match(reshuffled.gene.labels, gene.set2, nomatch=0)) # 1 = hit, 0 = miss
no.tag.indicator <- 1 - tag.indicator
N <- length(reshuffled.gene.labels)
Nh <- length(gene.set2)
Nm <- N - Nh
# orig.correl.vector <- correl.vector
if (weight == 0) correl.vector <- rep(1, N) # unweighted case
ind <- which(tag.indicator==1)
correl.vector <- abs(correl.vector[ind])^weight
sum.correl <- sum(correl.vector)
up = correl.vector/sum.correl
gaps = (c(ind-1, N) - c(0, ind))
down = gaps/Nm
RES = cumsum(c(up,up[Nh])-down)
valleys = RES[1:Nh]-up
max.ES = max(RES)
min.ES = min(valleys)
if( statistic == "Kolmogorov-Smirnov" ){
if( max.ES > -min.ES ){
ES <- signif(max.ES, digits=5)
arg.ES <- which.max(RES)
} else{
ES <- signif(min.ES, digits=5)
arg.ES <- which.min(RES)
}
}
if( statistic == "area.under.RES"){
if( max.ES > -min.ES ){
arg.ES <- which.max(RES)
} else{
arg.ES <- which.min(RES)
}
gaps = gaps+1
RES = c(valleys,0) * (gaps) + 0.5*( c(0,RES[1:Nh]) - c(valleys,0) ) * (gaps)
ES = sum(RES)
}
GSEA.results = list(ES = ES, arg.ES = arg.ES, RES = RES, indicator = tag.indicator)
### End Olga's Additions ###
phi[sample.index, r] <- GSEA.results$ES
}
# Normalize against the same-signed side of the null; p-value is the
# one-sided empirical tail (floored at 1/nperm so it is never exactly 0)
if (ES.vector[sample.index] >= 0) {
pos.phi <- phi[sample.index, phi[sample.index, ] >= 0]
if (length(pos.phi) == 0) pos.phi <- 0.5 # degenerate null: avoid mean of empty vector
pos.m <- mean(pos.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/pos.m
s <- sum(pos.phi >= ES.vector[sample.index])/length(pos.phi)
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
} else {
neg.phi <- phi[sample.index, phi[sample.index, ] < 0]
if (length(neg.phi) == 0) neg.phi <- 0.5 # degenerate null: avoid mean of empty vector
neg.m <- mean(neg.phi)
NES.vector[sample.index] <- ES.vector[sample.index]/abs(neg.m)
s <- sum(neg.phi <= ES.vector[sample.index])/length(neg.phi)
p.val.vector[sample.index] <- ifelse(s == 0, 1/nperm, s)
}
}
}
return(list(ES.vector = ES.vector, NES.vector = NES.vector, p.val.vector = p.val.vector))
} # end of OPAM.Projection.RNAi
# OPAM.project.dataset.2
#
# Project a gene-expression dataset (GCT file) onto gene-set space: for every
# gene set in `gene.set.databases`, compute a per-sample enrichment score via
# OPAM.Projection.2 and write the resulting gene-set x sample matrix to
# `output.ds` (GCT format).
#
# Args:
#   input.ds            - path to input expression dataset (GCT)
#   output.ds           - path for the output projected dataset (GCT)
#   gene.set.databases  - vector of gene-set database files (GMT-style, read
#                         with Read.GeneSets.db)
#   gene.set.selection  - "ALL" or a character vector of gene-set names
#   sample.norm.type    - per-column normalization: "rank", "log" or "log.rank"
#   weight              - weighting exponent passed to OPAM.Projection.2
#   statistic           - enrichment statistic ("area.under.RES", ...)
#   output.score.type   - "ES" (nperm forced to 1) or "NES" (uses `nperm`)
#   nperm               - number of permutations for the NES case
#   combine.mode        - "combine.off": keep *_UP/*_DN sets as-is;
#                         "combine.replace": replace matched *_UP/*_DN pairs by
#                         a single combined (UP - DN) score;
#                         "combine.add": add the combined score AND keep the
#                         individual *_UP/*_DN entries
#   correl.type         - "rank", "z.score" or "symm.rank"
#
# Side effects: prints progress, writes `output.ds`.
OPAM.project.dataset.2 <- function(
input.ds,
output.ds,
gene.set.databases,
gene.set.selection = "ALL", # "ALL" or list with names of gene sets
sample.norm.type = "rank", # "rank", "log" or "log.rank"
weight = 0.25,
statistic = "area.under.RES",
output.score.type = "ES", # "ES" or "NES"
nperm = 200, # number of random permutations for NES case
combine.mode = "combine.off", # see header comment for the three modes
correl.type = "rank") # "rank", "z.score", "symm.rank"
{ #----------------------------------------------------------------------------------------
# Load libraries
library(gtools)
library(verification)
library(RColorBrewer)
# Read input dataset (GCT format)
dataset <- MSIG.Gct2Frame(filename = input.ds)
m <- data.matrix(dataset$ds)
gene.names <- dataset$row.names
gene.descs <- dataset$descs
sample.names <- dataset$names
Ns <- length(m[1,]) # number of samples (columns)
Ng <- length(m[,1]) # number of genes (rows)
# Extract input file name / prefix
# NOTE(review): split=".gct" is a regex ("." matches any character) — kept
# as-is for consistency with the rest of the file; the prefix is not used
# downstream in this function.
temp <- strsplit(input.ds, split="/")
s <- length(temp[[1]])
input.file.name <- temp[[1]][s]
temp <- strsplit(input.file.name, split=".gct")
input.file.prefix <- temp[[1]][1]
# Sample (per-column) normalization
if (sample.norm.type == "rank") {
for (j in 1:Ns) { # column rank normalization, scaled to (0, 10000]
m[,j] <- rank(m[,j], ties.method = "average")
}
m <- 10000*m/Ng
} else if (sample.norm.type == "log.rank") {
for (j in 1:Ns) { # column rank normalization, then log
m[,j] <- rank(m[,j], ties.method = "average")
}
m <- log(10000*m/Ng + exp(1))
} else if (sample.norm.type == "log") {
m[m < 1] <- 1 # clamp before log so log(m + e) is monotone and >= 1
m <- log(m + exp(1))
}
# First pass over gene-set databases: size the containers
max.G <- 0
max.N <- 0
for (gsdb in gene.set.databases) {
GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
max.G <- max(max.G, max(GSDB$size.G))
max.N <- max.N + GSDB$N.gs
}
# Second pass: concatenate all databases into one padded matrix
N.gs <- 0
gs <- matrix("null", nrow=max.N, ncol=max.G)
gs.names <- vector(length=max.N, mode="character")
gs.descs <- vector(length=max.N, mode="character")
size.G <- vector(length=max.N, mode="numeric")
start <- 1
for (gsdb in gene.set.databases) {
GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
N.gs <- GSDB$N.gs
gs.names[start:(start + N.gs - 1)] <- GSDB$gs.names
gs.descs[start:(start + N.gs - 1)] <- GSDB$gs.desc
size.G[start:(start + N.gs - 1)] <- GSDB$size.G
gs[start:(start + N.gs - 1), 1:max(GSDB$size.G)] <- GSDB$gs[1:N.gs, 1:max(GSDB$size.G)]
start <- start + N.gs
}
N.gs <- max.N
# Select desired gene sets
if (gene.set.selection[1] != "ALL") {
locs <- match(gene.set.selection, gs.names)
N.gs <- sum(!is.na(locs))
if (N.gs > 1) {
gs <- gs[locs,]
} else {
# BUGFIX (matches OPAM.project.dataset.3): with a single selected gene
# set, gs[locs,] collapses to a vector and the gs[gs.i, ...] indexing
# below would fail — force it back to a one-row matrix.
gs <- t(as.matrix(gs[locs,]))
}
gs.names <- gs.names[locs]
gs.descs <- gs.descs[locs]
size.G <- size.G[locs]
}
# Loop over gene sets, computing one score row per set
score.matrix <- matrix(0, nrow=N.gs, ncol=Ns)
for (gs.i in 1:N.gs) {
gene.set <- gs[gs.i, 1:size.G[gs.i]]
gene.overlap <- intersect(gene.set, gene.names)
print(paste(gs.i, "gene set:", gs.names[gs.i], " overlap=", length(gene.overlap)))
if (length(gene.overlap) == 0) {
# No overlap with the dataset: fill with tiny jitter so downstream
# code that assumes non-constant rows does not break
score.matrix[gs.i, ] <- runif(Ns, min=1E-06, max=1.1E-06)
next
} else {
gene.set.locs <- match(gene.overlap, gene.set)
gene.names.locs <- match(gene.overlap, gene.names)
msig <- m[gene.names.locs,]
msig.names <- gene.names[gene.names.locs]
if (output.score.type == "ES") {
OPAM <- OPAM.Projection.2(data.array = m, gene.names = gene.names, n.cols = Ns,
n.rows = Ng, weight = weight, statistic = statistic,
gene.set = gene.overlap, nperm = 1, correl.type = correl.type)
score.matrix[gs.i,] <- OPAM$ES.vector
} else if (output.score.type == "NES") {
OPAM <- OPAM.Projection.2(data.array = m, gene.names = gene.names, n.cols = Ns,
n.rows = Ng, weight = weight, statistic = statistic,
gene.set = gene.overlap, nperm = nperm, correl.type = correl.type)
score.matrix[gs.i,] <- OPAM$NES.vector
}
}
}
# Optionally combine matched *_UP / *_DN gene-set pairs
initial.up.entries <- 0
final.up.entries <- 0
initial.dn.entries <- 0
final.dn.entries <- 0
combined.entries <- 0
other.entries <- 0
if (combine.mode == "combine.off") {
score.matrix.2 <- score.matrix
gs.names.2 <- gs.names
gs.descs.2 <- gs.descs
} else if ((combine.mode == "combine.replace") || (combine.mode == "combine.add")) {
score.matrix.2 <- NULL
gs.names.2 <- NULL
gs.descs.2 <- NULL
for (i in 1:N.gs) {
# Split "BODY_SUFFIX" into the body and the trailing UP/DN token
temp <- strsplit(gs.names[i], split="_")
body <- paste(temp[[1]][seq(1, length(temp[[1]]) -1)], collapse="_")
suffix <- tail(temp[[1]], 1)
print(paste("i:", i, "gene set:", gs.names[i], "body:", body, "suffix:", suffix))
if (suffix == "UP") { # This is an "UP" gene set
initial.up.entries <- initial.up.entries + 1
target <- paste(body, "DN", sep="_")
loc <- match(target, gs.names)
if (!is.na(loc)) { # found corresponding "DN" gene set: create combined entry
score <- score.matrix[i,] - score.matrix[loc,]
score.matrix.2 <- rbind(score.matrix.2, score)
gs.names.2 <- c(gs.names.2, body)
gs.descs.2 <- c(gs.descs.2, paste(gs.descs[i], "combined UP & DN"))
combined.entries <- combined.entries + 1
if (combine.mode == "combine.add") { # also keep the "UP" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.up.entries <- final.up.entries + 1
}
} else { # no corresponding "DN" gene set: keep the "UP" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.up.entries <- final.up.entries + 1
}
} else if (suffix == "DN") { # This is a "DN" gene set
initial.dn.entries <- initial.dn.entries + 1
target <- paste(body, "UP", sep="_")
loc <- match(target, gs.names)
if (is.na(loc)) { # no corresponding "UP" gene set: keep the "DN" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.dn.entries <- final.dn.entries + 1
} else { # matched "UP" exists; the combined entry was made on the UP pass
if (combine.mode == "combine.add") { # keep the "DN" entry too
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.dn.entries <- final.dn.entries + 1
}
}
} else { # neither "UP" nor "DN": keep the individual entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
other.entries <- other.entries + 1
}
} # end for loop over gene sets
print(paste("initial.up.entries:", initial.up.entries))
print(paste("final.up.entries:", final.up.entries))
print(paste("initial.dn.entries:", initial.dn.entries))
print(paste("final.dn.entries:", final.dn.entries))
print(paste("other.entries:", other.entries))
print(paste("combined.entries:", combined.entries))
print(paste("total entries:", length(score.matrix.2[,1])))
}
# Write the projected dataset
V.GCT <- data.frame(score.matrix.2)
names(V.GCT) <- sample.names
row.names(V.GCT) <- gs.names.2
write.gct(gct.data.frame = V.GCT, descs = gs.descs.2, filename = output.ds)
} # end of OPAM.project.dataset.2
# OPAM.project.dataset.3
#
# Project a gene-expression dataset (GCT file) onto gene-set space: for every
# gene set in `gene.set.databases`, compute a per-sample enrichment score via
# OPAM.Projection.3 and write the resulting gene-set x sample matrix to
# `output.ds` (GCT format). Same pipeline as OPAM.project.dataset.2 but calls
# OPAM.Projection.3 and handles selection of a single gene set correctly.
#
# Args:
#   input.ds            - path to input expression dataset (GCT)
#   output.ds           - path for the output projected dataset (GCT)
#   gene.set.databases  - vector of gene-set database files (read with
#                         Read.GeneSets.db)
#   gene.set.selection  - "ALL" or a character vector of gene-set names
#   sample.norm.type    - per-column normalization: "rank", "log" or "log.rank"
#   weight              - weighting exponent passed to OPAM.Projection.3
#   statistic           - enrichment statistic ("area.under.RES", ...)
#   output.score.type   - "ES" (nperm forced to 1) or "NES" (uses `nperm`)
#   nperm               - number of permutations for the NES case
#   combine.mode        - "combine.off", "combine.replace" or "combine.add"
#                         (see inline comments on the combine section)
#   correl.type         - "rank", "z.score" or "symm.rank"
#
# Side effects: prints progress, writes `output.ds`.
OPAM.project.dataset.3 <- function(
input.ds,
output.ds,
gene.set.databases,
gene.set.selection = "ALL", # "ALL" or list with names of gene sets
sample.norm.type = "rank", # "rank", "log" or "log.rank"
weight = 0.25,
statistic = "area.under.RES",
output.score.type = "ES", # "ES" or "NES"
nperm = 200, # number of random permutations for NES case
combine.mode = "combine.off", # "combine.off" do not combine *_UP and *_DN versions in
# a single score. "combine.replace" combine *_UP and
# *_DN versions in a single score that replaces the individual
# *_UP and *_DN versions. "combine.add" combine *_UP and
# *_DN versions in a single score and add it but keeping
# the individual *_UP and *_DN versions.
correl.type = "rank") # "rank", "z.score", "symm.rank"
{ #----------------------------------------------------------------------------------------
# Load libraries
library(gtools)
library(verification)
library(RColorBrewer)
# Read input dataset (GCT format)
dataset <- MSIG.Gct2Frame(filename = input.ds)
m <- data.matrix(dataset$ds)
gene.names <- dataset$row.names
gene.descs <- dataset$descs
sample.names <- dataset$names
Ns <- length(m[1,]) # number of samples (columns)
Ng <- length(m[,1]) # number of genes (rows)
# Extract input file name / prefix (prefix not used downstream here)
temp <- strsplit(input.ds, split="/")
s <- length(temp[[1]])
input.file.name <- temp[[1]][s]
temp <- strsplit(input.file.name, split=".gct")
input.file.prefix <- temp[[1]][1]
# Sample (per-column) normalization
if (sample.norm.type == "rank") {
for (j in 1:Ns) { # column rank normalization, scaled to (0, 10000]
m[,j] <- rank(m[,j], ties.method = "average")
}
m <- 10000*m/Ng
} else if (sample.norm.type == "log.rank") {
for (j in 1:Ns) { # column rank normalization, then log
m[,j] <- rank(m[,j], ties.method = "average")
}
m <- log(10000*m/Ng + exp(1))
} else if (sample.norm.type == "log") {
m[m < 1] <- 1 # clamp before log
m <- log(m + exp(1))
}
# First pass over gene-set databases: size the containers
max.G <- 0
max.N <- 0
for (gsdb in gene.set.databases) {
GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
max.G <- max(max.G, max(GSDB$size.G))
max.N <- max.N + GSDB$N.gs
}
# Second pass: concatenate all databases into one padded matrix
N.gs <- 0
gs <- matrix("null", nrow=max.N, ncol=max.G)
gs.names <- vector(length=max.N, mode="character")
gs.descs <- vector(length=max.N, mode="character")
size.G <- vector(length=max.N, mode="numeric")
start <- 1
for (gsdb in gene.set.databases) {
GSDB <- Read.GeneSets.db(gsdb, thres.min = 2, thres.max = 2000, gene.names = NULL)
N.gs <- GSDB$N.gs
gs.names[start:(start + N.gs - 1)] <- GSDB$gs.names
gs.descs[start:(start + N.gs - 1)] <- GSDB$gs.desc
size.G[start:(start + N.gs - 1)] <- GSDB$size.G
gs[start:(start + N.gs - 1), 1:max(GSDB$size.G)] <- GSDB$gs[1:N.gs, 1:max(GSDB$size.G)]
start <- start + N.gs
}
N.gs <- max.N
# Select desired gene sets
if (gene.set.selection[1] != "ALL") {
locs <- match(gene.set.selection, gs.names)
N.gs <- sum(!is.na(locs))
if(N.gs > 1) {
gs <- gs[locs,]
} else {
gs <- t(as.matrix(gs[locs,])) # Force vector to matrix if only one gene set specified
}
gs.names <- gs.names[locs]
gs.descs <- gs.descs[locs]
size.G <- size.G[locs]
}
# Loop over gene sets, computing one score row per set
score.matrix <- matrix(0, nrow=N.gs, ncol=Ns)
for (gs.i in 1:N.gs) {
#browser()
gene.set <- gs[gs.i, 1:size.G[gs.i]]
gene.overlap <- intersect(gene.set, gene.names)
print(paste(gs.i, "gene set:", gs.names[gs.i], " overlap=", length(gene.overlap)))
if (length(gene.overlap) == 0) {
# No overlap with the dataset: fill with tiny jitter so downstream
# code that assumes non-constant rows does not break
score.matrix[gs.i, ] <- runif(Ns, min=1E-06, max=1.1E-06)
next
} else {
gene.set.locs <- match(gene.overlap, gene.set)
gene.names.locs <- match(gene.overlap, gene.names)
msig <- m[gene.names.locs,]
msig.names <- gene.names[gene.names.locs]
if (output.score.type == "ES") {
OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
n.rows = Ng, weight = weight, statistic = statistic,
gene.set = gene.overlap, nperm = 1, correl.type = correl.type)
score.matrix[gs.i,] <- OPAM$ES.vector
} else if (output.score.type == "NES") {
OPAM <- OPAM.Projection.3(data.array = m, gene.names = gene.names, n.cols = Ns,
n.rows = Ng, weight = weight, statistic = statistic,
gene.set = gene.overlap, nperm = nperm, correl.type = correl.type)
score.matrix[gs.i,] <- OPAM$NES.vector
}
}
}
# Optionally combine matched *_UP / *_DN gene-set pairs
initial.up.entries <- 0
final.up.entries <- 0
initial.dn.entries <- 0
final.dn.entries <- 0
combined.entries <- 0
other.entries <- 0
if (combine.mode == "combine.off") {
score.matrix.2 <- score.matrix
gs.names.2 <- gs.names
gs.descs.2 <- gs.descs
} else if ((combine.mode == "combine.replace") || (combine.mode == "combine.add")) {
score.matrix.2 <- NULL
gs.names.2 <- NULL
gs.descs.2 <- NULL
k <- 1
for (i in 1:N.gs) {
# Split "BODY_SUFFIX" into the body and the trailing UP/DN token
temp <- strsplit(gs.names[i], split="_")
body <- paste(temp[[1]][seq(1, length(temp[[1]]) -1)], collapse="_")
suffix <- tail(temp[[1]], 1)
print(paste("i:", i, "gene set:", gs.names[i], "body:", body, "suffix:", suffix))
if (suffix == "UP") { # This is an "UP" gene set
initial.up.entries <- initial.up.entries + 1
target <- paste(body, "DN", sep="_")
loc <- match(target, gs.names)
if (!is.na(loc)) { # found corresponding "DN" gene set: create combined entry
score <- score.matrix[i,] - score.matrix[loc,]
score.matrix.2 <- rbind(score.matrix.2, score)
gs.names.2 <- c(gs.names.2, body)
gs.descs.2 <- c(gs.descs.2, paste(gs.descs[i], "combined UP & DN"))
combined.entries <- combined.entries + 1
if (combine.mode == "combine.add") { # also keep the "UP" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.up.entries <- final.up.entries + 1
}
} else { # did not find corresponding "DN" gene set: keep the "UP" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.up.entries <- final.up.entries + 1
}
} else if (suffix == "DN") { # This is a "DN" gene set
initial.dn.entries <- initial.dn.entries + 1
target <- paste(body, "UP", sep="_")
loc <- match(target, gs.names)
if (is.na(loc)) { # did not find corresponding "UP" gene set: keep the "DN" entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.dn.entries <- final.dn.entries + 1
} else { # matched "UP" exists; the combined entry was made on the UP pass
if (combine.mode == "combine.add") { # keep the "DN" entry too
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
final.dn.entries <- final.dn.entries + 1
}
}
} else { # neither "UP" nor "DN": keep the individual entry
score.matrix.2 <- rbind(score.matrix.2, score.matrix[i,])
gs.names.2 <- c(gs.names.2, gs.names[i])
gs.descs.2 <- c(gs.descs.2, gs.descs[i])
other.entries <- other.entries + 1
}
} # end for loop over gene sets
print(paste("initial.up.entries:", initial.up.entries))
print(paste("final.up.entries:", final.up.entries))
print(paste("initial.dn.entries:", initial.dn.entries))
print(paste("final.dn.entries:", final.dn.entries))
print(paste("other.entries:", other.entries))
print(paste("combined.entries:", combined.entries))
print(paste("total entries:", length(score.matrix.2[,1])))
}
# Write the projected dataset
V.GCT <- data.frame(score.matrix.2)
names(V.GCT) <- sample.names
row.names(V.GCT) <- gs.names.2
write.gct(gct.data.frame = V.GCT, descs = gs.descs.2, filename = output.ds)
} # end of OPAM.project.dataset.3
OPAM.match.projection.to.phenotypes <- function(
input.ds,
input.cls,
results.dir,
normalize.score = T,
normalization.type = "zero.one", # "zero.one", "z.score" or "r.z.score"
markers.num=5,
user.colors = NA,
markers.metric = "ROC", # "ROC" or "T.TEST"
markers.file = NULL,
sort.phenotypes = T,
sort.decreasing = T, # T = decreasing, F = increasing
sort.expression = T,
sort.decreasing.genes = T,
legend = T,
char.res = 1,
only.up = F,
cmap.type = 3,
show.desc = T,
row.norm = T)
{
library(gtools)
library(verification)
library(ROCR)
library(MASS)
library(RColorBrewer)
library(heatmap.plus)
dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
m <- data.matrix(dataset$ds)
model.names <- dataset$row.names
model.descs <- dataset$descs
n.models <-
Ns <- length(m[1,])
for (i in 1:length(m[,1])) {
if (sd(m[i,]) == 0) {
val <- m[i, 1]
m[i,] <- m[i,] + runif(n=Ns, min= val - 0.001, max=val + 0.001) # add small noise to flat profiles
}
}
dim(m)
sample.names <- dataset$names
temp <- strsplit(input.ds, split="/") # Extract test file name
s <- length(temp[[1]])
test.file.name <- temp[[1]][s]
temp <- strsplit(test.file.name, split=".gct")
test.file.prefix <- temp[[1]][1]
# char.res <- 0.013 * n.models + 0.65
# normalize scores
if (normalize.score == T) {
if (normalization.type == "zero.one") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
}
} else if (normalization.type == "z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
}
} else if (normalization.type == "r.z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
}
}
}
CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
cls.labels <- CLS$class.v
cls.phen <- CLS$phen
cls.list <- CLS$class.list
if (is.vector(cls.labels)) {
n.phen <- 1
} else {
n.phen <- length(cls.labels[,1])
}
if (!is.na(user.colors)) {
c.test <- user.colors
} else {
if (!is.null(CLS$col.phen)) {
c.test <- CLS$col.phen
} else {
c.test <- c(brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"))
}
}
if (!is.null(CLS$phen.names)) {
phen.names <- CLS$phen.names
# if (is.vector(cls.list)) {
# cls.phen <- paste(phen.names, cls.phen, collapse="_")
# } else {
# for (i in 1:length(cls.phen)) {
# for (j in 1:length(cls.phen[[i]])) {
# cls.phen[[i]][j] <- paste(phen.names[i], cls.phen[[i]][j], collapse="_")
# }
# }
# }
} else {
phen.names <- "NA"
}
cls.phen.index <- unlist(cls.phen)
cls.phen.colors <- c.test[1:length(cls.phen.index)]
n.classes <- vector(length=n.phen, mode="numeric")
if (n.phen == 1) {
max.classes <- length(cls.phen)
n.classes[1] <- max.classes
} else {
max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
for (i in 1:n.phen) {
n.classes[i] <- length(cls.phen[[i]])
}
}
x <- rbind(sample.names, cls.list, cls.labels)
print("before loop")
print(x)
print(cls.phen)
print(phen.names)
filename <- paste(results.dir, test.file.prefix, ".PHEN.MARKERS.", markers.metric, ".pdf", sep="")
pdf(file=filename, height = 10, width = 10)
# Loop over phenotypes
for (k.phen in 1:n.phen) {
if (is.vector(cls.labels)) {
k.phen.labels <- cls.labels
k.phen.list <- cls.list
} else {
k.phen.labels <- as.vector(cls.labels[k.phen,])
k.phen.list <- as.vector(cls.list[k.phen,])
}
# Sort according to current phenotype
if(sort.expression == T) {
phen.index <- order(k.phen.labels, decreasing=sort.decreasing)
} else {
phen.index <- seq(1, length(k.phen.labels))
}
if (is.vector(cls.labels)) {
cls.labels2 <- cls.labels[phen.index]
cls.list2 <- cls.list[phen.index]
} else {
cls.labels2 <- cls.labels[, phen.index]
cls.list2 <- cls.list[, phen.index]
}
k.phen.labels <- k.phen.labels[phen.index]
k.phen.list <- k.phen.list[phen.index]
sample.names2 <- sample.names[phen.index]
m2 <- m[, phen.index]
x <- rbind(sample.names2, cls.list2, cls.labels2)
print(paste("inside loop phen=", k.phen))
print(x)
print(cls.phen)
print(phen.names)
# Markers for each class
if (is.vector(cls.labels2)) {
classes <- unique(cls.list2)
} else {
classes <- unique(cls.list2[k.phen, ])
}
if (length(classes) > 2) {
k.only.up <- T
} else {
k.only.up <- only.up
}
if(length(classes) == 2) classes <- classes[1]
markers <- NULL
markers.descs <- NULL
metric.list <- NULL
p.val.list <- NULL
k.class <- NULL
for (k in classes) {
if (is.vector(cls.labels2)) {
bin.class <- ifelse(cls.list2 == k, 0, 1)
} else {
bin.class <- ifelse(cls.list2[k.phen, ] == k, 0, 1)
}
if (markers.metric == "T.TEST") {
metric <- vector(length=n.models, mode="numeric")
p.val <- vector(length=n.models, mode="numeric")
for (i in 1:n.models) {
temp <- split(m2[i, ], bin.class)
x <- temp[[1]]
y <- temp[[2]]
metric[i] <- signif(t.test(x=x, y=y)$statistic, digits=3)
p.val[i] <- signif(t.test(x=x, y=y)$p.value, digits=3)
}
} else if (markers.metric == "ROC") {
bin.class <- ifelse(bin.class == 1, 0, 1)
metric <- vector(length=n.models, mode="numeric")
p.val <- vector(length=n.models, mode="numeric")
for (i in 1:n.models) {
m.score <- m2[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
if (length(table(bin.class)) > 1) {
perf.auc <- roc.area(bin.class, m.score.norm)
metric[i] <- signif(perf.auc$A, digits=3)
p.val[i] <- signif(perf.auc$p.value, digits=3)
} else {
metric[i] <- 1
p.val[i] <- 1
}
}
} else if (markers.metric == "MEAN.DIFF") {
bin.class <- ifelse(bin.class == 1, 0, 1)
metric <- vector(length=n.models, mode="numeric")
p.val <- vector(length=n.models, mode="numeric")
for (i in 1:n.models) {
temp <- split(m2[i, ], bin.class)
x <- temp[[1]]
y <- temp[[2]]
metric[i] <- signif(mean(x) - mean(y), digits=3)
p.val[i] <- signif(t.test(x=x, y=y)$p.value, digits=3)
}
}
if (is.na(sort.decreasing.genes)) {
metric.order <- seq(1, length(metric))
} else {
metric.order <- order(metric, decreasing=sort.decreasing.genes)
}
if (only.up == TRUE) {
k.markers.num <- ifelse(markers.num > n.models, n.models, markers.num)
# if (length(classes) == 2) {
# k.markers.num <- ifelse(markers.num > n.models, n.models, markers.num)
# } else {
# k.markers.num <- ifelse(length(classes)*markers.num > n.models,
# floor(n.models/length(classes)), markers.num)
# }
markers <- c(markers, model.names[metric.order][1:k.markers.num])
markers.descs <- c(markers.descs, model.descs[metric.order][1:k.markers.num])
metric.list <- c(metric.list, metric[metric.order][1:k.markers.num])
p.val.list <- c(p.val.list, p.val[metric.order][1:k.markers.num])
k.class <- c(k.class, rep(k, k.markers.num))
} else {
k.markers.num <- ifelse(length(classes)*markers.num > n.models, floor(n.models/length(classes)),
markers.num)
markers <- c(markers, model.names[metric.order][1:k.markers.num],
model.names[metric.order][(length(model.names) - k.markers.num +1):length(model.names)])
markers.descs <- c(markers.descs, model.descs[metric.order][1:k.markers.num],
model.descs[metric.order][(length(model.names) - k.markers.num + 1):length(model.names)])
metric.list <- c(metric.list, metric[metric.order][1:k.markers.num],
metric[metric.order][(length(model.names) - k.markers.num + 1):length(model.names)])
p.val.list <- c(p.val.list, p.val[metric.order][1:k.markers.num],
p.val[metric.order][(length(model.names) - k.markers.num + 1):length(model.names)])
k.class <- c(k.class, rep(k, k.markers.num), rep(paste("not", k), k.markers.num))
}
}
V3 <- m2[markers,]
print(V3)
print(markers)
if (show.desc == T) {
model.descs2 <- paste(metric.list, p.val.list, k.class, markers.descs)
} else {
model.descs2 <- paste(metric.list, p.val.list)
}
height <- ifelse(length(markers) + n.phen >= 9, 10, (length(markers) + n.phen)*0.44 + 5)
# char.res <- 0.0085 * length(markers) + 0.65
# Sort markers inside each phenotype class
if(sort.expression == T) {
for (j in unique(k.phen.labels)) {
V4 <- V3[ , k.phen.labels == j]
sn <- sample.names2[k.phen.labels == j]
if (is.vector(cls.labels)) {
clab <- cls.labels2[k.phen.labels == j]
clis <- cls.list2[k.phen.labels == j]
} else {
clab <- cls.labels2[, k.phen.labels == j]
clis <- cls.list2[, k.phen.labels == j]
}
l.phen <- sum(k.phen.labels == j)
if (l.phen > 1) {
dist.matrix <- dist(t(V4))
HC <- hclust(dist.matrix, method="complete")
HC.order <- HC$order
V4 <- V4[ , HC.order]
sn <- sn[HC.order]
if (is.vector(cls.labels2)) {
clab <- clab[HC.order]
clis <- clis[HC.order]
} else {
clab <- clab[, HC.order]
clis <- clis[, HC.order]
}
}
V3[ , k.phen.labels == j] <- V4
sample.names2[k.phen.labels == j] <- sn
if (is.vector(cls.labels2)) {
cls.labels2[k.phen.labels == j] <- clab
cls.list2[k.phen.labels == j] <- clis
} else {
cls.labels2[, k.phen.labels == j] <- clab
cls.list2[, k.phen.labels == j] <- clis
}
}
}
x <- rbind(sample.names2, cls.list2, cls.labels2)
print(paste("inside loop after in-class sort phen=", k.phen))
print(x)
print(cls.phen)
print(phen.names)
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2 <- list(NULL)
if (is.vector(cls.labels2)) {
classes <- unique(cls.list2)
cls.phen2 <- classes
cls.labels2 <- match(cls.list2, cls.phen2)
} else {
for (kk in 1:length(cls.list2[, 1])) {
classes <- unique(cls.list2[kk,])
cls.phen2[[kk]] <- classes
cls.labels2[kk,] <- match(cls.list2[kk,], cls.phen2[[kk]])
}
}
x <- rbind(sample.names2, cls.list2, cls.labels2)
print(paste("inside loop after cls.phen renorm phen=", k.phen))
print(cls.phen2)
print(phen.names)
library(gmodels)
if (!is.vector(cls.labels2)) {
if (sort.phenotypes == T) {
phen.score <- vector(length=n.phen, mode="numeric")
for (k.lab in 1:n.phen) {
tab <- table(as.vector(cls.list2[k.lab,]), k.phen.list)
print(tab)
# phen.score[k.lab] <- 1 - chisq.test(tab)$p.value
# phen.score[k.lab] <- 1 - fisher.test(tab)$p.value
if ((length(tab[,1]) > 1) && (length(tab[1,]) > 1)) {
CT <- CrossTable(tab, chisq=T)
phen.score[k.lab] <- CT$chisq$p.value
print(phen.score[k.lab])
} else {
phen.score[k.lab] <- 0.50
print(phen.score[k.lab])
}
}
phen.order <- order(phen.score, decreasing= T)
print(phen.order)
cls.labels2 <- cls.labels2[phen.order,]
cls.phen2 <- cls.phen2[phen.order]
phen.names2 <- phen.names[phen.order]
main.string <- paste(test.file.prefix, " - ", phen.names2[n.phen], markers.metric, " order")
} else {
phen.names2 <- phen.names
main.string <- paste(test.file.prefix, " - ", phen.names2[k.phen], markers.metric, " order")
}
} else {
phen.names2 <- phen.names[1]
main.string <- paste(test.file.prefix, " - ", phen.names2, markers.metric, " order")
}
# windows(width=15, height=height)
x <- rbind(sample.names2, cls.list2, cls.labels2)
print(paste("inside loop after phen sort before figure phen=", k.phen))
print(x)
print(cls.phen2)
print(phen.names2)
phen.list <- unlist(cls.phen2)
colors.list <- cls.phen.colors[match(phen.list, cls.phen.index)]
print(rbind(phen.list, colors.list))
if (show.desc == T) {
markers <- paste(markers, seq(1, length(markers)), sep="_")
}
MSIG.HeatMapPlot.7(V = V3, row.names = markers,
row.names2 = model.descs2, col.labels = cls.labels2,
col.classes = cls.phen2, phen.cmap = colors.list, phen.names = phen.names2,
col.names = sample.names2, main = main.string, xlab=" ", ylab=" ",
row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.res, legend=legend)
V3 <- data.frame(V3)
colnames(V3) <- sample.names2
row.names(V3) <- markers
if (!is.null(markers.file)) {
write.gct(gct.data.frame = V3, descs = model.descs2, filename = markers.file)
}
} # end loop over phenotypes
dev.off()
}
OPAM.sort.projection.by.score.2 <- function(
input.ds,
input.cls,
results.dir,
normalize.score = TRUE,
normalization.type = "zero.one",
model,
target.phen = NA,
target.class = NA,
user.colors = NA,
decreasing.order = TRUE,
output.dataset = NA,
char.rescale = 1,
cmap.type = 3,
row.norm = TRUE)
# Sort a projection dataset (GCT: model/signature scores x samples) by the
# score of one chosen "model" row, order the remaining rows by their
# correlation with it, annotate every row with its ROC area (and p-value)
# against a binary target phenotype, and render the result with
# MSIG.HeatMapPlot.7 into "<prefix>.SORT.PROJ.pdf" under results.dir.
# Optionally writes the sorted matrix back out as a GCT via write.gct.2.
#
# Arguments:
#   input.ds            GCT file of projection scores (rows = models).
#   input.cls           CLS phenotype file (read via MSIG.ReadPhenFile.2).
#   results.dir         Output directory for the PDF (and prefix for it).
#   normalize.score     If TRUE, normalize each row of the score matrix.
#   normalization.type  "zero.one" (min/max), "z.score" (mean/sd) or
#                       "r.z.score" (robust: median/MAD).
#   model               Row name whose score defines the sample ordering.
#   target.phen         Phenotype row index defining the binary target; when
#                       NA, membership in the first sample's class of the
#                       first phenotype is used.
#   target.class        Class label treated as the positive (1) class.
#   user.colors         Optional phenotype color vector (overrides CLS).
#   decreasing.order    Sort samples by decreasing model score?
#   output.dataset      Optional path for the sorted GCT output.
#   char.rescale, cmap.type, row.norm  Passed through to MSIG.HeatMapPlot.7.
{
    library(gtools)
    library(verification)   # roc.area()
    library(ROCR)
    library(MASS)
    library(RColorBrewer)   # brewer.pal()
    library(heatmap.plus)

    # Read gene expression / projection dataset (GCT format)
    dataset <- MSIG.Gct2Frame(filename = input.ds)
    m <- data.matrix(dataset$ds)
    model.names <- dataset$row.names
    model.descs <- dataset$descs
    Ns <- length(m[1,])
    dim(m)
    sample.names <- dataset$names
    n.models <- length(m[,1])

    # Extract test file prefix (strip directories and the ".gct" suffix)
    temp <- strsplit(input.ds, split="/")
    s <- length(temp[[1]])
    test.file.name <- temp[[1]][s]
    temp <- strsplit(test.file.name, split=".gct")
    test.file.prefix <- temp[[1]][1]
    char.res <- 0.013 * n.models + 0.65

    # Row-wise score normalization.
    # NOTE(review): a constant row yields 0/0 (NaN) under "zero.one" and
    # sd/mad == 0 under the z-score variants -- assumed not to occur upstream.
    if (normalize.score == TRUE) {
       if (normalization.type == "zero.one") {
          for (i in 1:n.models) {
             m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
          }
       } else if (normalization.type == "z.score") {
          for (i in 1:n.models) {
             m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
          }
       } else if (normalization.type == "r.z.score") {
          for (i in 1:n.models) {
             m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
          }
       }
    }

    # Read phenotype file (CLS format)
    CLS <- MSIG.ReadPhenFile.2(file = input.cls)
    cls.labels <- CLS$class.v
    cls.phen <- CLS$phen
    cls.list <- CLS$class.list
    if (is.vector(cls.labels)) {
       n.phen <- 1
    } else {
       n.phen <- length(cls.labels[,1])
    }

    # Phenotype colors: user-supplied > CLS-supplied > default Brewer palettes
    if (!is.na(user.colors[1])) {
       c.test <- user.colors
    } else {
       if (!is.null(CLS$col.phen)) {
          c.test <- CLS$col.phen
       } else {
          c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                      brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
                      brewer.pal(n=8, name="BuGn"),
                      brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                      brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                      brewer.pal(n=8, name="BuGn"),
                      brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                      brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                      brewer.pal(n=8, name="BuGn"))
       }
    }
    if (!is.null(CLS$phen.names)) {
       phen.names <- CLS$phen.names
    } else {
       phen.names <- "NA"
    }
    cls.phen.index <- unlist(cls.phen)
    cls.phen.colors <- c.test[1:length(cls.phen.index)]

    # Number of classes per phenotype (bookkeeping only)
    n.classes <- vector(length=n.phen, mode="numeric")
    if (n.phen == 1) {
       max.classes <- length(cls.phen)
       n.classes[1] <- max.classes
    } else {
       max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
       for (i in 1:n.phen) {
          n.classes[i] <- length(cls.phen[[i]])
       }
    }

    filename <- paste(results.dir, test.file.prefix, ".SORT.PROJ", sep="")
    pdf(file=paste(filename, ".pdf", sep=""), height = 8.5, width = 11)

    # Sort samples by the chosen model's score
    loc <- match(model, model.names)
    print(c("loc:", loc))
    s.order <- order(m[loc,], decreasing = decreasing.order)
    m2 <- m[, s.order]
    sample.names2 <- sample.names[s.order]
    if (is.vector(cls.labels)) {
       cls.labels2 <- cls.labels[s.order]
       cls.list2 <- cls.list[s.order]
    } else {
       cls.labels2 <- cls.labels[, s.order]
       cls.list2 <- cls.list[, s.order]
    }

    # Recompute cls.phen and cls.labels2 as order may have changed
    cls.phen2 <- NULL
    if (is.vector(cls.labels)) {
       classes <- unique(cls.list2)
       cls.phen2 <- classes
       cls.labels2 <- match(cls.list2, cls.phen2)
    } else {
       for (kk in 1:length(cls.list2[, 1])) {
          classes <- unique(cls.list2[kk,])
          cls.phen2 <- c(cls.phen2, classes)
          cls.labels2[kk,] <- match(cls.list2[kk,], classes)
       }
    }

    # Order models by correlation with the reference model's row
    correl <- cor(t(m2))[, loc]
    m.order <- order(correl, decreasing=TRUE)
    correl2 <- correl[m.order]
    m2 <- m2[m.order,]
    model.names2 <- model.names[m.order]
    model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))

    phen.list <- unlist(cls.phen2)
    colors.list <- cls.phen.colors[match(phen.list, cls.phen.index)]

    # Binary target vector (already in sorted sample order)
    if (!is.na(target.phen)) {
       bin.class <- ifelse(cls.list2[target.phen,] == target.class, 1, 0)
    } else if (is.vector(cls.list2)) {
       # Robustness fix: a single-phenotype CLS yields a vector here, for
       # which the matrix indexing in the branch below would fail.
       bin.class <- ifelse(cls.list2 == cls.list2[1], 1, 0)
    } else {
       bin.class <- ifelse(cls.list2[1,] == cls.list2[1,1], 1, 0)
    }

    # Bug fix: suffix the *sorted* sample names with the 0/1 class indicator.
    # The original pasted the unsorted sample.names against the sorted
    # bin.class, mislabeling every heatmap column.
    sample.names2 <- paste(sample.names2, bin.class, sep="_")
    print(bin.class)
    print(paste("n models:", n.models))

    # Per-model ROC area (and p-value) against the binary target; the ROC
    # annotation replaces the correlation-based description built above.
    for (i in 1:n.models) {
       print(paste(i, model.names2[i]))
       m.score <- m2[i,]
       m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
       print(m.score.norm)
       if (length(unique(bin.class)) > 1) {
          perf.auc <- roc.area(bin.class, m.score.norm)
          roc <- signif(perf.auc$A, digits=3)
          p.val <- signif(perf.auc$p.value, digits=3)
       } else {
          # Degenerate target: only one class present, ROC undefined
          roc <- p.val <- "-"
       }
       print(paste("ROC=", roc, " p-val=", p.val))
       model.descs2[i] <- paste(roc, " (", p.val, ")")
    }

    MSIG.HeatMapPlot.7(V = m2, row.names = model.names2,
                       row.names2 = model.descs2, col.labels = cls.labels2,
                       col.classes = cls.phen2, phen.cmap = colors.list, phen.names = phen.names,
                       col.names = sample.names2, main = " ", xlab=" ", ylab=" ", row.norm = row.norm,
                       cmap.type = cmap.type, char.rescale = char.rescale, legend=TRUE)
    dev.off()

    # Optionally save the sorted matrix as a GCT dataset
    if (!is.na(output.dataset)) {
       V.GCT <- data.frame(m2)
       colnames(V.GCT) <- sample.names2
       row.names(V.GCT) <- model.names2
       write.gct.2(gct.data.frame = V.GCT, descs = model.descs2, filename = output.dataset)
    }
}
OPAM.sort.projection.by.score.3 <- function(
input.ds,
input.cls,
results.dir,
normalize.score = TRUE,
normalization.type = "zero.one",
target.phen = NA,
target.class = NA,
user.colors = NA,
decreasing.order = TRUE,
output.dataset = NA,
char.rescale = 1,
cmap.type = 3,
row.norm = TRUE)
# Calls MSIG.HeatMapPlot.8 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2
# does: every model row is scored by ROC area against the binary target
# phenotype, rows are ordered by that ROC, and the best row drives the sample
# ordering.  The phenotype tracks are then re-scored against that winning
# row, reordered (keeping tracks 1 and 2 -- PATHWAY.MUT and COPY.NUMBER --
# in place) and truncated to at most the top 40 tracks.
#
# Arguments mirror OPAM.sort.projection.by.score.2 minus "model"; see there.
{
    library(gtools)
    library(verification)   # roc.area()
    library(ROCR)
    library(MASS)
    library(RColorBrewer)   # brewer.pal()
    library(heatmap.plus)

    # Read gene expression / projection dataset (GCT format)
    dataset <- MSIG.Gct2Frame(filename = input.ds)
    m <- data.matrix(dataset$ds)
    model.names <- dataset$row.names
    Ns <- length(m[1,])
    dim(m)
    sample.names <- dataset$names
    n.models <- length(m[,1])

    # Extract test file prefix (strip directories and the ".gct" suffix)
    temp <- strsplit(input.ds, split="/")
    s <- length(temp[[1]])
    test.file.name <- temp[[1]][s]
    temp <- strsplit(test.file.name, split=".gct")
    test.file.prefix <- temp[[1]][1]
    char.res <- 0.013 * n.models + 0.65

    # Row-wise score normalization (same options as score.2)
    if (normalize.score == TRUE) {
       if (normalization.type == "zero.one") {
          for (i in 1:n.models) {
             m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
          }
       } else if (normalization.type == "z.score") {
          for (i in 1:n.models) {
             m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
          }
       } else if (normalization.type == "r.z.score") {
          for (i in 1:n.models) {
             m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
          }
       }
    }

    # Read phenotype file (CLS format).  NOTE(review): the phenotype-track
    # logic below indexes cls.list[i,] and so effectively assumes a
    # multi-phenotype (matrix) CLS file -- confirm for vector inputs.
    CLS <- MSIG.ReadPhenFile.2(file = input.cls)
    cls.labels <- CLS$class.v
    cls.phen <- CLS$phen
    cls.list <- CLS$class.list
    if (is.vector(cls.labels)) {
       n.phen <- 1
    } else {
       n.phen <- length(cls.labels[,1])
    }

    # Phenotype colors: user-supplied > CLS-supplied > default Brewer palettes
    if (!is.na(user.colors[1])) {
       c.test <- user.colors
    } else {
       if (!is.null(CLS$col.phen)) {
          c.test <- CLS$col.phen
       } else {
          c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                      brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
                      brewer.pal(n=8, name="BuGn"),
                      brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                      brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                      brewer.pal(n=8, name="BuGn"),
                      brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                      brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                      brewer.pal(n=8, name="BuGn"))
       }
    }
    if (!is.null(CLS$phen.names)) {
       phen.names <- CLS$phen.names
    } else {
       phen.names <- "NA"
    }
    cls.phen.index <- unlist(cls.phen)
    cls.phen.colors <- c.test[1:length(cls.phen.index)]
    print("cls.phen.colors:")
    print(cls.phen.colors)

    # Number of classes per phenotype (bookkeeping only)
    n.classes <- vector(length=n.phen, mode="numeric")
    if (n.phen == 1) {
       max.classes <- length(cls.phen)
       n.classes[1] <- max.classes
    } else {
       max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
       for (i in 1:n.phen) {
          n.classes[i] <- length(cls.phen[[i]])
       }
    }

    filename <- paste(results.dir, test.file.prefix, ".SORT.PROJ", sep="")
    pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 17)

    roc.list <- vector(length=n.models, mode="numeric")
    p.val.list <- vector(length=n.models, mode="numeric")

    # Binary target vector (original sample order)
    if (!is.na(target.phen)) {
       bin.class <- ifelse(cls.list[target.phen,] == target.class, 1, 0)
    } else {
       # Bug fix: the original referenced cls.list2[1,1], but cls.list2 is
       # not defined until after the sample sort below; use cls.list.
       bin.class <- ifelse(cls.list[1,] == cls.list[1,1], 1, 0)
    }

    # Score every model by ROC area against the binary target
    model.descs2 <- vector(length = n.models, mode="character")
    for (i in 1:n.models) {
       m.score <- m[i,]
       m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
       if (length(unique(bin.class)) > 1) {
          perf.auc <- roc.area(bin.class, m.score.norm)
          roc <- signif(perf.auc$A, digits=3)
          p.val <- signif(perf.auc$p.value, digits=3)
       } else {
          # Degenerate target: only one class present, ROC undefined
          roc <- p.val <- "-"
       }
       print(paste("ROC=", roc, " p-val=", p.val))
       roc.list[i] <- roc
       p.val.list[i] <- p.val
       model.descs2[i] <- paste(roc, " (", p.val, ")")
    }

    # Order models by decreasing ROC; the top model drives the sample order.
    m.order <- order(roc.list, decreasing=TRUE)
    model.descs2 <- model.descs2[m.order]
    loc <- m.order[1]
    m2 <- m[m.order, ]
    model.names <- model.names[m.order]
    print(c("loc:", loc))
    # NOTE(review): samples are always sorted by decreasing score here,
    # regardless of the decreasing.order argument (as in the original).
    s.order <- order(m[loc,], decreasing = TRUE)
    m2 <- m2[, s.order]
    sample.names2 <- sample.names[s.order]
    if (is.vector(cls.labels)) {
       cls.labels2 <- cls.labels[s.order]
       cls.list2 <- cls.list[s.order]
    } else {
       cls.labels2 <- cls.labels[, s.order]
       cls.list2 <- cls.list[, s.order]
    }

    # Score every phenotype track against the winning model's normalized score
    # ("WT" samples = 0, any aberration = 1)
    m.score <- m2[1,]
    m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
    roc.list <- vector(mode="numeric", length=n.phen)
    phen.descs <- vector(mode="character", length=n.phen)
    for (i in 1:n.phen) {
       bin.gene <- ifelse(cls.list2[i,] == "WT", 0, 1)
       if (length(unique(bin.gene)) > 1) {
          perf.auc <- roc.area(bin.gene, m.score.norm)
          roc <- signif(perf.auc$A, digits=3)
          p.val <- signif(perf.auc$p.value, digits=3)
       } else {
          roc <- "-"
          p.val <- "-"
       }
       print(paste("ROC=", roc, " p-val=", p.val))
       roc.list[i] <- roc
       phen.descs[i] <- paste(roc, " (", p.val, ")")
    }

    # Reorder phenotype tracks by ROC, keeping tracks 1 and 2 fixed
    # (PATHWAY.MUT and COPY.NUMBER), and keep at most the top 40 tracks.
    # NOTE(review): assumes n.phen >= 3 (as the original did).
    g.order <- c(1, 2, order(roc.list[3:n.phen], decreasing=TRUE)+2)
    n.keep <- min(40, n.phen)   # robustness fix: was a hard-coded 40
    phen.descs2 <- phen.descs[g.order][1:n.keep]
    cls.list2 <- cls.list2[g.order,][1:n.keep,]
    phen.names <- phen.names[g.order][1:n.keep]

    # Recompute cls.phen and cls.labels2 as order may have changed
    cls.phen2 <- NULL
    if (is.vector(cls.labels)) {
       classes <- unique(cls.list2)
       cls.phen2 <- classes
       cls.labels2 <- match(cls.list2, cls.phen2)
    } else {
       for (kk in 1:length(cls.list2[, 1])) {
          classes <- unique(cls.list2[kk,])
          cls.phen2 <- c(cls.phen2, classes)
          cls.labels2[kk,] <- match(cls.list2[kk,], classes)
       }
    }
    cls.labels2 <- cls.labels2[1:n.keep,]

    # Map abnormality classes to colors; anything unrecognized stays gray
    phen.list <- unlist(cls.phen2)
    colors.list <- rep("gray", length(phen.list))
    colors.list[phen.list == "MUT"] <- cls.phen.colors[1]
    colors.list[phen.list == "DEL"] <- cls.phen.colors[3]
    colors.list[phen.list == "AMP"] <- cls.phen.colors[4]
    colors.list[phen.list == "ALT"] <- cls.phen.colors[5]

    print("cls.phen2:")
    print(unlist(cls.phen2))
    print("cls.phen:")
    print(unlist(cls.phen))
    print("colors.list:")
    print(colors.list)

    MSIG.HeatMapPlot.8(V = m2, row.names = model.names,
                       row.names2 = model.descs2,
                       col.labels = cls.labels2,
                       col.classes = cls.phen2, phen.cmap = colors.list, phen.names = phen.names,
                       phen.names2 = phen.descs2,
                       col.names = sample.names2, main = " ", xlab=" ", ylab=" ", row.norm = row.norm,
                       cmap.type = cmap.type, char.rescale = char.rescale, legend=FALSE)
    dev.off()

    # Optionally save the sorted matrix as a GCT dataset
    if (!is.na(output.dataset)) {
       V.GCT <- m2
       colnames(V.GCT) <- sample.names2
       # Bug fix: the original assigned model.names2, which is never defined
       # in this function (the reordered names live in model.names).
       row.names(V.GCT) <- model.names
       write.gct(gct.data.frame = V.GCT, descs = model.descs2, filename = output.dataset)
    }
}
OPAM.sort.projection.by.score.4 <- function(
input.ds,
input.cls,
tissue = "NA",
results.dir,
normalize.score = T,
normalization.type = "zero.one",
model = "NA",
target.phen = NA,
target.class = NA,
user.colors = NA,
decreasing.order = T,
output.dataset = NA,
char.rescale = 1,
cmap.type = 3,
row.norm = T,
u.gene.names.known = "NA"
)
# Calls MSIG.HeatMapPlot.8 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2 does.
# However, it *will* use "model" if it cannot calculate p-values on the gene signatures, which
# happens when every cell line has a genomic aberration.
#
# Runs 3 passes on the data:
# 1st pass: looks at the genes and copy number alterations specified by u.gene.names.known
# 2nd pass: looks at only the top abnormalities (using a p-value cutoff) from the 1st pass, and adjusts
# the PATHWAY.MUT vector accordingly (only according to the genes, not by copy number data)
# 3rd pass: Takes the winning signature from the 2nd pass and then looks all the genes available
{
library(gtools)
library(verification)
library(ROCR)
library(MASS)
library(RColorBrewer)
library(heatmap.plus)
dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
m <- data.matrix(dataset$ds)
model.names <- dataset$row.names
# model.descs <- dataset$descs
Ns <- length(m[1,])
dim(m)
sample.names <- dataset$names
n.models <- length(m[,1])
temp <- strsplit(input.ds, split="/") # Extract test file name
s <- length(temp[[1]])
test.file.name <- temp[[1]][s]
temp <- strsplit(test.file.name, split=".gct")
test.file.prefix <- temp[[1]][1]
char.res <- 0.013 * n.models + 0.65
# normalize scores
if (normalize.score == T) {
if (normalization.type == "zero.one") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
}
} else if (normalization.type == "z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
}
} else if (normalization.type == "r.z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
}
}
}
CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
cls.labels <- CLS$class.v
cls.phen <- CLS$phen
cls.list <- CLS$class.list
if (is.vector(cls.labels)) {
n.phen <- 1
} else {
n.phen <- length(cls.labels[,1])
}
if (!is.na(user.colors[1])) {
c.test <- user.colors
} else {
if (!is.null(CLS$col.phen)) {
c.test <- CLS$col.phen
} else {
c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"))
}
}
if (!is.null(CLS$phen.names)) {
phen.names <- CLS$phen.names
} else {
phen.names <- "NA"
}
cls.phen.index <- unlist(cls.phen)
cls.phen.colors <- c.test[1:length(cls.phen.index)]
# print("cls.phen.colors:")
# print(cls.phen.colors)
n.classes <- vector(length=n.phen, mode="numeric")
if (n.phen == 1) {
max.classes <- length(cls.phen)
n.classes[1] <- max.classes
} else {
max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
for (i in 1:n.phen) {
n.classes[i] <- length(cls.phen[[i]])
}
}
print("--- Begin Pass 1 ---")
# model.names.original = model.names
# m.original = m
phen.pass1 = c( "PATHWAY.MUT", u.gene.names.known)
n.phen.pass1 = length(u.gene.names.known)+1
ind.phen.pass1 = which( phen.names %in% phen.pass1 )
phen.pass1 = phen.names[ind.phen.pass1]
roc.list.pass1 = vector( length=n.models, mode="numeric" )
p.val.list.pass1 = vector( length=n.models, mode="numeric" )
cls.list.pass1 = cls.list[ind.phen.pass1,]
cls.list.pass1.2 = ifelse(cls.list.pass1 == "WT", 0, 1)
cls.labels.pass1 = cls.labels[ind.phen.pass1,]
# browser()
if (!is.na(target.phen) && length(phen.pass1) > 2 ) {
# bin.class.pass1 = apply( cls.list2.pass1.2[3:n.phen,], MARGIN=2, FUN=sum)/(n.phen-2)
# bin.class.pass1 = ( bin.class.pass1 - min(bin.class.pass1))/(max(bin.class.pass1) - min(bin.class.pass1))
# bin.class.pass1 <- ifelse(cls.list[target.phen,] == target.class, 1, 0)
bin.class.pass1 <- ifelse(apply( cls.list.pass1.2[2:n.phen.pass1,], MARGIN=2, FUN=sum) > 0, 1, 0)
cls.labels.pass1[1,] = bin.class.pass1
cls.list.pass1[1,] = ifelse(bin.class.pass1 == 1, "MUT", "WT")
# if( length(unique(bin.class.pass1)) == 1) {
# cls.list.3 = ifelse( cls.list == "DEL" | cls.list == "AMP", 1, 0)
# copy.number.pass1 = ifelse( apply(cls.list.3[3:n.phen,], MARGIN=2, FUN=sum) >= 1, "ALT", "WT")
# copy.class.pass1 = ifelse( copy.number.pass1 == "ALT", 1, 0)
# bin.class.pass1 = copy.class.pass1
# print( "Calculating p-value with respect to copy number alterations")
# }
} else if (length(phen.pass1)==2 ) {
bin.class.pass1 = ifelse(cls.list[2,]== "WT", 0,1)
cls.labels.pass1[1,] = bin.class.pass1
cls.list.pass1[1,] = ifelse(bin.class.pass1 == 1, "MUT", "WT")
} else {
bin.class.pass1 <- ifelse(cls.list[1,] == cls.list2[1,1], 1, 0)
}
# browser()
#pdf("ROCplots.pdf")
model.descs2.pass1 = vector(length = n.models, mode="character")
for (i in 1:n.models) {
m.score <- m[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass1)) > 1) {
perf.auc <- roc.area(bin.class.pass1, m.score.norm)
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.pass1[i] = perf.auc$A
p.val.list.pass1[i] = perf.auc$p.value
roc.plot(bin.class.pass1, m.score.norm)
} else {
roc <- p.val <- "-"
roc.list.pass1[i] = NA
p.val.list.pass1[i] = NA
}
print(paste(format(rownames(m)[i], width=30), "ROC=", roc, " p-val=", p.val))
model.descs2.pass1[i] <- paste(roc, " (", p.val, ")")
}
dev.off()
# browser()
if( is.na(roc.list.pass1[1]) ){
loc <- match(model, model.names)
s.order.pass1 <- order(m[loc,], decreasing = decreasing.order)
# loc = s.order.pass1[1]
m2.pass1 <- m[, s.order.pass1]
correl <- cor(t(m2.pass1))[, loc]
m.order.pass1 <- order(correl, decreasing=T)
m2.pass1 <- m2.pass1[m.order.pass1, ]
} else{
m.order.pass1 = order(roc.list.pass1, decreasing=TRUE, na.last=TRUE)
m2.pass1 <- m[m.order.pass1, ]
s.order.pass1 <- order(m2.pass1[1,], decreasing = TRUE )
m2.pass1 <- m2.pass1[, s.order.pass1]
}
# m2.pass1 <- m2.pass1[m.order.pass1, ]
model.descs2.pass1 = model.descs2.pass1[m.order.pass1]
sample.names2.pass1 <- colnames(m2.pass1)
model.names.pass1 <- rownames(m2.pass1)
if (is.vector(cls.labels)) {
cls.labels2.pass1 <- cls.labels.pass1[s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[s.order.pass1]
} else {
cls.labels2.pass1 <- cls.labels.pass1[, s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[, s.order.pass1]
}
#browser()
# browser()
m.score.pass1 <- m2.pass1[1,]
m.score.norm.pass1 <- (m.score.pass1 - min(m.score.pass1))/(max(m.score.pass1) - min(m.score.pass1))
roc.list.phen.pass1 = vector(mode="numeric", length=n.phen)
p.val.list.phen.pass1 = vector(mode="numeric", length=n.phen)
phen.descs.pass1 = vector(mode="character", length=n.phen)
for( i in 1:n.phen.pass1 ){
bin.gene = ifelse( cls.list2.pass1[i,]=="WT", 0, 1)
if (length(unique(bin.gene)) > 1) {
perf.auc <- roc.area(bin.gene, m.score.norm.pass1)
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1- perf.auc$p.val, digits=3)
# abnormality = unique(cls.list2.pass1[i,])[which(unique(cls.list2.pass1[i,]) != "WT")]
## cls.list2.pass1[i,] = ifelse( cls.list2.pass1[i,] == "WT", abnormality, "WT" )
# phen.names[i] = paste(phen.names[i], "-opposite", sep="")
# roc.list.phen.pass1[i] = 1 - perf.auc$A
# p.val.list.phen.pass1[i] = perf.auc$p.val # Don't want to use these "opposite" genomic aberrations in Pass 2
# # because they make PATHWAY.MUT+COPY.NUMBER too dense
# } else{
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.phen.pass1[i] = perf.auc$A
p.val.list.phen.pass1[i] = perf.auc$p.val
# }
} else {
roc <- "-"
p.val <- "-"
roc.list.phen.pass1[i] = NA
p.val.list.phen.pass1[i] = NA
}
print(paste(format(phen.pass1[i], width=12), "ROC=", roc, " p-val=", p.val))
# p.val.list[i] = p.val
phen.descs.pass1[i] <- paste(roc, " (", p.val, ")")
}
# browser()
g.order.pass1 = c(1, order(roc.list.phen.pass1[2:n.phen.pass1], decreasing=TRUE, na.last=TRUE)+1) # keep PATHWAY.MUT as 1
roc.list.phen.pass1 = roc.list.phen.pass1[g.order.pass1]
p.val.list.phen.pass1 = p.val.list.phen.pass1[g.order.pass1]
phen.descs2.pass1 = phen.descs.pass1[g.order.pass1][1:n.phen.pass1]
cls.list2.pass1 = cls.list2.pass1[g.order.pass1,][1:n.phen.pass1,]
phen.names.pass1 = phen.pass1[g.order.pass1][1:n.phen.pass1]
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass1 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(cls.list2.pass1)
cls.phen2.pass1 <- classes
cls.labels2.pass1 <- match(cls.list2.pass1, cls.phen2.pass1)
} else {
for (kk in 1:length(cls.list2.pass1[, 1])) {
classes <- unique(cls.list2.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
cls.labels2.pass1[kk,] <- match(cls.list2.pass1[kk,], classes)
}
}
cls.labels2.pass1 = cls.labels2.pass1[1:n.phen.pass1,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass1 <- unlist(cls.phen2.pass1)
# colors.list <- ifelse(unlist(cls.phen2) == target.class,
# ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
# ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
colors.list.pass1 = rep( "gray", length(phen.list.pass1))
colors.list.pass1[phen.list.pass1=="MUT"] = cls.phen.colors[1]
colors.list.pass1[phen.list.pass1=="DEL"] = cls.phen.colors[3]
colors.list.pass1[phen.list.pass1=="AMP"] = cls.phen.colors[4]
colors.list.pass1[phen.list.pass1=="ALT"] = cls.phen.colors[5]
# print("cls.phen2:")
# print(unlist(cls.phen2))
#
# print("cls.phen:")
# print(unlist(cls.phen))
#
# print("colors.list:")
# print(colors.list)
# browser()
filename <- paste(results.dir, test.file.prefix, ".3Passes.ROC", sep="")
# pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 9)
pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 17 )
# windows(width=12, height=8)
MSIG.HeatMapPlot.9(V = m2.pass1, row.names = model.names.pass1,
row.names2 = model.descs2.pass1,
col.labels = cls.labels2.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list.pass1, phen.names = phen.names.pass1,
phen.names2 = phen.descs2.pass1,
col.names = sample.names2.pass1, main = paste(tissue, "- Phase 1: Known KRAS Pathway Abnormalities (ROC)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
# dev.off()
# break
### Begin Pass 2 ###
print( "--- Begin Pass 2 ---")
# browser()
# p.val.threshold = 0.1
roc.threshold = 0.65
# ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
ind.top.roc = which(roc.list.phen.pass1[-1] >= roc.threshold)+1
if( length(ind.top.roc) > 0){
ind.roc.threshold = c(1, ind.top.roc)
}else{
roc.threshold = 0.6
ind.top.roc = which(roc.list.phen.pass1[-1] >= roc.threshold)+1
ind.roc.threshold = c(1, ind.top.roc)
}
# if( length(ind.top.pval) > 0 ){
# ind.p.val.threshold = c(1, ind.top.pval)
# } else if( length(ind.top.pval) < 1 ) {
# p.val.threshold = 0.15
# ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
# ind.p.val.threshold = c(1, ind.top.pval) }
# if( length(ind.top.pval) < 1){
# p.val.threshold = 0.2
# ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
# ind.p.val.threshold = c(1, ind.top.pval)
# }
# if( length(ind.top.pval) < 1){
# p.val.threshold = 0.25
# ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
# ind.p.val.threshold = c(1, ind.top.pval)
# }
# if( length(ind.top.pval) < 1){
# p.val.threshold = 0.3
# ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
# ind.p.val.threshold = c(1, ind.top.pval)
# }
# if( length( ind.top.pval) < 1 ) {
# ind.top = which(!is.na(p.val.list.phen.pass1[-1]))+1
# ind.p.val.threshold = c( 1, ind.top )
# }
n.phen.pass2 = length(ind.roc.threshold)
# phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
cls.list2.pass2 = cls.list2.pass1[ind.roc.threshold,]
phen.names.pass2 = phen.names.pass1[ind.roc.threshold]
# phen.names.pass2[1] = "PATHWAY.MUT + COPY.NUMBER"
cls.labels2.pass2 = cls.labels2.pass1[ind.roc.threshold,]
# browser()
cls.list2.pass2.2 = ifelse( cls.list2.pass2 == "WT", 0, 1)
cls.list2.pass2.3 = ifelse( cls.list2.pass2 == "DEL" | cls.list2.pass2 == "AMP", 1, 0)
if( n.phen.pass2 > 2 ){
pathway.mut.pass2 = ifelse( apply(cls.list2.pass2.2[2:n.phen.pass2,], MARGIN=2, FUN=sum) >= 1, "MUT", "WT")
# copy.number.pass2 = ifelse( apply(cls.list2.pass2.3[3:n.phen.pass2,], MARGIN=2, FUN=sum) >= 1, "ALT", "WT")
} else{
pathway.mut.pass2 = ifelse( cls.list2.pass2.2[2,] == 1, "MUT", "WT")
# copy.number.pass2 = ifelse( cls.list2.pass2.3[3,] == 1, "ALT", "WT")
}
# browser()
bin.class.pass2 = ifelse( pathway.mut.pass2 == "MUT", 1, 0 )
# copy.class.pass2 = ifelse( copy.number.pass2 == "ALT", 1, 0)
# if( length(unique(bin.class.pass2)) == 1) {
# bin.class.pass2 = copy.class.pass2
# print( "Calculating p-value with respect to copy number alterations")
# }
cls.list2.pass2[1,] = pathway.mut.pass2
# cls.list2.pass2[2,] = copy.number.pass2
roc.list.pass2 = vector( length=n.models, mode="numeric" )
p.val.list.pass2 = vector( length=n.models, mode="numeric" )
# browser()
model.descs2.pass2 = vector(length = n.models, mode="character")
for (i in 1:n.models) {
m.score <- m2.pass1[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass2)) > 1) {
perf.auc <- roc.area(bin.class.pass2, m.score.norm)
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.pass2[i] = perf.auc$A
p.val.list.pass2[i] = perf.auc$p.value
} else {
roc <- p.val <- "-"
roc.list.pass2[i] = NA
p.val.list.pass2[i] = NA
}
print(paste(format(rownames(m2.pass1)[i], width=30), "ROC=", roc, " p-val=", p.val))
model.descs2.pass2[i] <- paste(roc, " (", p.val, ")")
}
# browser()
m.order.pass2 = order(roc.list.pass2, decreasing=TRUE, na.last=TRUE)
model.descs2.pass2 = model.descs2.pass2[m.order.pass2]
# loc.pass2 = m.order.pass2[1]
m2.pass2 <- m2.pass1[m.order.pass2, ]
model.names.pass2 <- rownames(m2.pass2)
# print(c("loc.pass2:", loc.pass2))
s.order.pass2 <- order(m2.pass2[1,], decreasing = TRUE)
m2.pass2 <- m2.pass2[, s.order.pass2]
sample.names2.pass2 <- colnames(m2.pass2)
if (is.vector(cls.labels)) {
cls.labels2.pass2 <- cls.labels2.pass2[s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[s.order.pass2]
} else {
cls.labels2.pass2 <- cls.labels2.pass2[, s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[, s.order.pass2]
}
# browser()
# browser()
m.score.pass2 <- m2.pass2[1,]
m.score.norm.pass2 <- (m.score.pass2 - min(m.score.pass2))/(max(m.score.pass2) - min(m.score.pass2))
roc.list.phen.pass2 = vector(mode="numeric", length=n.phen.pass2)
phen.descs.pass2 = vector(mode="character", length=n.phen.pass2)
for( i in 1:n.phen.pass2 ){
bin.gene = ifelse( cls.list2.pass2[i,]=="WT", 0, 1)
if (length(unique(bin.gene)) > 1) {
perf.auc <- roc.area(bin.gene, m.score.norm.pass2)
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1 - perf.auc$A, digits=3)
# abnormality = unique(cls.list2.pass2[i,])[which(unique(cls.list2.pass2[i,]) != "WT")]
# cls.list2.pass2 = ifelse( cls.list2.pass2[i,] == "WT", abnormality, "WT" )
# phen.names.pass2[i] = paste(phen.names.pass2[i], "-opposite")
# } else{
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.phen.pass2[i] = perf.auc$A
# }
} else {
roc <- "-"
p.val <- "-"
roc.list.phen.pass2[i] = NA
}
print(paste(format(phen.names.pass2[i], width=12), "ROC=", roc, " p-val=", p.val))
# p.val.list[i] = p.val
phen.descs.pass2[i] <- paste(roc, " (", p.val, ")")
}
# browser()
g.order.pass2 = c(1, order(roc.list.phen.pass2[2:n.phen.pass2], decreasing=TRUE, na.last=TRUE)+1) # skip PATHWAY.MUT
phen.descs2.pass2 = phen.descs.pass2[g.order.pass2]
cls.list2.pass2 = cls.list2.pass2[g.order.pass2,]
phen.names.pass2 = phen.names.pass2[g.order.pass2]
# browser()
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass2 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(as.vector(cls.list2.pass2))
cls.phen2.pass2 <- classes
cls.labels2.pass2 <- match(cls.list2.pass2, cls.phen2.pass2)
} else {
for (kk in 1:length(cls.list2.pass2[, 1])) {
classes <- unique(cls.list2.pass2[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass2 <- c(cls.phen2.pass2, classes)
cls.labels2.pass2[kk,] <- match(cls.list2.pass2[kk,], classes)
}
}
cls.labels2.pass2 = cls.labels2.pass2[1:n.phen.pass2,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass2 <- unlist(cls.phen2.pass2)
# colors.list <- ifelse(unlist(cls.phen2) == target.class,
# ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
# ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
colors.list.pass2 = rep( "gray", length(phen.list.pass2))
colors.list.pass2[phen.list.pass2=="MUT"] = cls.phen.colors[1]
colors.list.pass2[phen.list.pass2=="DEL"] = cls.phen.colors[3]
colors.list.pass2[phen.list.pass2=="AMP"] = cls.phen.colors[4]
colors.list.pass2[phen.list.pass2=="ALT"] = cls.phen.colors[5]
MSIG.HeatMapPlot.9(V = m2.pass2, row.names = model.names.pass2,
row.names2 = model.descs2.pass2,
col.labels = cls.labels2.pass2,
col.classes = cls.phen2.pass2,
phen.cmap = colors.list.pass2, phen.names = phen.names.pass2,
phen.names2 = phen.descs2.pass2,
col.names = sample.names2.pass2, main = paste(tissue, "- Phase 2: only ROC >=", roc.threshold,"from 1st pass (ROC)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
### 3rd Pass ###
print( "--- Begin Pass 3 ---")
# browser()
m2.pass3 = m2.pass2
model.names.pass3 = rownames(m2.pass3)
sample.names2.pass3 = colnames(m2.pass3)
# model.descs2.pass3 = model.descs2.pass2
n.phen.pass3 = 40
# phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
cls.list2.pass3 = cls.list[, s.order.pass1][, s.order.pass2]
cls.labels2.pass3 = cls.labels[, s.order.pass1][, s.order.pass2]
# browser()
phen.names.pass3 = phen.names
m.score.pass3 <- m2.pass3[1,]
m.score.norm.pass3 <- (m.score.pass3 - min(m.score.pass3))/(max(m.score.pass3) - min(m.score.pass3))
roc.list.phen.pass3 = vector(mode="numeric", length=n.phen)
phen.descs.pass3 = vector(mode="character", length=n.phen)
p.val.list.phen.pass3 = vector(mode="numeric", length=n.phen)
for( i in 1:n.phen ){
bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
if (length(unique(bin.gene)) > 1) {
perf.auc <- roc.area(bin.gene, m.score.norm.pass3)
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1- perf.auc$p.value, digits=3)
# abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
# cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
# phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
# roc.list.phen.pass3[i] = 1-perf.auc$A
# p.val.list.phen.pass3[i] = 1- perf.auc$p.value
# } else{
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.phen.pass3[i] = perf.auc$A
p.val.list.phen.pass3[i] = perf.auc$p.value
# }
} else {
roc <- NA
p.val <- NA
roc.list.phen.pass3[i] = NA
p.val.list.phen.pass3[i] = NA
}
# print(paste("ROC=", roc, " p-val=", p.val))
# p.val.list[i] = p.val
phen.descs.pass3[i] <- paste(roc, " (", p.val, ")")
}
# browser()
# p.val.threshold = 0.1
# Pass 3 phenotype filter: always keep rows 1-2 (PATHWAY.MUT and COPY.NUMBER),
# plus every phenotype whose ROC vs. the top signature is >= roc.threshold.
roc.threshold = 0.65
len = length(which(roc.list.phen.pass3[-1:-2] >= roc.threshold)) + 2
# len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
# if( len == 2 ){
# p.val.threshold = 0.15
# len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
# }
# if( len == 2 ){
# p.val.threshold = 0.2
# len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
# }
# Cap the number of phenotype rows shown in the heatmap.
# NOTE(review): the literal 40 presumably should track n.phen.pass3 (also 40
# where it is set above) -- confirm before changing either constant.
if( len > 40 ) len = 40
# g.order.pass3.1 = c(1, 2, order(p.val.list.phen.pass3[3:n.phen], decreasing=FALSE, na.last=TRUE)+2 )
# Order the remaining phenotypes by ascending p-value (NAs last), then truncate to len rows.
g.order.pass3 = c(1, 2, order(p.val.list.phen.pass3[-1:-2], decreasing=FALSE, na.last=TRUE)+2 )[1:len] # skip PATHWAY.MUT and COPY.NUMBER
phen.descs2.pass3 = phen.descs.pass3[g.order.pass3]
cls.list2.pass3 = cls.list2.pass3[g.order.pass3,]
cls.labels2.pass3 = cls.labels2.pass3[g.order.pass3,]
phen.names.pass3 = phen.names.pass3[g.order.pass3]
cls.list.mut = ifelse(cls.list2.pass3[-1:-2,] == "MUT", 1, 0)
cls.list.alt = ifelse(cls.list2.pass3[-1:-2,] == "DEL" | cls.list2.pass3[-1:-2,] == "AMP", 1, 0)
# browser()
if( !is.vector(cls.list.alt) ){
cls.list.mut.sum = apply(cls.list.mut, MARGIN=2, FUN=sum)
cls.list.alt.sum = apply(cls.list.alt, MARGIN=2, FUN=sum)
cls.list.mut.sum = ifelse(cls.list.mut.sum + cls.list.alt.sum > 0, 1, 0)
cls.list2.pass3[1,] = ifelse( cls.list.mut.sum >= 1, "MUT", "WT")
cls.list2.pass3[2,] = ifelse( cls.list.alt.sum >= 1, "ALT", "WT")
bin.class.pass3 = cls.list.mut.sum
} else{
cls.list2.pass3[1,] = ifelse(cls.list.mut == 1, "MUT", "WT")
cls.list2.pass3[2,] = ifelse(cls.list.alt == 1, "ALT", "WT")
bin.class.pass3 = cls.list.mut
}
for( i in 1:2 ){ # Recalculate ROC and p-value for PATHWAY.MUT and COPY.NUMBER
bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
if (length(unique(bin.gene)) > 1) {
perf.auc <- roc.area(bin.gene, m.score.norm.pass3)
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1- perf.auc$p.value, digits=3)
# abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
# cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
# phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
# roc.list.phen.pass3[i] = 1-perf.auc$A
# p.val.list.phen.pass3[i] = 1- perf.auc$p.value
# } else{
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.phen.pass3[i] = perf.auc$A
p.val.list.phen.pass3[i] = perf.auc$p.value
# }
} else {
roc <- NA
p.val <- NA
roc.list.phen.pass3[i] = NA
p.val.list.phen.pass3[i] = NA
}
print(paste(format(phen.names.pass3[i], width=12), "ROC=", roc, " p-val=", p.val))
# p.val.list[i] = p.val
phen.descs2.pass3[i] <- paste(roc, " (", p.val, ")")
}
# browser()
model.descs2.pass3 = vector(length = n.models, mode="character")
roc.list.pass3 = vector(length=n.models, mode="double")
p.val.list.pass3 = vector(length=n.models, mode="double")
for (i in 1:n.models) {
m.score <- m2.pass3[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass3)) > 1) {
perf.auc <- roc.area(bin.class.pass3, m.score.norm)
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.pass3[i] = perf.auc$A
p.val.list.pass3[i] = perf.auc$p.value
} else {
roc <- p.val <- "-"
roc.list.pass3[i] = NA
p.val.list.pass3[i] = NA
}
print(paste(format(rownames(m2.pass3)[i], width=30), "ROC=", roc, " p-val=", p.val))
model.descs2.pass3[i] <- paste(roc, " (", p.val, ")")
}
m.order.pass3 = order(roc.list.pass3, decreasing=TRUE)
m2.pass3 = m2.pass3[m.order.pass3,]
model.descs2.pass3 = model.descs2.pass3[m.order.pass3]
s.order.pass3 = order(m2.pass3[1,], decreasing=TRUE)
m2.pass3 = m2.pass3[,s.order.pass3]
sample.names2.pass3 = colnames(m2.pass3)
model.names.pass3 = rownames(m2.pass3)
cls.phen2.pass3 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(as.vector(cls.list2.pass3))
cls.phen2.pass3 <- classes
cls.labels2.pass3 <- match(cls.list2.pass3, cls.phen2.pass3)
} else {
# browser()
for (kk in 1:length(cls.list2.pass3[, 1])) {
# browser()
classes <- unique(cls.list2.pass3[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass3 <- c(cls.phen2.pass3, classes)
cls.labels2.pass3[kk,] <- match(cls.list2.pass3[kk,], classes)
}
}
# cls.labels2.pass3 = cls.labels2.pass3[1:n.phen.pass3,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass3 <- unlist(cls.phen2.pass3)
# colors.list <- ifelse(unlist(cls.phen2) == target.class,
# ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
# ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
colors.list.pass3 = rep( "gray", length(phen.list.pass3))
colors.list.pass3[phen.list.pass3=="MUT"] = cls.phen.colors[1]
colors.list.pass3[phen.list.pass3=="DEL"] = cls.phen.colors[3]
colors.list.pass3[phen.list.pass3=="AMP"] = cls.phen.colors[4]
colors.list.pass3[phen.list.pass3=="ALT"] = cls.phen.colors[5]
phen.names.pass3[1] = "PATHWAY.MUT+COPY.NUMBER"
# browser()
MSIG.HeatMapPlot.9(V = m2.pass3, row.names = model.names.pass3,
row.names2 = model.descs2.pass3,
col.labels = cls.labels2.pass3,
col.classes = cls.phen2.pass3,
phen.cmap = colors.list.pass3, phen.names = phen.names.pass3,
phen.names2 = phen.descs2.pass3,
col.names = sample.names2.pass3, main = paste(tissue, "- 3rd Pass: Top signature from 2nd pass with all genes ( ROC >=", roc.threshold, ") (ROC)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
dev.off()
if (!is.na(output.dataset)) {
V.GCT <- m2
colnames(V.GCT) <- sample.names2
row.names(V.GCT) <- model.names2
write.gct(gct.data.frame = V.GCT, descs = model.descs2, filename =output.dataset)
}
}
OPAM.sort.projection.by.score.5 <- function(
input.ds,                         # GCT file of signature projection scores (models x samples)
input.cls,                        # CLS phenotype file (read with MSIG.ReadPhenFile.2)
tissue = "NA",                    # tissue label, used only in plot titles
results.dir,                      # directory where the output PDF is written
normalize.score = T,              # normalize each model's scores before analysis?
normalization.type = "zero.one",  # "zero.one", "z.score" or "r.z.score" (median/MAD)
model = "NA",                     # fallback model to sort by when REC scores are all NA
target.phen = NA,                 # target phenotype; when NA, pass 1 scores vs. the first class label
target.class = NA,                # target class within target.phen (only referenced in commented-out code below)
user.colors = NA,                 # optional user-supplied phenotype color map
decreasing.order = T,             # sort direction when sorting by the fallback model
output.dataset = NA,              # optional output GCT path (NA = no dataset written)
char.rescale = 1,                 # character rescale factor passed to MSIG.HeatMapPlot.9
cmap.type = 3,                    # color-map type passed to MSIG.HeatMapPlot.9
row.norm = T,                     # row-normalize the heatmap?
u.gene.names.known = "NA"         # known pathway gene/aberration names examined in pass 1
)
# Calls MSIG.HeatMapPlot.9 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2 does.
# However, it *will* use "model" if it cannot calculate p-values on the gene signatures, which
# happens when every cell line has a genomic aberration.
#
# Runs 3 passes on the data:
# 1st pass: looks at the genes and copy number alterations specified by u.gene.names.known
# 2nd pass: looks at only the top abnormalities (using a p-value cutoff) from the 1st pass, and adjusts
# the PATHWAY.MUT vector accordingly (only according to the genes, not by copy number data)
# 3rd pass: Takes the winning signature from the 2nd pass and then looks all the genes available
#
# Very similar to OPAM.sort.projection.by.score.4, however this version uses rec.area instead of
# roc.area to calculate REC/ROC scores and p-values for PATHWAY.MUT, the vector of total genomic aberrations
# in all samples
{
library(gtools)
library(verification)
library(ROCR)
library(MASS)
library(RColorBrewer)
library(heatmap.plus)
dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
m <- data.matrix(dataset$ds)
model.names <- dataset$row.names
# model.descs <- dataset$descs
Ns <- length(m[1,])
dim(m)
sample.names <- dataset$names
n.models <- length(m[,1])
temp <- strsplit(input.ds, split="/") # Extract test file name
s <- length(temp[[1]])
test.file.name <- temp[[1]][s]
temp <- strsplit(test.file.name, split=".gct")
test.file.prefix <- temp[[1]][1]
char.res <- 0.013 * n.models + 0.65
# normalize scores
if (normalize.score == T) {
if (normalization.type == "zero.one") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
}
} else if (normalization.type == "z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
}
} else if (normalization.type == "r.z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
}
}
}
CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
cls.labels <- CLS$class.v
cls.phen <- CLS$phen
cls.list <- CLS$class.list
if (is.vector(cls.labels)) {
n.phen <- 1
} else {
n.phen <- length(cls.labels[,1])
}
if (!is.na(user.colors[1])) {
c.test <- user.colors
} else {
if (!is.null(CLS$col.phen)) {
c.test <- CLS$col.phen
} else {
c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"))
}
}
if (!is.null(CLS$phen.names)) {
phen.names <- CLS$phen.names
} else {
phen.names <- "NA"
}
cls.phen.index <- unlist(cls.phen)
cls.phen.colors <- c.test[1:length(cls.phen.index)]
# print("cls.phen.colors:")
# print(cls.phen.colors)
n.classes <- vector(length=n.phen, mode="numeric")
if (n.phen == 1) {
max.classes <- length(cls.phen)
n.classes[1] <- max.classes
} else {
max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
for (i in 1:n.phen) {
n.classes[i] <- length(cls.phen[[i]])
}
}
# model.names.original = model.names
# m.original = m
phen.pass1 = c( "PATHWAY.MUT", u.gene.names.known)
n.phen.pass1 = length(u.gene.names.known)+1
ind.phen.pass1 = which( phen.names %in% phen.pass1 )
phen.pass1 = phen.names[ind.phen.pass1]
roc.list.pass1 = vector( length=n.models, mode="numeric" )
p.val.list.pass1 = vector( length=n.models, mode="numeric" )
cls.list.pass1 = cls.list[ind.phen.pass1,]
cls.list.pass1.2 = ifelse(cls.list.pass1 == "WT", 0, 1)
cls.labels.pass1 = cls.labels[ind.phen.pass1,]
# browser()
# Build the binary/graded target vector for pass 1.  When a target phenotype is
# given, each sample's value is its count of known pathway aberrations (sum over
# all rows except row 1, PATHWAY.MUT), min-max normalized to [0, 1] for rec.area().
if (!is.na(target.phen)) {
   bin.class.pass1 = apply( cls.list.pass1.2[-1,], MARGIN=2, FUN=sum)
   # Normalize bin.class.pass1 to [0, 1]
   if( length(unique(bin.class.pass1)) > 1){
      bin.class.pass1 = ( bin.class.pass1 - min(bin.class.pass1))/(max(bin.class.pass1) - min(bin.class.pass1))
   } else {
      # Degenerate case: every sample has the same aberration count, so the
      # min-max normalization above would divide by zero.  Fall back to a
      # constant vector; the scoring loop below detects the single unique value
      # and records NA instead of computing REC/ROC.
      # FIX(review): the original branch was "else if (length(unique(bin.class.pass1)) > 1)"
      # -- an unreachable duplicate of the "if" condition -- and assigned the
      # result to "bin.class" (wrong variable) instead of "bin.class.pass1".
      bin.class.pass1 = rep(1, length(cls.list[1,]))
   }
   # Row 1 (PATHWAY.MUT) summarizes the pathway: MUT if any aberration present.
   cls.list.pass1[1,] = ifelse(bin.class.pass1 > 0, "MUT", "WT")
} else {
   # No target phenotype: score against membership in the first class label.
   # NOTE(review): "cls.list2" is not defined at this point in the function, so
   # this branch would error if ever taken -- confirm against the callers.
   bin.class.pass1 <- ifelse(cls.list[1,] == cls.list2[1,1], 1, 0)
}
# browser()
model.descs2.pass1 = vector(length = n.models, mode="character")
for (i in 1:n.models) {
m.score <- m[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass1)) > 1) {
perf.auc <- rec.area(bin.class.pass1, m.score.norm)
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.pass1[i] = perf.auc$A
p.val.list.pass1[i] = perf.auc$p.value
} else {
roc <- p.val <- "-"
roc.list.pass1[i] = NA
p.val.list.pass1[i] = NA
}
# print(paste("REC=", roc, " p-val=", p.val))
model.descs2.pass1[i] <- paste(roc, " (", p.val, ")")
}
# browser()
if( is.na(roc.list.pass1[1]) ){
loc <- match(model, model.names)
s.order.pass1 <- order(m[loc,], decreasing = decreasing.order)
# loc = s.order.pass1[1]
m2.pass1 <- m[, s.order.pass1]
correl <- cor(t(m2.pass1))[, loc]
m.order.pass1 <- order(correl, decreasing=T)
m2.pass1 <- m2.pass1[m.order.pass1, ]
} else{
m.order.pass1 = order(roc.list.pass1, decreasing=TRUE, na.last=TRUE)
# m.order.pass1 = order(p.val.list.pass1, decreasing=FALSE, na.last=TRUE)
m2.pass1 <- m[m.order.pass1, ]
s.order.pass1 <- order(m2.pass1[1,], decreasing = TRUE )
m2.pass1 <- m2.pass1[, s.order.pass1]
}
# m2.pass1 <- m2.pass1[m.order.pass1, ]
model.descs2.pass1 = model.descs2.pass1[m.order.pass1]
sample.names2.pass1 <- colnames(m2.pass1)
model.names.pass1 <- rownames(m2.pass1)
# browser()
if (is.vector(cls.labels)) {
cls.labels2.pass1 <- cls.labels.pass1[s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[s.order.pass1]
} else {
cls.labels2.pass1 <- cls.labels.pass1[, s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[, s.order.pass1]
}
#browser()
# browser()
m.score.pass1 <- m2.pass1[1,]
m.score.norm.pass1 <- (m.score.pass1 - min(m.score.pass1))/(max(m.score.pass1) - min(m.score.pass1))
roc.list.phen.pass1 = vector(mode="numeric", length=n.phen)
p.val.list.phen.pass1 = vector(mode="numeric", length=n.phen)
phen.descs.pass1 = vector(mode="character", length=n.phen)
# Pass 1: score each known phenotype (gene aberration) row against the top
# model's normalized score using the ROC AUC from verification::roc.area().
for( i in seq_len(n.phen.pass1) ){
   # Binarize the phenotype row: WT -> 0, any aberration (MUT/DEL/AMP/...) -> 1.
   bin.gene = ifelse( cls.list2.pass1[i,]=="WT", 0, 1)
   if (length(unique(bin.gene)) > 1) {
      perf.auc <- roc.area(bin.gene, m.score.norm.pass1)
      roc <- signif(perf.auc$A, digits=3)
      p.val <- signif(perf.auc$p.value, digits=3)
      roc.list.phen.pass1[i] = perf.auc$A
      # FIX(review): was "perf.auc$p.val", which only worked through R's partial
      # matching of "$" on list names ("p.val" -> "p.value").  Use the full name.
      p.val.list.phen.pass1[i] = perf.auc$p.value
   } else {
      # Phenotype is constant across all samples: the ROC is undefined.
      roc <- "-"
      p.val <- "-"
      roc.list.phen.pass1[i] = NA
      p.val.list.phen.pass1[i] = NA
   }
   # Row annotation shown next to the heatmap: "ROC (p-value)".
   phen.descs.pass1[i] <- paste(roc, " (", p.val, ")")
}
# browser()
g.order.pass1 = c(1, order(roc.list.phen.pass1[2:n.phen.pass1], decreasing=TRUE, na.last=TRUE)+1) # keep PATHWAY.MUT and COPY.NUMBER as 1 and 2
roc.list.phen.pass1 = roc.list.phen.pass1[g.order.pass1]
p.val.list.phen.pass1 = p.val.list.phen.pass1[g.order.pass1]
phen.descs2.pass1 = phen.descs.pass1[g.order.pass1][1:n.phen.pass1]
cls.list2.pass1 = cls.list2.pass1[g.order.pass1,][1:n.phen.pass1,]
phen.names.pass1 = phen.pass1[g.order.pass1][1:n.phen.pass1]
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass1 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(cls.list2.pass1)
cls.phen2.pass1 <- classes
cls.labels2.pass1 <- match(cls.list2.pass1, cls.phen2.pass1)
} else {
for (kk in 1:length(cls.list2.pass1[, 1])) {
classes <- unique(cls.list2.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
cls.labels2.pass1[kk,] <- match(cls.list2.pass1[kk,], classes)
}
}
cls.labels2.pass1 = cls.labels2.pass1[1:n.phen.pass1,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass1 <- unlist(cls.phen2.pass1)
# colors.list <- ifelse(unlist(cls.phen2) == target.class,
# ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
# ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
colors.list.pass1 = rep( "gray", length(phen.list.pass1))
colors.list.pass1[phen.list.pass1=="MUT"] = cls.phen.colors[1]
colors.list.pass1[phen.list.pass1=="DEL"] = cls.phen.colors[3]
colors.list.pass1[phen.list.pass1=="AMP"] = cls.phen.colors[4]
colors.list.pass1[phen.list.pass1=="ALT"] = cls.phen.colors[5]
# print("cls.phen2:")
# print(unlist(cls.phen2))
#
# print("cls.phen:")
# print(unlist(cls.phen))
#
# print("colors.list:")
# print(colors.list)
# browser()
filename <- paste(results.dir, test.file.prefix, ".3Passes.REC_ks", sep="")
# pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 9)
pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 17 )
# windows(width=12, height=8)
MSIG.HeatMapPlot.9(V = m2.pass1, row.names = model.names.pass1,
row.names2 = model.descs2.pass1,
col.labels = cls.labels2.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list.pass1, phen.names = phen.names.pass1,
phen.names2 = phen.descs2.pass1,
col.names = sample.names2.pass1, main = paste(tissue, "- 1st Pass: Known KRAS Pathway Abnormalities (REC)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
### Begin Pass 2 ###
print( "--- Begin Pass 2 ---")
# browser()
# Select the phenotypes passing the pass-1 p-value cutoff, always keeping row 1
# (PATHWAY.MUT).  If nothing passes at 0.1 the cutoff is progressively relaxed
# (0.15, 0.2, 0.25, 0.3); as a last resort, keep every phenotype with a defined
# (non-NA) p-value.  The "+1" shifts indices because row 1 is excluded from the
# which() over rows 2:n.phen.pass1.
p.val.threshold = 0.1
ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
if( length(ind.top.pval) > 0 ){
ind.p.val.threshold = c(1, ind.top.pval)
} else if( length(ind.top.pval) < 1 ) { # NOTE(review): this condition is redundant -- it is always TRUE inside the else branch
p.val.threshold = 0.15
ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
ind.p.val.threshold = c(1, ind.top.pval) }
if( length(ind.top.pval) < 1){
p.val.threshold = 0.2
ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
ind.p.val.threshold = c(1, ind.top.pval)
}
if( length(ind.top.pval) < 1){
p.val.threshold = 0.25
ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
ind.p.val.threshold = c(1, ind.top.pval)
}
if( length(ind.top.pval) < 1){
p.val.threshold = 0.3
ind.top.pval = which(p.val.list.phen.pass1[2:n.phen.pass1] <= p.val.threshold )+1
ind.p.val.threshold = c(1, ind.top.pval)
}
# Final fallback: no phenotype passed any threshold -- keep all with non-NA p-values.
if( length( ind.top.pval) < 1 ) {
ind.top = which(!is.na(p.val.list.phen.pass1[-1]))+1
ind.p.val.threshold = c( 1, ind.top )
}
n.phen.pass2 = length(ind.p.val.threshold)
# phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
cls.list2.pass2 = cls.list2.pass1[ind.p.val.threshold,]
phen.names.pass2 = phen.names.pass1[ind.p.val.threshold]
# phen.names.pass2[1] = "PATHWAY.MUT + COPY.NUMBER"
cls.labels2.pass2 = cls.labels2.pass1[ind.p.val.threshold,]
# browser()
cls.list2.pass2.2 = ifelse( cls.list2.pass2 == "WT", 0, 1)
cls.list2.pass2.3 = ifelse( cls.list2.pass2 == "DEL" | cls.list2.pass2 == "AMP", 1, 0)
if( n.phen.pass2 > 2 ){
pathway.mut.pass2 = apply(cls.list2.pass2.2[2:n.phen.pass2,], MARGIN=2, FUN=sum)
bin.class.pass2 = pathway.mut.pass2/length(pathway.mut.pass2)
bin.class.pass2 = ( bin.class.pass2 - min(bin.class.pass2))/(max(bin.class.pass2) - min(bin.class.pass2))
cls.list2.pass2[1,] = ifelse( bin.class.pass2 > 0, "MUT", "WT")
# copy.number.pass2 = ifelse( apply(cls.list2.pass2.3[3:n.phen.pass2,], MARGIN=2, FUN=sum) >= 1, "ALT", "WT")
} else{
pathway.mut.pass2 = ifelse( cls.list2.pass2.2[2,] == 1, "MUT", "WT")
bin.class.pass2 = ifelse( pathway.mut.pass2 == "MUT", 1, 0 )
cls.list2.pass2[1,] = pathway.mut.pass2
# copy.number.pass2 = ifelse( cls.list2.pass2.3[3,] == 1, "ALT", "WT")
}
# browser()
# bin.class.pass2 =
# copy.class.pass2 = ifelse( copy.number.pass2 == "ALT", 1, 0)
# if( length(unique(bin.class.pass2)) == 1) {
# bin.class.pass2 = copy.class.pass2
# print( "Calculating p-value with respect to copy number alterations")
# }
# cls.list2.pass2[2,] = copy.number.pass2
roc.list.pass2 = vector( length=n.models, mode="numeric" )
p.val.list.pass2 = vector( length=n.models, mode="numeric" )
# browser()
model.descs2.pass2 = vector(length = n.models, mode="character")
for (i in 1:n.models) {
m.score <- m2.pass1[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass2)) > 1) {
perf.auc <- rec.area(bin.class.pass2, m.score.norm)
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.pass2[i] = perf.auc$A
p.val.list.pass2[i] = perf.auc$p.value
} else {
roc <- p.val <- "-"
roc.list.pass2[i] = NA
p.val.list.pass2[i] = NA
}
print(paste("REC=", roc, " p-val=", p.val))
model.descs2.pass2[i] <- paste(roc, " (", p.val, ")")
}
# browser()
m.order.pass2 = order(roc.list.pass2, decreasing=TRUE, na.last=TRUE)
# m.order.pass2 = order(p.val.list.pass2, decreasing=FALSE, na.last=TRUE)
model.descs2.pass2 = model.descs2.pass2[m.order.pass2]
# loc.pass2 = m.order.pass2[1]
m2.pass2 <- m2.pass1[m.order.pass2, ]
model.names.pass2 <- rownames(m2.pass2)
# print(c("loc.pass2:", loc.pass2))
s.order.pass2 <- order(m2.pass2[1,], decreasing = TRUE)
m2.pass2 <- m2.pass2[, s.order.pass2]
sample.names2.pass2 <- colnames(m2.pass2)
if (is.vector(cls.labels)) {
cls.labels2.pass2 <- cls.labels2.pass2[s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[s.order.pass2]
} else {
cls.labels2.pass2 <- cls.labels2.pass2[, s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[, s.order.pass2]
}
# browser()
# browser()
m.score.pass2 <- m2.pass2[1,]
m.score.norm.pass2 <- (m.score.pass2 - min(m.score.pass2))/(max(m.score.pass2) - min(m.score.pass2))
roc.list.phen.pass2 = vector(mode="numeric", length=n.phen.pass2)
phen.descs.pass2 = vector(mode="character", length=n.phen.pass2)
for( i in 1:n.phen.pass2 ){
bin.gene = ifelse( cls.list2.pass2[i,]=="WT", 0, 1)
if (length(unique(bin.gene)) > 1) {
perf.auc <- roc.area(bin.gene, m.score.norm.pass2)
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1 - perf.auc$A, digits=3)
# abnormality = unique(cls.list2.pass2[i,])[which(unique(cls.list2.pass2[i,]) != "WT")]
# cls.list2.pass2 = ifelse( cls.list2.pass2[i,] == "WT", abnormality, "WT" )
# phen.names.pass2[i] = paste(phen.names.pass2[i], "-opposite")
# } else{
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.phen.pass2[i] = perf.auc$A
# }
} else {
roc <- "-"
p.val <- "-"
roc.list.phen.pass2[i] = NA
}
print(paste("ROC=", roc, " p-val=", p.val))
# p.val.list[i] = p.val
phen.descs.pass2[i] <- paste(roc, " (", p.val, ")")
}
# browser()
g.order.pass2 = c(1, order(roc.list.phen.pass2[2:n.phen.pass2], decreasing=TRUE, na.last=TRUE)+1) # skip PATHWAY.MUT
phen.descs2.pass2 = phen.descs.pass2[g.order.pass2]
cls.list2.pass2 = cls.list2.pass2[g.order.pass2,]
phen.names.pass2 = phen.names.pass2[g.order.pass2]
# browser()
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass2 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(as.vector(cls.list2.pass2))
cls.phen2.pass2 <- classes
cls.labels2.pass2 <- match(cls.list2.pass2, cls.phen2.pass2)
} else {
for (kk in 1:length(cls.list2.pass2[, 1])) {
classes <- unique(cls.list2.pass2[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass2 <- c(cls.phen2.pass2, classes)
cls.labels2.pass2[kk,] <- match(cls.list2.pass2[kk,], classes)
}
}
cls.labels2.pass2 = cls.labels2.pass2[1:n.phen.pass2,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass2 <- unlist(cls.phen2.pass2)
# colors.list <- ifelse(unlist(cls.phen2) == target.class,
# ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
# ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
colors.list.pass2 = rep( "gray", length(phen.list.pass2))
colors.list.pass2[phen.list.pass2=="MUT"] = cls.phen.colors[1]
colors.list.pass2[phen.list.pass2=="DEL"] = cls.phen.colors[3]
colors.list.pass2[phen.list.pass2=="AMP"] = cls.phen.colors[4]
colors.list.pass2[phen.list.pass2=="ALT"] = cls.phen.colors[5]
MSIG.HeatMapPlot.9(V = m2.pass2, row.names = model.names.pass2,
row.names2 = model.descs2.pass2,
col.labels = cls.labels2.pass2,
col.classes = cls.phen2.pass2,
phen.cmap = colors.list.pass2, phen.names = phen.names.pass2,
phen.names2 = phen.descs2.pass2,
col.names = sample.names2.pass2, main = paste(tissue, "- 2nd Pass: only p-values <=", p.val.threshold,"from 1st pass (REC)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
### 3rd Pass ###
# Pass 3: take the sample/phenotype ordering from passes 1-2, re-score ALL phenotypes
# (not just the user-supplied genes) against the top-ranked signature, then keep the
# most significant ones (p-value threshold, relaxed in steps 0.1 -> 0.15 -> 0.2).
print( "--- Begin Pass 3 ---")
# browser()
m2.pass3 = m2.pass2
model.names.pass3 = rownames(m2.pass3)
sample.names2.pass3 = colnames(m2.pass3)
# model.descs2.pass3 = model.descs2.pass2
# Hard cap on how many phenotype rows pass 3 may display.
n.phen.pass3 = 40
# phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
# Re-apply both passes' sample orderings to the FULL phenotype tables.
cls.list2.pass3 = cls.list[, s.order.pass1][, s.order.pass2]
cls.labels2.pass3 = cls.labels[, s.order.pass1][, s.order.pass2]
# browser()
phen.names.pass3 = phen.names
# Top-ranked signature's score, rescaled to [0, 1], is the reference for every ROC below.
m.score.pass3 <- m2.pass3[1,]
m.score.norm.pass3 <- (m.score.pass3 - min(m.score.pass3))/(max(m.score.pass3) - min(m.score.pass3))
roc.list.phen.pass3 = vector(mode="numeric", length=n.phen)
phen.descs.pass3 = vector(mode="character", length=n.phen)
p.val.list.phen.pass3 = vector(mode="numeric", length=n.phen)
# For each phenotype row: binarize (WT = 0, anything else = 1) and compute ROC AUC
# + p-value of the top signature's score as a classifier for that aberration.
for( i in 1:n.phen ){
bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
if (length(unique(bin.gene)) > 1) {
perf.auc <- roc.area(bin.gene, m.score.norm.pass3)
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1- perf.auc$p.value, digits=3)
# abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
# cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
# phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
# roc.list.phen.pass3[i] = 1-perf.auc$A
# p.val.list.phen.pass3[i] = 1- perf.auc$p.value
# } else{
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.phen.pass3[i] = perf.auc$A
p.val.list.phen.pass3[i] = perf.auc$p.value
# }
} else {
# Degenerate phenotype (all samples in one class): ROC undefined.
roc <- NA
p.val <- NA
roc.list.phen.pass3[i] = NA
p.val.list.phen.pass3[i] = NA
}
# print(paste("ROC=", roc, " p-val=", p.val))
# p.val.list[i] = p.val
phen.descs.pass3[i] <- paste(roc, " (", p.val, ")")
}
# browser()
# Select phenotypes below the p-value threshold; rows 1-2 (PATHWAY.MUT, COPY.NUMBER)
# are always kept, hence the [-1:-2] exclusions and the "+2" offsets.
p.val.threshold = 0.1
len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
if( len == 2 ){
p.val.threshold = 0.15
len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
}
if( len == 2 ){
p.val.threshold = 0.2
len = length(which(p.val.list.phen.pass3[-1:-2] <= p.val.threshold))+2
}
if( len>40 ) len = 40
# g.order.pass3.1 = c(1, 2, order(p.val.list.phen.pass3[3:n.phen], decreasing=FALSE, na.last=TRUE)+2 )
g.order.pass3 = c(1, 2, order(p.val.list.phen.pass3[-1:-2], decreasing=FALSE, na.last=TRUE)+2 )[1:len] # skip PATHWAY.MUT and COPY.NUMBER
phen.descs2.pass3 = phen.descs.pass3[g.order.pass3]
cls.list2.pass3 = cls.list2.pass3[g.order.pass3,]
cls.labels2.pass3 = cls.labels2.pass3[g.order.pass3,]
phen.names.pass3 = phen.names.pass3[g.order.pass3]
# Rebuild the summary rows from the retained phenotypes:
# row 1 = any aberration (MUT or DEL/AMP), row 2 = any copy-number alteration.
cls.list.mut = ifelse(cls.list2.pass3[-1:-2,] == "MUT", 1, 0)
cls.list.alt = ifelse(cls.list2.pass3[-1:-2,] == "DEL" | cls.list2.pass3[-1:-2,] == "AMP", 1, 0)
# browser()
if( !is.vector(cls.list.alt) ){
cls.list.mut.sum = apply(cls.list.mut, MARGIN=2, FUN=sum)
cls.list.alt.sum = apply(cls.list.alt, MARGIN=2, FUN=sum)
bin.class.pass3 = cls.list.mut.sum + cls.list.alt.sum
bin.class.pass3 = ( bin.class.pass3 - min(bin.class.pass3))/(max(bin.class.pass3) - min(bin.class.pass3))
cls.list.mut.sum = ifelse(cls.list.mut.sum + cls.list.alt.sum > 0, 1, 0)
cls.list2.pass3[1,] = ifelse( cls.list.mut.sum >= 1, "MUT", "WT")
cls.list2.pass3[2,] = ifelse( cls.list.alt.sum >= 1, "ALT", "WT")
} else{
# Only a single retained phenotype: cls.list.mut/alt are plain vectors.
cls.list2.pass3[2,] = ifelse(cls.list.alt == 1, "ALT", "WT")
bin.class.pass3 = cls.list.mut+cls.list.alt
bin.class.pass3 = ( bin.class.pass3 - min(bin.class.pass3))/(max(bin.class.pass3) - min(bin.class.pass3))
cls.list2.pass3[1,] = ifelse(bin.class.pass3 > 0 , "MUT", "WT")
}
# browser()
# Re-score the two rebuilt summary rows against the top signature, then score
# every model/signature against the combined aberration vector (bin.class.pass3).
for( i in 1:2 ){ # Recalculate ROC and p-value for PATHWAY.MUT and COPY.NUMBER
bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
if (length(unique(bin.gene)) > 1) {
# NOTE(review): uses rec.area() here while the pass-3 phenotype loop above uses
# roc.area(). The plot title says "(REC)", so this looks deliberate, but rec.area
# is not defined in this chunk — confirm it exists and that the mix is intended.
perf.auc <- rec.area(bin.gene, m.score.norm.pass3)
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1- perf.auc$p.value, digits=3)
# abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
# cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
# phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
# roc.list.phen.pass3[i] = 1-perf.auc$A
# p.val.list.phen.pass3[i] = 1- perf.auc$p.value
# } else{
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
roc.list.phen.pass3[i] = perf.auc$A
p.val.list.phen.pass3[i] = perf.auc$p.value
# }
} else {
roc <- NA
p.val <- NA
roc.list.phen.pass3[i] = NA
p.val.list.phen.pass3[i] = NA
}
print(paste("ROC=", roc, " p-val=", p.val))
# p.val.list[i] = p.val
phen.descs2.pass3[i] <- paste(roc, " (", p.val, ")")
}
# browser()
# Per-model REC score vs. the combined pass-3 aberration vector; the formatted
# "AUC (p)" string becomes the row annotation in the pass-3 heatmap.
model.descs2.pass3 = vector(length = n.models, mode="character")
for (i in 1:n.models) {
m.score <- m2.pass3[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass3)) > 1) {
perf.auc <- rec.area(bin.class.pass3, m.score.norm)
roc <- signif(perf.auc$A, digits=3)
p.val <- signif(perf.auc$p.value, digits=3)
# NOTE(review): these store pass-3 results into the *pass-2* vectors
# (roc.list.pass2 / p.val.list.pass2), clobbering them. Likely a copy-paste
# remnant from the pass-2 loop — probably meant to be pass-3 vectors. Confirm
# nothing downstream still reads the pass-2 values.
roc.list.pass2[i] = perf.auc$A
p.val.list.pass2[i] = perf.auc$p.value
} else {
roc <- p.val <- "-"
roc.list.pass2[i] = NA
p.val.list.pass2[i] = NA
}
print(paste("REC=", roc, " p-val=", p.val))
model.descs2.pass3[i] <- paste(roc, " (", p.val, ")")
}
# Rebuild class labels/levels per phenotype row (order may have changed above),
# assign the fixed aberration color map, draw the pass-3 heatmap, and optionally
# write the projection back out as a GCT file.
cls.phen2.pass3 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(as.vector(cls.list2.pass3))
cls.phen2.pass3 <- classes
cls.labels2.pass3 <- match(cls.list2.pass3, cls.phen2.pass3)
} else {
# browser()
for (kk in 1:length(cls.list2.pass3[, 1])) {
# browser()
classes <- unique(cls.list2.pass3[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass3 <- c(cls.phen2.pass3, classes)
cls.labels2.pass3[kk,] <- match(cls.list2.pass3[kk,], classes)
}
}
# cls.labels2.pass3 = cls.labels2.pass3[1:n.phen.pass3,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass3 <- unlist(cls.phen2.pass3)
# colors.list <- ifelse(unlist(cls.phen2) == target.class,
# ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
# ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
# Fixed color scheme: WT and anything unrecognized stays gray.
colors.list.pass3 = rep( "gray", length(phen.list.pass3))
colors.list.pass3[phen.list.pass3=="MUT"] = cls.phen.colors[1]
colors.list.pass3[phen.list.pass3=="DEL"] = cls.phen.colors[3]
colors.list.pass3[phen.list.pass3=="AMP"] = cls.phen.colors[4]
colors.list.pass3[phen.list.pass3=="ALT"] = cls.phen.colors[5]
phen.names.pass3[1] = "PATHWAY.MUT+COPY.NUMBER"
# browser()
MSIG.HeatMapPlot.9(V = m2.pass3, row.names = model.names.pass3,
row.names2 = model.descs2.pass3,
col.labels = cls.labels2.pass3,
col.classes = cls.phen2.pass3,
phen.cmap = colors.list.pass3, phen.names = phen.names.pass3,
phen.names2 = phen.descs2.pass3,
col.names = sample.names2.pass3, main = paste(tissue, "- 3rd Pass: Top signature from 2nd pass with all genes ( p-value <=", p.val.threshold, ") (REC)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
dev.off()
if (!is.na(output.dataset)) {
# NOTE(review): m2, sample.names2, model.names2 and model.descs2 are not defined
# anywhere in the visible pass-suffixed code of this function — if they don't come
# from earlier (unseen) lines, this writes stale or global objects. Verify these
# shouldn't be the pass-3 (or pass-2) variants.
V.GCT <- m2
colnames(V.GCT) <- sample.names2
row.names(V.GCT) <- model.names2
write.gct(gct.data.frame = V.GCT, descs = model.descs2, filename =output.dataset)
}
}
# Sort a projection (signatures x samples) by score and genomic aberrations,
# using mutual information (mutual.inf.2) rather than ROC AUC for ranking.
# See the original header comment between the signature and the body below.
# Parameters (as used in the visible body):
#   input.ds           - GCT file of signature scores (rows = signatures, cols = samples)
#   input.cls          - CLS phenotype file (mutation / copy-number status per sample)
#   tissue             - label used in plot titles
#   results.dir        - directory prefix for the output PDF
#   normalize.score    - if TRUE, normalize each row of the score matrix
#   normalization.type - "zero.one", "z.score" or "r.z.score" (median/MAD)
#   model              - fallback signature name used when MI cannot be computed
#   target.phen/class  - phenotype targeting (target.phen switches how the summary
#                        aberration vector is built)
#   user.colors        - optional explicit phenotype color palette
#   decreasing.order   - sort direction for the fallback model-based ordering
#   output.dataset     - optional GCT output path
#   char.rescale, cmap.type, row.norm - heatmap rendering options
#   u.gene.names.known - gene/aberration phenotype names to use in pass 1
OPAM.sort.projection.by.score.6 <- function(
input.ds,
input.cls,
tissue = "NA",
results.dir,
normalize.score = T,
normalization.type = "zero.one",
model = "NA",
target.phen = NA,
target.class = NA,
user.colors = NA,
decreasing.order = T,
output.dataset = NA,
char.rescale = 1,
cmap.type = 3,
row.norm = T,
u.gene.names.known = "NA"
)
# Calls MSIG.HeatMapPlot.9 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2 does.
# However, it *will* use "model" if it cannot calculate p-values on the gene signatures, which
# happens when every cell line has a genomic aberration.
#
# Runs 3 passes on the data:
# 1st pass: looks at the genes and copy number alterations specified by u.gene.names.known
# 2nd pass: looks at only the top abnormalities (using a p-value cutoff) from the 1st pass, and adjusts
# the PATHWAY.MUT vector accordingly (only according to the genes, not by copy number data)
# 3rd pass: Takes the winning signature from the 2nd pass and then looks all the genes available
#
# Very similar to OPAM.sort.projection.by.score.4, however this version uses mutual.inf instead of
# roc.area to calculate mutual information scores and p-values for PATHWAY.MUT, the vector of total genomic aberrations
# in all samples
{
library(gtools)
library(verification)
library(ROCR)
library(MASS)
library(RColorBrewer)
library(heatmap.plus)
dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
m <- data.matrix(dataset$ds)
model.names <- dataset$row.names
# model.descs <- dataset$descs
Ns <- length(m[1,])  # number of samples (columns)
dim(m)
sample.names <- dataset$names
n.models <- length(m[,1])  # number of signatures (rows)
temp <- strsplit(input.ds, split="/") # Extract test file name
s <- length(temp[[1]])
test.file.name <- temp[[1]][s]
temp <- strsplit(test.file.name, split=".gct")
test.file.prefix <- temp[[1]][1]
# Empirical character-size scaling for heatmap labels.
char.res <- 0.013 * n.models + 0.65
# normalize scores
# Row-wise normalization of the score matrix, per normalization.type:
# "zero.one" -> min-max to [0,1]; "z.score" -> mean/sd; "r.z.score" -> robust median/MAD.
if (normalize.score == T) {
if (normalization.type == "zero.one") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
}
} else if (normalization.type == "z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
}
} else if (normalization.type == "r.z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
}
}
}
CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
cls.labels <- CLS$class.v
cls.phen <- CLS$phen
cls.list <- CLS$class.list
# n.phen = number of phenotype rows (1 when the CLS carries a single vector).
if (is.vector(cls.labels)) {
n.phen <- 1
} else {
n.phen <- length(cls.labels[,1])
}
# Phenotype color palette: user-supplied > embedded in CLS > large RColorBrewer fallback.
if (!is.na(user.colors[1])) {
c.test <- user.colors
} else {
if (!is.null(CLS$col.phen)) {
c.test <- CLS$col.phen
} else {
c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"))
}
}
if (!is.null(CLS$phen.names)) {
phen.names <- CLS$phen.names
} else {
phen.names <- "NA"
}
cls.phen.index <- unlist(cls.phen)
cls.phen.colors <- c.test[1:length(cls.phen.index)]
# print("cls.phen.colors:")
# print(cls.phen.colors)
# Count classes per phenotype row (used by the heatmap annotation machinery).
n.classes <- vector(length=n.phen, mode="numeric")
if (n.phen == 1) {
max.classes <- length(cls.phen)
n.classes[1] <- max.classes
} else {
max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
for (i in 1:n.phen) {
n.classes[i] <- length(cls.phen[[i]])
}
}
print("--- Begin Pass 1 ---")
# Pass 1: restrict to PATHWAY.MUT plus the user-supplied aberration phenotypes,
# build a combined aberration vector, and rank every signature by its mutual
# information with that vector.
# model.names.original = model.names
# m.original = m
phen.pass1 = c( "PATHWAY.MUT", u.gene.names.known)
n.phen.pass1 = length(u.gene.names.known)+1
ind.phen.pass1 = which( phen.names %in% phen.pass1 )
phen.pass1 = phen.names[ind.phen.pass1]
phen.pass1[1] = "SUMMARY"
MI.list.pass1 = vector( length=n.models, mode="numeric" )
# p.val.list.pass1 = vector( length=n.models, mode="numeric" )
cls.list.pass1 = cls.list[ind.phen.pass1,]
cls.list.pass1.2 = ifelse(cls.list.pass1 == "WT", 0, 1)
cls.labels.pass1 = cls.labels[ind.phen.pass1,]
# NOTE(review): live browser() call — this will drop into the debugger on every
# non-interactive run. Presumably a leftover debug breakpoint; remove before production use.
browser()
if (!is.na(target.phen)) {
# Combined aberration count per sample (excluding the SUMMARY row itself).
bin.class.pass1 = apply( cls.list.pass1.2[-1,], MARGIN=2, FUN=sum)
# Normalize bin.class.pass1
if( length(unique(bin.class.pass1)) > 1){
bin.class.pass1 = ( bin.class.pass1 - min(bin.class.pass1))/(max(bin.class.pass1) - min(bin.class.pass1))
} else if ( length(unique(bin.class.pass1)) == 1){
# NOTE(review): assigns `bin.class`, not `bin.class.pass1` — the degenerate-case
# branch therefore has no effect on bin.class.pass1. Looks like a typo; confirm.
bin.class = rep(1, length(cls.list[1,]))
}
# bin.class.pass1 <- ifelse(cls.list[target.phen,] == target.class, 1, 0)
# bin.class.pass1 <- ifelse(apply( cls.list.pass1.2[2:n.phen.pass1,], MARGIN=2, FUN=sum) > 0, 1, 0)
# cls.labels.pass1[1,] = bin.class.pass1
cls.list.pass1[1,] = ifelse(bin.class.pass1 > 0, "MUT", "WT")
# if( length(unique(bin.class.pass1)) == 1) {
# cls.list.3 = ifelse( cls.list == "DEL" | cls.list == "AMP", 1, 0)
# copy.number.pass1 = ifelse( apply(cls.list.3[3:n.phen,], MARGIN=2, FUN=sum) >= 1, "ALT", "WT")
# copy.class.pass1 = ifelse( copy.number.pass1 == "ALT", 1, 0)
# bin.class.pass1 = copy.class.pass1
# print( "Calculating p-value with respect to copy number alterations")
# }
} else {
# NOTE(review): cls.list2 is not defined at this point in the visible body —
# this branch would fail with "object 'cls.list2' not found" unless it is set
# elsewhere. Verify the intended reference (perhaps cls.list.pass1?).
bin.class.pass1 <- ifelse(cls.list[1,] == cls.list2[1,1], 1, 0)
}
# browser()
# MI.ref.models.pass1 = mutual.inf.2(bin.class.pass1, bin.class.pass1)$MI
# print(paste("MI.ref.models.pass1 =", MI.ref.models.pass1))
# browser()
# Mutual information of each (min-max normalized) signature vs. the aberration vector.
model.descs2.pass1 = vector(length = n.models, mode="character")
for (i in 1:n.models) {
m.score <- m[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass1)) > 1) {
# browser()
MI <- (mutual.inf.2(bin.class.pass1, m.score.norm)$MI)# /MI.ref.models.pass1
# roc <- signif(perf.auc$A, digits=3)
# p.val <- signif(perf.auc$p.value, digits=3)
MI.list.pass1[i] = MI
MI.signif <- signif(MI, digits=3)
# p.val.list.pass1[i] = perf.auc$p.value
} else {
MI.signif <- "-"
MI.list.pass1[i] = NA
# p.val.list.pass1[i] = NA
}
# browser()
# print(paste("REC=", roc, " p-val=", p.val))
print(paste( format(rownames(m)[i], width=30), "mutual.inf =", MI.signif))
# browser()
model.descs2.pass1[i] <- paste(MI.signif)
}
# browser()
# Ordering: if MI could not be computed, fall back to sorting by the user-named
# "model" signature and correlation to it; otherwise rank rows by MI and sort
# samples by the top-ranked signature's score.
if( is.na(MI.list.pass1[1]) ){
loc <- match(model, model.names)
s.order.pass1 <- order(m[loc,], decreasing = decreasing.order)
# loc = s.order.pass1[1]
# s.order.pass1 = 1:Ns
m2.pass1 <- m[, s.order.pass1]
correl <- cor(t(m2.pass1))[, loc]
m.order.pass1 <- order(correl, decreasing=T)
# m.order.pass1 = 1:n.models
m2.pass1 <- m2.pass1[m.order.pass1, ]
} else{
m.order.pass1 = order(MI.list.pass1, decreasing=TRUE, na.last=TRUE)
#m.order.pass1 = 1:n.models
m2.pass1 <- m[m.order.pass1, ]
s.order.pass1 <- order(m2.pass1[1,], decreasing = TRUE )
# s.order.pass1 = 1:Ns
m2.pass1 <- m2.pass1[, s.order.pass1]
}
bin.class.pass1 = bin.class.pass1[s.order.pass1]
# NOTE(review): both branches above have already applied m.order.pass1 to the rows
# (L20194 / L20198), so this line re-permutes the already-reordered rows a second
# time, scrambling the ranking (it is a no-op only if m.order.pass1 is identity).
# Looks like a duplicated statement — confirm and consider removing.
m2.pass1 <- m2.pass1[m.order.pass1, ]
model.descs2.pass1 = model.descs2.pass1[m.order.pass1]
sample.names2.pass1 <- colnames(m2.pass1)
model.names.pass1 <- rownames(m2.pass1)
# browser()
# Apply the pass-1 sample ordering to the phenotype tables.
if (is.vector(cls.labels)) {
cls.labels2.pass1 <- cls.labels.pass1[s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[s.order.pass1]
} else {
cls.labels2.pass1 <- cls.labels.pass1[, s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[, s.order.pass1]
}
#browser()
# Per-phenotype mutual information vs. the top-ranked signature, then reorder
# phenotype rows by MI, rebuild class labels/colors, and draw the Phase-1 heatmap.
# pathway.name <- "KRAS_ALL_UP"
# pathway <- m[1,]
# pathway0 <- ifelse(pathway < median(pathway), 0, 1) # disctretized version
# MI.ref.genes.pass1 <- mutual.inf.2(m[1,], m[1,])$MI
# browser()
# Reference score = top row of the reordered matrix, min-max normalized.
m.score.pass1 <- m2.pass1[1,]
m.score.norm.pass1 <- (m.score.pass1 - min(m.score.pass1))/(max(m.score.pass1) - min(m.score.pass1))
# m.score.pass1 = ifelse( m.score.pass1 < median(m.score.pass1), -1, 1) # discretized version
# MI.ref.genes.pass1 <- mutual.inf.2(m.score.norm.pass1, m.score.norm.pass1)$MI
# print(paste("MI.ref.genes.pass1 =", MI.ref.genes.pass1))
MI.list.phen.pass1 = vector(mode="numeric", length=n.phen.pass1)
# p.val.list.phen.pass1 = vector(mode="numeric", length=n.phen)
phen.descs.pass1 = vector(mode="character", length=n.phen.pass1)
# Row 1 (SUMMARY) uses the continuous combined-aberration vector.
if( length(unique(bin.class.pass1)) > 1){
MI <-(mutual.inf.2(bin.class.pass1, m.score.norm.pass1)$MI)#/MI.ref.genes.pass1
MI.signif <- signif(MI, digits=3)
MI.list.phen.pass1[1] = MI
} else{
MI.signif <- "-"
MI.list.phen.pass1[1] = NA
}
print(paste(format(phen.pass1[1], width=12), "mutual.inf =", MI.signif))
phen.descs.pass1[1] <- paste(MI.signif)
# print(m.score.pass1)
# Remaining rows: binarize (WT vs. aberrant) and compute MI individually.
for( i in 2:n.phen.pass1 ){
# browser()
bin.gene = ifelse( cls.list2.pass1[i,]=="WT", 0, 1)
# add random noise so the quantile calculation in mutual.inf doesn't return 0
if (length(unique(bin.gene)) > 1) {
# print(bin.gene)
# browser()
MI <- (mutual.inf.2(bin.gene, m.score.norm.pass1)$MI)#/MI.ref.genes.pass1
MI.signif <- signif(MI, digits=3)
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1- perf.auc$p.val, digits=3)
# abnormality = unique(cls.list2.pass1[i,])[which(unique(cls.list2.pass1[i,]) != "WT")]
## cls.list2.pass1[i,] = ifelse( cls.list2.pass1[i,] == "WT", abnormality, "WT" )
# phen.names[i] = paste(phen.names[i], "-opposite", sep="")
# roc.list.phen.pass1[i] = 1 - perf.auc$A
# p.val.list.phen.pass1[i] = perf.auc$p.val # Don't want to use these "opposite" genomic aberrations in Pass 2
# # because they make PATHWAY.MUT+COPY.NUMBER too dense
# } else{
# roc <- signif(perf.auc$A, digits=3)
# p.val <- signif(perf.auc$p.value, digits=3)
MI.list.phen.pass1[i] = MI
# p.val.list.phen.pass1[i] = perf.auc$p.val
# }
} else {
MI.signif <- "-"
# p.val <- "-"
MI.list.phen.pass1[i] = NA
# p.val.list.phen.pass1[i] = NA
}
# browser()
# print(paste("ROC=", roc, " p-val=", p.val))
print(paste(format(phen.pass1[i], width=12), "mutual.inf =", MI.signif))
# p.val.list[i] = p.val
phen.descs.pass1[i] <- paste(MI.signif)
}
# browser()
#g.order.pass1 = 1:n.phen.pass1
g.order.pass1 = c(1, order(MI.list.phen.pass1[2:n.phen.pass1], decreasing=TRUE, na.last=TRUE)+1) # keep PATHWAY.MUT as 1
# MI.list.phen.pass1 = MI.list.phen.pass1[g.order.pass1]
# p.val.list.phen.pass1 = p.val.list.phen.pass1[g.order.pass1]
MI.list.phen.pass1 = MI.list.phen.pass1[g.order.pass1]
phen.descs2.pass1 = phen.descs.pass1[g.order.pass1]#[1:n.phen.pass1]
cls.list2.pass1 = cls.list2.pass1[g.order.pass1,]#[1:n.phen.pass1,]
phen.names.pass1 = phen.pass1[g.order.pass1]#[1:n.phen.pass1]
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass1 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(cls.list2.pass1)
cls.phen2.pass1 <- classes
cls.labels2.pass1 <- match(cls.list2.pass1, cls.phen2.pass1)
} else {
for (kk in 1:length(cls.list2.pass1[, 1])) {
classes <- unique(cls.list2.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
cls.labels2.pass1[kk,] <- match(cls.list2.pass1[kk,], classes)
}
}
cls.labels2.pass1 = cls.labels2.pass1[1:n.phen.pass1,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass1 <- unlist(cls.phen2.pass1)
# Fixed aberration color map; gray for WT/unknown classes.
colors.list.pass1 = rep( "gray", length(phen.list.pass1))
colors.list.pass1[phen.list.pass1=="MUT"] = cls.phen.colors[1]
colors.list.pass1[phen.list.pass1=="DEL"] = cls.phen.colors[3]
colors.list.pass1[phen.list.pass1=="AMP"] = cls.phen.colors[4]
colors.list.pass1[phen.list.pass1=="ALT"] = cls.phen.colors[5]
# browser()
# colors.list.pass1[1,] = grey(bin.class.pass1)
# print("cls.phen2:")
# print(unlist(cls.phen2))
#
# print("cls.phen:")
# print(unlist(cls.phen))
#
# print("colors.list:")
# print(colors.list)
# NOTE(review): another live browser() breakpoint left in — remove for non-interactive runs.
browser()
filename <- paste(results.dir, test.file.prefix, ".Phase1-2.MI", sep="")
# pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 9)
# Open the PDF device that receives both the Phase-1 and Phase-2 heatmaps.
pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 17 )
# windows(width=12, height=8)
MSIG.HeatMapPlot.10(V = m2.pass1,
pathway.mut = bin.class.pass1,
row.names = model.names.pass1,
row.names2 = model.descs2.pass1,
col.labels = cls.labels2.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list.pass1,
phen.names = phen.names.pass1,
phen.names2 = phen.descs2.pass1,
col.names = sample.names2.pass1,
main = paste(tissue, "- Phase 1: Known KRAS Pathway Abnormalities (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
### Begin Pass 2 ###
# Pass 2: keep only phenotypes whose pass-1 MI clears a threshold (relaxed in
# steps through MI.thresholds until at least one survives), rebuild the summary
# aberration vector from the survivors, and re-rank all signatures by MI.
print( "--- Begin Phase 2 ---")
# browser()
MI.thresholds = c(0.2, 0.1, 0.08, 0.05, 0.03, 0.025, 0.02, 0.015, 0.01, 0)
# MI.threshold = 0.03
ind.top.MI = vector(mode="integer")
# NOTE(review): MI.i starts at 1 and is incremented BEFORE the first use, so the
# first threshold actually tried is MI.thresholds[2] (0.1); the 0.2 entry is never
# used. Confirm whether that is intended or MI.i should start at 0.
MI.i = 1
while( length(ind.top.MI) < 1)
{
MI.i = MI.i + 1
ind.top.MI = which( MI.list.phen.pass1[-1] >= MI.thresholds[MI.i] ) + 1
}
ind.MI.threshold = c(1, ind.top.MI)
# ind.top.MI = which(MI.list.phen.pass1[-1] >= MI.threshold.vector[1] )+1
# if( length(ind.top.MI) > 0 ){
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1 ){
# MI.threshold = 0.025
# ind.top.MI = which(MI.list.phen.pass1[-1] >= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1 ){
# MI.threshold = 0.02
# ind.top.MI = which(MI.list.phen.pass1[-1] >= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1 ){
# MI.threshold = 0.015
# ind.top.MI = which(MI.list.phen.pass1[-1] >= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1 ){
# MI.threshold = 0.01
# ind.top.MI = which(MI.list.phen.pass1[-1] >= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1 ) {
# MI.threshold = 0
# ind.top.MI = which(MI.list.phen.pass1[-1] > 0 )+1
# ind.MI.threshold = c(1, ind.top.MI) }
# if( length(ind.top.MI) < 1){
# MI.threshold = 0.2
# ind.top.MI = which(MI.list.phen.pass1[2:n.phen.pass1] <= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1){
# MI.threshold = 0.25
# ind.top.MI = which(MI.list.phen.pass1[2:n.phen.pass1] <= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length(ind.top.MI) < 1){
# MI.threshold = 0.3
# ind.top.MI = which(MI.list.phen.pass1[2:n.phen.pass1] <= MI.threshold )+1
# ind.MI.threshold = c(1, ind.top.MI)
# }
# if( length( ind.top.MI) < 1 ) {
# ind.top = which(!is.na(MI.list.phen.pass1[-1]))+1
# ind.MI.threshold = c( 1, ind.top )
# }
n.phen.pass2 = length(ind.MI.threshold)
# phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
cls.list2.pass2 = cls.list2.pass1[ind.MI.threshold,]
phen.names.pass2 = phen.names.pass1[ind.MI.threshold]
# phen.names.pass2[1] = "PATHWAY.MUT + COPY.NUMBER"
cls.labels2.pass2 = cls.labels2.pass1[ind.MI.threshold,]
# browser()
# Rebuild the summary row (row 1) from the surviving phenotypes only.
cls.list2.pass2.2 = ifelse( cls.list2.pass2 == "WT", 0, 1)
cls.list2.pass2.3 = ifelse( cls.list2.pass2 == "DEL" | cls.list2.pass2 == "AMP", 1, 0)
if( n.phen.pass2 > 2 ){
pathway.mut.pass2 = apply(cls.list2.pass2.2[2:n.phen.pass2,], MARGIN=2, FUN=sum)
bin.class.pass2 = pathway.mut.pass2/length(pathway.mut.pass2)
bin.class.pass2 = ( bin.class.pass2 - min(bin.class.pass2))/(max(bin.class.pass2) - min(bin.class.pass2))
bin.class.pass2.noisy = bin.class.pass2
cls.list2.pass2[1,] = ifelse( bin.class.pass2 > 0, "MUT", "WT")
# copy.number.pass2 = ifelse( apply(cls.list2.pass2.3[3:n.phen.pass2,], MARGIN=2, FUN=sum) >= 1, "ALT", "WT")
} else{
# Exactly one surviving phenotype: summary row mirrors it directly.
pathway.mut.pass2 = ifelse( cls.list2.pass2.2[2,] == 1, "MUT", "WT")
bin.class.pass2 = ifelse( pathway.mut.pass2 == "MUT", 1, 0 ) #+ runif(Ns, min=-.05, max=.05)
# bin.class.pass2.noisy = bin.class.pass2 + runif(Ns, min=-.05, max=.05)
# bin.class.pass2.noisy = ( bin.class.pass2.noisy - min(bin.class.pass2.noisy))/(max(bin.class.pass2.noisy) - min(bin.class.pass2.noisy))
cls.list2.pass2[1,] = pathway.mut.pass2
# copy.number.pass2 = ifelse( cls.list2.pass2.3[3,] == 1, "ALT", "WT")
}
# browser()
# bin.class.pass2 =
# copy.class.pass2 = ifelse( copy.number.pass2 == "ALT", 1, 0)
# if( length(unique(bin.class.pass2)) == 1) {
# bin.class.pass2 = copy.class.pass2
# print( "Calculating p-value with respect to copy number alterations")
# }
# cls.list2.pass2[2,] = copy.number.pass2
MI.list.pass2 = vector( length=n.models, mode="numeric" )
# MI.ref.models.pass2 = mutual.inf.2(bin.class.pass2, bin.class.pass2)$MI
# print(paste("MI.ref.models.pass2 =", MI.ref.models.pass2))
# p.val.list.pass2 = vector( length=n.models, mode="numeric" )
# browser()
# MI of every signature (rows of the pass-1-ordered matrix) vs. the rebuilt summary vector.
model.descs2.pass2 = vector(length = n.models, mode="character")
for (i in 1:n.models) {
m.score <- m2.pass1[i,]
m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
# browser()
if (length(unique(bin.class.pass2)) > 1) {
MI <- (mutual.inf.2(bin.class.pass2, m.score.norm)$MI)#/MI.ref.models.pass2
MI.signif <- signif(MI, digits=3)
# p.val <- signif(perf.auc$p.value, digits=3)
MI.list.pass2[i] = MI
# p.val.list.pass2[i] = perf.auc$p.value
} else {
MI.signif <- "-"
MI.list.pass2[i] = NA
# p.val.list.pass2[i] = NA
}
print(paste(format(rownames(m2.pass1)[i], width=30),"mutual.inf =", MI.signif))
model.descs2.pass2[i] <- paste(MI.signif)
}
# browser()
# Reorder rows by pass-2 MI, sort samples by the new top signature, recompute
# per-phenotype MI against it, then rebuild labels/colors and draw the Phase-2 heatmap.
m.order.pass2 = order(MI.list.pass2, decreasing=TRUE, na.last=TRUE)
# m.order.pass2 = order(p.val.list.pass2, decreasing=FALSE, na.last=TRUE)
model.descs2.pass2 = model.descs2.pass2[m.order.pass2]
# loc.pass2 = m.order.pass2[1]
m2.pass2 <- m2.pass1[m.order.pass2, ]
model.names.pass2 <- rownames(m2.pass2)
# print(c("loc.pass2:", loc.pass2))
s.order.pass2 <- order(m2.pass2[1,], decreasing = TRUE)
m2.pass2 <- m2.pass2[, s.order.pass2]
bin.class.pass2 = bin.class.pass2[s.order.pass2]
# bin.class.pass2.noisy = bin.class.pass2.noisy[s.order.pass2]
sample.names2.pass2 <- colnames(m2.pass2)
if (is.vector(cls.labels)) {
cls.labels2.pass2 <- cls.labels2.pass2[s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[s.order.pass2]
} else {
cls.labels2.pass2 <- cls.labels2.pass2[, s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[, s.order.pass2]
}
# browser()
# browser()
m.score.pass2 <- m2.pass2[1,]
m.score.norm.pass2 <- (m.score.pass2 - min(m.score.pass2))/(max(m.score.pass2) - min(m.score.pass2))
# MI.ref.genes.pass2 = mutual.inf.2(m.score.norm.pass2, m.score.norm.pass2)$MI
# print(paste("MI.ref.genes.pass2 =", MI.ref.genes.pass2))
MI.list.phen.pass2 = vector(mode="numeric", length=n.phen.pass2)
phen.descs.pass2 = vector(mode="character", length=n.phen.pass2)
if( length(unique(bin.class.pass2)) > 1){
MI <- (mutual.inf.2(bin.class.pass2, m.score.norm.pass2)$MI)#/MI.ref.genes.pass2
MI.signif <- signif(MI, digits=3)
# NOTE(review): writes to MI.list.phen.pass1[1] instead of MI.list.phen.pass2[1]
# (the matching else branch below writes pass2). Almost certainly a copy-paste typo:
# MI.list.phen.pass2[1] is left at 0 on this path. Confirm and fix.
MI.list.phen.pass1[1] = MI
} else{
MI.signif <- "-"
MI <- NA
MI.list.phen.pass2[1] = MI
}
print(paste(format(phen.names.pass2[1], width=12), "mutual.inf =", MI.signif))
phen.descs.pass2[1] <- paste(MI.signif)
if( n.phen.pass2 == 2 ){
# Only the summary row + one phenotype: that phenotype inherits the summary MI.
phen.descs.pass2[2] <- paste(MI.signif)
MI.list.phen.pass2[2] = MI
g.order.pass2 = c(1,2)
} else{
for( i in 2:n.phen.pass2 ){
bin.gene = ifelse( cls.list2.pass2[i,]=="WT", 0, 1)
if (length(unique(bin.gene)) > 1) {
MI <- (mutual.inf.2(bin.gene, m.score.norm.pass2)$MI)#/MI.ref.genes.pass2
# if( perf.auc$A < 0.5 ){
## browser()
# roc = signif(1 - perf.auc$A, digits=3)
# p.val = signif(1 - perf.auc$A, digits=3)
# abnormality = unique(cls.list2.pass2[i,])[which(unique(cls.list2.pass2[i,]) != "WT")]
# cls.list2.pass2 = ifelse( cls.list2.pass2[i,] == "WT", abnormality, "WT" )
# phen.names.pass2[i] = paste(phen.names.pass2[i], "-opposite")
# } else{
MI.signif <- signif(MI, digits=3)
# p.val <- signif(perf.auc$p.value, digits=3)
MI.list.phen.pass2[i] = MI
# }
} else {
# NOTE(review): sets MI, not MI.signif — so the print/desc below reuse the stale
# MI.signif from the previous iteration instead of "-". Likely should be
# `MI.signif <- "-"` as in the analogous branches elsewhere. Confirm and fix.
MI <- "-"
# p.val <- "-"
MI.list.phen.pass2[i] = NA
}
print(paste(format(phen.names.pass2[i], width=12),"mutual.inf =", MI.signif))
# p.val.list[i] = p.val
phen.descs.pass2[i] <- paste(MI.signif)
}
# browser()
g.order.pass2 = c(1, order(MI.list.phen.pass2[2:n.phen.pass2], decreasing=TRUE, na.last=TRUE)+1) # skip PATHWAY.MUT
}
phen.descs2.pass2 = phen.descs.pass2[g.order.pass2]
cls.list2.pass2 = cls.list2.pass2[g.order.pass2,]
phen.names.pass2 = phen.names.pass2[g.order.pass2]
# browser()
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass2 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(as.vector(cls.list2.pass2))
cls.phen2.pass2 <- classes
cls.labels2.pass2 <- match(cls.list2.pass2, cls.phen2.pass2)
} else {
for (kk in 1:length(cls.list2.pass2[, 1])) {
classes <- unique(cls.list2.pass2[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass2 <- c(cls.phen2.pass2, classes)
cls.labels2.pass2[kk,] <- match(cls.list2.pass2[kk,], classes)
}
}
cls.labels2.pass2 = cls.labels2.pass2[1:n.phen.pass2,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
phen.list.pass2 <- unlist(cls.phen2.pass2)
# colors.list <- ifelse(unlist(cls.phen2) == target.class,
# ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
# ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
# Fixed aberration color map; gray for WT/unknown classes.
colors.list.pass2 = rep( "gray", length(phen.list.pass2))
colors.list.pass2[phen.list.pass2=="MUT"] = cls.phen.colors[1]
colors.list.pass2[phen.list.pass2=="DEL"] = cls.phen.colors[3]
colors.list.pass2[phen.list.pass2=="AMP"] = cls.phen.colors[4]
colors.list.pass2[phen.list.pass2=="ALT"] = cls.phen.colors[5]
MSIG.HeatMapPlot.10(V = m2.pass2,
pathway.mut = bin.class.pass2,
row.names = model.names.pass2,
row.names2 = model.descs2.pass2,
col.labels = cls.labels2.pass2,
col.classes = cls.phen2.pass2,
phen.cmap = colors.list.pass2, phen.names = phen.names.pass2,
phen.names2 = phen.descs2.pass2,
col.names = sample.names2.pass2, main = paste(tissue, "- Phase 2: only mutual information >=", MI.thresholds[MI.i],"from Phase 1 (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
### 3rd Pass ###
# NOTE(review): the entire MI-based 3rd pass below is commented out (this function
# currently stops after Phase 2). Preserved as-is; the active ROC/REC-based 3rd pass
# lives in the sibling OPAM.sort.projection.by.score variant earlier in this file.
# print( "--- Begin Pass 3 ---")
## browser()
# m2.pass3 = m2.pass2
# model.names.pass3 = rownames(m2.pass3)
# sample.names2.pass3 = colnames(m2.pass3)
## model.descs2.pass3 = model.descs2.pass2
# n.phen.pass3 = 40
## phen.descs.pass2 = phen.descs2.pass1[1:n.phen.pass2]
# cls.list2.pass3 = cls.list[, s.order.pass1][, s.order.pass2]
# cls.labels2.pass3 = cls.labels[, s.order.pass1][, s.order.pass2]
#
## browser()
# phen.names.pass3 = phen.names
# m.score.pass3 <- m2.pass3[1,]
# m.score.norm.pass3 <- (m.score.pass3 - min(m.score.pass3))/(max(m.score.pass3) - min(m.score.pass3))
# MI.list.phen.pass3 = vector(mode="numeric", length=n.phen)
# phen.descs.pass3 = vector(mode="character", length=n.phen)
## p.val.list.phen.pass3 = vector(mode="numeric", length=n.phen)
# for( i in 3:n.phen ){
# bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
# if (length(unique(bin.gene)) > 1) {
# (MI <- mutual.inf.2(bin.gene #+ runif(Ns, min=-.01, max=.01)
# , m.score.norm.pass3)$MI)/MI.ref
## if( perf.auc$A < 0.5 ){
# ## browser()
## roc = signif(1 - perf.auc$A, digits=3)
## p.val = signif(1- perf.auc$p.value, digits=3)
## abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
## cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
## phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
## roc.list.phen.pass3[i] = 1-perf.auc$A
## p.val.list.phen.pass3[i] = 1- perf.auc$p.value
## } else{
# MI.signif <- signif(MI, digits=3)
## p.val <- signif(perf.auc$p.value, digits=3)
# MI.list.phen.pass3[i] = MI
## p.val.list.phen.pass3[i] = perf.auc$p.value
## }
# } else {
# MI.signif <- NA
## p.val <- NA
# MI.list.phen.pass3[i] = NA
## p.val.list.phen.pass3[i] = NA
# }
## print(paste("ROC=", roc, " p-val=", p.val))
#
## p.val.list[i] = p.val
# phen.descs.pass3[i] <- paste(MI.signif)
# }
## browser()
## MI.threshold = 0.20
## len = length(which(MI.list.phen.pass3[-1:-2] >= MI.threshold))+2
## if( len>40 )
# len=40
# ind.u = match(order(unique(MI.list.phen.pass3[-1:-2]), decreasing=FALSE, na.last=TRUE), MI.list.phen.pass3[-1:-2])
## if( len == 2 ){
## MI.threshold = 0.15
## len = length(which(MI.list.phen.pass3[-1:-2] >= MI.threshold))+2
## }
## if( len == 2 ){
## MI.threshold = 0.2
## len = length(which(MI.list.phen.pass3[-1:-2] >= MI.threshold))+2
## }
## g.order.pass3.1 = c(1, 2, order(p.val.list.phen.pass3[3:n.phen], decreasing=FALSE, na.last=TRUE)+2 )
# g.order.pass3 = c(1, 2, order(MI.list.phen.pass3[-1:-2], decreasing=FALSE, na.last=TRUE)+2 )[1:len] # skip PATHWAY.MUT and COPY.NUMBER
# phen.descs2.pass3 = phen.descs.pass3[g.order.pass3]
# cls.list2.pass3 = cls.list2.pass3[g.order.pass3,]
# cls.labels2.pass3 = cls.labels2.pass3[g.order.pass3,]
# phen.names.pass3 = phen.names.pass3[g.order.pass3]
#
#
# cls.list.mut = ifelse(cls.list2.pass3[-1:-2,] == "MUT", 1, 0)
# cls.list.alt = ifelse(cls.list2.pass3[-1:-2,] == "DEL" | cls.list2.pass3[-1:-2,] == "AMP", 1, 0)
#
## browser()
# if( !is.vector(cls.list.alt) ){
# cls.list.mut.sum = apply(cls.list.mut, MARGIN=2, FUN=sum)
# cls.list.alt.sum = apply(cls.list.alt, MARGIN=2, FUN=sum)
# bin.class.pass3 = cls.list.mut.sum + cls.list.alt.sum
# bin.class.pass3 = ( bin.class.pass3 - min(bin.class.pass3))/(max(bin.class.pass3) - min(bin.class.pass3))
# cls.list.mut.sum = ifelse(cls.list.mut.sum + cls.list.alt.sum > 0, 1, 0)
# cls.list2.pass3[1,] = ifelse( cls.list.mut.sum >= 1, "MUT", "WT")
# cls.list2.pass3[2,] = ifelse( cls.list.alt.sum >= 1, "ALT", "WT")
#
#
# } else{
#
# cls.list2.pass3[2,] = ifelse(cls.list.alt == 1, "ALT", "WT")
# bin.class.pass3 = cls.list.mut+cls.list.alt #+ runif(Ns, min=-.1, max=.1)
# bin.class.pass3 = ( bin.class.pass3 - min(bin.class.pass3))/(max(bin.class.pass3) - min(bin.class.pass3))
# cls.list2.pass3[1,] = ifelse(bin.class.pass3 > 0 , "MUT", "WT")
# }
#
## browser()
#
# if( length(unique(bin.class.pass3)) > 1){
# MI <- mutual.inf.2(bin.class.pass3, m.score.norm.pass3)$MI
# MI.signif <- signif(MI, digits=3)
# MI.list.phen.pass3[1] = MI
# } else{
# MI.signif <- "-"
# MI.list.phen.pass3[1] = NA
# }
# print(paste(format(phen.names.pass3[1], width=12), "mutual.inf =", MI.signif))
# phen.descs2.pass3[1] <- paste(MI.signif)
# for( i in 2 ){ # Recalculate MI for PATHWAY.MUT and COPY.NUMBER
# bin.gene = ifelse( cls.list2.pass3[i,]=="WT", 0, 1)
# if (length(unique(bin.gene)) > 1) {
# MI <- (mutual.inf.2(bin.gene #+ runif(Ns, min=-.01, max=.01)
# , m.score.norm.pass3)$MI)/MI.ref
## if( perf.auc$A < 0.5 ){
# ## browser()
## roc = signif(1 - perf.auc$A, digits=3)
## p.val = signif(1- perf.auc$p.value, digits=3)
## abnormality = unique(cls.list2.pass3[i,])[which(unique(cls.list2.pass3[i,]) != "WT")]
## cls.list2.pass3[i,] = ifelse( cls.list2.pass3[i,] == "WT", abnormality, "WT" )
## phen.names.pass3[i] = paste(phen.names.pass3[i], "-opposite", sep="")
## roc.list.phen.pass3[i] = 1-perf.auc$A
## p.val.list.phen.pass3[i] = 1- perf.auc$p.value
## } else{
# MI.signif <- signif(MI, digits=3)
## p.val <- signif(perf.auc$p.value, digits=3)
# MI.list.phen.pass3[i] = MI
## p.val.list.phen.pass3[i] = perf.auc$p.value
## }
# } else {
# MI <- NA
## p.val <- NA
# MI.list.phen.pass3[i] = NA
## p.val.list.phen.pass3[i] = NA
# }
# print(paste(format(phen.names.pass3[i], width=12), "mutual.inf =", MI.signif))
#
## p.val.list[i] = p.val
# phen.descs2.pass3[i] <- paste(MI.signif)
# }
#
## browser()
# model.descs2.pass3 = vector(length = n.models, mode="character")
# MI.list.pass3 = vector( length=n.models, mode="character")
# for (i in 1:n.models) {
# m.score <- m2.pass3[i,]
# m.score.norm <- (m.score - min(m.score))/(max(m.score) - min(m.score))
## browser()
# if (length(unique(bin.class.pass3)) > 1) {
# MI <- mutual.inf.2(bin.class.pass3, m.score.norm)$MI
# MI.signif <- signif(MI, digits=3)
## p.val <- signif(perf.auc$p.value, digits=3)
# MI.list.pass3[i] = MI
## p.val.list.pass2[i] = perf.auc$p.value
# } else {
# MI.signif <- "-"
# MI.list.pass3[i] = NA
## p.val.list.pass2[i] = NA
# }
# print(paste(format(rownames(m2.pass3)[i], width=30), "mutual.inf =", MI.signif))
#
# model.descs2.pass3[i] <- paste(MI.signif)
# }
# m.order.pass3 = order(MI.list.pass3, na.last=TRUE, decreasing=TRUE)
# m2.pass3 = m2.pass3[m.order.pass3,]
# model.descs2.pass3 = model.descs2.pass3[m.order.pass3]
# s.order.pass3 = order(m2.pass3, decreasing=TRUE)
# m2.pass3 = m2.pass3[,s.order.pass3]
# bin.class.pass3 = bin.class.pass1[s.order.pass3]
# model.names.pass3 = rownames(m2.pass3)
# sample.names2.pass3 = colnames(m2.pass3)
#
# cls.phen2.pass3 <- NULL
# if (is.vector(cls.labels)) {
# classes <- unique(as.vector(cls.list2.pass3))
# cls.phen2.pass3 <- classes
# cls.labels2.pass3 <- match(cls.list2.pass3, cls.phen2.pass3)
# } else {
## browser()
# for (kk in 1:length(cls.list2.pass3[, 1])) {
## browser()
# classes <- unique(cls.list2.pass3[kk,])
## cls.phen2[[kk]] <- classes
# cls.phen2.pass3 <- c(cls.phen2.pass3, classes)
# cls.labels2.pass3[kk,] <- match(cls.list2.pass3[kk,], classes)
# }
# }
## cls.labels2.pass3 = cls.labels2.pass3[1:n.phen.pass3,]
#
#
## browser()
## correl <- cor(t(m2))[, loc]
## m.order <- order(correl, decreasing=decreasing.order)
## correl2 <- correl[m.order]
#
## model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
# phen.list.pass3 <- unlist(cls.phen2.pass3)
#
## colors.list <- ifelse(unlist(cls.phen2) == target.class,
## ifelse(unlist(cls.phen2) == "DEL" | unlist(cls.phen2) == "AMP",
## ifelse(unlist(cls.phen2) == "DEL", cls.phen.colors[3], cls.phen.colors[4]), cls.phen.colors[1]), cls.phen.colors[2])
# colors.list.pass3 = rep( "gray", length(phen.list.pass3))
# colors.list.pass3[phen.list.pass3=="MUT"] = cls.phen.colors[1]
# colors.list.pass3[phen.list.pass3=="DEL"] = cls.phen.colors[3]
# colors.list.pass3[phen.list.pass3=="AMP"] = cls.phen.colors[4]
# colors.list.pass3[phen.list.pass3=="ALT"] = cls.phen.colors[5]
# phen.names.pass3[1] = "PATHWAY.MUT+COPY.NUMBER"
## browser()
# MSIG.HeatMapPlot.10(V = m2.pass3,
# pathway.mut = bin.class.pass3,
# row.names = model.names.pass3,
# row.names2 = model.descs2.pass3,
# col.labels = cls.labels2.pass3,
# col.classes = cls.phen2.pass3,
# phen.cmap = colors.list.pass3, phen.names = phen.names.pass3,
# phen.names2 = phen.descs2.pass3,
# col.names = sample.names2.pass3, main = paste(tissue, "- Phase 3: Top signature from Phase 2 with all genes (Top 40 genes) (MI)"),
# xlab=" ", ylab=" ", row.norm = row.norm,
# cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
dev.off()
if (!is.na(output.dataset)) {
V.GCT <- m2
colnames(V.GCT) <- sample.names2
row.names(V.GCT) <- model.names2
write.gct(gct.data.frame = V.GCT, descs = model.descs2, filename =output.dataset)
}
}
OPAM.sort.projection.by.score.7 <- function(
# input.ds,
signatures = "NA",
input.all.pathways.ds,
input.cls,
tissue = "NA",
results.dir,
normalize.score = T,
normalization.type = "zero.one",
model = "NA",
target.phen = NA,
target.class = NA,
user.colors = NA,
decreasing.order = T,
output.dataset = NA,
char.rescale = 1,
cmap.type = 3,
row.norm = T,
u.gene.names.known = "NA",
add.amp.del = FALSE,
#n.random.signatures = 10,
multiple.tissues = FALSE,
cls.has.chrom.locs = FALSE,
file.suffix = "",
skip.iterative = FALSE,
add.mut = FALSE,
n.iter = 5,
pdf.height = 11,
pdf.width = 17,
do.mRMR = F,
skip.step2 = FALSE,
todd.version = FALSE
)
# Calls MSIG.HeatMapPlot.9 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2 does.
# However, it *will* use "model" if it cannot calculate p-values on the gene signatures, which
# happens when every cell line has a genomic aberration.
#
# Runs 3 passes on the data:
# 1st pass: looks at the genes and copy number alterations specified by u.gene.names.known
# 2nd pass: looks at only the top abnormalities (using a p-value cutoff) from the 1st pass, and adjusts
# the PATHWAY.MUT vector accordingly (only according to the genes, not by copy number data)
# 3rd pass: Takes the winning signature from the 2nd pass and then looks at all the genes available
#
# Very similar to OPAM.sort.projection.by.score.4, however this version uses mutual.inf instead of
# roc.area to calculate mutual information scores and p-values for PATHWAY.MUT, the vector of total genomic aberrations
# in all samples
#
# Differs from OPAM.sort.projection.by.score.6 by requiring the gct file of expression in
# all pathways by the input tissue ("input.all.pathways.ds")
{
library(gtools)
library(verification)
library(ROCR)
library(MASS)
library(RColorBrewer)
library(heatmap.plus)
dataset.all <- MSIG.Gct2Frame( filename = input.all.pathways.ds)
m.all <- data.matrix(dataset.all$ds)
model.names.all <- dataset.all$row.names
Ns = length(m.all[1,])
sample.names = dataset.all$names
if( multiple.tissues ){
tissue.type <- vector(length=Ns, mode="character")
# temp = strsplit(sample.names, split="_")
for (k in 1:Ns) {
temp <- strsplit(sample.names[k], split="_")
tissue.type[k] <- paste(temp[[1]][2:length(temp[[1]])], collapse="_")
}
tissue.names = unique(tissue.type)
tissue.labels = match(tissue.type, tissue.names)
} else{
tissue.names = tissue
tissue.labels = rep(1, Ns)
}
if( is.na(signatures[1]) ){
stop("Must provide a vector of signature names to evaluate, or specify 'ALL'")
}
## Remove "Commented out" signatures (with # at beginning of name)
if( length(grep("^#", signatures)) > 0){
signatures = signatures[-grep("^#", signatures)]
}
if( signatures[1] == "ALL"){
model.names = model.names.all
m = m.all
model.descs = dataset.all$descs
} else{
model.names = signatures
model.ind = match(signatures, model.names.all)
m = m.all[model.ind,]
model.descs = dataset.all$descs[model.ind]
if( length(model.ind) == 1 ){
m = t(as.matrix(m))
rownames(m) = model.names
}
rm(list=c("m.all", "dataset.all"))
# browser()
}
n.models <- length(m[,1])
temp <- strsplit(input.all.pathways.ds, split="/") # Extract test file name
s <- length(temp[[1]])
test.file.name <- temp[[1]][s]
temp <- strsplit(test.file.name, split=".gct")
test.file.prefix <- temp[[1]][1]
char.res <- 0.013 * n.models + 0.65
# normalize scores
if (normalize.score == T) {
if (normalization.type == "zero.one") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
}
} else if (normalization.type == "z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
}
} else if (normalization.type == "r.z.score") {
for (i in 1:n.models) {
m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
}
}
}
CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
cls.labels <- CLS$class.v
cls.phen <- CLS$phen
cls.list <- CLS$class.list
if(cls.list[1,1] == "0" | cls.list[1,1] == "1"){
cls.list = ifelse(cls.list=="1", "MUT", "WT")
}
#browser()
if (is.vector(cls.labels)) {
n.phen <- 1
} else {
n.phen <- length(cls.labels[,1])
}
if (!is.na(user.colors[1])) {
c.test <- user.colors
} else {
if (!is.null(CLS$col.phen)) {
c.test <- CLS$col.phen
} else {
c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"))
}
}
if (!is.null(CLS$phen.names)) {
phen.names <- CLS$phen.names
} else {
phen.names <- "NA"
}
cls.phen.index <- unlist(cls.phen)
cls.phen.colors <- c.test[1:length(cls.phen.index)]
# print("cls.phen.colors:")
# print(cls.phen.colors)
n.classes <- vector(length=n.phen, mode="numeric")
if (n.phen == 1) {
max.classes <- length(cls.phen)
n.classes[1] <- max.classes
} else {
max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
n.classes = unlist(lapply(cls.phen, length))
# for (i in 1:n.phen) {
# n.classes[i] <- length(cls.phen[[i]])
# }
}
pdf.options(height=pdf.height, width=pdf.width, colormodel="rgb", bg="transparent")
phen.names = c("SUMMARY", phen.names)
cls.list = rbind(rep("WT", length=length(cls.list[1,])),
#rep("WT", length=length(cls.list[1,])),
cls.list)
cls.labels = rbind(rep(1, length=length(cls.labels[1,])),
#rep(1, length=length(cls.labels[1,])),
cls.labels)
if( !todd.version ){
print("--- Begin Pass 1 ---")
# browser()
## Remove "Commented out" gene names (with # at beginning of name)
if( length(grep("^#", u.gene.names.known)) > 0){
u.gene.names.known = u.gene.names.known[-grep("^#", u.gene.names.known)]
}
if ( add.amp.del ){
u.gene.names.known = c( u.gene.names.known, paste(u.gene.names.known, "_AMP", sep=""),
paste(u.gene.names.known, "_DEL", sep="") )
}
if(add.mut){
u.gene.names.known = paste(u.gene.names.known, "_MUT", sep="")
}
phen.pass1 = c( u.gene.names.known )
## Find chromosomal locations of genes specified
## See "if( find.chromosomal.locations)" for more transparent code
if( cls.has.chrom.locs ){
library(org.Hs.eg.db)
phen.pass1.split = strsplit(phen.pass1, split="_")
phen.pass1.noampdel = unlist( lapply(phen.pass1.split, function(x) x[1]))
phen.pass1.egIDs = mget(phen.pass1.noampdel, org.Hs.egALIAS2EG, ifnotfound=NA)
# phen.pass1.no.chrom.loc = which(lapply(lapply(phen.pass1.egIDs, is.na), sum) > 0)
phen.pass1.locs.list = lapply(phen.pass1.egIDs, mget, org.Hs.egMAP)
phen.pass1.locs = vector(mode="character", length=length(phen.pass1))
# phen.pass1.locs[phen.pass1.no.chrom.loc] = "NA"
phen.pass1.locs = unlist(lapply(phen.pass1.locs.list, function(x) paste(unlist(x), collapse="_")))
phen.pass1.w.locs = paste(phen.pass1.noampdel, ".", phen.pass1.locs, sep="")
phen.pass1.ampdel.suffix = unlist(lapply(phen.pass1.split, function(x) x[2]))
#phen.pass1.no.suffix = which(is.na(phen.pass1.ampdel.suffix))
#phen.pass1[phen.pass1.no.suffix] = phen.pass1.w.locs[phen.pass1.no.suffix]
#phen.pass1[-phen.pass1.no.suffix] = paste(phen.pass1.w.locs[-phen.pass1.no.suffix], "_", phen.pass1.ampdel.suffix[-phen.pass1.no.suffix], sep="")
phen.pass1 = paste(phen.pass1.w.locs, "_", phen.pass1.ampdel.suffix, sep="")
}
     ## This block was originally placed immediately after "ind.phen.pass1 = ...". However, since we now
     ## want to find the chromosomal locations of the genes, we must first find the indices of the genes
     ## specified at the onset of the program, and THEN look up all of their chromosomal locations.
#browser()
phen.pass1 = c("SUMMARY", phen.pass1)
ind.phen.pass1 = which( phen.names %in% phen.pass1 )
# phen.names[1] = "SUMMARY"
phen.pass1 = phen.names[ind.phen.pass1]
n.phen.pass1 = length(phen.pass1)
MI.list.pass1 = vector( length=n.models, mode="numeric" )
cls.list.pass1 = cls.list[ind.phen.pass1,]
cls.list.pass1.2 = ifelse(cls.list.pass1 == "WT", 0, 1)
cls.labels.pass1 = cls.labels[ind.phen.pass1,]
# browser()
if (!is.na(target.phen)) {
if( length( phen.pass1) > 2 ){
bin.class.pass1 = apply( cls.list.pass1.2[-1,], MARGIN=2, FUN=sum)
} else{ bin.class.pass1 = cls.list.pass1.2[2,] }
# Normalize bin.class.pass1
if( length(unique(bin.class.pass1)) > 1){
bin.class.pass1 = normalize(bin.class.pass1) #( bin.class.pass1 - min(bin.class.pass1))/(max(bin.class.pass1) - min(bin.class.pass1))
} else if ( length(unique(bin.class.pass1)) == 1){
bin.class = rep(1, length(cls.list[1,]))
}
cls.list.pass1[1,] = ifelse(bin.class.pass1 > 0, "MUT", "WT")
} else {
bin.class.pass1 <- ifelse(cls.list[1,] == cls.list2[1,1], 1, 0)
}
#browser()
### Make initial heatmap ###
cls.phen2.pass1 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(cls.list.pass1)
cls.phen2.pass1 <- classes
cls.labels.pass1 <- match(cls.list.pass1, cls.phen2.pass1)
} else {
for (kk in 1:length(cls.list.pass1[, 1])) {
classes <- unique(cls.list.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
cls.labels.pass1[kk,] <- match(cls.list.pass1[kk,], classes)
}
}
cls.labels.pass1 = cls.labels.pass1[1:length(phen.pass1),]
phen.list.pass1 <- unlist(cls.phen2.pass1)
colors.list = rep( "gray", length(phen.list.pass1))
colors.list[phen.list.pass1=="MUT"] = cls.phen.colors[1]
filename <- paste(results.dir, test.file.prefix, file.suffix, ".Step0", sep="")
pdf(file=paste(filename, ".pdf", sep=""), height = pdf.height, width = pdf.width )
#quartz(height = 11, width = 17)
if( multiple.tissues ){
#browser()
#quartz(height = 11, width = 17)
MSIG.HeatMapPlot.10.multiple.tissues(V = m,
pathway.mut = bin.class.pass1,
row.names = model.names,
col.labels = cls.labels.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list,
phen.names = phen.pass1,
col.names = sample.names,
main = paste(tissue, "- Initial Heatmap ('Step 0')"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F,
tissue.names = tissue.names,
tissue.labels = tissue.labels)
} else{
MSIG.HeatMapPlot.10(V = m,
pathway.mut = bin.class.pass1,
row.names = model.names,
col.labels = cls.labels.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list,
phen.names = phen.pass1,
col.names = sample.names,
main = paste(tissue, "- Initial Heatmap ('Step 0')"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
}
dev.off()
model.descs2.pass1 = vector(length = n.models, mode="character")
if( length(unique(bin.class.pass1)) > 1 ){
if( n.models > 1 ){
MI.results = mutual.inf.3.v2(bin.class.pass1, m,
target.vector.name="SUMMARY",
tissue=tissue)
MI.list.pass1 = MI.results$MI
model.descs2.pass1 <- sapply(MI.results$MI, FUN=signif, 3)
m.order.pass1 = order(MI.list.pass1, decreasing=TRUE, na.last=TRUE)
m2.pass1 <- m[m.order.pass1, ]
s.order.pass1 <- order(m2.pass1[1,], decreasing = TRUE )
m2.pass1 <- m2.pass1[, s.order.pass1]
} else{
#browser()
MI.ref = mutual.inf.2(bin.class.pass1, bin.class.pass1)
MI.list.pass1 = MI.results =
mutual.inf.2(bin.class.pass1, m[1,])/MI.ref
model.descs2.pass1 <- signif(MI.results, digits=3)
m2.pass1 <- m ; m.order.pass1 = 1
s.order.pass1 <- order(m2.pass1[1,], decreasing = TRUE )
m2.pass1 <- t(as.matrix(m[, s.order.pass1]))
rownames(m2.pass1) = model.names
}
} else{
MI.list.pass1 = rep(NA, n.models)
FDR.list.pass1 = rep(NA, n.models)
model.descs2.pass1 = rep(" - (FDR = - )", n.models)
if( n.models > 1 ){
loc <- match(model, model.names)
s.order.pass1 <- order(m[loc,], decreasing = decreasing.order)
m2.pass1 <- m[, s.order.pass1]
correl <- cor(t(m2.pass1))[, loc]
m.order.pass1 <- order(correl, decreasing=T)
m2.pass1 <- m2.pass1[m.order.pass1, ]
} else{
m2.pass1 <- t(as.matrix(m[, s.order.pass1]))
rownames(m2.pass1) = model.names
m.order.pass1 = 1 }
}
MI.list.pass1 = MI.list.pass1[m.order.pass1]
bin.class.pass1 = bin.class.pass1[s.order.pass1]
model.descs2.pass1.all = model.descs2.pass1
model.descs2.pass1 = model.descs2.pass1[m.order.pass1]
sample.names2.pass1 <- colnames(m2.pass1)
model.names.pass1 <- rownames(m2.pass1)
print(matrix(c(model.names.pass1, model.descs2.pass1), ncol=2), quote=F)
if (is.vector(cls.labels)) {
cls.labels2.pass1 <- cls.labels.pass1[s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[s.order.pass1]
} else {
cls.labels2.pass1 <- cls.labels.pass1[, s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[, s.order.pass1]
}
tissue.labels.pass1 = tissue.labels[s.order.pass1]
sample.names2 <- colnames(m2.pass1)
winning.model.ind.pass1 = which(model.names.pass1[1] == rownames(m2.pass1))
MI.list.phen.pass1 = vector(mode="numeric", length=n.phen.pass1)
phen.descs.pass1 = vector(mode="character", length=n.phen.pass1)
if( length(unique(bin.class.pass1)) > 1){
MI.signif <- signif(MI.list.pass1[1], digits=3)
MI.list.phen.pass1[1] = MI.list.pass1[1]
phen.descs.pass1[1] = model.descs2.pass1[1]
} else{
MI.signif <- "-"
MI.list.phen.pass1[1] = NA
}
print(paste(format(phen.pass1[1], width=12), "mutual.inf =", MI.signif
))
print(proc.time()-t1)
print(date())
phen.descs.pass1[1] <- paste(MI.signif,
sep="")
# browser()
if( n.phen.pass1 > 2 ){
bin.gene.matrix = ifelse(cls.list2.pass1[-1,]=="WT", 0, 1)
MI.results = mutual.inf.3.v2(
m2.pass1[winning.model.ind.pass1,],
bin.gene.matrix)
MI.list.phen.pass1[-1] = MI.results$MI
phen.descs.pass1[-1] = sapply(MI.results$MI, FUN=signif, 3)
g.order.pass1 = c(1, order(MI.list.phen.pass1[-1], decreasing=TRUE, na.last=TRUE)+1)
MI.list.phen.pass1 = MI.list.phen.pass1[g.order.pass1]
phen.descs2.pass1 = phen.descs.pass1[g.order.pass1]
cls.list2.pass1 = cls.list2.pass1[g.order.pass1,]
phen.names.pass1 = phen.pass1[g.order.pass1]
} else{
# bin.gene.matrix = ifelse(cls.list2.pass1[-1,]=="WT", 0, 1)
# MI.ref = mutual.inf.2(m2.pass1[winning.model.ind.pass1,],
# m2.pass1[winning.model.ind.pass1,])
# MI.results = mutual.inf.2(
# m2.pass1[winning.model.ind.pass1,],
# bin.gene.matrix)/MI.ref
# MI.list.phen.pass1[-1] = MI.results
phen.descs.pass1[-1] = phen.descs.pass1[1]
g.order.pass1 = 1:2
# MI.list.phen.pass1 = MI.list.phen.pass1[g.order.pass1]
phen.descs2.pass1 = phen.descs.pass1
# cls.list2.pass1 = cls.list2.pass1[g.order.pass1,]
phen.names.pass1 = phen.pass1
}
print(matrix(c(phen.names.pass1, phen.descs2.pass1), ncol=2), quote=F)
print(proc.time()-t1)
print(date())
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass1 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(cls.list2.pass1)
cls.phen2.pass1 <- classes
cls.labels2.pass1 <- match(cls.list2.pass1, cls.phen2.pass1)
} else {
for (kk in 1:length(cls.list2.pass1[, 1])) {
classes <- unique(cls.list2.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
cls.labels2.pass1[kk,] <- match(cls.list2.pass1[kk,], classes)
}
}
cls.labels2.pass1 = cls.labels2.pass1[1:n.phen.pass1,]
phen.list.pass1 <- unlist(cls.phen2.pass1)
colors.list.pass1 = rep( "gray", length(phen.list.pass1))
colors.list.pass1[phen.list.pass1=="MUT"] = cls.phen.colors[1]
colors.list.pass1[phen.list.pass1=="DEL"] = cls.phen.colors[3]
colors.list.pass1[phen.list.pass1=="AMP"] = cls.phen.colors[4]
colors.list.pass1[phen.list.pass1=="ALT"] = cls.phen.colors[5]
filename <- paste(results.dir, test.file.prefix, file.suffix, ".Step1.MI-HXY", sep="")
# pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 9)
pdf(file=paste(filename, ".pdf", sep=""), height = pdf.height, width = pdf.width )
#browser()
# windows(width=12, height=8)
if( multiple.tissues ){
MSIG.HeatMapPlot.10.multiple.tissues(V = m2.pass1,
pathway.mut = bin.class.pass1,
row.names = model.names.pass1,
row.names2 = model.descs2.pass1,
col.labels = cls.labels2.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list.pass1,
phen.names = phen.names.pass1,
phen.names2 = c(" ", phen.descs2.pass1),
col.names = sample.names2.pass1,
main = paste(tissue, "- Step 1: Known KRAS Pathway Abnormalities (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F,
tissue.names = tissue.names,
tissue.labels = tissue.labels)
} else{
MSIG.HeatMapPlot.10(V = m2.pass1,
pathway.mut = bin.class.pass1,
row.names = model.names.pass1,
row.names2 = model.descs2.pass1,
col.labels = cls.labels2.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list.pass1,
phen.names = phen.names.pass1,
phen.names2 = phen.descs2.pass1,
col.names = sample.names2.pass1,
main = paste(tissue, "- Step 1: Known KRAS Pathway Abnormalities (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
}
dev.off()
# stop("Don't do Step 2!")
### Begin Pass 2 ###
if( n.phen.pass1 == 2 || skip.step2 == T )
print( "--- Begin Step 2 ---")
if( is.na(MI.list.pass1[1]) || is.na(MI.list.phen.pass1[1]) ){
dev.off()
return()
}
MI.thresholds = c(0.2, 0.1, 0.08, 0.05, 0.03, 0.025, 0.02, 0.015, 0.01, 0)
# MI.threshold = 0.03
ind.top.MI = vector(mode="integer")
MI.i = 0
# FDR.i = 0
# browser()
while( length(ind.top.MI) < 1)
{
MI.i = MI.i + 1
# FDR.i = FDR.i + 1
if( MI.i > length(MI.thresholds)){
dev.off()
print("Selected genomic aberrations do not have
positive mutual information with a low enough false
discovery rate with the selected pathways")
return()
}
ind.top.MI = which( MI.list.phen.pass1[-1] >= MI.thresholds[MI.i]) +1 #& MI.list.phen.pass1[-1] > 0
# ) + 1
}
ind.MI.threshold = c(1, ind.top.MI)
n.phen.pass2 = length(ind.MI.threshold)
cls.list2.pass2 = cls.list2.pass1[ind.MI.threshold,]
phen.names.pass2 = phen.names.pass1[ind.MI.threshold]
cls.labels2.pass2 = cls.labels2.pass1[ind.MI.threshold,]
# browser()
cls.list2.pass2.2 = ifelse( cls.list2.pass2 == "WT", 0, 1)
cls.list2.pass2.3 = ifelse( cls.list2.pass2 == "DEL" | cls.list2.pass2 == "AMP", 1, 0)
if( n.phen.pass2 > 2 ){
pathway.mut.pass2 = apply(cls.list2.pass2.2[2:n.phen.pass2,], MARGIN=2, FUN=sum)
bin.class.pass2 = pathway.mut.pass2/length(pathway.mut.pass2)
bin.class.pass2 = ( bin.class.pass2 - min(bin.class.pass2))/(max(bin.class.pass2) - min(bin.class.pass2))
cls.list2.pass2[1,] = ifelse( bin.class.pass2 > 0, "MUT", "WT")
} else{
pathway.mut.pass2 = ifelse( cls.list2.pass2.2[2,] == 1, "MUT", "WT")
bin.class.pass2 = ifelse( pathway.mut.pass2 == "MUT", 1, 0 ) #+ runif(Ns, min=-.05, max=.05)
cls.list2.pass2[1,] = pathway.mut.pass2
}
MI.list.pass2 = vector( length=n.models, mode="double" )
#browser()
#### Print Step 2's initial heatmap ###
cls.phen2.pass1.5 <- NULL
cls.labels2.pass1.5 = cls.labels2.pass2
if (is.vector(cls.labels)) {
classes <- unique(cls.list2.pass1)
cls.phen2.pass1.5 <- classes
cls.labels2.pass1.5 <- match(cls.list2.pass1, cls.phen2.pass1.5)
} else {
for (kk in 1:length(cls.list2.pass2[, 1])) {
classes <- unique(cls.list2.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1.5 <- c(cls.phen2.pass1.5, classes)
cls.labels2.pass1.5[kk,] <- match(cls.list2.pass2[kk,], classes)
}
}
# cls.labels2.pass1.5 = cls.labels2.pass1.5[1:n.phen.pass2,]
phen.list.pass1.5 <- unlist(cls.phen2.pass1.5)
colors.list.pass1.5 = rep( "gray", n.phen.pass1)
colors.list.pass1.5[phen.list.pass1.5=="MUT"] = cls.phen.colors[1]
filename <- paste(results.dir, test.file.prefix, file.suffix, ".Step1.5.Heatmap", sep="")
pdf(file=paste(filename, ".pdf", sep=""), height = pdf.height, width = pdf.width )
if( multiple.tissues ){
MSIG.HeatMapPlot.10.multiple.tissues(V = m2.pass1,
pathway.mut = bin.class.pass2,
row.names = model.names.pass1,
col.labels = cls.labels2.pass1.5,
col.classes = cls.phen2.pass1.5,
phen.cmap = colors.list.pass1.5,
phen.names = phen.names.pass1[ind.MI.threshold],
col.names = sample.names2.pass1,
main = paste(tissue, "- Post-Step 1, Pre-Step 2 (Step 1.5)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F,
tissue.names = tissue.names,
tissue.labels = tissue.labels)
} else{
MSIG.HeatMapPlot.10(V = m2.pass1,
pathway.mut = bin.class.pass2,
row.names = model.names.pass1,
col.labels = cls.labels2.pass1.5,
col.classes = cls.phen2.pass1.5,
phen.cmap = colors.list.pass1.5,
phen.names = phen.names.pass1[ind.MI.threshold],
col.names = sample.names2.pass1,
main = paste(tissue, "- Post-Step 1, Pre-Step 2 (Step 1.5)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F) }
dev.off()
# browser()
#pdf(file=paste(tissue, n.randomizations, "randomizations.Step2", "pdf", sep="."))
model.descs2.pass2 = vector(length = n.models, mode="character")
if( length(unique(bin.class.pass2)) > 1 ){
if( n.models > 1 ){
MI.results = mutual.inf.3.v2(bin.class.pass2, m2.pass1,
target.vector.name="SUMMARY",
)
MI.list.pass2 = MI.results$MI
model.descs2.pass2 = sapply(MI.results$MI, signif, 3)
m.order.pass2 = order(MI.list.pass2, decreasing=TRUE, na.last=TRUE)
m2.pass2 <- m2.pass1[m.order.pass2, ]
s.order.pass2 <- order(m2.pass2[1,], decreasing = TRUE )
m2.pass2 <- m2.pass2[, s.order.pass2]
} else{
MI.ref = mutual.inf.2(bin.class.pass2, bin.class.pass2)
MI.list.pass2 = MI.results =
mutual.inf.2(bin.class.pass2,
m2.pass1[1,])/MI.ref
#MI.list.pass2 = MI.results$MI
model.descs2.pass2 = signif(MI.list.pass2, digits=3)
m.order.pass2 = 1 #order(MI.list.pass2, decreasing=TRUE, na.last=TRUE)
m2.pass2 <- m2.pass1#[m.order.pass2, ]
s.order.pass2 <- order(m2.pass2[1,], decreasing = TRUE )
m2.pass2 <- t(as.matrix(m2.pass2[, s.order.pass2]))
rownames(m2.pass2) = model.names.pass1
}
} else{
MI.list.pass2 = rep(NA, n.models)
model.descs2.pass2 = rep(" - ", n.models)
if( n.models > 1 ){
loc <- match(model, model.names)
s.order.pass2 <- order(m2.pass1[loc,], decreasing = decreasing.order)
m2.pass2 <- m2.pass1[, s.order.pass2]
correl <- cor(t(m2.pass2))[, loc]
m.order.pass2 <- order(correl, decreasing=T)
m2.pass2 <- m2.pass2[m.order.pass2, ]
} else{
#loc <- match(model, model.names)
s.order.pass2 <- order(m2.pass1[1,], decreasing = decreasing.order)
m2.pass2 <- t(as.matrix(m2.pass1[, s.order.pass2]))
rownames(m2.pass2) = model.names.pass1
m.order.pass2 = 1
# correl <- cor(t(m2.pass2))[, loc]
# m.order.pass2 <- order(correl, decreasing=T)
# m2.pass2 <- m2.pass2[m.order.pass2, ]
}
}
MI.list.pass2 = MI.list.pass2[m.order.pass2]
bin.class.pass2 = bin.class.pass2[s.order.pass2]
tissue.labels = tissue.labels[s.order.pass2]
model.descs2.pass2 = model.descs2.pass2[m.order.pass2]
sample.names2.pass2 <- colnames(m2.pass2)
model.names.pass2 <- rownames(m2.pass2)
print(matrix(c(model.names.pass2, model.descs2.pass2), ncol=2), quote=F)
# browser()
if (is.vector(cls.labels)) {
cls.labels2.pass2 <- cls.labels2.pass2[s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[s.order.pass2]
} else {
cls.labels2.pass2 <- cls.labels2.pass2[, s.order.pass2]
cls.list2.pass2 <- cls.list2.pass2[, s.order.pass2]
}
tissue.labels.pass2 = tissue.labels.pass1[s.order.pass2]
sample.names2 <- colnames(m2.pass2)
winning.model.ind.pass2 = which(model.names.pass2[1] == rownames(m2.pass2))
MI.list.phen.pass2 = vector(mode="numeric", length=n.phen.pass2)
phen.descs.pass2 = vector(mode="character", length=n.phen.pass2)
if( length(unique(bin.class.pass2)) > 1){
MI.signif <- signif(MI.list.pass2[1], digits=3)
MI.list.phen.pass2[1] = MI.list.pass2[1]
} else{
MI.signif <- "-"
MI.list.phen.pass2[1] = NA
}
print(paste(format(phen.names.pass2[1], width=12), "mutual.inf =", MI.signif#, " FDR =", FDR.signif
))
print(proc.time()-t1)
print(date())
phen.descs.pass2[1] <- paste(MI.signif) #, " (FDR = ", FDR.signif, ")", sep="")
if( n.phen.pass2 == 2 ){
phen.descs.pass2[2] <- phen.descs.pass2[1]
# FDR.list.phen.pass2[2] = FDR.list.phen.pass2[1]
MI.list.phen.pass2[2] = MI.list.phen.pass2[1]
g.order.pass2 = c(1,2)
print(paste(format(phen.names.pass2[2], width=12), "mutual.inf =", MI.signif)) #, " FDR =", FDR.signif))
} else{
bin.gene.matrix = ifelse(cls.list2.pass2[-1,]=="WT", 0, 1)
n.aberrations = apply(bin.gene.matrix, MARGIN=1, FUN=sum)
MI.results = mutual.inf.3.v2(
m2.pass2[winning.model.ind.pass2,],
bin.gene.matrix,
target.vector.name=phen.pass2,
# n.randomizations = n.randomizations
)
MI.list.phen.pass2[-1] = MI.results$MI
phen.descs.pass2[-1] = sapply(MI.results$MI, signif, 3)
ind.zeros = which(n.aberrations==0) + 1
MI.list.phen.pass2[ind.zeros] = NA
# FDR.list.phen.pass2[ind.zeros] = NA
phen.descs.pass2[ind.zeros] = " - "
g.order.pass2 = c(1, order(MI.list.phen.pass2[-1], decreasing=TRUE, na.last=TRUE)+1) # skip PATHWAY.MUT
}
#dev.off()
phen.descs2.pass2 = phen.descs.pass2[g.order.pass2]
cls.list2.pass2 = cls.list2.pass2[g.order.pass2,]
phen.names.pass2 = phen.names.pass2[g.order.pass2]
# browser()
# Recompute cls.list2 as some mutations or copy numbers may have been removed
print(matrix(c(phen.names.pass2, phen.descs2.pass2), ncol=2), quote=F)
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass2 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(as.vector(cls.list2.pass2))
cls.phen2.pass2 <- classes
cls.labels2.pass2 <- match(cls.list2.pass2, cls.phen2.pass2)
} else {
for (kk in 1:length(cls.list2.pass2[, 1])) {
classes <- unique(cls.list2.pass2[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass2 <- c(cls.phen2.pass2, classes)
cls.labels2.pass2[kk,] <- match(cls.list2.pass2[kk,], classes)
}
}
cls.labels2.pass2 = cls.labels2.pass2[1:n.phen.pass2,]
phen.list.pass2 <- unlist(cls.phen2.pass2)
colors.list.pass2 = rep( "gray", length(phen.list.pass2))
colors.list.pass2[phen.list.pass2=="MUT"] = cls.phen.colors[1]
colors.list.pass2[phen.list.pass2=="DEL"] = cls.phen.colors[3]
colors.list.pass2[phen.list.pass2=="AMP"] = cls.phen.colors[4]
colors.list.pass2[phen.list.pass2=="ALT"] = cls.phen.colors[5]
filename <- paste(results.dir, test.file.prefix, file.suffix, ".Step2.MI-HXY", sep="")
pdf(file=paste(filename, ".pdf", sep=""), height = pdf.height, width = pdf.width )
if( multiple.tissues ){
MSIG.HeatMapPlot.10.multiple.tissues(V = m2.pass2,
pathway.mut = bin.class.pass2,
row.names = model.names.pass2,
row.names2 = model.descs2.pass2,
col.labels = cls.labels2.pass2,
col.classes = cls.phen2.pass2,
phen.cmap = colors.list.pass2, phen.names = phen.names.pass2,
phen.names2 = c(" ", phen.descs2.pass2),
col.names = sample.names2.pass2, main = paste(tissue,
"- Step 2: only MI >=", MI.thresholds[MI.i],"from Step 1 (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F,
tissue.names = tissue.names,
tissue.labels = tissue.labels)
} else{
MSIG.HeatMapPlot.10(V = m2.pass2,
pathway.mut = bin.class.pass2,
row.names = model.names.pass2,
row.names2 = model.descs2.pass2,
col.labels = cls.labels2.pass2,
col.classes = cls.phen2.pass2,
phen.cmap = colors.list.pass2, phen.names = phen.names.pass2,
phen.names2 = phen.descs2.pass2,
col.names = sample.names2.pass2, main = paste(tissue,
"- Step 2: only MI >=", MI.thresholds[MI.i],"from Step 1 (MI)"),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
}
dev.off()
} else{ print("'todd.version' on -- skipping Steps 1 and 2 and simply 'filling in' from scratch")}
### 3rd Pass ###
print( "--- Begin Pass 3 (Iterative Method)---")
print("2 in explained vector = previous explained vector 1 = new additions")
if( todd.version ){
model.names.pass2 = rownames(m)
m2.pass3 = m2.pass2 = m
cls.list2.pass3 = cls.list2.pass2 = cls.list
cls.labels2.pass3 = cls.labels2.pass2 = cls.labels
phen.names.pass2 = phen.names
file.suffix = paste(file.suffix, "_todd.version", sep="")
} else{
m2.pass3 = m2.pass2
cls.list2.pass3 = cls.list[, s.order.pass1][, s.order.pass2]
cls.labels2.pass3 = cls.labels[,s.order.pass1][,s.order.pass2]
}
model.names.pass3 = rownames(m2.pass3)
sample.names2.pass3 = colnames(m2.pass3)
n.phen.pass3 = 40
top.genes.ind = NULL
top.genes.names = NULL
top.genes.vectors = NULL
top.genes.MI = NULL
top.diffs = NULL
explained.vectors = NULL
bin.gene.matrix.3 = ifelse(cls.list2.pass3[-1,]=="WT", 0, 1)
mid.point <- which.min(abs(m2.pass2[1,] - quantile(m2.pass2[1,], 0.5)))
grey.and.black = c("#C0C0C0", "#000000")
pathway.name = model.names.pass2[1]
MI.ref = mutual.inf.2(m2.pass2[1,], m2.pass2[1,])
mycol <- vector(length=512, mode = "numeric")
for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
mycol <- rev(mycol)
explained.initial = ifelse(
cls.list2.pass2[1,] =="WT", 0, 1)
explained.vectors = rbind(explained.vectors, explained.initial)
explained = explained.initial
explained.MI.initial = mutual.inf.2(explained, m2.pass2[1,])/MI.ref
print(paste("explained.MI.initial =", explained.MI.initial))
print(explained)
cex.axis = 1
ncolors <- length(mycol)
if(!skip.iterative){
samples.without.mut = ifelse(
cls.list2.pass2[2,] =="WT", 1, 0)
#browser()
wo.mut.or.blue = ifelse(c(samples.without.mut[1:mid.point],
rep(1, length=(Ns - mid.point)) )==1, TRUE, FALSE)
wo.mut.and.red = ifelse(c(samples.without.mut[1:mid.point],
rep(0, length=(Ns - mid.point)) )==1, TRUE, FALSE)
pdf(file=paste(results.dir, test.file.prefix, file.suffix, ".Step3_iterative.pdf", sep=""),
height=8.5, width=11)
#par(mar = c(1, 15, 1, 5))
## If we had naively searched the space without removing the explained cell lines
MI.results = mutual.inf.3.v2(m2.pass2[1,], bin.gene.matrix.3)
#browser()
MI.order = order(MI.results$MI, decreasing=TRUE, na.last=TRUE)+1
top10.names = c( #paste(c(phen.names.pass2[-1], "(from Step 2)"), collapse=" " ),
phen.names[MI.order[1:40]])
top10.MI = c( #signif(explained.MI.initial, digits=4),
signif(MI.results$MI[MI.order[1:40]-1], digits=4))
top10.labels = rbind(#explained+1,
cls.labels2.pass3[MI.order[1:40],])
par(mar = c(1, 15, 1, 5))
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(1, 8), FALSE)
max.v <- max(max(m2.pass2[1,]), -min(m2.pass2[1,]))
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2,
(ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))
image(1:length(m2.pass2[1,]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE,
main="Naive Step 3 without exclusion of Step 2 aberrations", sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 <- top10.labels
V1 <- apply(V1, MARGIN=2, FUN=rev)
# max.v <- max(max(V1), -min(V1))
# V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
image(1:length(m2.pass2[1,]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black, axes=FALSE,
main="", #paste("step:", i),
sub = "", xlab= "", ylab="")
axis(2, at=1:dim(V1)[1], labels=rev(top10.names), adj= 0.5, tick=FALSE, las = 1, cex.axis=1,
font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev(top10.MI), adj= 0.5, tick=FALSE, las = 1, cex.axis=1,
font.axis=1, line=-1)
if( todd.version ){
print(top10.labels[1:5,])
browser()
explained.prev = top10.labels[1,]-1
explained.MI.prev = top10.MI[1]
} else{
explained.prev = explained
explained.MI.prev = explained.MI.initial
}
for( i in 1:n.iter){
print(paste("iteration:", i))
MI.results = mutual.inf.3.v2(
m2.pass2[1,wo.mut.or.blue],
bin.gene.matrix.3[,wo.mut.or.blue] )
MI.order = order(MI.results$MI, decreasing=TRUE, na.last=TRUE)+1
top10.names = phen.names[MI.order[1:10]]
top10.MI = MI.results$MI[MI.order[1:10]-1]
top10.labels = cls.labels.pass3[MI.order[1:10],wo.mut.or.blue]
top.genes.ind = c(top.genes.ind, MI.order[1] )
num.redundant = length(which(MI.results$MI == MI.results$MI[MI.order[1]-1]))-1
top.genes.names = c( top.genes.names, paste(phen.names[MI.order[1]], "+", num.redundant,
ifelse(num.redundant==1, "other", "others")))
mut = bin.gene.matrix.3[(MI.order[1]-1),]
explained = ifelse(mut+explained.prev>0, 1, 0)
explained.MI = mutual.inf.2( m2.pass2[1,wo.mut.or.blue], explained[wo.mut.or.blue])/MI.ref
MI.diff = explained.MI - explained.MI.prev
print(paste("Explained.MI = ", explained.MI,
" MI.diff = ", ifelse(MI.diff<0, "-", "+"),
signif(abs(MI.diff), digits=4), sep=""))
explained.vectors = rbind(explained.vectors, explained)
print(2*explained.prev + mut)
top.diffs = c(top.diffs, MI.diff)
top.genes.vectors = rbind(top.genes.vectors, mut)
top.genes.MI = c(top.genes.MI, paste(signif(MI.results$MI[MI.order[1]-1], digits=4),
sep="" ))
par(mar = c(1, 12, 1, 12))
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(1, 8), FALSE)
max.v <- max(max(m2.pass2[1,wo.mut.or.blue]), -min(m2.pass2[1,wo.mut.or.blue]))
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2,
(ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))[wo.mut.or.blue]
image(1:length(m2.pass2[1,wo.mut.or.blue]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 <- rbind( explained[wo.mut.or.blue]+1, top10.labels)
V1 <- apply(V1, MARGIN=2, FUN=rev)
image(1:length(m2.pass2[1,wo.mut.or.blue]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black,
axes=FALSE, main=paste("iteration:", i), sub = "", xlab= "", ylab="")
axis(2, at=1:dim(V1)[1], labels=rev(c("explained with top result", top10.names)),
adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev(c( paste(signif(explained.MI, digits=4),
sep="" ),
signif(top10.MI,digits=4))),
adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
samples.without.mut[wo.mut.or.blue] = samples.without.mut[wo.mut.or.blue] - mut[wo.mut.or.blue]
wo.mut.or.blue = (samples.without.mut | m2.pass2[1,] <= median(m2.pass2[1,]))
wo.mut.and.red = (samples.without.mut & m2.pass2[1,] > median(m2.pass2[1,]))
explained.MI.prev = explained.MI
explained.prev = ifelse(mut+explained.prev>0, 1, 0)
print(proc.time()-t1)
print(date())
}
explained = ifelse(apply(rbind(ifelse(cls.list2.pass2[1,]=="WT", 0,1),
top.genes.vectors), MARGIN=2, FUN=sum)>=1, 1, 0)
explained.MI = mutual.inf.2(m2.pass2[1,], explained)/MI.ref
top.genes.MI = signif(mutual.inf.3.v2(m2.pass2[1,], top.genes.vectors)$MI, digits=4)
par(mar = c(1, 12, 1, 12))
nf <- nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(1, 8), respect = FALSE)
max.v <- max(max(m2.pass2[1,]), -min(m2.pass2[1,]))
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2,
(ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))
image(1:length(m2.pass2[1,]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE,
main="Final results from iterations (removing cell lines)", sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 <- rbind(ifelse(cls.list2.pass2[1,]=="WT", 0,1), top.genes.vectors, explained)+1
V1 <- apply(V1, MARGIN=2, FUN=rev)
image(1:length(m2.pass2[1,]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black, axes=FALSE,
sub = "", xlab= "", ylab="")
axis(2, at=1:dim(V1)[1], labels=rev(c(paste(phen.names.pass2[-1], collapse=" "),
top.genes.names, "explained")), adj= 0.5, tick=FALSE,
las = 1, cex.axis=1, font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev( c(signif(explained.MI.initial, digits=4),
paste(top.genes.MI, sep=""),
signif(explained.MI,digits=4))), adj= 0.5, tick=FALSE,
las = 1, cex.axis=1, font.axis=1, line=-1)
explained.vectors.MI = mutual.inf.3.v2(m2.pass2[1,], explained.vectors)$MI
MI.diffs = explained.vectors.MI[-1] - explained.vectors.MI[1:n.iter]
par(mar = c(1, 12, 1, 12))
nf <- nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(1, 8), respect = FALSE)
max.v <- max(max(m2.pass2[1,]), -min(m2.pass2[1,]))
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2,
(ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))
image(1:length(m2.pass2[1,]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE, main="Final results from iterations (removing cell lines)", sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 = apply(explained.vectors+1, MARGIN=2, FUN=rev)
image(1:length(m2.pass2[1,]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black, axes=FALSE,
# main=paste("step:", i),
sub = "", xlab= "", ylab="")
left.labels = c("INITIAL cumulative", paste("cumulative, iter: ", 1:(n.iter-1), " ", sep=""),
paste("FINAL cumulative, iter: ", n.iter, " ",sep=""))
right.labels = c( paste(" ", signif(explained.vectors.MI[1],digits=4), sep=""),
paste( " ", signif(explained.vectors.MI[2:(n.iter+1)], digits=4),
" (",ifelse(MI.diffs < 0, "-", "+"),
signif(abs(MI.diffs), digits=4),")", sep="")
)
axis(2, at=1:dim(V1)[1], labels=rev(left.labels), adj= 0.5, tick=FALSE, las = 1, cex.axis=1,
font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev(right.labels), adj= 0.5, tick=FALSE,
las = 1, cex.axis=1, font.axis=1, line=-1)
cls.labels2.pass3 = rbind(cls.labels2.pass2[1,], top.genes.vectors+1)
cls.list2.pass3 = rbind(cls.list2.pass2[1,], ifelse(top.genes.vectors==0, "WT", "MUT"))
MI.results = mutual.inf.3.v2( explained,
m2.pass2) ## must subtract 1 from the indices because bin.gene.matrix.3
## doesn't include SUMMARY
#target.vector.name=phen.pass1[ind.master],
# n.randomizations = n.randomizations)
# g.order.pass3.top40
phen.descs.pass3 = ifelse( is.nan(MI.results$MI), " - ", signif(MI.results$MI, digits=3))
# phen.descs2.pass3 = phen.descs.pass3[g.order.pass3]
print(proc.time()-t1)
print(date())
dev.off()
}else{ print("skipping iterative method!")}
if( do.mRMR == T){
print("--- Begin Step 3 (min redundancy Max Relevance) ---")
explained.MI.initial = mutual.inf.2(explained.initial, m2.pass2[1,])/MI.ref
print(paste("explained.MI.initial =", explained.MI.initial))
print(explained.initial)
relevance = mutual.inf.3.v2( m2.pass2[1,], bin.gene.matrix.3, pos.and.neg=T)$MI
redundancy = mutual.inf.3.v2( explained.initial, bin.gene.matrix.3, pos.and.neg=F)$MI
print(proc.time()-t1)
print(date())
#browser()
MI.D = relevance - redundancy # Mutual Information Difference
MI.D.string = "MI.D=rel-red"
MI.D.order = order(MI.D, decreasing=TRUE, na.last=TRUE)
top.MI.D = MI.D[MI.D.order[1]]
top.ind.MI.D = which(MI.D == top.MI.D)
top.gene.MI.D = paste(phen.names[ top.ind.MI.D[1]+2 ], "+", length(top.ind.MI.D)-1, "others")
## Plot and iterate with MI.D first
print("Plot and iterate with MI.D first")
#quartz(height=8.5, width=11)
pdf(file=paste(results.dir, test.file.prefix, file.suffix,
".Step3_", MI.D.string, ".pdf", sep=""), height=8.5, width=11)
explained.with.top.MI.D = ifelse(explained.initial + bin.gene.matrix.3[MI.D.order[1],]>=1, 1, 0)
explained.with.top.MI.D.MI = mutual.inf.2(explained.with.top.MI.D, m2.pass2[1,])/MI.ref
MI.diff = explained.with.top.MI.D.MI - explained.MI.initial
# top.diffs = c(top.diffs, MI.diff)
print(paste("Explained.MI = ", explained.with.top.MI.D.MI,
" MI.diff = ", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), sep=""))
print(ifelse(cls.list2.pass2[1,]=="WT", 0,2) + bin.gene.matrix.3[MI.D.order[1],])
top10.names = c("explained with top result ", paste(phen.names[ MI.D.order[1:10]+2 ], " "))
top10.MI = c( paste(" MI = ",
signif(explained.with.top.MI.D.MI, digits=4),
" diff:", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), sep=""),
paste(" MI:", signif(relevance[MI.D.order[1:10]], digits=4),
" MI.D:", signif(MI.D[MI.D.order[1:10]], digits=4), sep=""))
# top.genes.MI = c(top.genes.MI, paste(" ", signif(top.MI.D, digits=4),
# #" (", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), ")",
# sep="") )
# top.genes.names = c(top.genes.names,
# paste(phen.names[ top.ind.MI.D[1]+2 ], "+", length(top.ind.MI.D)-1, "others "))
# top.genes.vectors = rbind(bin.gene.matrix.3[MI.D.order[1],])
top10.labels = rbind( ifelse(cls.list2.pass2[1,]=="WT", 0,1),
explained.with.top.MI.D, bin.gene.matrix.3[MI.D.order[1:10],]) + 1
par(mar = c(1, 12, 1, 12))
#par(mar = c(1, 10, 1, 5))
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(1, 5), FALSE)
max.v <- max(max(m2.pass2[1,]), -min(m2.pass2[1,]))
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2,
(ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))
image(1:length(m2.pass2[1,]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE, main=MI.D.string, sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 <- top10.labels
V1 <- apply(V1, MARGIN=2, FUN=rev)
image(1:length(m2.pass2[1,]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black, axes=FALSE,
main=paste("iteration:", 0), sub = "", xlab= "", ylab="")
axis(2, at=1:dim(V1)[1], labels=rev(c(paste(phen.names.pass2[-1], collapse=" "), top10.names)),
adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev(c(phen.descs.pass2[1], top10.MI)),
adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
dev.off()
#browser()
## Plot MI.Q next
# browser()
print("Plot MI.Q next")
MI.Q = (relevance)/(redundancy) # Mutual Information Quotient
MI.Q.string = "MI.Q=(rel)|(red)"
MI.Q.order = order(MI.Q, decreasing=TRUE, na.last=TRUE)
top.MI.Q = MI.Q[MI.Q.order[1]]
top.ind.MI.Q = which(MI.Q == top.MI.Q)
top.gene.MI.Q = paste(phen.names[ top.ind.MI.Q[1]+2 ], "+", length(top.ind.MI.Q)-1, "others")
pdf(file=paste(results.dir, test.file.prefix,
file.suffix, ".Step3", MI.Q.string, ".pdf", sep=""), height=8.5, width=11)
explained.with.top.MI.Q = ifelse(explained.initial + bin.gene.matrix.3[MI.Q.order[1],]>=1, 1, 0)
explained.with.top.MI.Q.MI = mutual.inf.2(explained.with.top.MI.Q, m2.pass2[1,])/MI.ref
MI.diff = explained.with.top.MI.Q.MI - explained.MI.initial
#top.diffs = c(top.diffs, MI.diff)
print(paste("Explained.MI =", explained.with.top.MI.Q.MI,
" MI.diff =", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), sep=""))
print(ifelse(cls.list2.pass2[1,]=="WT", 0,2) + bin.gene.matrix.3[MI.Q.order[1],])
top10.names = c("explained with top result", paste(phen.names[ MI.Q.order[1:10]+2 ], " "))
top10.MI = c( paste(" MI:",
signif(explained.with.top.MI.Q.MI, digits=4),
" diff:", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), sep=""),
paste(" MI:", signif(relevance[MI.Q.order[1:10]], digits=4),
" MI.Q:", signif(MI.Q[MI.Q.order[1:10]], digits=4), sep=""))
top10.labels = rbind( ifelse(cls.list2.pass2[1,]=="WT", 0,1),
explained.with.top.MI.Q, bin.gene.matrix.3[MI.Q.order[1:10],]) +1
#top.genes.MI = c(top.genes.MI, paste(" ", signif(top.MI.Q, digits=4),
#" (", ifelse(MI.diff<0, "-", "+"), signif(abs(MI.diff), digits=4), ")",
# sep=""))
#top.genes.names = c(top.genes.names,
# paste(phen.names[ top.ind.MI.Q[1]+2 ], "+", length(top.ind.MI.Q)-1, "others "))
#top.genes.vectors = rbind(bin.gene.matrix.3[MI.Q.order[1],])
#quartz(height=8.5, width=11)
par(mar = c(1, 12, 1, 12))
#par(mar = c(1, 10, 1, 5))
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(1, 5), FALSE)
max.v <- max(max(m2.pass2[1,]), -min(m2.pass2[1,]))
V1 <- c( (ncolors/2)*normalize(m2.pass2[1,1:mid.point]) + ncolors/2, (ncolors/2)*normalize(m2.pass2[1,(mid.point+1):Ns]))
image(1:length(m2.pass2[1,]), 1:1, as.matrix(V1),
zlim = c(0, ncolors), col=mycol, axes=FALSE, main=MI.Q.string, sub = "", xlab= "", ylab="")
axis(2, at=1:1, labels=pathway.name, adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
V1 <- top10.labels
V1 <- apply(V1, MARGIN=2, FUN=rev)
image(1:length(m2.pass2[1,]), 1:dim(V1)[1], t(V1),
zlim = c(0, length(grey.and.black)), col=grey.and.black, axes=FALSE, main=paste("iteration:", 0), sub = "", xlab= "", ylab="")
axis(2, at=1:dim(V1)[1], labels=rev(c(paste(phen.names.pass2[-1], collapse=" "), top10.names)), adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
axis(4, at=1:dim(V1)[1], labels=rev(c(phen.descs.pass2[1], top10.MI)), adj= 0.5, tick=FALSE, las = 1, cex.axis=1, font.axis=1, line=-1)
dev.off()
} else{ print("Skipped min redundancy Max Relevance!") }
if (!is.na(output.dataset)) {
# V.GCT <- m.all
print("Figure out why model.descs2.pass1 does not correspond to the rows of m.all")
browser()
# colnames(V.GCT) <- sample.names2
# row.names(V.GCT) <- model.names.pass1
write.gct(gct.data.frame = m.all, descs = model.descs2.pass1, filename =output.dataset)
}
}
## OPAM.sort.projection.by.score.8
##
## Args (interface unchanged):
##   input.all.pathways.ds : GCT file of pathway projections for all pathways (required).
##   input.cls             : CLS phenotype file (required).
##   tissue                : label used in plot titles only.
##   results.dir           : output directory for the Phase-1 PDF.
##   normalize.score       : kept for compatibility; rows are always normalized via normalize().
##   model                 : row name used for ordering when MI cannot be computed
##                           (i.e. when every sample carries an aberration).
##   target.phen/target.class : if target.phen is not NA, a summary binary class is
##                           built by summing the per-gene mutation indicators.
##   user.colors           : optional phenotype color map; falls back to CLS colors or brewer palettes.
##   decreasing.order, output.dataset, char.rescale, cmap.type, row.norm : plotting/output knobs.
##   u.gene.names.known    : phenotype (gene) names to evaluate in Pass 1.
##   n.randomizations      : passed through to mutual.inf.3.v2 for the per-gene MI step.
OPAM.sort.projection.by.score.8 <- function(
# input.ds,
# signatures = "NA",
input.all.pathways.ds,
input.cls,
tissue = "NA",
results.dir,
normalize.score = T,
# normalization.type = "zero.one",
model = "NA",
target.phen = NA,
target.class = NA,
user.colors = NA,
decreasing.order = T,
output.dataset = NA,
char.rescale = 1,
cmap.type = 3,
row.norm = T,
u.gene.names.known = "NA",
n.randomizations = 10
)
# Calls MSIG.HeatMapPlot.9 and makes a plot sorted by the highest-scoring
# signatures and abnormalities (gene mutations or copy number alterations)
# i.e. doesn't require a "model" to score by as OPAM.sort.projection.by.score.2 does.
# However, it *will* use "model" if it cannot calculate p-values on the gene signatures, which
# happens when every cell line has a genomic aberration.
#
# Runs 3 passes on the data:
# 1st pass: looks at the genes and copy number alterations specified by u.gene.names.known
# 2nd pass: looks at only the top abnormalities (using a p-value cutoff) from the 1st pass, and adjusts
# the PATHWAY.MUT vector accordingly (only according to the genes, not by copy number data)
# 3rd pass: Takes the winning signature from the 2nd pass and then looks all the genes available
#
# Very similar to OPAM.sort.projection.by.score.4, however this version uses mutual.inf instead of
# roc.area to calculate mutual information scores and p-values for PATHWAY.MUT, the vector of total genomic aberrations
# in all samples
#
# Differs from OPAM.sort.projection.by.score.6 by requiring the gct file of expression in
# all pathways by the input tissue ("input.all.pathways.ds")
#
# Differs from OPAM.sort.projection.by.score.7 by finding the top enriched pathways that differentiate according to phenotype
# from testing all the pathways in "input.all.pathways.ds." Does not require signatures to be known a priori.
{
library(gtools)
library(verification)
library(ROCR)
library(MASS)
library(RColorBrewer)
library(heatmap.plus)
# dataset <- MSIG.Gct2Frame(filename = input.ds) # Read gene expression dataset (GCT format)
# m <- data.matrix(dataset$ds)
# model.names <- dataset$row.names
## model.descs <- dataset$descs
# Ns <- length(m[1,])
# dim(m)
# sample.names <- dataset$names
## ---- Load the all-pathways projection matrix ----
dataset.all <- MSIG.Gct2Frame( filename = input.all.pathways.ds)
m.all <- data.matrix(dataset.all$ds)#[1:30,]
#model.names <- dataset.all$row.names#[1:30]
# Descriptions are used as (uniquified) row identifiers.
model.names <- make.unique(dataset.all$descs)
# Row-wise 0-1 normalization; rows that become all-NA are dropped.
m.all <- na.omit(t(apply(m.all, MARGIN=1, FUN=normalize)))
Ns = length(m.all[1,])
sample.names = dataset.all$names
# if( signatures == "NA" ){
# stop("Must provide a vector of signature names to evaluate, or specify 'ALL'")
# }
# if( signatures == "ALL"){
# model.names = model.names.all
# m = m.all
# model.descs = dataset.all$descs
# } else{
# model.names = signatures
# model.ind = match(signatures, model.names.all)
# m = m.all[model.ind,]
# model.descs = dataset.all$descs[model.ind]
## browser()
# }
# model.names = model.names.all
# m = m.all
model.descs = dataset.all$descs#[1:30]
n.models <- length(m.all[,1])
temp <- strsplit(input.all.pathways.ds, split="/") # Extract test file name
s <- length(temp[[1]])
test.file.name <- temp[[1]][s]
temp <- strsplit(test.file.name, split=".gct")
test.file.prefix <- temp[[1]][1]
char.res <- 0.013 * n.models + 0.65
# normalize scores
# if (normalize.score == T) {
# if (normalization.type == "zero.one") {
# for (i in 1:n.models) {
# m[i,] <- (m[i,] - min(m[i,]))/(max(m[i,]) - min(m[i,]))
# }
# } else if (normalization.type == "z.score") {
# for (i in 1:n.models) {
# m[i,] <- (m[i,] - mean(m[i,]))/sd(m[i,])
# }
# } else if (normalization.type == "r.z.score") {
# for (i in 1:n.models) {
# m[i,] <- (m[i,] - median(m[i,]))/mad(m[i,])
# }
# }
# }
## ---- Read phenotype (CLS) annotations ----
CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
cls.labels <- CLS$class.v
cls.phen <- CLS$phen
cls.list <- CLS$class.list
# browser()
if (is.vector(cls.labels)) {
n.phen <- 1
} else {
n.phen <- length(cls.labels[,1])
}
# Color precedence: user-supplied > CLS-embedded > stacked brewer palettes.
if (!is.na(user.colors[1])) {
c.test <- user.colors
} else {
if (!is.null(CLS$col.phen)) {
c.test <- CLS$col.phen
} else {
c.test <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"),
brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
brewer.pal(n=8, name="BuGn"))
}
}
# browser()
if (!is.null(CLS$phen.names)) {
phen.names <- CLS$phen.names
} else if( !is.null(CLS$phen.list)){
phen.names = CLS$phen.list
} else {
phen.names <- "NA"
}
cls.phen.index <- unlist(cls.phen)
cls.phen.colors <- c.test[1:length(cls.phen.index)]
# print("cls.phen.colors:")
# print(cls.phen.colors)
n.classes <- vector(length=n.phen, mode="numeric")
if (n.phen == 1) {
max.classes <- length(cls.phen)
n.classes[1] <- max.classes
} else {
max.classes <- max(unlist(lapply(cls.phen, FUN=length)))
for (i in 1:n.phen) {
n.classes[i] <- length(cls.phen[[i]])
}
}
print("--- Begin Pass 1 ---")
# model.names.original = model.names
# m.original = m
# NOTE(review): n.phen.pass1 is taken from u.gene.names.known BEFORE the
# intersection with phen.names below; if some requested genes are absent the
# two lengths disagree -- confirm this is intended.
phen.pass1 = u.gene.names.known
n.phen.pass1 = length(u.gene.names.known)
ind.phen.pass1 = which( phen.names %in% phen.pass1 )
phen.pass1 = phen.names[ind.phen.pass1]
# phen.pass1 = c( "SUMMARY", phen.pass1)
# browser()
MI.list.pass1 = vector( length=n.models, mode="numeric" )
FDR.list.pass1 = vector( length=n.models, mode="numeric" )
if( !is.vector(cls.labels)){
cls.list.pass1 = cls.list[ind.phen.pass1,]
cls.labels.pass1 = cls.labels[ind.phen.pass1,]
} else{
cls.list.pass1 = cls.list
cls.labels.pass1 = cls.labels
}
# Binary indicator matrix: 0 = wild type, 1 = any aberration.
cls.list.pass1.2 = t(as.matrix(ifelse(cls.list.pass1 == "WT", 0, 1)))
# browser()
if (!is.na(target.phen)) {
if( is.vector(cls.list.pass1.2)){ bin.class.pass1 = cls.list.pass1.2
} else { bin.class.pass1 = apply( cls.list.pass1.2, MARGIN=2, FUN=sum) }
# Normalize bin.class.pass1
if( length(unique(bin.class.pass1)) > 1){
bin.class.pass1 = (
bin.class.pass1 - min(bin.class.pass1))/(max(bin.class.pass1) - min(bin.class.pass1))
} else if ( length(unique(bin.class.pass1)) == 1){
# FIX: was 'bin.class = rep(...)', which assigned a never-used variable and
# left bin.class.pass1 un-normalized on the constant-class branch.
bin.class.pass1 = rep(1, length(cls.list[1,]))
}
if( is.vector( cls.list.pass1) ){
cls.list.pass1 = ifelse(bin.class.pass1 > 0, "MUT", "WT")
} else{ cls.list.pass1[1,] = ifelse(bin.class.pass1 > 0, "MUT", "WT") }
} else {
# browser()
# NOTE(review): cls.phen[1] may be a list element (not a scalar string) when
# cls.phen is a list -- verify this comparison against MSIG.ReadPhenFile.2 output.
bin.class.pass1 <- ifelse(cls.list == cls.phen[1], 0, 1)
}
# browser()
# MI.ref.models.pass1 = mutual.inf.2(bin.class.pass1, bin.class.pass1)$MI
# print(paste("MI.ref.models.pass1 =", MI.ref.models.pass1))
# browser()
model.descs2.pass1 = vector(length = n.models, mode="character")
pdf(file=paste(tissue, test.file.name, ".Phase1", "pdf", sep="."))
#browser()
# Rows beyond the top 20 are dropped from the Pass-1 heatmap.
skipped.indices = 21:(n.models)
# browser()
if( length(unique(bin.class.pass1)) > 1 ){
# signature.ind = which(rownames(m.all) %in% signatures)
# MI of the binary class against every pathway row.
MI.results = mutual.inf.3.v2(bin.class.pass1, m.all) #signature.indices = 1:n.models, )
MI.list.pass1 = MI.results$MI
# FDR.list.pass1 = MI.results$FDR
# browser()
for (i in 1:n.models) {
MI.signif <- signif(MI.list.pass1[i], digits=3)
# FDR.signif <- signif(FDR.list.pass1[i], digits=3)
model.descs2.pass1[i] <- paste(MI.signif, sep="")
}
# browser()
# m.order.pass1 = order(MI.list.pass1, decreasing=TRUE, na.last=TRUE)
# m.order.pass1 = order(MI.list.pass1, decreasing=TRUE, na.last=TRUE)
m.order.pass1 = order(MI.list.pass1, decreasing=FALSE, na.last=TRUE)
m.order.pass1.top10 = m.order.pass1[-skipped.indices]
# m.order.pass1 = 1:n.models
m2.pass1 <- m.all[m.order.pass1, ]
# Samples ordered by the top-ranked pathway's score.
s.order.pass1 <- order(m2.pass1[1,], decreasing = FALSE )
# s.order.pass1 = 1:Ns
m2.pass1 <- m2.pass1[-skipped.indices, s.order.pass1]
# m2.pass1.top10 = m2.pass1[-skipped.indices,]
} else{
# MI undefined for a constant class: fall back to ordering by "model".
MI.list.pass1 = rep(NA, n.models)
# FDR.list.pass1 = rep(NA, n.models)
model.descs2.pass1 = rep(" - ", n.models)
loc <- match(model, model.names)
s.order.pass1 <- order(m.all[loc,], decreasing = decreasing.order)
# loc = s.order.pass1[1]
# s.order.pass1 = 1:Ns
m2.pass1 <- m.all[, s.order.pass1]
correl <- cor(t(m2.pass1))[, loc]
m.order.pass1 <- order(correl, decreasing=T)
# m.order.pass1 = 1:n.models
m2.pass1 <- m2.pass1[m.order.pass1, ]
# FIX: m.order.pass1.top10 was only defined on the MI branch but is used
# unconditionally below; on this branch keep the full correlation ordering.
m.order.pass1.top10 = m.order.pass1
}
# browser()
dev.off()
# skipped.indices = 11:(n.models-10)
# browser()
MI.list.pass1.top10 = MI.list.pass1[m.order.pass1.top10]
MI.list.pass1 = MI.list.pass1[m.order.pass1]
# FDR.list.pass1.top10 = FDR.list.pass1[m.order.pass1.top10]
# FDR.list.pass1 = FDR.list.pass1[m.order.pass1]
bin.class.pass1 = bin.class.pass1[s.order.pass1]
# m2.pass1 <- m2.pass1[m.order.pass1, ]
model.descs2.pass1.top10 = model.descs2.pass1[m.order.pass1.top10]
model.descs2.pass1 = model.descs2.pass1[m.order.pass1]
sample.names2.pass1 <- colnames(m2.pass1)
model.names.pass1.top10 <- rownames(m2.pass1)
print(matrix(c(model.names.pass1.top10, model.descs2.pass1.top10), ncol=2), quote=F)
# browser()
# Re-order phenotype annotations to match the new sample order.
if (is.vector(cls.labels)) {
cls.labels2.pass1 <- cls.labels.pass1[s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[s.order.pass1]
} else {
cls.labels2.pass1 <- cls.labels.pass1[, s.order.pass1]
cls.list2.pass1 <- cls.list.pass1[, s.order.pass1]
}
m.all = m.all[, s.order.pass1]
# browser()
winning.model.ind.pass1 = which(model.names.pass1.top10[1] == rownames(m.all))
# pathway.name <- "KRAS_ALL_UP"
# pathway <- m[1,]
# pathway0 <- ifelse(pathway < median(pathway), 0, 1) # disctretized version
# MI.ref.genes.pass1 <- mutual.inf.2(m[1,], m[1,])$MI
# browser()
# m.score.pass1 <- m2.pass1[1,]
# m.score.norm.pass1 <- (m.score.pass1 - min(m.score.pass1))/(max(m.score.pass1) - min(m.score.pass1))
# m.score.pass1 = ifelse( m.score.pass1 < median(m.score.pass1), -1, 1) # discretized version
# MI.ref.genes.pass1 <- mutual.inf.2(m.score.norm.pass1, m.score.norm.pass1)$MI
# print(paste("MI.ref.genes.pass1 =", MI.ref.genes.pass1))
MI.list.phen.pass1 = vector(mode="numeric", length=n.phen.pass1)
# FDR.list.phen.pass1 = vector(mode="numeric", length=n.phen.pass1)
phen.descs2.pass1 = vector(mode="character", length=n.phen.pass1)
if( length(unique(bin.class.pass1)) > 1){
# MI.results <-(mutual.inf.3(bin.class.pass1, m.all,
# winning.model.ind.pass1, gene.target.name = phen.pass1[1]))#/MI.ref.genes.pass1
MI.signif <- signif(MI.list.pass1[1], digits=3)
MI.list.phen.pass1[1] = MI.list.pass1[1]
# FDR.signif <- signif(FDR.list.pass1[1], digits=3)
# FDR.list.phen.pass1[1] = FDR.list.pass1[1]
} else{
MI.signif <- "-"
MI.list.phen.pass1[1] = NA
# FDR.signif <- "- "
# FDR.list.phen.pass1[1] = NA
}
print(paste(format(phen.pass1[1], width=12), "mutual.inf =", MI.signif))
phen.descs2.pass1[1] <- paste(MI.signif)
# browser()
if( n.phen >= 2 ){
# Per-gene MI: group genes by their number of aberrant samples and score
# each group against the winning pathway.
bin.gene.matrix = ifelse(cls.list2.pass1[-1,]=="WT", 0, 1)
n.aberrations = apply(bin.gene.matrix, MARGIN=1, FUN=sum)
u.n.aberrations = unique(n.aberrations[n.aberrations != 0])
for( i in 1:length(u.n.aberrations)){
ind.without.SUMMARY = which(n.aberrations == u.n.aberrations[i])
# +1 shifts gene-matrix indices back to full phenotype-row indices
# (row 1 is the SUMMARY row, excluded from bin.gene.matrix).
ind.master = ind.without.SUMMARY + 1
# browser()
# bin.gene.matrix.temp = bin.gene.matrix[ind.without.SUMMARY,]
MI.results = mutual.inf.3.v2(bin.gene.matrix[ind.without.SUMMARY,],
m.all, winning.model.ind.pass1, gene.target.name=phen.pass1[ind.master],
n.randomizations = n.randomizations)
MI.list.phen.pass1[ind.master] = MI.results$MI
# FDR.list.phen.pass1[ind.master] = MI.results$FDR
for( j in 1:length(ind.master)){
# FIX: was 'phen.descs.pass1' (never declared); the declared and plotted
# vector is phen.descs2.pass1.
phen.descs2.pass1[ind.master[j]] =
paste( signif(MI.results$MI[j], digits=3),
sep="")
}
}
ind.zeros = which(n.aberrations==0) + 1
MI.list.phen.pass1[ind.zeros] = NA
# FDR.list.phen.pass1[ind.zeros] = NA
# FIX: same rename as above ('phen.descs.pass1' -> 'phen.descs2.pass1').
phen.descs2.pass1[ind.zeros] = " - "
}
phen.names.pass1 = phen.pass1#[g.order.pass1]#[1:n.phen.pass1]
# browser(text="Figure out how to print phen.descs.pass1 and phen.names.pass1 in a nice table")
# Recompute cls.list2 as some mutations or copy numbers may have been removed
# Recompute cls.phen and cls.labels2 as order may have changed
cls.phen2.pass1 <- NULL
if (is.vector(cls.labels)) {
classes <- unique(cls.list2.pass1)
cls.phen2.pass1 <- classes
cls.labels2.pass1 <- match(cls.list2.pass1, cls.phen2.pass1)
} else {
for (kk in 1:length(cls.list2.pass1[, 1])) {
classes <- unique(cls.list2.pass1[kk,])
# cls.phen2[[kk]] <- classes
cls.phen2.pass1 <- c(cls.phen2.pass1, classes)
cls.labels2.pass1[kk,] <- match(cls.list2.pass1[kk,], classes)
}
}
# cls.labels2.pass1 = cls.labels2.pass1[1:n.phen.pass1,]
# browser()
# correl <- cor(t(m2))[, loc]
# m.order <- order(correl, decreasing=decreasing.order)
# correl2 <- correl[m.order]
# model.descs2 <- paste(model.descs[m.order], signif(correl2, digits=3))
## ---- Phenotype class colors for the heatmap legend ----
phen.list.pass1 <- unlist(cls.phen2.pass1)
colors.list.pass1 = rep( "gray", length(phen.list.pass1))
colors.list.pass1[phen.list.pass1=="MUT"] = cls.phen.colors[1]
colors.list.pass1[phen.list.pass1=="DEL"] = cls.phen.colors[3]
colors.list.pass1[phen.list.pass1=="AMP"] = cls.phen.colors[4]
colors.list.pass1[phen.list.pass1=="ALT"] = cls.phen.colors[5]
# browser()
# colors.list.pass1[1,] = grey(bin.class.pass1)
# print("cls.phen2:")
# print(unlist(cls.phen2))
#
# print("cls.phen:")
# print(unlist(cls.phen))
#
# print("colors.list:")
# print(colors.list)
# browser()
filename <- paste(results.dir, test.file.prefix, ".Phase1.MI|HXY", sep="")
# pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 9)
pdf(file=paste(filename, ".pdf", sep=""), height = 11, width = 17 )
# browser()
# windows(width=12, height=8)
MSIG.HeatMapPlot.9(V = m2.pass1,
# pathway.mut = bin.class.pass1,
row.names = model.names.pass1.top10,
row.names2 = model.descs2.pass1.top10,
col.labels = cls.labels2.pass1,
col.classes = cls.phen2.pass1,
phen.cmap = colors.list.pass1,
phen.names = phen.names.pass1,
phen.names2 = phen.descs2.pass1,
col.names = sample.names2.pass1,
main = paste(tissue, test.file.prefix),
xlab=" ", ylab=" ", row.norm = row.norm,
cmap.type = cmap.type, char.rescale = char.rescale, legend=F)
dev.off()
# browser()
if (!is.na(output.dataset)) {
# V.GCT <- m.all
# colnames(V.GCT) <- sample.names2
# row.names(V.GCT) <- model.names2
write.gct(gct.data.frame = m.all, descs = model.descs2.pass1, filename = paste(output.dataset, ".gct", sep=""))
write.cls.2( class.v = cls.labels2.pass1, phen = cls.phen, filename = paste(output.dataset, ".cls", sep=""))
}
}
MSIG.HeatMapPlot.11 <- function(
## For Plotting expression heatmap only!! (No phenotypes)
V,
row.names = "NA",
row.names2 = "NA",
col.labels = "NA",
col.labels2 = "NA",
col.classes = "NA",
phen.cmap = NULL,
col.names = "NA",
phen.names = "NA",
phen.names2 = "NA",
main = " ",
sub = " ",
xlab=" ",
ylab=" ",
row.norm = TRUE,
char.rescale = 0.85,
cmap.type = 1, # 1 = vintage pinkogram, 2 = scale of blues, 3 = high-resolution pinkogram for scores or probabilities [0, 1], 4 = high-resolution pinkogram for general values, 5 = color map for normalized enrichment scores, 6 = scale of red purples, 7 = scale of Oranges, 8 = scale of Greens, 9 = scale of Blues
max.v = "NA",
legend = T)
{
#
# Plots a heatmap "pinkogram" of a gene expression matrix including phenotype vector and gene, sample and phenotype labels
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
n.rows <- length(V[,1])
n.cols <- length(V[1,])
V1 <- matrix(0, nrow=n.rows, ncol=n.cols)
# if ((cmap.type == 5) | (cmap.type == 3)) {
if (cmap.type == 5) {
row.norm <- F
}
if (row.norm == TRUE) {
row.mean <- apply(V, MARGIN=1, FUN=mean)
row.sd <- apply(V, MARGIN=1, FUN=sd)
row.n <- length(V[,1])
for (i in 1:n.rows) {
if (row.sd[i] == 0) {
V1[i,] <- 0
} else {
V1[i,] <- (V[i,] - row.mean[i])/(0.333 * row.sd[i])
}
V1[i,] <- ifelse(V1[i,] < -4, -4, V1[i,])
V1[i,] <- ifelse(V1[i,] > 4, 4, V1[i,])
}
} else {
V1 <- V
}
if (cmap.type == 1) {
mycol <- c("#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA",
"#FF9DB0", "#FF7080",
"#FF5A5A", "#FF4040", "#FF0D1D") # blue-pinkogram colors. This is the 1998-vintage,
# pre-gene cluster, original pinkogram color map
} else if (cmap.type == 2) {
violet.palette <- colorRampPalette(c("#400030", "white"), space = "rgb")
mycol <- rev(violet.palette(20))
# mycol <- c("#FCFBFD","#F4F2F8","#F8F7FB","#EFEDF5","#E1E1EF","#E8E7F2","#DADAEB","#C6C7E1","#D0D1E6",
# "#BCBDDC","#A8A6CF",
# "#B2B2D6","#9E9AC8","#8A87BF","#9491C4","#807DBA","#7260AB","#796FB3","#6A51A3","#5C3596",
# "#63439D","#54278F","#460D83","#4D1A89","#3F007D")
} else if (cmap.type == 6) {
mycol <- c("#FFF7F3", "#FDE0DD", "#FCC5C0", "#FA9FB5", "#F768A1", "#DD3497", "#AE017E",
"#7A0177", "#49006A")
} else if (cmap.type == 7) {
mycol <- c("#FFF5EB", "#FEE6CE", "#FDD0A2", "#FDAE6B", "#FD8D3C", "#F16913", "#D94801",
"#A63603", "#7F2704")
} else if (cmap.type == 8) {
mycol <- c("#F7FCF5", "#E5F5E0", "#C7E9C0", "#A1D99B", "#74C476", "#41AB5D", "#238B45",
"#006D2C", "#00441B")
} else if (cmap.type == 9) {
mycol <- c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5",
"#08519C", "#08306B")
} else if ((cmap.type == 3) | (cmap.type == 4) | (cmap.type == 5)) {
mycol <- vector(length=512, mode = "numeric")
for (k in 1:256) {
mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
}
for (k in 257:512) {
mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
}
mycol <- rev(mycol)
}
ncolors <- length(mycol)
if (cmap.type == 5) {
if (max.v == "NA") {
max.v <- max(max(V1), -min(V1))
}
V2 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
} else {
V2 <- ceiling(ncolors * (V1 - min(V1))/(1.001*(max(V1) - min(V1))))
}
if (col.labels[1] == "NA") {
heatm <- matrix(0, nrow = n.rows, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
tot.cols <- ncolors
browser()
if (legend == T) {
nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(5, 1), heights = c(10, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(8, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap[1:length(col.classes)])
image(1:n.cols, 1:n.rows, t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
n.rows.phen <- 0
} else {
tot.cols <- ncolors
if (is.vector(col.labels)) {
heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
n.rows.phen <- 1
heatm[n.rows + 1,] <- tot.cols + col.labels
cols.row <- length(unique(col.labels))
tot.cols <- tot.cols + cols.row
phen.cmap <- phen.cmap[1:cols.row]
} else {
n.rows.phen <- length(col.labels[,1])
cols.row <- vector(length=n.rows.phen, mode = "numeric")
heatm <- matrix(0, nrow = n.rows + n.rows.phen, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
for (k in seq(n.rows + n.rows.phen, n.rows + 1, -1)) {
heatm[k,] <- tot.cols + col.labels[n.rows + n.rows.phen - k + 1,]
cols.row[n.rows + n.rows.phen - k + 1] <- length(unique(col.labels[n.rows + n.rows.phen - k + 1,]))
tot.cols <- tot.cols + cols.row[n.rows + n.rows.phen - k + 1]
# print(c("col:", k, ":", tot.cols + col.labels[n.rows + n.rows.phen - k + 1,], "tot.cols:", tot.cols))
}
phen.cmap <- phen.cmap[1:sum(unlist(lapply(col.classes, length)))]
}
if (legend == T) {
# nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(10, 2), heights = c(6, 1), respect = FALSE)
nf <- layout(matrix(c(1, 2, 3), 3, 1, byrow=T), heights = c(8, 4, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(5, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap)
image(1:n.cols, 1:(n.rows + n.rows.phen), t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
}
# Add lines to separate phenotypes or subgroups
if (col.labels2[1] != "NA") {
groups <- split(col.labels2, col.labels2)
len.vec <- lapply(groups, length)
plot.div <- c(0.51, cumsum(len.vec) + 0.5)
for (i in plot.div) {
lines(c(i, i), c(0, n.rows + n.rows.phen + 0.48), lwd = 2, cex = 0.9, col = "black")
}
lines(c(0.51, n.cols + 0.49), c(0.51, 0.51), lwd = 2, cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + n.rows.phen + 0.48, n.rows + n.rows.phen + 0.48), lwd = 2,
cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + 0.50, n.rows + 0.50), lwd = 2,
cex = 0.9, col = "black")
}
if (row.names[1] != "NA") {
# browser()
numC <- nchar(row.names)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names[i] <- substr(row.names[i], 1, 40)
row.names[i] <- paste(row.names[i], " ", sep="")
}
if (phen.names[1] == "NA") {
head.names <- paste("Class", seq(n.rows.phen, 1, -1))
} else {
head.names <- as.character(rev(phen.names))
}
row.names <- c(row.names[seq(n.rows, 1, -1)], head.names)
# print(paste("n.rows:", n.rows))
# print(paste("Phen names:", phen.names))
# print(paste("Head names:", head.names))
# print(paste("Row names:", row.names))
axis(2, at=1:(n.rows + n.rows.phen), labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char,
font.axis=2, line=-1)
}
if (row.names2[1] != "NA") {
# browser()
numC <- nchar(row.names2)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names2[i] <- substr(row.names2[i], 1, 40)
row.names2[i] <- paste(" ", row.names2[i], sep="")
}
for( i in 1:n.rows.phen ){
phen.names2[i] <- substr(phen.names2[i], 1, 40)
phen.names2[i] <- paste( " ", phen.names2[i], sep="")
}
row.names2 <- rev(row.names2)
phen.names2 <- rev(phen.names2)
axis(4, at=1:(n.rows + n.rows.phen), labels=c(row.names2, phen.names2), adj= 0.5, tick=FALSE, las = 1,
cex.axis=size.row.char, font.axis=2, line=-1)
}
if (col.names[1] != "NA") {
size.col.char <- char.rescale*20/(n.cols + 25)
axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
}
# Phenotype Legend
# print("--------------------------------------------------------------------------------------------")
if (legend == T) {
leg.txt <- NULL
p.vec <- NULL
c.vec <- NULL
c2.vec <- NULL
ind <- 1
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
for (i in 1:n.rows.phen) {
if (is.vector(col.labels)) {
phen.v <- as.character(col.classes)
} else {
phen.v <- as.character(col.classes[[i]])
}
p.name <- paste(as.character(rev(head.names)[i]), ": ", sep="")
leg.txt <- c(p.name, phen.v)
p.vec <- rep(22, cols.row[i] + 1)
c.vec <- c("#FFFFFF", phen.cmap[ind:(ind + cols.row[i] - 1)])
c2.vec <- c("#FFFFFF", rep("black", cols.row[i]))
ind <- ind + cols.row[i]
offset <- 0.07
legend(x=0, y= 1 - offset*i,
horiz = T, x.intersp = 0.5, legend=leg.txt, bty="n", xjust=0, yjust= 1, pch = p.vec,
pt.bg = c.vec, col = c2.vec, cex = 1.20, pt.cex=1.75)
}
}
# Color map legend
# print(c("range V=", range(V)))
# print(c("range V1=", range(V1)))
# print(c("range V2=", range(V2)))
par(mar = c(2, 12, 2, 12))
num.v <- 20
range.v <- range(V2)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v <- matrix(rev(seq(range.v[2], range.v[1], incr)), nrow=num.v, ncol=1)
image(1:num.v, 1:1, heatm.v, zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=" ", sub = " ", xlab= ylab, ylab=xlab)
range.v <- range(V1)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v2 <- matrix(signif(rev(seq(range.v[2], range.v[1], incr)), digits=2), nrow=num.v, ncol=1)
# print(c("heatm.v2=", heatm.v2))
axis(3, at=1:num.v, labels=heatm.v2, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.5*char.rescale, font.axis=1)
return()
}
MSIG.HeatMapPlot.9 <- function(
V,
row.names = "NA",
row.names2 = "NA",
col.labels = "NA",
col.labels2 = "NA",
col.classes = "NA",
phen.cmap = "NA",
col.names = "NA",
phen.names = "NA",
phen.names2 = "NA",
main = " ",
sub = " ",
xlab=" ",
ylab=" ",
row.norm = TRUE,
char.rescale = 0.85,
cmap.type = 1, # 1 = vintage pinkogram, 2 = scale of blues, 3 = high-resolution pinkogram for scores or probabilities [0, 1], 4 = high-resolution pinkogram for general values, 5 = color map for normalized enrichment scores, 6 = scale of red purples, 7 = scale of Oranges, 8 = scale of Greens, 9 = scale of Blues
max.v = "NA",
legend = FALSE)
{
#
# Plots a heatmap "pinkogram" of a gene expression matrix including phenotype vector and gene, sample and phenotype labels
# Doesn't plot the spectrum on the bottom (the color-scale strip code is commented out below)
#
# Arguments:
#   V            numeric matrix (rows = genes, columns = samples) to display
#   row.names    left-side row labels ("NA" sentinel = none)
#   row.names2   right-side row labels ("NA" = none)
#   col.labels   per-column phenotype codes: a vector (one phenotype row) or a
#                matrix (one phenotype row per matrix row); "NA" = no phenotype row
#   col.labels2  secondary per-column grouping, used only to draw separator lines
#   col.classes  phenotype class names (vector, or a list when col.labels is a matrix)
#   phen.cmap    colors assigned to the phenotype classes
#   col.names    column (sample) labels ("NA" = none)
#   phen.names   names shown for the phenotype rows ("NA" = "Class k")
#   phen.names2  right-side names for the phenotype rows ("NA" = none)
#   row.norm     standardize each row before mapping to colors?
#   char.rescale scaling factor applied to axis-label character sizes
#   cmap.type    color map selector (see inline comment on the parameter above)
#   max.v        symmetric color limit for cmap.type 5 ("NA" = derive from data)
#   legend       draw a phenotype legend panel?
# Returns NULL; the plot is emitted on the active graphics device.
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
n.rows <- length(V[,1])
n.cols <- length(V[1,])
V1 <- matrix(0, nrow=n.rows, ncol=n.cols)
# if ((cmap.type == 5) | (cmap.type == 3)) {
if (cmap.type == 5) {
row.norm <- FALSE  # NES color map displays the raw values
}
if (row.norm == TRUE) {
# Standardize each row as (x - mean)/(0.333 * sd) and clamp to [-4, 4];
# constant rows (sd == 0) map to 0.
row.mean <- apply(V, MARGIN=1, FUN=mean)
row.sd <- apply(V, MARGIN=1, FUN=sd)
row.n <- length(V[,1])
for (i in 1:n.rows) {
if (row.sd[i] == 0) {
V1[i,] <- 0
} else {
V1[i,] <- (V[i,] - row.mean[i])/(0.333 * row.sd[i])
}
V1[i,] <- ifelse(V1[i,] < -4, -4, V1[i,])
V1[i,] <- ifelse(V1[i,] > 4, 4, V1[i,])
}
} else {
V1 <- V
}
# Build the color map selected by cmap.type
if (cmap.type == 1) {
mycol <- c("#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA",
"#FF9DB0", "#FF7080",
"#FF5A5A", "#FF4040", "#FF0D1D") # blue-pinkogram colors. This is the 1998-vintage,
# pre-gene cluster, original pinkogram color map
} else if (cmap.type == 2) {
violet.palette <- colorRampPalette(c("#400030", "white"), space = "rgb")
mycol <- rev(violet.palette(20))
# mycol <- c("#FCFBFD","#F4F2F8","#F8F7FB","#EFEDF5","#E1E1EF","#E8E7F2","#DADAEB","#C6C7E1","#D0D1E6",
# "#BCBDDC","#A8A6CF",
# "#B2B2D6","#9E9AC8","#8A87BF","#9491C4","#807DBA","#7260AB","#796FB3","#6A51A3","#5C3596",
# "#63439D","#54278F","#460D83","#4D1A89","#3F007D")
} else if (cmap.type == 6) {
mycol <- c("#FFF7F3", "#FDE0DD", "#FCC5C0", "#FA9FB5", "#F768A1", "#DD3497", "#AE017E",
"#7A0177", "#49006A")
} else if (cmap.type == 7) {
mycol <- c("#FFF5EB", "#FEE6CE", "#FDD0A2", "#FDAE6B", "#FD8D3C", "#F16913", "#D94801",
"#A63603", "#7F2704")
} else if (cmap.type == 8) {
mycol <- c("#F7FCF5", "#E5F5E0", "#C7E9C0", "#A1D99B", "#74C476", "#41AB5D", "#238B45",
"#006D2C", "#00441B")
} else if (cmap.type == 9) {
mycol <- c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5",
"#08519C", "#08306B")
} else if ((cmap.type == 3) | (cmap.type == 4) | (cmap.type == 5)) {
# 512-step high-resolution blue-white-red ramp built from RGB components
mycol <- vector(length=512, mode = "numeric")
for (k in 1:256) {
mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
}
for (k in 257:512) {
mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
}
mycol <- rev(mycol)
}
ncolors <- length(mycol)
if (cmap.type == 5) {
# NOTE(review): a numeric max.v is compared against the string "NA" via
# coercion; this works for the sentinel default but is fragile.
if (max.v == "NA") {
max.v <- max(max(V1), -min(V1))
}
V2 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
} else {
# Linear map of V1 onto color indices (1.001 keeps the max inside the top bin)
V2 <- ceiling(ncolors * (V1 - min(V1))/(1.001*(max(V1) - min(V1))))
}
if (col.labels[1] == "NA") {
# No phenotype row: plot the (vertically flipped) heatmap alone.
# FIX: heatm must have exactly n.rows rows; the previous "n.rows + 1" made
# t(heatm) n.cols x (n.rows + 1), which fails image()'s dimension check
# against y = 1:n.rows (compare the equivalent branch of the sibling
# MSIG.HeatMapPlot variant, which uses n.rows).
heatm <- matrix(0, nrow = n.rows, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
tot.cols <- ncolors
if (legend == TRUE) {
nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=TRUE), widths = c(5, 1), heights = c(10, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=TRUE), heights = c(8, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap[1:length(col.classes)])
image(1:n.cols, 1:n.rows, t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
n.rows.phen <- 0
} else {
tot.cols <- ncolors
if (is.vector(col.labels)) {
# Single phenotype row appended below the heatmap; phenotype codes are
# shifted past the heatmap color indices so they pick phen.cmap colors.
heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
n.rows.phen <- 1
heatm[n.rows + 1,] <- tot.cols + col.labels
cols.row <- length(unique(col.labels))
tot.cols <- tot.cols + cols.row
phen.cmap <- phen.cmap[1:cols.row]
} else {
# One phenotype row per row of the col.labels matrix, each with its own
# block of colors appended after the heatmap palette.
n.rows.phen <- length(col.labels[,1])
cols.row <- vector(length=n.rows.phen, mode = "numeric")
heatm <- matrix(0, nrow = n.rows + n.rows.phen, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
for (k in seq(n.rows + n.rows.phen, n.rows + 1, -1)) {
heatm[k,] <- tot.cols + col.labels[n.rows + n.rows.phen - k + 1,]
cols.row[n.rows + n.rows.phen - k + 1] <- length(unique(col.labels[n.rows + n.rows.phen - k + 1,]))
tot.cols <- tot.cols + cols.row[n.rows + n.rows.phen - k + 1]
# print(c("col:", k, ":", tot.cols + col.labels[n.rows + n.rows.phen - k + 1,], "tot.cols:", tot.cols))
}
phen.cmap <- phen.cmap[1:sum(unlist(lapply(col.classes, length)))]
}
if (legend == TRUE) {
# nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(10, 2), heights = c(6, 1), respect = FALSE)
nf <- layout(matrix(c(1, 2, 3), 3, 1, byrow=TRUE), heights = c(8, 4, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=TRUE), heights = c(5, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap)
image(1:n.cols, 1:(n.rows + n.rows.phen), t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
}
# Add lines to separate phenotypes or subgroups
if (col.labels2[1] != "NA") {
groups <- split(col.labels2, col.labels2)
len.vec <- lapply(groups, length)
plot.div <- c(0.51, cumsum(len.vec) + 0.5)
for (i in plot.div) {
lines(c(i, i), c(0, n.rows + n.rows.phen + 0.48), lwd = 2, cex = 0.9, col = "black")
}
lines(c(0.51, n.cols + 0.49), c(0.51, 0.51), lwd = 2, cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + n.rows.phen + 0.48, n.rows + n.rows.phen + 0.48), lwd = 2,
cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + 0.50, n.rows + 0.50), lwd = 2,
cex = 0.9, col = "black")
}
# Left axis: gene labels (truncated to 40 chars) plus phenotype-row headers.
# NOTE(review): head.names is created only inside this block; the legend
# section below reads it, so pass row.names whenever legend = TRUE.
if (row.names[1] != "NA") {
# browser()
numC <- nchar(row.names)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names[i] <- substr(row.names[i], 1, 40)
row.names[i] <- paste(row.names[i], " ", sep="")
}
if (phen.names[1] == "NA") {
head.names <- paste("Class", seq(n.rows.phen, 1, -1))
} else {
head.names <- as.character(rev(phen.names))
}
row.names <- c(row.names[seq(n.rows, 1, -1)], head.names)
# print(paste("n.rows:", n.rows))
# print(paste("Phen names:", phen.names))
# print(paste("Head names:", head.names))
# print(paste("Row names:", row.names))
axis(2, at=1:(n.rows + n.rows.phen), labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char,
font.axis=2, line=-1)
}
# Right axis: secondary row labels and phenotype names
if (row.names2[1] != "NA") {
# browser()
numC <- nchar(row.names2)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names2[i] <- substr(row.names2[i], 1, 40)
row.names2[i] <- paste(" ", row.names2[i], sep="")
}
for( i in 1:n.rows.phen ){
phen.names2[i] <- substr(phen.names2[i], 1, 40)
phen.names2[i] <- paste( " ", phen.names2[i], sep="")
}
row.names2 <- rev(row.names2)
phen.names2 <- rev(phen.names2)
axis(4, at=1:(n.rows + n.rows.phen), labels=c(row.names2, phen.names2), adj= 0.5, tick=FALSE, las = 1,
cex.axis=size.row.char, font.axis=2, line=-1)
}
# Bottom axis: sample labels
if (col.names[1] != "NA") {
size.col.char <- char.rescale*20/(n.cols + 25)
axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
}
# Phenotype Legend
# print("--------------------------------------------------------------------------------------------")
if (legend == TRUE) {
leg.txt <- NULL
p.vec <- NULL
c.vec <- NULL
c2.vec <- NULL
ind <- 1
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
for (i in 1:n.rows.phen) {
if (is.vector(col.labels)) {
phen.v <- as.character(col.classes)
} else {
phen.v <- as.character(col.classes[[i]])
}
p.name <- paste(as.character(rev(head.names)[i]), ": ", sep="")
leg.txt <- c(p.name, phen.v)
p.vec <- rep(22, cols.row[i] + 1)
c.vec <- c("#FFFFFF", phen.cmap[ind:(ind + cols.row[i] - 1)])
c2.vec <- c("#FFFFFF", rep("black", cols.row[i]))
ind <- ind + cols.row[i]
offset <- 0.07
legend(x=0, y= 1 - offset*i,
horiz = T, x.intersp = 0.5, legend=leg.txt, bty="n", xjust=0, yjust= 1, pch = p.vec,
pt.bg = c.vec, col = c2.vec, cex = 1.20, pt.cex=1.75)
}
}
# Color map legend (intentionally disabled in this variant)
# print(c("range V=", range(V)))
# print(c("range V1=", range(V1)))
# print(c("range V2=", range(V2)))
# par(mar = c(2, 12, 2, 12))
# num.v <- 20
# range.v <- range(V2)
# incr <- (range.v[1] - range.v[2])/(num.v - 1)
# heatm.v <- matrix(rev(seq(range.v[2], range.v[1], incr)), nrow=num.v, ncol=1)
# image(1:num.v, 1:1, heatm.v, zlim = c(0, tot.cols), col=mycol, axes=FALSE,
# main=" ", sub = " ", xlab= ylab, ylab=xlab)
# range.v <- range(V1)
# incr <- (range.v[1] - range.v[2])/(num.v - 1)
# heatm.v2 <- matrix(signif(rev(seq(range.v[2], range.v[1], incr)), digits=2), nrow=num.v, ncol=1)
## print(c("heatm.v2=", heatm.v2))
# axis(3, at=1:num.v, labels=heatm.v2, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.5*char.rescale, font.axis=1)
return()
}
MSIG.HeatMapPlot.10<- function(
V,
pathway.mut,
row.names = "NA",
row.names2 = "NA",
col.labels = "NA",
col.labels2 = "NA",
col.classes = "NA",
phen.cmap = "NA",
col.names = "NA",
phen.names = "NA",
phen.names2 = "NA",
main = " ",
sub = " ",
xlab=" ",
ylab=" ",
row.norm = TRUE,
char.rescale = 0.85,
cmap.type = 1, # 1 = vintage pinkogram, 2 = scale of blues, 3 = high-resolution pinkogram for scores or probabilities [0, 1], 4 = high-resolution pinkogram for general values, 5 = color map for normalized enrichment scores, 6 = scale of red purples, 7 = scale of Oranges, 8 = scale of Greens, 9 = scale of Blues
max.v = "NA",
legend = TRUE,
tissue.names = "NA",
tissue.labels = NA)
{
#
# Plots a heatmap "pinkogram" of a gene expression matrix including phenotype vector and gene, sample and phenotype labels
# Doesn't plot the spectrum on the bottom
#
# Plots PATHWAY.MUT as a continuous vector in a greyscale spectrum
#
# Arguments (beyond those of the other MSIG.HeatMapPlot variants):
#   pathway.mut   numeric per-column vector rendered as a greyscale row
#                 (values are first mapped into a [0.251, 1] grey range and inverted)
#   tissue.names, tissue.labels  accepted for interface compatibility with the
#                 ".multiple.tissues" variant but unused in this function body
# Returns NULL; the plot is emitted on the active graphics device.
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
n.rows <- length(V[,1])
n.cols <- length(V[1,])
V1 <- matrix(0, nrow=n.rows, ncol=n.cols)
# Removed unused locals `set3`/`accent` (brewer.pal calls): they were never
# referenced and forced an RColorBrewer dependency on this function.
# if ((cmap.type == 5) | (cmap.type == 3)) {
if (cmap.type == 5) {
row.norm <- FALSE  # NES color map displays the raw values
}
if (row.norm == TRUE) {
# Standardize each row as (x - mean)/(0.333 * sd) and clamp to [-4, 4];
# constant rows (sd == 0) map to 0.
row.mean <- apply(V, MARGIN=1, FUN=mean)
row.sd <- apply(V, MARGIN=1, FUN=sd)
row.n <- length(V[,1])
for (i in 1:n.rows) {
if (row.sd[i] == 0) {
V1[i,] <- 0
} else {
V1[i,] <- (V[i,] - row.mean[i])/(0.333 * row.sd[i])
}
V1[i,] <- ifelse(V1[i,] < -4, -4, V1[i,])
V1[i,] <- ifelse(V1[i,] > 4, 4, V1[i,])
}
} else {
V1 <- V
}
# Build the color map selected by cmap.type
if (cmap.type == 1) {
mycol <- c("#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA",
"#FF9DB0", "#FF7080",
"#FF5A5A", "#FF4040", "#FF0D1D") # blue-pinkogram colors. This is the 1998-vintage,
# pre-gene cluster, original pinkogram color map
} else if (cmap.type == 2) {
violet.palette <- colorRampPalette(c("#400030", "white"), space = "rgb")
mycol <- rev(violet.palette(20))
# mycol <- c("#FCFBFD","#F4F2F8","#F8F7FB","#EFEDF5","#E1E1EF","#E8E7F2","#DADAEB","#C6C7E1","#D0D1E6",
# "#BCBDDC","#A8A6CF",
# "#B2B2D6","#9E9AC8","#8A87BF","#9491C4","#807DBA","#7260AB","#796FB3","#6A51A3","#5C3596",
# "#63439D","#54278F","#460D83","#4D1A89","#3F007D")
} else if (cmap.type == 6) {
mycol <- c("#FFF7F3", "#FDE0DD", "#FCC5C0", "#FA9FB5", "#F768A1", "#DD3497", "#AE017E",
"#7A0177", "#49006A")
} else if (cmap.type == 7) {
mycol <- c("#FFF5EB", "#FEE6CE", "#FDD0A2", "#FDAE6B", "#FD8D3C", "#F16913", "#D94801",
"#A63603", "#7F2704")
} else if (cmap.type == 8) {
mycol <- c("#F7FCF5", "#E5F5E0", "#C7E9C0", "#A1D99B", "#74C476", "#41AB5D", "#238B45",
"#006D2C", "#00441B")
} else if (cmap.type == 9) {
mycol <- c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5",
"#08519C", "#08306B")
} else if ((cmap.type == 3) | (cmap.type == 4) | (cmap.type == 5)) {
# 512-step high-resolution blue-white-red ramp built from RGB components
mycol <- vector(length=512, mode = "numeric")
for (k in 1:256) {
mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
}
for (k in 257:512) {
mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
}
mycol <- rev(mycol)
}
ncolors <- length(mycol)
# Map pathway.mut into an inverted grey level in (0, 0.749]-style range
pathway.mut = (-(pathway.mut*.749 + 0.251 - 1))
# image(1:n.cols, 1, as.matrix(pathway.mut), col=gray(n.cols:0/n.cols))
if (cmap.type == 5) {
# NOTE(review): a numeric max.v is compared against the string "NA" via
# coercion; this works for the sentinel default but is fragile.
if (max.v == "NA") {
max.v <- max(max(V1), -min(V1))
}
V2 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
} else {
# Linear map of V1 onto color indices (1.001 keeps the max inside the top bin)
V2 <- ceiling(ncolors * (V1 - min(V1))/(1.001*(max(V1) - min(V1))))
}
if (col.labels[1] == "NA") {
# No phenotype row: plot the (vertically flipped) heatmap alone.
# FIX: heatm must have exactly n.rows rows; the previous "n.rows + 1" made
# t(heatm) n.cols x (n.rows + 1), which fails image()'s dimension check
# against y = 1:n.rows.
heatm <- matrix(0, nrow = n.rows, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
tot.cols <- ncolors
if (legend == TRUE) {
nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=TRUE), widths = c(5, 1), heights = c(10, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=TRUE), heights = c(8, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap[1:length(col.classes)])
image(1:n.cols, 1:n.rows, t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
n.rows.phen <- 0
} else {
tot.cols <- ncolors
if (is.vector(col.labels)) {
heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
n.rows.phen <- 1
heatm[n.rows + 1,] <- tot.cols + col.labels
cols.row <- length(unique(col.labels))
tot.cols <- tot.cols + cols.row
phen.cmap <- phen.cmap[1:cols.row]
pathway.mut.grey = grey(pathway.mut)
u.pathway.mut.grey = unique(pathway.mut.grey)
# NOTE(review): suspected bug, behavior preserved pending confirmation:
# this assignment targets row n.rows + 1 (since n.rows.phen == 1) and so
# overwrites the phenotype row written above; unlike the matrix branch,
# tot.cols is not extended by length(u.pathway.mut.grey), so these codes
# fall outside zlim in the image() call below.
heatm[n.rows + n.rows.phen,] = match(pathway.mut.grey, u.pathway.mut.grey) + tot.cols
} else {
# One phenotype row per row of the col.labels matrix, each with its own
# block of colors appended after the heatmap palette; the last phenotype
# row is then replaced by the pathway.mut greyscale row.
n.rows.phen <- length(col.labels[,1])
cols.row <- vector(length=n.rows.phen, mode = "numeric")
heatm <- matrix(0, nrow = n.rows + n.rows.phen, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
# heatm[n.rows+n.rows.phen,] = t(as.matrix(gray(pathway.mut)))
for (k in seq(n.rows + n.rows.phen, n.rows + 1, -1)) {
heatm[k,] <- tot.cols + col.labels[n.rows + n.rows.phen - k + 1,]
cols.row[n.rows + n.rows.phen - k + 1] <- length(unique(col.labels[n.rows + n.rows.phen - k + 1,]))
tot.cols <- tot.cols + cols.row[n.rows + n.rows.phen - k + 1]
# print(c("col:", k, ":", tot.cols + col.labels[n.rows + n.rows.phen - k + 1,], "tot.cols:", tot.cols))
}
pathway.mut.grey = grey(pathway.mut)
u.pathway.mut.grey = unique(pathway.mut.grey)
heatm[n.rows + n.rows.phen,] = match(pathway.mut.grey, u.pathway.mut.grey) + tot.cols
tot.cols = tot.cols + length(u.pathway.mut.grey)
phen.cmap <- phen.cmap[1:sum(unlist(lapply(col.classes, length)))]
}
# image(as.matrix(grey(pathway.mut)))
if (legend == TRUE) {
# nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(10, 2), heights = c(6, 1), respect = FALSE)
nf <- layout(matrix(c(1, 2, 3), 3, 1, byrow=TRUE), heights = c(8, 4, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=TRUE), heights = c(5, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap, u.pathway.mut.grey)
image(1:n.cols, 1:(n.rows + n.rows.phen), t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
}
# Add lines to separate phenotypes or subgroups
if (col.labels2[1] != "NA") {
groups <- split(col.labels2, col.labels2)
len.vec <- lapply(groups, length)
plot.div <- c(0.51, cumsum(len.vec) + 0.5)
for (i in plot.div) {
lines(c(i, i), c(0, n.rows + n.rows.phen + 0.48), lwd = 2, cex = 0.9, col = "black")
}
lines(c(0.51, n.cols + 0.49), c(0.51, 0.51), lwd = 2, cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + n.rows.phen + 0.48, n.rows + n.rows.phen + 0.48), lwd = 2,
cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + 0.50, n.rows + 0.50), lwd = 2,
cex = 0.9, col = "black")
}
# Left axis: gene labels (truncated to 40 chars) plus phenotype-row headers.
# NOTE(review): head.names is created only inside this block; the legend
# section below reads it, so pass row.names whenever legend = TRUE.
if (row.names[1] != "NA") {
numC <- nchar(row.names)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names[i] <- substr(row.names[i], 1, 40)
row.names[i] <- paste(row.names[i], " ", sep="")
}
if (phen.names[1] == "NA") {
head.names <- paste("Class", seq(n.rows.phen, 1, -1))
} else {
head.names <- as.character(rev(phen.names))
}
row.names <- c(row.names[seq(n.rows, 1, -1)], head.names)
axis(2, at=1:(n.rows + n.rows.phen), labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char,
font.axis=2, line=-1)
}
# Right axis: secondary row labels and phenotype names
if (row.names2[1] != "NA") {
numC <- nchar(row.names2)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names2[i] <- substr(row.names2[i], 1, 40)
row.names2[i] <- paste(" ", row.names2[i], sep="")
}
for( i in 1:n.rows.phen ){
phen.names2[i] <- substr(phen.names2[i], 1, 40)
phen.names2[i] <- paste( " ", phen.names2[i], sep="")
}
row.names2 <- rev(row.names2)
phen.names2 <- rev(phen.names2)
axis(4, at=1:(n.rows + n.rows.phen), labels=c(row.names2, phen.names2), adj= 0.5, tick=FALSE, las = 1,
cex.axis=size.row.char, font.axis=2, line=-1)
}
# Bottom axis: sample labels
if (col.names[1] != "NA") {
size.col.char <- char.rescale*20/(n.cols + 25)
axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
}
# Phenotype Legend
if (legend == TRUE) {
leg.txt <- NULL
p.vec <- NULL
c.vec <- NULL
c2.vec <- NULL
ind <- 1
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
for (i in 1:n.rows.phen) {
if (is.vector(col.labels)) {
phen.v <- as.character(col.classes)
} else {
phen.v <- as.character(col.classes[[i]])
}
p.name <- paste(as.character(rev(head.names)[i]), ": ", sep="")
leg.txt <- c(p.name, phen.v)
p.vec <- rep(22, cols.row[i] + 1)
c.vec <- c("#FFFFFF", phen.cmap[ind:(ind + cols.row[i] - 1)])
c2.vec <- c("#FFFFFF", rep("black", cols.row[i]))
ind <- ind + cols.row[i]
offset <- 0.07
legend(x=0, y= 1 - offset*i,
horiz = T, x.intersp = 0.5, legend=leg.txt, bty="n", xjust=0, yjust= 1, pch = p.vec,
pt.bg = c.vec, col = c2.vec, cex = 1.20, pt.cex=1.75)
}
}
# Color map legend: a 20-step strip of the heatmap palette annotated with the
# corresponding (normalized) data values.
par(mar = c(2, 12, 2, 12))
num.v <- 20
range.v <- range(V2)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v <- matrix(rev(seq(range.v[2], range.v[1], incr)), nrow=num.v, ncol=1)
image(1:num.v, 1:1, heatm.v, zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=" ", sub = " ", xlab= ylab, ylab=xlab)
range.v <- range(V1)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v2 <- matrix(signif(rev(seq(range.v[2], range.v[1], incr)), digits=2), nrow=num.v, ncol=1)
axis(3, at=1:num.v, labels=heatm.v2, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.5*char.rescale, font.axis=1)
return()
}
MSIG.HeatMapPlot.10.multiple.tissues <- function(
V,
pathway.mut,
row.names = "NA",
row.names2 = "NA",
col.labels = "NA",
col.labels2 = "NA",
col.classes = "NA",
phen.cmap = "NA",
col.names = "NA",
phen.names = "NA",
phen.names2 = "NA",
main = " ",
sub = " ",
xlab=" ",
ylab=" ",
row.norm = TRUE,
char.rescale = 0.85,
cmap.type = 1, # 1 = vintage pinkogram, 2 = scale of blues, 3 = high-resolution pinkogram for scores or probabilities [0, 1], 4 = high-resolution pinkogram for general values, 5 = color map for normalized enrichment scores, 6 = scale of red purples, 7 = scale of Oranges, 8 = scale of Greens, 9 = scale of Blues
max.v = "NA",
legend = T,
tissue.names = "NA",
tissue.labels = NA)
{
#
# Plots a heatmap "pinkogram" of a gene expression matrix including phenotype vector and gene, sample and phenotype labels
# Doesn't plot the spectrum on the bottom
#
# Plots PATHWAY.MUT as a continuous vector in a greyscale spectrum
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
library(RColorBrewer)
n.tissues = length(tissue.names)
n.rows <- length(V[,1])
n.cols <- length(V[1,])
V1 <- matrix(0, nrow=n.rows, ncol=n.cols)
# if ((cmap.type == 5) | (cmap.type == 3)) {
if (cmap.type == 5) {
row.norm <- F
}
if (row.norm == TRUE) {
row.mean <- apply(V, MARGIN=1, FUN=mean)
row.sd <- apply(V, MARGIN=1, FUN=sd)
row.n <- length(V[,1])
for (i in 1:n.rows) {
if (row.sd[i] == 0) {
V1[i,] <- 0
} else {
V1[i,] <- (V[i,] - row.mean[i])/(0.333 * row.sd[i])
}
V1[i,] <- ifelse(V1[i,] < -4, -4, V1[i,])
V1[i,] <- ifelse(V1[i,] > 4, 4, V1[i,])
}
} else {
V1 <- V
}
if (cmap.type == 1) {
mycol <- c("#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA",
"#FF9DB0", "#FF7080",
"#FF5A5A", "#FF4040", "#FF0D1D") # blue-pinkogram colors. This is the 1998-vintage,
# pre-gene cluster, original pinkogram color map
} else if (cmap.type == 2) {
violet.palette <- colorRampPalette(c("#400030", "white"), space = "rgb")
mycol <- rev(violet.palette(20))
# mycol <- c("#FCFBFD","#F4F2F8","#F8F7FB","#EFEDF5","#E1E1EF","#E8E7F2","#DADAEB","#C6C7E1","#D0D1E6",
# "#BCBDDC","#A8A6CF",
# "#B2B2D6","#9E9AC8","#8A87BF","#9491C4","#807DBA","#7260AB","#796FB3","#6A51A3","#5C3596",
# "#63439D","#54278F","#460D83","#4D1A89","#3F007D")
} else if (cmap.type == 6) {
mycol <- c("#FFF7F3", "#FDE0DD", "#FCC5C0", "#FA9FB5", "#F768A1", "#DD3497", "#AE017E",
"#7A0177", "#49006A")
} else if (cmap.type == 7) {
mycol <- c("#FFF5EB", "#FEE6CE", "#FDD0A2", "#FDAE6B", "#FD8D3C", "#F16913", "#D94801",
"#A63603", "#7F2704")
} else if (cmap.type == 8) {
mycol <- c("#F7FCF5", "#E5F5E0", "#C7E9C0", "#A1D99B", "#74C476", "#41AB5D", "#238B45",
"#006D2C", "#00441B")
} else if (cmap.type == 9) {
mycol <- c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5",
"#08519C", "#08306B")
} else if ((cmap.type == 3) | (cmap.type == 4) | (cmap.type == 5)) {
mycol <- vector(length=512, mode = "numeric")
for (k in 1:256) {
mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
}
for (k in 257:512) {
mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
}
mycol <- rev(mycol)
}
# browser()
ncolors <- length(mycol)
pathway.mut = (-(pathway.mut*.749 + 0.251 - 1))
# image(1:n.cols, 1, as.matrix(pathway.mut), col=gray(n.cols:0/n.cols))
if (cmap.type == 5) {
if (max.v == "NA") {
max.v <- max(max(V1), -min(V1))
}
V2 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
} else {
V2 <- ceiling(ncolors * (V1 - min(V1))/(1.001*(max(V1) - min(V1))))
}
if (col.labels[1] == "NA") {
heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
tot.cols <- ncolors
if (legend == T) {
nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(5, 1), heights = c(10, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(8, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
mycol <- c(mycol, phen.cmap[1:length(col.classes)])
image(1:n.cols, 1:n.rows, t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
n.rows.phen <- 0
} else {
tot.cols <- ncolors
if (is.vector(col.labels)) {
heatm <- matrix(0, nrow = n.rows + 2, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
n.rows.phen <- 1
heatm[n.rows + 1,] <- tot.cols + col.labels
cols.row <- length(unique(col.labels))
tot.cols <- tot.cols + cols.row
phen.cmap <- phen.cmap[1:cols.row]
pathway.mut.grey = grey(pathway.mut)
u.pathway.mut.grey = unique(pathway.mut.grey)
heatm[n.rows + n.rows.phen,] = match(pathway.mut.grey, u.pathway.mut.grey) + tot.cols
} else {
n.rows.phen <- length(col.labels[,1])
cols.row <- vector(length=n.rows.phen, mode = "numeric")
heatm <- matrix(0, nrow = n.rows + n.rows.phen, ncol = n.cols)
heatm[1:n.rows,] <- V2[seq(n.rows, 1, -1),]
# heatm[n.rows+n.rows.phen,] = t(as.matrix(gray(pathway.mut)))
for (k in seq(n.rows + n.rows.phen, n.rows + 1, -1)) {
heatm[k,] <- tot.cols + col.labels[n.rows + n.rows.phen - k + 1,]
cols.row[n.rows + n.rows.phen - k + 1] <- length(unique(col.labels[n.rows + n.rows.phen - k + 1,]))
tot.cols <- tot.cols + cols.row[n.rows + n.rows.phen - k + 1]
# print(c("col:", k, ":", tot.cols + col.labels[n.rows + n.rows.phen - k + 1,], "tot.cols:", tot.cols))
}
# browser()
pathway.mut.grey = grey(pathway.mut)
u.pathway.mut.grey = unique(pathway.mut.grey)
heatm[n.rows + n.rows.phen,] = match(pathway.mut.grey, u.pathway.mut.grey) + tot.cols
tot.cols = tot.cols + length(u.pathway.mut.grey)
phen.cmap <- phen.cmap[1:sum(unlist(lapply(col.classes, length)))]
}
# image(as.matrix(grey(pathway.mut)))
if (legend == T) {
# nf <- layout(matrix(c(1, 2, 3, 0), 2, 2, byrow=T), widths = c(10, 2), heights = c(6, 1), respect = FALSE)
nf <- layout(matrix(c(1, 2, 3), 3, 1, byrow=T), heights = c(8, 4, 1), respect = FALSE)
} else {
nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), heights = c(5, 1), respect = FALSE)
}
par(mar = c(3, 16, 3, 16))
#browser()
mycol <- c(mycol, phen.cmap, u.pathway.mut.grey)
if( length(tissue.names) > 1 ){
#browser()
tissue.colors = c(brewer.pal(12, "Set3"), brewer.pal(12,"Paired"))[1:length(tissue.names)]
#row.names = c(row.names, "Tissue Types")
mycol <- c(mycol, tissue.colors)
n.rows.phen = n.rows.phen + 1
heatm = rbind(heatm, (tissue.labels + tot.cols))
tot.cols = tot.cols + length(tissue.colors)
}
#browser()
#browser()
image(1:n.cols, 1:(n.rows + n.rows.phen), t(heatm), zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=main, sub = sub, xlab= xlab, ylab=ylab)
}
# Add lines to separate phenotypes or subgroups
if (col.labels2[1] != "NA") {
groups <- split(col.labels2, col.labels2)
len.vec <- lapply(groups, length)
plot.div <- c(0.51, cumsum(len.vec) + 0.5)
for (i in plot.div) {
lines(c(i, i), c(0, n.rows + n.rows.phen + 0.48), lwd = 2, cex = 0.9, col = "black")
}
lines(c(0.51, n.cols + 0.49), c(0.51, 0.51), lwd = 2, cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + n.rows.phen + 0.48, n.rows + n.rows.phen + 0.48), lwd = 2,
cex = 0.9, col = "black")
lines(c(0.51, n.cols + 0.49), c(n.rows + 0.50, n.rows + 0.50), lwd = 2,
cex = 0.9, col = "black")
}
if (row.names[1] != "NA") {
# browser()
numC <- nchar(row.names)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names[i] <- substr(row.names[i], 1, 40)
row.names[i] <- paste(row.names[i], " ", sep="")
}
if (phen.names[1] == "NA") {
head.names <- paste("Class", seq(n.rows.phen, 1, -1))
} else {
head.names <- as.character(rev(phen.names))
}
row.names <- c(row.names[seq(n.rows, 1, -1)], head.names)
if( length(tissue.names) > 1){ row.names = c(row.names, "Tissue Types")}
# print(paste("n.rows:", n.rows))
# print(paste("Phen names:", phen.names))
# print(paste("Head names:", head.names))
# print(paste("Row names:", row.names))
# browser()
axis(2, at=1:(n.rows + n.rows.phen), labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char,
font.axis=2, line=-1)
}
if (row.names2[1] != "NA") {
# browser()
numC <- nchar(row.names2)
size.row.char <- char.rescale*25/(n.rows + 20)
for (i in 1:n.rows) {
row.names2[i] <- substr(row.names2[i], 1, 40)
row.names2[i] <- paste(" ", row.names2[i], sep="")
}
for( i in 1:n.rows.phen ){
phen.names2[i] <- substr(phen.names2[i], 1, 40)
phen.names2[i] <- paste( " ", phen.names2[i], sep="")
}
row.names2 <- rev(row.names2)
phen.names2 <- rev(phen.names2)
axis(4, at=1:(n.rows + n.rows.phen), labels=c(row.names2, phen.names2), adj= 0.5, tick=FALSE, las = 1,
cex.axis=size.row.char, font.axis=2, line=-1)
}
if (col.names[1] != "NA") {
size.col.char <- char.rescale*20/(n.cols + 25)
axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
}
# Phenotype Legend
# print("--------------------------------------------------------------------------------------------")
if (legend == T) {
leg.txt <- NULL
p.vec <- NULL
c.vec <- NULL
c2.vec <- NULL
ind <- 1
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
for (i in 1:(n.rows.phen-1)) {
# browser()
if (is.vector(col.labels)) {
phen.v <- as.character(col.classes)
} else {
phen.v <- as.character(col.classes[[i]])
}
p.name <- paste(as.character(rev(head.names)[i]), ": ", sep="")
leg.txt <- c(p.name, phen.v)
p.vec <- rep(22, cols.row[i] + 1)
c.vec <- c("#FFFFFF", phen.cmap[ind:(ind + cols.row[i] - 1)])
c2.vec <- c("#FFFFFF", rep("black", cols.row[i]))
ind <- ind + cols.row[i]
offset <- 0.07
legend(x=0, y= 1 - offset*i,
horiz = T, x.intersp = 0.5, legend=leg.txt, bty="n", xjust=0, yjust= 1, pch = p.vec,
pt.bg = c.vec, col = c2.vec, cex = 1.20, pt.cex=1.75)
}
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
legend(x=0, y= 10, horiz = T, x.intersp = 0.5, legend=tissue.names, bty="n", xjust=0, yjust= 1,
fill = tissue.colors, cex = 1.20, pt.cex=1.75, ncol=1)
}
#browser()
## Tissue Legend
if(length(tissue.names)>1){
#browser()
par(mar = c(0, 0, 0, 0))
plot(c(0,0), c(1, 1), xlim = c(0, 1), ylim = c(-.050, 1.05), axes=F, type="n", xlab = "", ylab="")
legend(x=0, y= 1, #horiz = T, x.intersp = 0.5, y.intersp=.25,
legend=tissue.names, bty="n", xjust=0, yjust= 1,
fill = tissue.colors, #cex = 1.20, pt.cex=1.75,
ncol=4)
}
# Color map legend
# print(c("range V=", range(V)))
# print(c("range V1=", range(V1)))
# print(c("range V2=", range(V2)))
if(legend==TRUE){
par(mar = c(2, 12, 2, 12))
num.v <- 20
range.v <- range(V2)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v <- matrix(rev(seq(range.v[2], range.v[1], incr)), nrow=num.v, ncol=1)
image(1:num.v, 1:1, heatm.v, zlim = c(0, tot.cols), col=mycol, axes=FALSE,
main=" ", sub = " ", xlab= ylab, ylab=xlab)
range.v <- range(V1)
incr <- (range.v[1] - range.v[2])/(num.v - 1)
heatm.v2 <- matrix(signif(rev(seq(range.v[2], range.v[1], incr)), digits=2), nrow=num.v, ncol=1)
# print(c("heatm.v2=", heatm.v2))
axis(3, at=1:num.v, labels=heatm.v2, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.5*char.rescale, font.axis=1)
}
return()
}
MSIG.Gct2Frame <- function(filename = "NULL") {
  # Read a gene expression dataset in GCT format and return it as a list:
  #   ds        - data frame of expression values (rows = genes, cols = samples)
  #   row.names - gene identifiers taken from the "Name" column
  #   descs     - gene annotations taken from the "Description" column
  #   names     - sample identifiers taken from the column header
  #
  # The first two GCT header lines (version tag and dimensions) are skipped;
  # the "Name" column becomes the row names of the data frame and the
  # "Description" column is split off into `descs`.
  #
  # Originally distributed by the Broad Institute / MIT (copyright 2003);
  # supplied without any warranty or guaranteed support.
  gct <- read.delim(filename, header = T, sep = "\t", skip = 2, row.names = 1,
                    blank.lines.skip = T, comment.char = "", as.is = T,
                    na.strings = "")
  annotations <- gct[, 1]   # "Description" column
  expression <- gct[-1]     # drop it, leaving one column per sample
  return(list(ds = expression,
              row.names = row.names(expression),
              descs = annotations,
              names = names(expression)))
}
Read.GeneSets.db <- function(
  gs.db,
  thres.min = 2,
  thres.max = 2000,
  gene.names = NULL)
{
  # Read a GMT-format gene-set database file.
  #
  # Each line is "<set name>\t<description>\t<gene>\t<gene>...". Sets are
  # optionally filtered against a gene universe (`gene.names`) and kept only
  # when their (filtered) size lies within [thres.min, thres.max].
  #
  # Returns a list with:
  #   N.gs     - number of retained gene sets
  #   gs       - character matrix of gene members, padded with "null"
  #   gs.names - set names;  gs.desc - set descriptions
  #   size.G   - retained set sizes;  max.N.gs - sets in the input file
  raw.lines <- readLines(gs.db)
  max.Ng <- length(raw.lines)
  tokens <- strsplit(raw.lines, "\t")
  temp.size.G <- vapply(tokens, function(tk) as.numeric(length(tk) - 2), numeric(1))
  max.size.G <- max(temp.size.G)
  gs <- matrix("null", nrow = max.Ng, ncol = max.size.G)
  temp.names <- character(max.Ng)
  temp.desc <- character(max.Ng)
  gs.count <- 1
  for (i in seq_len(max.Ng)) {
    tk <- tokens[[i]]
    genes <- tk[-(1:2)]                       # members follow name + description
    if (is.null(gene.names)) {
      keep <- rep(TRUE, length(genes))        # no universe: keep everything
    } else {
      keep <- is.element(genes, gene.names)   # restrict to the gene universe
    }
    set.size <- sum(keep)
    if (set.size < thres.min || set.size > thres.max) next
    temp.size.G[gs.count] <- set.size
    gs[gs.count, ] <- c(genes[keep], rep("null", max.size.G - set.size))
    temp.names[gs.count] <- tk[1]
    temp.desc[gs.count] <- tk[2]
    gs.count <- gs.count + 1
  }
  Ng <- gs.count - 1
  return(list(N.gs = Ng, gs = gs, gs.names = temp.names[1:Ng],
              gs.desc = temp.desc[1:Ng], size.G = temp.size.G[1:Ng],
              max.N.gs = max.Ng))
}
write.cls.2 <- function (class.v, phen, filename)
{
  # Write class assignments to a CLS-format phenotype file.
  #
  # Args:
  #   class.v: numeric class indices -- a vector (single phenotype) or a
  #            matrix with one row per phenotype.
  #   phen: class label names -- a vector for the vector case, or a list
  #         (one element per phenotype row) for the matrix case.
  #   filename: output path.
  con <- file(filename, "w")
  on.exit(close(con))
  # Header: "<n assignments> <n phenotypes> 1", then "# <all labels>".
  cat(length(class.v), length(phen), "1", "\n", file = con, append = TRUE, sep = " ")
  cat("#", unlist(phen), "\n", file = con, append = TRUE, sep = " ")
  if (is.vector(class.v)) {
    # Translate numeric indices back to label strings for the single row.
    cat(phen[class.v], "\n", file = con, append = TRUE, sep = " ")
  } else {
    # One output line per phenotype row, each with its own label set.
    for (row in seq_len(nrow(class.v))) {
      labels.row <- unlist(phen[[row]])[class.v[row, ]]
      cat(labels.row, "\n", file = con, append = TRUE, sep = " ")
    }
  }
}
write.gct <- function(gct.data.frame, descs = "", filename)
{
  # Write a data frame of expression values to a GCT-format file.
  #
  # Args:
  #   gct.data.frame: data frame (rows = genes, cols = samples); its row
  #                   names supply the GCT "Name" column.
  #   descs: optional vector of gene descriptions; when not supplied
  #          (length <= 1) the row names are reused as descriptions.
  #   filename: output path.
  f <- file(filename, "w")
  on.exit(close(f), add = TRUE)
  # Suppress coercion warnings while filling the character matrix below.
  # BUG FIX: the original saved oldWarn but then restored `options(warn = 0)`
  # unconditionally, clobbering the caller's warn setting; restore the saved
  # value on exit instead (also restored on error now).
  oldWarn <- options(warn = -1)
  on.exit(options(oldWarn), add = TRUE)
  # GCT header: version tag, then "<n.rows>\t<n.cols>".
  cat("#1.2", "\n", file = f, append = TRUE, sep = "")
  cat(dim(gct.data.frame)[1], "\t", dim(gct.data.frame)[2], "\n", file = f, append = TRUE, sep = "")
  # Column header line: Name, Description, then one entry per sample.
  cat("Name", "\t", file = f, append = TRUE, sep = "")
  cat("Description", file = f, append = TRUE, sep = "")
  col.ids <- names(gct.data.frame)
  cat("\t", col.ids[1], file = f, append = TRUE, sep = "")
  if (length(col.ids) > 1) {
    for (j in 2:length(col.ids)) {
      cat("\t", col.ids[j], file = f, append = TRUE, sep = "")
    }
  }
  cat("\n", file = f, append = TRUE, sep = "\t")
  # Assemble a character matrix: Name, Description, then the data columns.
  m <- matrix(nrow = dim(gct.data.frame)[1], ncol = dim(gct.data.frame)[2] + 2)
  m[, 1] <- row.names(gct.data.frame)
  if (length(descs) > 1) {
    m[, 2] <- descs
  } else {
    m[, 2] <- row.names(gct.data.frame)
  }
  index <- 3
  for (i in 1:dim(gct.data.frame)[2]) {
    m[, index] <- gct.data.frame[, i]
    index <- index + 1
  }
  write.table(m, file = f, append = TRUE, quote = FALSE, sep = "\t", eol = "\n", col.names = FALSE, row.names = FALSE)
}
MSIG.ReadPhenFile <- function(file = "NULL") {
  # Parse a CLS phenotype file that may carry multiple class-assignment rows.
  #
  # File layout:
  #   line 1: "<n.samples> <n.classes> 1", optionally followed by
  #           "phen.names: ..." and "col.phen: ..." annotations
  #   line 2: "# <phenotype labels>"
  #   lines 3+: one space-separated class assignment per row
  #
  # Returns list(phen.list, phen, phen.names, col.phen, class.v, class.list);
  # with a single assignment row, class.v/class.list collapse to vectors and
  # phen to a plain vector.
  #
  # Originally distributed by the Broad Institute / MIT (copyright 2003);
  # supplied without any warranty or guaranteed support.
  cls.lines <- readLines(file)
  n.lines <- length(cls.lines)
  header <- unlist(strsplit(cls.lines[[1]], " "))
  if (length(header) == 3) {
    phen.names <- NULL
    col.phen <- NULL
  } else {
    # Extended header: extract phenotype names and their display colors.
    pos.names <- match("phen.names:", header)
    pos.cols <- match("col.phen:", header)
    phen.names <- header[(pos.names + 1):(pos.cols - 1)]
    col.phen <- header[(pos.cols + 1):length(header)]
  }
  second <- unlist(strsplit(cls.lines[[2]], " "))
  phen.list <- second[-1]                       # drop the leading "#"
  n.class.rows <- n.lines - 2
  phen <- list(NULL)
  for (k in seq_len(n.class.rows)) {
    row.tokens <- unlist(strsplit(cls.lines[[k + 2]], " "))
    if (k == 1) {
      # Size the matrices from the first assignment row.
      class.list <- matrix(0, nrow = n.class.rows, ncol = length(row.tokens))
      class.v <- matrix(0, nrow = n.class.rows, ncol = length(row.tokens))
    }
    class.list[k, ] <- row.tokens
    row.classes <- unique(row.tokens)
    class.v[k, ] <- match(row.tokens, row.classes)
    phen[[k]] <- row.classes
  }
  if (n.lines == 3) {
    # Single phenotype row: return plain vectors instead of 1-row matrices.
    class.list <- as.vector(class.list)
    class.v <- as.vector(class.v)
    phen <- unlist(phen)
  }
  return(list(phen.list = phen.list, phen = phen, phen.names = phen.names,
              col.phen = col.phen, class.v = class.v, class.list = class.list))
}
MSIG.ReadPhenFile.2 <- function(file = "NULL") {
  # Parse a CLS phenotype file that may carry multiple class-assignment rows.
  #
  # Differs from MSIG.ReadPhenFile in that `phen` is returned as a single
  # flat vector concatenating the unique labels of every assignment row,
  # rather than as a list with one element per row.
  #
  # Originally distributed by the Broad Institute / MIT (copyright 2003);
  # supplied without any warranty or guaranteed support.
  cls.lines <- readLines(file)
  n.lines <- length(cls.lines)
  header <- unlist(strsplit(cls.lines[[1]], " "))
  if (length(header) == 3) {
    phen.names <- NULL
    col.phen <- NULL
  } else {
    # Extended header: extract phenotype names and their display colors.
    pos.names <- match("phen.names:", header)
    pos.cols <- match("col.phen:", header)
    phen.names <- header[(pos.names + 1):(pos.cols - 1)]
    col.phen <- header[(pos.cols + 1):length(header)]
  }
  second <- unlist(strsplit(cls.lines[[2]], " "))
  phen.list <- second[-1]                       # drop the leading "#"
  phen <- NULL
  n.class.rows <- n.lines - 2
  for (k in seq_len(n.class.rows)) {
    row.tokens <- unlist(strsplit(cls.lines[[k + 2]], " "))
    if (k == 1) {
      # Size the matrices from the first assignment row.
      class.list <- matrix(0, nrow = n.class.rows, ncol = length(row.tokens))
      class.v <- matrix(0, nrow = n.class.rows, ncol = length(row.tokens))
    }
    class.list[k, ] <- row.tokens
    row.classes <- unique(row.tokens)
    class.v[k, ] <- match(row.tokens, row.classes)
    phen <- c(phen, row.classes)                # accumulate across rows
  }
  if (n.lines == 3) {
    # Single phenotype row: return plain vectors instead of 1-row matrices.
    class.list <- as.vector(class.list)
    class.v <- as.vector(class.v)
  }
  return(list(phen.list = phen.list, phen = phen, phen.names = phen.names,
              col.phen = col.phen, class.v = class.v, class.list = class.list))
}
# Subset a GCT expression dataset -- and, optionally, its companion CLS
# phenotype file -- by columns (sample indices/names or phenotype labels)
# and/or by row names, then write the results to output.ds / output.cls.
#
# Args:
#   input.ds: input expression dataset (GCT path).
#   input.cls: optional phenotype file (CLS path); NULL skips CLS handling.
#   column.subset: "ALL", or a vector of column indices / sample names
#     (or of phenotype labels when column.sel.type == "phenotype").
#   column.sel.type: "samples" or "phenotype".
#   row.subset: "ALL" or a vector of row (gene-set) names to keep.
#   output.ds: path for the subset GCT file.
#   output.cls: path for the subset CLS file (used only with input.cls).
#
# NOTE(review): if column.sel.type is neither "samples" nor "phenotype",
# m2/sample.names2 are never defined and the write below fails; likewise
# "phenotype" mode reads class.list, which only exists when input.cls is
# given -- confirm callers always satisfy these preconditions.
MSIG.Subset.Dataset.2 <- function(
input.ds,
input.cls = NULL,
column.subset = "ALL", # subset of column numbers or names (or phenotypes)
column.sel.type = "samples", # "samples" or "phenotype"
row.subset = "ALL", # subset of row numbers or names
output.ds,
output.cls = NULL) {
# start of methodology
print(c("Running MSIG.Subset.Dataset... on GCT file:", input.ds))
print(c("Running MSIG.Subset.Dataset... on CLS file:", input.cls))
# Read input datasets
dataset <- MSIG.Gct2Frame(filename = input.ds)
m <- data.matrix(dataset$ds)
gs.names <- dataset$row.names
gs.descs <- dataset$descs
sample.names <- dataset$names
# Read CLS file
if (!is.null(input.cls)) {
CLS <- MSIG.ReadPhenFile.2(file=input.cls)
class.labels <- CLS$class.v
class.phen <- CLS$phen
class.list <- CLS$class.list
}
# Select desired column subset
if (column.sel.type == "samples") {
if (column.subset[1] == "ALL") {
m2 <- m
sample.names2 <- sample.names
if (!is.null(input.cls)) {
class.labels2 <- class.labels
}
} else {
# Numeric entries index columns directly; character entries are matched
# against the sample names.
if (is.numeric(column.subset[1])) {
m2 <- m[,column.subset]
sample.names2 <- sample.names[column.subset]
if (!is.null(input.cls)) {
# class.labels is a vector for single-phenotype CLS files and a
# matrix (one row per phenotype) otherwise.
if (is.vector(class.labels)) {
class.labels2 <- class.labels[column.subset]
} else {
class.labels2 <- class.labels[, column.subset]
}
}
} else {
locations <- !is.na(match(sample.names, column.subset))
sample.names2 <- sample.names[locations]
m2 <- m[, locations]
if (!is.null(input.cls)) {
if (is.vector(class.labels)) {
class.labels2 <- class.labels[locations]
} else {
class.labels2 <- class.labels[, locations]
}
}
}
}
} else if (column.sel.type == "phenotype") {
# Keep the samples whose class label appears in column.subset.
locations <- !is.na(match(class.list, column.subset))
sample.names2 <- sample.names[locations]
m2 <- m[,locations]
if (!is.null(input.cls)) {
if (is.vector(class.labels)) {
class.labels2 <- class.labels[locations]
} else {
class.labels2 <- class.labels[, locations]
}
}
}
# Select desired row subset by name; "ALL" keeps every row.
if (row.subset[1] == "ALL") {
m3 <- m2
gs.names2 <- gs.names
gs.descs2 <- gs.descs
} else {
locations <- !is.na(match(gs.names, row.subset))
m3 <- m2[locations,]
gs.names2 <- gs.names[locations]
gs.descs2 <- gs.descs[locations]
}
# Save datasets
V <- data.frame(m3)
names(V) <- sample.names2
row.names(V) <- gs.names2
write.gct(gct.data.frame = V, descs = gs.descs2, filename = output.ds)
if (!is.null(input.cls)) {
write.cls.2(class.v = class.labels2, phen = class.phen, filename = output.cls)
}
}
OPAM.match.projection.to.pathway <- function(
  input.ds,
  input.cls = NA,
  results.dir,
  normalize.score = F,
  normalization.type = "zero.one",
  pathway,
  max.n = 10,
  user.colors = NA,
  decreasing.order = T,
  sort.columns = F,
  char.rescale = 1.25,
  cmap.type = 3,
  row.norm = T,
  output.dataset = NA)
{
  # Rank all pathways of a projection dataset by their correlation with one
  # target pathway and plot the top `max.n` as a heat map (PDF in results.dir).
  #
  # Args:
  #   input.ds: GCT file of pathway projection scores (rows = pathways).
  #   input.cls: optional CLS phenotype file; NA (or the string "NA") = none.
  #   results.dir: output directory / file-name prefix for the PDF.
  #   normalize.score, normalization.type: optional row normalization
  #     ("zero.one", "z.score" or "r.z.score").
  #   pathway: name of the target pathway (must appear among the row names).
  #   max.n: number of top-correlated pathways to display.
  #   user.colors: optional phenotype color-map override.
  #   decreasing.order, sort.columns: optionally sort samples by the target
  #     pathway's score before plotting.
  #   char.rescale, cmap.type, row.norm: forwarded to MSIG.HeatMapPlot.7.
  #   output.dataset: optional GCT path for the reordered top-n score matrix.
  library(gtools)
  library(verification)
  library(ROCR)
  library(MASS)
  library(RColorBrewer)
  library(heatmap.plus)
  dataset <- MSIG.Gct2Frame(filename = input.ds)  # read projection scores (GCT)
  m <- data.matrix(dataset$ds)
  pathway.names <- dataset$row.names
  pathway.descs <- dataset$descs
  sample.names <- dataset$names
  n.pathways <- length(m[, 1])
  # Derive the output file prefix from the input file name.
  temp <- strsplit(input.ds, split = "/")
  s <- length(temp[[1]])
  test.file.name <- temp[[1]][s]
  temp <- strsplit(test.file.name, split = ".gct")
  test.file.prefix <- temp[[1]][1]
  # Optional per-row score normalization.
  if (normalize.score == T) {
    if (normalization.type == "zero.one") {
      for (i in 1:n.pathways) {
        m[i, ] <- (m[i, ] - min(m[i, ])) / (max(m[i, ]) - min(m[i, ]))
      }
    } else if (normalization.type == "z.score") {
      for (i in 1:n.pathways) {
        m[i, ] <- (m[i, ] - mean(m[i, ])) / sd(m[i, ])
      }
    } else if (normalization.type == "r.z.score") {
      # Robust z-score: median / MAD.
      for (i in 1:n.pathways) {
        m[i, ] <- (m[i, ] - median(m[i, ])) / mad(m[i, ])
      }
    }
  }
  loc <- match(pathway, pathway.names)
  print(c("loc:", loc))
  if (sort.columns == T) {
    s.order <- order(m[loc, ], decreasing = decreasing.order)
    m2 <- m[, s.order]
    sample.names2 <- sample.names[s.order]
  } else {
    m2 <- m
    sample.names2 <- sample.names
  }
  # Correlation of every pathway with the target; keep the top max.n rows.
  correl <- cor(t(m2))[, loc]
  m.order <- order(correl, decreasing = T)
  correl2 <- correl[m.order]
  m2 <- m2[m.order[1:max.n], ]
  pathway.names2 <- pathway.names[m.order]
  pathway.descs2 <- signif(correl2, digits = 3)
  # BUG FIX: the original tested `input.cls == "NA"` only, which evaluates to
  # NA (and errors inside `if`) for the default input.cls = NA.
  if (is.na(input.cls) || input.cls == "NA") {
    # No phenotype file: dummy two-class split with a blank legend.
    cls.labels2 <- c(rep(0, 10), rep(1, length(sample.names2) - 10))
    cls.phen2 <- c(" ")
    colors.list <- c("white")
    phen.names2 <- " "
  } else {
    CLS <- MSIG.ReadPhenFile.2(file = input.cls)  # read phenotypes (CLS)
    cls.labels <- CLS$class.v
    cls.phen <- CLS$phen
    cls.list <- CLS$class.list
    if (!is.null(CLS$phen.names)) {
      phen.names <- CLS$phen.names
    } else {
      phen.names <- " "
    }
    # Reorder phenotype labels to match any sample sorting done above.
    if (is.vector(cls.labels)) {
      if (sort.columns == T) {
        cls.labels2 <- cls.labels[s.order]
        cls.list2 <- cls.list[s.order]
      } else {
        cls.labels2 <- cls.labels
        cls.list2 <- cls.list
      }
      n.phen <- 1
    } else {
      if (sort.columns == T) {
        cls.labels2 <- cls.labels[, s.order]
        cls.list2 <- cls.list[, s.order]
      } else {
        cls.labels2 <- cls.labels
        cls.list2 <- cls.list
      }
      n.phen <- length(cls.labels2[, 1])
    }
    # Recompute numeric class labels relative to the (reordered) class lists.
    cls.phen2 <- list(NULL)
    if (is.vector(cls.labels2)) {
      classes <- unique(cls.list2)
      cls.phen2 <- classes
      cls.labels2 <- match(cls.list2, cls.phen2)
    } else {
      for (kk in 1:length(cls.list2[, 1])) {
        classes <- unique(cls.list2[kk, ])
        cls.phen2[[kk]] <- classes
        cls.labels2[kk, ] <- match(cls.list2[kk, ], cls.phen2[[kk]])
      }
    }
    phen.names2 <- phen.names
    # Phenotype colors: user override, then CLS-embedded colors, then default.
    if (!is.na(user.colors[1])) {
      c.test <- user.colors
    } else {
      if (!is.null(CLS$col.phen)) {
        c.test <- CLS$col.phen
      } else {
        c.test <- c(brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Set1"),
                    brewer.pal(n=8, name="Accent"), brewer.pal(n=9, name="Spectral"), brewer.pal(n=8, name="Set3"),
                    brewer.pal(n=8, name="BuGn"),
                    brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                    brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                    brewer.pal(n=8, name="BuGn"),
                    brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                    brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                    brewer.pal(n=8, name="BuGn"))
      }
    }
    # BUG FIX: these two lines ran unconditionally in the original, failing
    # with "object 'c.test' not found" when no CLS file was supplied (the
    # NA branch above already sets colors.list itself).
    cls.phen.index <- unlist(cls.phen2)
    colors.list <- c.test[1:length(cls.phen.index)]
  }
  filename <- paste(results.dir, test.file.prefix, ".SORT.PROJ.TO.", pathway, sep = "")
  pdf(file = paste(filename, ".pdf", sep = ""), height = 8.5, width = 10.5)
  # BUG FIX: pass phen.names2 (defined on both branches); the original passed
  # phen.names, which is undefined when input.cls is NA.
  MSIG.HeatMapPlot.7(V = m2, row.names = pathway.names2[1:max.n],
                     row.names2 = pathway.descs2[1:max.n], col.labels = cls.labels2,
                     col.classes = cls.phen2, phen.cmap = colors.list, phen.names = phen.names2,
                     col.names = sample.names2, main = " ", xlab = " ", ylab = " ", row.norm = row.norm,
                     cmap.type = cmap.type, char.rescale = char.rescale, legend = T)
  dev.off()
  if (!is.na(output.dataset)) {
    V.GCT <- m2
    colnames(V.GCT) <- sample.names2
    # BUG FIX: m2 has only max.n rows here; assigning the full-length name and
    # description vectors (as the original did) raises a dimnames-length error.
    row.names(V.GCT) <- pathway.names2[1:max.n]
    write.gct(gct.data.frame = V.GCT, descs = pathway.descs2[1:max.n], filename = output.dataset)
  }
}
MSIG.Define.Dataset.from.Table2 <- function(
  input.gct,
  table.txt,
  output.gct,
  output.txt = NULL, # optional version of table with overlap (GCT & TAB) samples
  output.cls,
  prefix_entries = F)
{
  # Intersect the samples of a GCT expression file with the rows of a
  # tab-delimited phenotype table, then write the matching expression subset
  # (output.gct), an optional subset of the table (output.txt) and a
  # multi-phenotype CLS file (output.cls).
  #
  # Args:
  #   input.gct: expression dataset (GCT path).
  #   table.txt: tab-delimited table; row names = sample names, one column
  #              per phenotype.
  #   output.gct, output.txt, output.cls: output paths (output.txt optional).
  #   prefix_entries: when TRUE, prefix each class label with its phenotype
  #                   (column) name, e.g. "sex.male".
  library(RColorBrewer)
  # Read input dataset
  dataset1 <- MSIG.Gct2Frame(filename = input.gct)
  m <- data.matrix(dataset1$ds)
  gene.names <- dataset1$row.names
  gene.decs <- dataset1$descs
  sample.names.gct <- dataset1$names
  Ns <- length(sample.names.gct)
  # Read Table
  tab <- read.delim(table.txt, header = T, row.names = 1, sep = "\t", skip = 0,
                    blank.lines.skip = T, comment.char = "", as.is = T)
  sample.names.tab <- row.names(tab)
  phen.names <- names(tab)
  # Samples present in both files define the output dataset.
  overlap <- intersect(sample.names.tab, sample.names.gct)
  print("sample names GCT")
  print(sample.names.gct)
  print("sample names TAB")
  print(sample.names.tab)
  locs.gct <- match(overlap, sample.names.gct)
  print(match(sample.names.tab, sample.names.gct))
  print(match(sample.names.gct, sample.names.tab))
  locs.tab <- match(overlap, sample.names.tab)
  print(locs.tab)
  print(c("GCT matching set (", length(locs.gct), " samples):", sample.names.gct[locs.gct]))
  print(c("TAB matching set (", length(overlap), " samples):", sample.names.tab[locs.tab]))
  print(c("overlap set (", length(overlap), " samples):", overlap))
  m2 <- m[, locs.gct]
  sample.names.gct <- sample.names.gct[locs.gct]
  sample.names.tab <- sample.names.tab[locs.tab]
  if (!is.null(output.txt)) {
    # Write the overlapping rows of the table with a "SAMPLE" header column.
    tab2 <- tab[locs.tab, ]
    col.names <- paste(colnames(tab2), collapse = "\t")
    col.names <- paste("SAMPLE", col.names, sep = "\t")
    write(noquote(col.names), file = output.txt, append = F, ncolumns = length(col.names))
    write.table(tab2, file = output.txt, quote = F, col.names = F, row.names = T, append = T, sep = "\t")
  }
  # One row per phenotype, one column per overlapping sample.
  cls.table <- t(tab[locs.tab, ])
  if (prefix_entries == TRUE) {
    for (i in 1:length(cls.table[, 1])) {
      # BUG FIX: the original pasted the full, unsubset column tab[, i]
      # (length nrow(tab)) into a row of length(overlap), which breaks
      # whenever the overlap is a proper subset; use the already-subset row.
      cls.table[i, ] <- paste(row.names(cls.table)[i], cls.table[i, ], sep = ".")
    }
  }
  if (!is.null(output.gct)) {
    V <- data.frame(m2)
    names(V) <- sample.names.gct
    row.names(V) <- gene.names
    write.gct(gct.data.frame = V, descs = gene.decs, filename = output.gct)
  }
  class.phen <- unique(cls.table)
  n <- length(class.phen)
  l <- length(cls.table[1, ])
  # Large recycled palette so every class gets a distinct-ish color.
  col.list <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                brewer.pal(n=8, name="BuGn"),
                brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                brewer.pal(n=8, name="BuGn"),
                brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                brewer.pal(n=8, name="BuGn"))
  # Collect the unique labels of every phenotype row (in order) and count them.
  num <- 0
  class.order.list <- NULL
  for (i in 1:length(cls.table[, 1])) {
    num <- num + length(unique(cls.table[i, ]))
    class.order.list <- c(class.order.list, unique(cls.table[i, ]))
  }
  # Extended CLS header carrying phenotype names and colors.
  phen.names.string <- paste("phen.names:", paste(phen.names, collapse = " "), sep = " ")
  sig.col <- col.list[1:num]
  col.phen.string <- paste("col.phen:", paste(sig.col, collapse = " "), sep = " ")
  cat(paste(l, num, length(cls.table[, 1]), phen.names.string, col.phen.string, sep = " "), "\n",
      file = output.cls, append = FALSE, sep = "")
  cat("# ", paste(class.order.list, collapse = " "), "\n", file = output.cls, append = TRUE, sep = "")
  for (i in 1:length(cls.table[, 1])) {
    cat(paste(cls.table[i, ], collapse = " "), "\n", file = output.cls, append = TRUE, sep = "")
  }
}
MSIG.Define.Dataset.from.Table.2 <- function(
  input.gct,
  table.txt,
  output.gct,
  output.cls,
  prefix_entries = F)
{
  # Quiet variant of MSIG.Define.Dataset.from.Table2 (no output.txt option):
  # intersect GCT samples with the rows of a tab-delimited phenotype table
  # and write the matching expression subset plus a multi-phenotype CLS file.
  # Returns NULL (without writing) when there is no sample overlap.
  #
  # Args:
  #   input.gct: expression dataset (GCT path).
  #   table.txt: tab-delimited table; row names = sample names, one column
  #              per phenotype.
  #   output.gct, output.cls: output paths.
  #   prefix_entries: when TRUE, prefix each class label with its phenotype
  #                   (column) name, e.g. "sex.male".
  library(RColorBrewer)
  # Read input dataset
  dataset1 <- MSIG.Gct2Frame(filename = input.gct)
  m <- data.matrix(dataset1$ds)
  gene.names <- dataset1$row.names
  gene.decs <- dataset1$descs
  sample.names.gct <- dataset1$names
  Ns <- length(sample.names.gct)
  # Read Table
  tab <- read.delim(table.txt, header = T, row.names = 1, sep = "\t", skip = 0,
                    blank.lines.skip = T, comment.char = "", as.is = T)
  sample.names.tab <- row.names(tab)
  phen.names <- names(tab)
  overlap <- intersect(sample.names.tab, sample.names.gct)
  # Nothing to do when the two files share no samples.
  # (The original performed this identical check twice.)
  if (length(overlap) == 0) { return(NULL) }
  locs.gct <- match(overlap, sample.names.gct)
  print(match(sample.names.tab, sample.names.gct))
  print(match(sample.names.gct, sample.names.tab))
  locs.tab <- match(overlap, sample.names.tab)
  m2 <- m[, locs.gct]
  sample.names.gct <- sample.names.gct[locs.gct]
  sample.names.tab <- sample.names.tab[locs.tab]
  # One row per phenotype, one column per overlapping sample.
  cls.table <- t(tab[locs.tab, ])
  if (prefix_entries == TRUE) {
    for (i in 1:length(cls.table[, 1])) {
      # BUG FIX: the original pasted the full, unsubset column tab[, i]
      # (length nrow(tab)) into a row of length(overlap), which breaks
      # whenever the overlap is a proper subset; use the already-subset row.
      cls.table[i, ] <- paste(row.names(cls.table)[i], cls.table[i, ], sep = ".")
    }
  }
  if (!is.null(output.gct)) {
    V <- data.frame(m2)
    names(V) <- sample.names.gct
    row.names(V) <- gene.names
    write.gct(gct.data.frame = V, descs = gene.decs, filename = output.gct)
  }
  class.phen <- unique(cls.table)
  n <- length(class.phen)
  l <- length(cls.table[1, ])
  # Large recycled palette so every class gets a distinct-ish color.
  col.list <- c(brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                brewer.pal(n=8, name="BuGn"),
                brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                brewer.pal(n=8, name="BuGn"),
                brewer.pal(n=7, name="Set2"), brewer.pal(n=7, name="Dark2"), brewer.pal(n=7, name="Set1"),
                brewer.pal(n=8, name="Accent"), brewer.pal(n=10, name="Spectral"), brewer.pal(n=8, name="Set3"),
                brewer.pal(n=8, name="BuGn"))
  # Unique labels of every phenotype row (kept in row order) and their count.
  class.order.list <- apply(cls.table, 1, unique)
  num <- sum(unlist(lapply(class.order.list, length)))
  class.order.list <- unlist(class.order.list)
  # Extended CLS header carrying phenotype names and colors.
  phen.names.string <- paste("phen.names:", paste(phen.names, collapse = " "), sep = " ")
  sig.col <- col.list[1:num]
  col.phen.string <- paste("col.phen:", paste(sig.col, collapse = " "), sep = " ")
  cat(paste(l, num, length(cls.table[, 1]), phen.names.string, col.phen.string, sep = " "), "\n",
      file = output.cls, append = FALSE, sep = "")
  cat("# ", paste(class.order.list, collapse = " "), "\n", file = output.cls, append = TRUE, sep = "")
  for (i in 1:length(cls.table[, 1])) {
    cat(paste(cls.table[i, ], collapse = " "), "\n", file = output.cls, append = TRUE, sep = "")
  }
}
rec.area <- function(
  obs,
  pred,
  metric = "absolute.deviation", # Either "squared.error" or "absolute.deviation"
  interval = 0.01
){
  # Area under the Regression Error Characteristic (REC) curve, plus a
  # Cramer-von Mises style statistic and p-value for the gap between the
  # REC curve and the diagonal reference.
  #
  # Args:
  #   obs, pred: observed and predicted values (must have equal length).
  #   metric: deviation measure for the accuracy curve.
  #   interval: spacing of the error-tolerance grid on [0, 1].
  # Returns: list(A = area under the curve, p.value, T2 = CvM statistic).
  #
  # NOTE(review): cvmts.pval() is expected from the `verification` package,
  # which must be attached by the caller; squared.error()/absolute.deviation()
  # are helpers defined alongside this function.
  tolerances <- seq(0, 1, by = interval)
  n.tol <- length(tolerances)
  widths <- rep(interval, n.tol)
  if (length(obs) != length(pred)) {
    stop("The number of observations does not equal the number of predictions.")
  }
  n.obs <- length(obs)
  # Accuracy at each tolerance = fraction of samples whose deviation fits.
  if (metric == "squared.error") {
    deviations <- (obs - pred)^2
    accuracy <- unlist(lapply(tolerances, FUN = squared.error, deviations, n.obs))
  } else if (metric == "absolute.deviation") {
    deviations <- abs(obs - pred)
    accuracy <- unlist(lapply(tolerances, FUN = absolute.deviation, deviations, n.obs))
  }
  # Trapezoidal integration of the accuracy curve over the tolerance grid:
  # each step contributes a rectangle at the previous height plus a triangle
  # for the increment up to the current height.
  previous <- c(0, accuracy[1:(n.tol - 1)])
  A <- sum(previous * widths + (accuracy - previous) * widths / 2)
  # Cramer-von Mises criterion: accuracy - tolerances is the pointwise gap
  # between the REC curve and the diagonal null model.
  T2 <- .25 * (sum((accuracy - tolerances)^2))
  p.value <- cvmts.pval(T2, n.tol, n.tol)
  return(list(A = A, p.value = p.value, T2 = T2))
}
squared.error <- function( error, squared.difference, n ){
  # Fraction (out of n) of squared deviations no larger than `error`.
  # NAs are excluded from the count, matching length(which(...)) semantics.
  sum(squared.difference <= error, na.rm = TRUE) / n
}
absolute.deviation <- function( error, absolute.difference, n ) {
  # Fraction (out of n) of absolute deviations no larger than `error`.
  # NAs are excluded from the count, matching length(which(...)) semantics.
  sum(absolute.difference <= error, na.rm = TRUE) / n
}
# Estimate the mutual information (in bits) between two numeric vectors from
# a 2-D kernel density estimate on an n.grid x n.grid grid:
#   MI = sum( P(x,y) * log2( P(x,y) / (P(x) P(y)) ) )
#
# Args:
#   x, y: numeric vectors of equal length.
#   n.grid: grid resolution for the density estimate.
# Returns: scalar MI estimate.
#
# NOTE(review): kde2d() and width.SJ() come from the MASS package ("dpi" =
# Sheather-Jones direct plug-in bandwidth); MASS must be attached by the
# caller.
mutual.inf <- function(x, y, n.grid=100) {
kde2d.xy <- kde2d(x, y, n = n.grid, h = c(width.SJ(x, method="dpi"), width.SJ(y, method="dpi")))
# X <- kde2d.xy$x
# Y <- kde2d.xy$y
# Normalize the KDE grid to a joint probability mass function.
PXY <- kde2d.xy$z/sum(kde2d.xy$z)
# Marginal of x (rows), expanded column-wise so PX[i, j] = P(x_i) and the
# elementwise product PX * PY aligns with PXY.
PX <- apply(PXY, MARGIN=1, sum)
PX <- PX/sum(PX)
PX <- matrix(PX, nrow=n.grid, ncol=n.grid)
# Marginal of y (columns), replicated row-wise so PY[i, j] = P(y_j).
PY <- apply(PXY, MARGIN=2, sum)
PY <- PY/sum(PY)
PY <- matrix(PY, byrow = TRUE, nrow=n.grid, ncol=n.grid)
MI <- sum(PXY * log2(PXY/(PX*PY)))
# browser()
return(MI)
}
# Signed, normalized mutual-information score between x and y, estimated from
# a 2-D kernel density (MASS::kde2d with biased cross-validation bandwidths).
# Returns sign(cor(x, y)) * ((HX + HY)/HXY - 1) -- a redundancy-style
# normalization signed by the direction of association -- or, when
# pos.and.neg = FALSE, the unsigned value; returns NA when either input is
# constant.
#
# NOTE(review): the `normalize.by` argument is accepted but unused by the
# live code (it is only referenced in the commented-out experiments below) --
# confirm before removing. kde2d()/bcv() require the MASS package to be
# attached by the caller.
mutual.inf.2 <- function(x, y, n.grid=100, normalize.by ="HXY", # Whether to normalize by HXY, HX, or HY
pos.and.neg = T
) {
# x and y can be binary or continuous
# If there is not sufficient variation in x and y,
# will take the standard deviation as the bandwidth
# (IQR finds the inter-quartile range of the data vector)
if( length(unique(x)) == 1 || length(unique(y)) == 1 ){
# browser()
return( NA )
}
# bandwidth.x = ifelse(IQR(x) == 0, bcv(x, n.grid), width.SJ(x, method="dpi"))
# bandwidth.y = ifelse(IQR(y) == 0, bcv(y, n.grid), width.SJ(y, method="dpi"))
# print("---")
# print(x)
# print(y)
# bcv() may warn when the bandwidth search hits its bounds; suppressed here.
kde2d.xy <- kde2d(x, y, n = n.grid, h = c(suppressWarnings(bcv(x)), suppressWarnings(bcv(y))) )
# X <- kde2d.xy$x
# Y <- kde2d.xy$y
# Z = kde2d.xy$z
# Normalize the KDE grid to a joint probability mass function.
PXY <- kde2d.xy$z/sum(kde2d.xy$z)
# Marginal of x (rows) and its entropy HX; then expand PX column-wise so it
# aligns elementwise with PXY.
PX <- apply(PXY, MARGIN=1, sum)
PX <- PX/sum(PX)
HX = -sum(PX * log2(PX))
PX <- matrix(PX, nrow=n.grid, ncol=n.grid)
# Marginal of y (columns) and its entropy HY; replicated row-wise.
PY <- apply(PXY, MARGIN=2, sum)
PY <- PY/sum(PY)
HY = -sum( PY * log2(PY))
PY <- matrix(PY, byrow = TRUE, nrow=n.grid, ncol=n.grid)
# browser()
# MIXY = PXY * log2(PXY/(PX*PY))
#
# if( pos.and.neg ){
# q1 = MIXY[1:(n.grid/2), 1:(n.grid/2)]
# q2 = MIXY[1:(n.grid/2), (n.grid/2 + 1):n.grid]
# q3 = MIXY[(n.grid/2+1):n.grid, 1:(n.grid/2)]
# q4 = MIXY[(n.grid/2+1):n.grid, (n.grid/2+1):n.grid]
#
# # q's divide MIXY into quarters. If the sum of q2 and q3 is greater than the sum of q1 and q4, then
# # x and y are negatively correlated.
# # on heatmap: q2 q4
# # q1 q3
#
## Ignore NaN's that are a result of underflow (experimentally derived)
# MI <- ifelse( sum(q1+q4, na.rm=TRUE) < sum(q2+q3, na.rm=TRUE),
# -sum(MIXY, na.rm=TRUE), sum(MIXY, na.rm=TRUE))
#} else{ MI = sum(MIXY, na.rm=TRUE)}
# MI <- ifelse( sum(q1+q4, na.rm=TRUE) < sum(q2+q3, na.rm=TRUE), -sum(q2+q3-q1-q4, na.rm=TRUE), sum(q1+q4-q2-q3, na.rm=TRUE))
# Joint entropy; NaNs from 0 * log2(0) underflow are dropped.
HXY <- - sum(PXY * log2(PXY), na.rm=TRUE)
# HX = -sum( PX * log2(PX) )
# HY = -sum( PY * log2(PY) )
# MI.norm = (HX+HY)/HXY
#browser()
# normalization.factor = 1 #ifelse(normalize.by=="HXY", HXY, ifelse(normalize.by=="HX", HX, HY))
## browser()
# (HX + HY)/HXY - 1 is the entropy-normalized dependence; the sign of the
# Pearson correlation supplies the direction when pos.and.neg is TRUE.
MI.norm = ifelse(pos.and.neg, sign(cor(x, y)), 1) * ((HX + HY)/HXY - 1) #MI/normalization.factor
# browser()
return( MI.norm )#list(MI=MI, HXY=HXY))
}
#mutual.inf.2.single.gene.target <- function( signature, gene.target){
# return(mutual.inf.2(gene.target, signature))
#}
mutual.inf.2.multiple.gene.targets <- function( signature, gene.targets ){
  # Normalized MI of one signature vector against every row of gene.targets.
  # Returns one value per row (NA for constant rows, per mutual.inf.2).
  mi.per.target <- apply(gene.targets, MARGIN=1, FUN=mutual.inf.2, signature)
  mi.per.target
}
mutual.inf.3 <- function(gene.target, signature.matrix, signature.indices, n.grid=100, gene.target.name = "",
                         n.randomizations = 100, tissue = "NA") {
  ## Mutual information of one gene target against every row (signature) of
  ## signature.matrix, with an empirical FDR derived from n.randomizations
  ## permutations of the target vector.
  ##
  ## How this is different from mutual.inf.2:
  ## x is a target pathway and y is a matrix;
  ## calculates a false discovery rate for the mutual information
  ## of the pathway with a randomized version of x.
  ## "signature.indices" is the indices of signatures that you are interested
  ## in using. This code is used in comparing the chosen signatures to SUMMARY,
  ## the mutation status of all the cell lines.
  ##
  ## Returns list(MI = observed MI for signature.indices, FDR = per-index FDR).
  ## Side effect: opens a graphics device and plots the observed vs randomized
  ## MI densities.
  n.signatures = length(signature.matrix[,1])
  # Null distribution: n.randomizations permuted copies of the target.
  gene.target.rand = t(replicate(n.randomizations, sample(gene.target)))
  # Observed MI of the target vs every signature row.
  MI.vector = apply(signature.matrix, MARGIN=1, FUN=mutual.inf.2, gene.target)
  # Randomized MI: every permuted target vs every signature row.
  MI.matrix.rand = apply(signature.matrix,
                         MARGIN=1, FUN=mutual.inf.2.multiple.gene.targets,
                         gene.target.rand)
  # FIX: dev.new() instead of quartz(), which exists only on macOS.
  dev.new()
  temp <- density(MI.vector, adjust=1, n = 512, from=min(MI.vector), to=max(MI.vector))
  x <- temp$x
  y <- temp$y/sum(temp$y)
  temp.rand <- density(MI.matrix.rand, adjust=1, n = 512, from=min(MI.matrix.rand), to=max(MI.matrix.rand))
  x.rand <- temp.rand$x
  y.rand <- temp.rand$y/sum(temp.rand$y)
  plot(x.rand, y.rand, type="l", lwd=2, xlab="MI",
       col="red",
       ylab = "P(MI)", main=paste(tissue, gene.target.name, n.randomizations, sep=" "))
  points(x, y, type="l", lwd=2, col = "black")
  legend("topright", c("actual gene target vs all gene sets", "randomized gene target vs all gene sets"), col=c("black", "red"), lwd=c(2,2))
  # FIX: removed leftover interactive browser() debug calls (one unconditional,
  # one gated on gene.target.name == "EGFR_AMP") that would halt batch runs.
  MI = MI.vector[signature.indices]
  FDR = vector(length=length(MI))
  ranked.MI.vector = rank(-MI.vector) # take negative so rank 1 corresponds to highest value
  median.MI.rand = median(MI.matrix.rand)
  for( i in 1:length(MI)){
    if( MI[i] > median.MI.rand ){
      # Right tail: how many randomized MIs exceed the observed one.
      rank.observed = ranked.MI.vector[signature.indices[i]]
      rank.randomized = sum(MI[i] < MI.matrix.rand)
    } else{
      # Left tail: mirror the rank so rank 1 corresponds to the most negative MI.
      rank.observed = n.signatures - ranked.MI.vector[signature.indices[i]] + 1
      rank.randomized = sum(MI[i] > MI.matrix.rand)
    }
    FDR[i] = (rank.randomized/n.randomizations)/rank.observed
  }
  return(list(MI=MI, FDR=FDR))
}
mutual.inf.3.v2 <- function(target.vector, comparison.matrix, n.grid=100, target.vector.name = "",
                            tissue = "NA", normalize.by = "HXY", pos.and.neg=T) {
  # Normalized MI of target.vector against every row of comparison.matrix,
  # scaled by the target's self-MI (MI of the target with itself) so a perfect
  # match scores 1. Unlike mutual.inf.3 there is no randomization here: the
  # returned FDR slot is a placeholder vector of 1's.
  self.mi <- mutual.inf.2(target.vector, target.vector, normalize.by=normalize.by)
  print(paste("MI.ref =", self.mi))
  raw.mi <- apply(comparison.matrix, MARGIN=1, FUN=mutual.inf.2,
                  target.vector, normalize.by=normalize.by,
                  pos.and.neg=pos.and.neg)
  scaled.mi <- raw.mi/self.mi
  list(MI=scaled.mi, FDR=rep(1, length=length(scaled.mi)))
}
mutual.inf.4 <- function(gene.targets, signature.matrix, signature.index, n.grid=100, gene.target.name = "",
                         n.randomizations = 100, tissue = "NA") {
  ## MI of one or more gene targets against every signature, with an empirical
  ## FDR for the single "winning" signature indicated by signature.index.
  ##
  ## How this is different from mutual.inf.2:
  ## gene.targets may be a vector or a matrix (one target per row); an FDR is
  ## computed from n.randomizations permutations of the first gene target.
  ##
  ## FIX: `tissue` was referenced in the plot title but was not a parameter
  ## (it resolved only through the calling/global environment); it is now an
  ## explicit argument with a backward-compatible default.
  ##
  ## Returns list(MI, FDR). Side effect: opens a graphics device and plots the
  ## observed vs randomized MI densities.
  if( is.vector(gene.targets)){
    # Promote a single target vector to a 1-row matrix.
    gene.targets = t(as.matrix(gene.targets))
  }
  n.gene.targets = length(gene.targets[,1])
  n.signatures = length(signature.matrix[,1])
  # Null distribution: permuted copies of the first gene target.
  gene.target.rand = t(replicate(n.randomizations, sample(gene.targets[1,])))
  MI.matrix = apply(signature.matrix, MARGIN=1,
                    FUN=mutual.inf.2.multiple.gene.targets,
                    gene.targets)
  MI.matrix.rand = apply(signature.matrix,
                         MARGIN=1, FUN=mutual.inf.2.multiple.gene.targets,
                         gene.target.rand)
  # FIX: dev.new() instead of quartz(), which exists only on macOS.
  dev.new()
  temp <- density(MI.matrix, adjust=1, n = 512, from=min(MI.matrix), to=max(MI.matrix))
  x <- temp$x
  y <- temp$y/sum(temp$y)
  # FIX: the randomized density previously used the range of the OBSERVED
  # MI.matrix; it now uses MI.matrix.rand itself, as mutual.inf.3 does.
  temp.rand <- density(MI.matrix.rand, adjust=1, n = 512, from=min(MI.matrix.rand), to=max(MI.matrix.rand))
  x.rand <- temp.rand$x
  y.rand <- temp.rand$y/sum(temp.rand$y)
  plot(x.rand, y.rand, type="l", lwd=2, xlab="MI",
       col="red",
       ylab = "P(MI)", main=paste(tissue, paste(gene.target.name, collapse=" "), rownames(signature.matrix)[signature.index], n.randomizations, sep=" "))
  points(x, y, type="l", lwd=2, col = "black")
  legend("topright", c("actual gene target(s) vs all gene sets", "randomized gene target vs all gene sets"), col=c("black", "red"), lwd=c(2,2))
  # FIX: removed leftover interactive browser() debug call that halted batch runs.
  if(is.matrix(MI.matrix)){
    MI = MI.matrix[,signature.index]
    ranked.MI.matrix = apply(-MI.matrix, MARGIN=1, rank)
    FDR = vector(length=n.gene.targets, mode="numeric")
    for( i in 1:n.gene.targets){
      if( MI[i] > median(MI.matrix.rand) ){
        rank.observed = ranked.MI.matrix[signature.index, i]
        rank.randomized = sum(MI[i] < MI.matrix.rand)
      } else{
        # FIX: + 1 so the worst rank is n.signatures (never 0, which would
        # divide by zero) -- consistent with mutual.inf.3.
        rank.observed = n.signatures - ranked.MI.matrix[signature.index, i] + 1
        rank.randomized = sum(MI[i] > MI.matrix.rand)
      }
      FDR[i] = (rank.randomized/n.randomizations)/rank.observed
    }
  } else{
    MI = MI.matrix[signature.index]
    ranked.MI.matrix = rank(-MI.matrix)
    if( MI > median(MI.matrix.rand)){
      rank.observed = ranked.MI.matrix[signature.index]
      rank.randomized = sum(MI <= MI.matrix.rand)
    } else{
      rank.observed = n.signatures - ranked.MI.matrix[signature.index]
      rank.randomized = sum(MI >= MI.matrix.rand)
    }
    FDR = (rank.randomized/n.randomizations)/rank.observed
  }
  return(list(MI=MI, FDR=FDR))
}
mutual.inf.4.v2 <- function(gene.targets, signature.matrix, signature.index, n.grid=100, gene.target.name = "",
                            n.randomizations = 100) {
  ## Variant of mutual.inf.4: MI of one or more gene targets against every
  ## signature, rescaled to [0, 1] via normalize(), with an optional empirical
  ## FDR for the single signature at signature.index. Setting
  ## n.randomizations = 0 skips the permutation null and returns FDR = 1's.
  ## No plotting is performed (the plotting code is retained below, commented
  ## out).
  ##
  ## How this is different from mutual.inf.2:
  ## x is a target pathway and y is a matrix
  ## calculates a false discovery rate for the mutual information
  ## of the pathway with a randomized version of x
  ## y.ind indicates the row index of the target gene set / signature
  # x and the elements of y can be binary or continuous
  # If there is not sufficient variation in x and y,
  # will take the standard deviation as the bandwidth
  # (IQR finds the inter-quartile range of the data vector)
  # "signature.indices" is the index of the "winning" signature that is used to compare to
  # all the genomic aberrations.
  # browser()
  if( is.vector(gene.targets)){
    # Promote a single target vector to a 1-row matrix.
    gene.targets = t(as.matrix(gene.targets))
  }
  n.gene.targets = length(gene.targets[,1])
  n.signatures = length(signature.matrix[,1])
  n.samples = length(gene.targets[1,])
  MI.matrix = matrix(ncol = n.signatures, nrow = n.gene.targets)
  if( n.randomizations > 0 ){
    # Null distribution: permuted copies of the first gene target, scored
    # against every signature and min-max rescaled like the observed MI.
    MI.matrix.rand = matrix(ncol = n.signatures, nrow = n.randomizations)
    gene.target.rand = t(replicate(n.randomizations, sample(gene.targets[1,])))
    MI.matrix.rand = apply(signature.matrix,
                           MARGIN=1, FUN=mutual.inf.2.multiple.gene.targets,
                           gene.target.rand)
    MI.matrix.rand = normalize(MI.matrix.rand)
  }
  # temp.MI.rand = vector(length=n.iter)
  # browser()
  # for( i in 1:length(signature.matrix[,1]) ){
  # browser()
  # Observed MI of each target row against each signature, rescaled to [0, 1].
  MI.matrix = apply(signature.matrix, MARGIN=1,
                    FUN=mutual.inf.2.multiple.gene.targets,
                    gene.targets)
  MI.matrix = normalize(MI.matrix)
  # MI.matrix.rand[i,] = apply(gene.target.rand,
  # MARGIN=1, FUN=mutual.inf.2,
  # signature.matrix[i,])
  # browser()
  # MI.vector.rand[i] = mean(temp.MI.rand)
  # }
  # x.rand = sample(x)
  # browser()
  # quartz()
  # temp <- density(MI.matrix, adjust=1, n = 512, from=min(MI.matrix), to=max(MI.matrix))
  # x <- temp$x
  # y <- temp$y/sum(temp$y)
  #
  # temp.rand <- density(MI.matrix.rand, adjust=1, n = 512, from=min(MI.matrix), to=max(MI.matrix))
  # x.rand <- temp.rand$x
  # y.rand <- temp.rand$y/sum(temp.rand$y)
  #
  ## pdf(file=paste(tissue, paste(gene.target.name, collapse="-"), rownames(signature.matrix)[signature.index], n.randomizations, "pdf", sep="."))
  ## quartz(file=paste(tissue, paste(gene.target.name, collapse="-"), rownames(signature.matrix)[signature.index], n.randomizations, "pdf", sep="."))
  ##if( gene.gene.target.name =="SUMMARY"){
  # plot(x.rand, y.rand, type="l", lwd=2, xlab="MI", #xlim = c(max(min(x), 10^-5), max(x)), ylim = range(c(y, y.rand)),
  # col="red",
  # ylab = "P(MI)", main=paste(tissue, paste(gene.target.name, collapse=" "), rownames(signature.matrix)[signature.index], n.randomizations, sep=" "))
  # points(x, y, type="l", lwd=2, col = "black")
  # legend("topright", c("actual gene target(s) vs all gene sets", "randomized gene target vs all gene sets"), col=c("black", "red"), lwd=c(2,2))
  ## if( gene.target.name[1] =="KRAS_AMP") {browser()}
  # browser()
  # dev.off()
  #}
  #
  # browser()
  # MI = ifelse(is.matrix(MI.matrix), MI.matrix[,signature.index], MI.matrix[signature.index])
  # ranked.MI.matrix = ifelse( is.matrix(MI.matrix), apply(-MI.matrix, MARGIN=1, rank), rank(-MI.matrix))
  # MI.vector = ifelse(is.matrix(MI.matrix))
  # browser()
  #MI.ref = mutual.inf.2( signature.matrix )
  # apply() returns a matrix when there are multiple gene targets, a plain
  # vector when there is only one; the two cases are handled separately.
  if(is.matrix(MI.matrix)){
    MI = MI.matrix[,signature.index]
    if( n.randomizations > 0 ){
      ranked.MI.matrix = apply(-MI.matrix, MARGIN=1, rank)
      FDR = vector(length=n.gene.targets, mode="numeric")
      # browser()
      for( i in 1:n.gene.targets){
        if( MI[i] > median(MI.matrix.rand) ){
          # Right tail: count randomized MIs exceeding the observed value.
          rank.observed = ranked.MI.matrix[signature.index, i]
          rank.randomized = sum(MI[i] < MI.matrix.rand)
        } else{
          # browser()
          # Left tail: mirror the rank. NOTE(review): unlike mutual.inf.3 no
          # "+ 1" is added here, so rank.observed can be 0 (division by zero
          # below) when this signature ranks last -- confirm intended.
          rank.observed = n.signatures - ranked.MI.matrix[signature.index, i]
          rank.randomized = sum(MI[i] > MI.matrix.rand)
        }
        FDR[i] = (rank.randomized/n.randomizations)/rank.observed
        # browser()
      }
    } else{ FDR = rep(1, length=n.gene.targets) }
  } else{
    MI = MI.matrix[signature.index]
    if( n.randomizations > 0 ){
      ranked.MI.matrix = rank(-MI.matrix)
      if( MI > median(MI.matrix.rand)){
        rank.observed = ranked.MI.matrix[signature.index]
        rank.randomized = sum(MI <= MI.matrix.rand)
      } else{
        rank.observed = n.signatures - ranked.MI.matrix[signature.index]
        rank.randomized = sum(MI >= MI.matrix.rand)
      }
      FDR = (rank.randomized/n.randomizations)/rank.observed
    } else{ FDR = rep(1, length=n.gene.targets) }
  }
  # if( gene.target.name == "EGFR_AMP" #|| gene.target.name =="TP53"
  # ){ browser() }
  # if( MI > 0 ){
  # MI.ind = which(x >= MI)
  # } else{ MI.ind = which( x <= MI) }
  # MI.rand.ind = which(x.rand >= MI)
  # MI.integral = sum(x[MI.ind]*y[MI.ind], na.rm=T)
  # MI.rand.integral = sum(x.rand[MI.ind]*y.rand[MI.ind], na.rm=T)
  # FDR = MI.rand.integral/MI.integral
  # browser()
  return(list(MI=MI, FDR=FDR))
}
mise <- function( x ) {
  # Mean over i of sum_j (x[j] - x[i])^2: the average total squared deviation
  # of the sample from each of its points.
  #
  # FIXES: the unused `r = seq(2, 10)` local was removed; the result is now
  # returned explicitly (previously the last expression was an assignment, so
  # the value was returned invisibly); seq_len() makes the loop safe for
  # zero-length input (returns NaN via 0/0 rather than erroring).
  n.x = length(x)
  # f[j] accumulates sum over i of (x[j] - x[i])^2
  f = vector(length=n.x, mode=mode(x))
  for( i in seq_len(n.x) ){
    f = f + (x - x[i])^2
  }
  expected.value = sum(f)/n.x
  return(expected.value)
}
amise <- function( v ){
  # Plug-in AMISE-optimal kernel bandwidth for univariate density estimation,
  # solved with the external newtonraphson() root finder.
  # Reference:
  # "Very fast optimal bandwith selection for univariate kernel density estimation"
  # Vikas Chandrakant Raykar and Ramani Duraiswami
  # [CS-TR-4774/UMIACS-TR-2005-73] June 28, 2006
  # Probabilists' Hermite polynomials of order 4 and 6.
  H4 <- function( x ){ x^4 - 6*x^2 + 3 }
  H6 <- function( x ){ x^6 - 15*x^4 + 45*x^2 - 15 }
  N = length(v)
  # Step 1 on page 11 of reference: scale estimate.
  # FIX: the reference uses the sample standard deviation; the original code
  # used mean(v), which can be zero or negative and made sigma^(-7) nonsense.
  sigma = sd(v)
  # Step 2 on p. 11: Gaussian plug-in estimates of the density functionals.
  # FIX: Phi8 is positive for a Gaussian (105/(32 sqrt(pi)) sigma^-9); the
  # original negative sign made g2 = (negative)^(1/9) evaluate to NaN in R.
  Phi6 = sigma^(-7)*(-15/(16*sqrt(pi)))
  Phi8 = sigma^(-9)*(105/(32*sqrt(pi)))
  # Step 3 on p. 11: pilot bandwidths for estimating Phi4 and Phi6.
  g1 = ( -6/(sqrt(2*pi) * Phi6 * N))^(1/7)
  g2 = ( 30/(sqrt(2*pi) * Phi8 * N))^(1/9)
  # Make a matrix Z where Z(i,j) = x_i - x_j
  Z = matrix(v, ncol = N, nrow = N, byrow=TRUE) - matrix(v, ncol = N, nrow = N, byrow=FALSE)
  Phi4 <- function( g ) 1/(N*(N-1)*sqrt(2*pi)*g^5) * sum( H4(Z/g) * exp( -(Z^2)/(2*g^2)) )
  Phi4.g1 = Phi4(g1)
  # FIX: Phi6 must be estimated with the 6th-order Hermite polynomial, pilot
  # bandwidth g2 and normalization g2^7; the original re-used H4/g1/g1^5,
  # duplicating Phi4.g1 (note H6 was defined but never used).
  Phi6.g2 = 1/(N*(N-1)*sqrt(2*pi)*g2^7) * sum( H6(Z/g2) * exp( -(Z^2)/(2*g2^2)) )
  # Step 4: solve h = (1/(sqrt(2) * Phi4(Y(h)) * N))^(1/5) for h.
  Y <- function( h ){
    ( (-6*sqrt(2)*Phi4.g1)/Phi6.g2)^(1/7)*h^(5/7)
  }
  fxn <- function( h ){
    h - ( 1/ (sqrt(2) * Phi4(Y(h)) * N))^(1/5)
  }
  # NOTE(review): mean(v) as the Newton start point is kept from the original;
  # a bandwidth-scale start such as sd(v) may converge more reliably -- confirm.
  newtonraphson(fxn, mean(v))
}
#parzen.window <- function(z, h){
# Sigma = cov(z,z)
#
#
#}
write.cls.with.locs <- function( output.cls,
                                 cls.labels,
                                 phen.names){
  # Write a multi-phenotype CLS-format file.
  #
  # output.cls  - path of the file to write (overwritten).
  # cls.labels  - matrix of class labels, one phenotype per row, one sample
  #               per column.
  # phen.names  - names of the phenotypes (one per row of cls.labels).
  #
  # Header line: "<n.samples> <n.distinct.labels> <n.phenotypes> phen.names: ... col.names:"
  # Second line: "# " followed by the distinct labels of each phenotype in order.
  # Then one line of labels per phenotype.
  #
  # FIX: the body referenced `cls.list`, which is never defined in this
  # function (the parameter is `cls.labels`); it would only have worked by
  # accident via a stale global. All uses now refer to cls.labels.
  class.order.list = apply(cls.labels, 1, unique)
  # Total number of distinct labels summed over all phenotypes.
  num = sum(unlist(lapply(class.order.list, length)))
  class.order.list = unlist(class.order.list)
  class.phen <- unique(cls.labels)
  n <- length(class.phen)
  l <- length(cls.labels[1,])
  phen.names.string <- paste("phen.names:", paste(phen.names, collapse=" "), "col.names:", sep=" ")
  cat(paste(l, num, length(cls.labels[, 1]), phen.names.string, sep=" "), "\n",
      file = output.cls, append = FALSE, sep = "")
  cat("# ", paste(class.order.list, collapse=" "), "\n", file = output.cls, append = TRUE, sep = "")
  for (i in 1:length(cls.labels[,1])) {
    cat(paste(cls.labels[i,], collapse=" "), "\n", file = output.cls, append = TRUE, sep = "")
  }
}
normalize <- function( v ){
  # Linearly rescale v onto [0, 1]: the minimum maps to 0, the maximum to 1.
  rng <- range(v)
  (v - rng[1])/(rng[2] - rng[1])
}
mutual.inf.P <- function(x, y, n.grid=100) {
  # Mutual information and normalized MI (NMI) between two vectors, estimated
  # from a 2-D kernel density (MASS::kde2d) on an n.grid x n.grid grid with
  # bcv() bandwidths. Adding .Machine$double.eps to the joint table avoids
  # log2(0) = -Inf in the entropy sums.
  #
  # Returns list(MI, HXY, HX, HY, NMI) where NMI = sign(cor) * ((HX+HY)/HXY - 1).
  #
  # FIX: removed a dead bare `MI` expression statement after the MI assignment
  # (a no-op inside a function) and fixed the "peason" typo in the comment.
  # for definitions of mutual information and the universal metric (NMI) see the
  # definition of "Mutual Information" in wikipedia and Thomas and Cover's book
  # kde2d.xy <- kde2d(x, y, n = n.grid, h = c(width.SJ(x, method="dpi"), width.SJ(y, method="dpi")))
  kde2d.xy <- kde2d(x, y, n = n.grid, h = c(bcv(x), bcv(y)))
  X <- kde2d.xy$x
  Y <- kde2d.xy$y
  # PXY <- kde2d.xy$z/sum(kde2d.xy$z)
  PXY <- kde2d.xy$z/sum(kde2d.xy$z) + .Machine$double.eps
  # Marginal P(X) (row sums) and its entropy.
  # PX <- apply(PXY, MARGIN=1, sum)
  PX <- rowSums(PXY)
  PX <- PX/sum(PX)
  HX <- -sum(PX * log2(PX))
  PX <- matrix(PX, nrow=n.grid, ncol=n.grid)
  # Marginal P(Y) (column sums) and its entropy.
  # PY <- apply(PXY, MARGIN=2, sum)
  PY <- colSums(PXY)
  PY <- PY/sum(PY)
  HY <- -sum(PY * log2(PY))
  PY <- matrix(PY, byrow = TRUE, nrow=n.grid, ncol=n.grid)
  MI <- sum(PXY * log2(PXY/(PX*PY)))
  HXY <- - sum(PXY * log2(PXY))
  NMI <- sign(cor(x, y)) * ((HX + HY)/HXY - 1) # use Pearson correlation to get the sign (directionality)
  return(list(MI=MI, HXY=HXY, HX=HX, HY=HY, NMI=NMI))
}
OPAM.Evaluate.Results <- function(
  input.ds,
  input.cls,
  phenotype = NULL,
  target.class = NULL,
  target.type = "discrete",
  output.txt,
  output.pdf) {
  # Score every row (gene set / model) of a GCT dataset against a target
  # phenotype from a CLS file. Writes a heatmap with per-row annotations to
  # output.pdf and a tab-delimited statistics table to output.txt.
  # Per row: NMI (normalized mutual information, scaled by the target's
  # self-NMI), ROC AUC with p-value, and a two-sample t statistic with
  # p-value (AUC and t-test only when target.type == "discrete").
  #
  # FIX: `sample.names[ind]` was evaluated but its result discarded, so the
  # x-axis sample labels were NOT reordered with the data columns; it is now
  # assigned, matching OPAM.Evaluate.Results.2.
  pdf(file=output.pdf, height=8.5, width=11)
  dataset <- MSIG.Gct2Frame(filename = input.ds)  # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  model.names <- dataset$row.names
  model.descs <- dataset$descs
  Ns <- length(m[1,])
  for (i in 1:length(m[,1])) {
    if (sd(m[i,]) == 0) {
      val <- m[i, 1]
      m[i,] <- m[i,] + runif(n=Ns, min= val - 0.001, max=val + 0.001) # add small noise to flat profiles
    }
  }
  dim(m)
  sample.names <- dataset$names
  CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
  cls.labels <- CLS$class.v
  cls.phen <- CLS$phen
  cls.list <- CLS$class.list
  library(verification)
  # Locate the requested phenotype row (defaults to the first one).
  if (is.null(phenotype)) {
    phen.loc <- 1
  } else {
    phen.loc <- match(phenotype, CLS$phen.names)
  }
  if (is.vector(CLS$class.list)) {
    target.vec <- CLS$class.list
  } else {
    target.vec <- CLS$class.list[phen.loc,]
  }
  # Discrete targets become a 0/1 membership vector for target.class.
  if (target.type == "continuous") {
    target <- target.vec
  } else if (target.type == "discrete") {
    target <- ifelse(target.vec == target.class, 1, 0)
  }
  # Sort samples by target value so the phenotype forms a block in the heatmap.
  ind <- order(target)
  target <- target[ind]
  target.vec <- target.vec[ind]
  m <- m[, ind]
  sample.names <- sample.names[ind]
  class.v <- CLS$class.v
  if (is.vector(class.v)) {
    class.v <- class.v[ind]
  } else {
    class.v <- class.v[, ind]
  }
  annot <- MI <- AUC <- AUC.pval <- t.stat <- t.pval <- vector(length=dim(m)[1], mode="numeric")
  # Self-NMI of the target: used to rescale each row's NMI so a perfect match = 1.
  NMI.ref <- mutual.inf.P(x = target, y = target, n.grid=100)$NMI
  for (i in 1:dim(m)[1]) {
    feature <- m[i,]
    MI[i] <- signif(mutual.inf.P(target, feature, n.grid=100)$NMI/NMI.ref, 4)
    if (target.type == "continuous") {
      AUC[i] <- AUC.pval[i] <- t.stat[i] <- t.pval[i] <- "-"
    } else if (target.type == "discrete") {
      feature.norm <- (feature - min(feature))/(max(feature) - min(feature))
      perf.auc <- roc.area(target, feature.norm)
      # Report anti-correlated AUC as a negative value.
      AUC[i] <- ifelse(perf.auc$A < 0.5, -(1 - perf.auc$A), perf.auc$A)
      AUC[i] <- signif(AUC[i], digits=4)
      p.val <- perf.auc$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      AUC.pval[i] <- signif(p.val, digits=4)
      temp <- split(feature, target)
      x <- temp$'1'
      y <- temp$'0'
      t.stat[i] <- signif(t.test(x=x, y=y)$statistic, digits=4)
      p.val <- t.test(x=x, y=y)$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      t.pval[i] <- signif(p.val, digits=4)
    }
    annot[i] <- paste(MI[i], " ", AUC[i], " (", AUC.pval[i], ") ", t.stat[i], " (", t.pval[i], ")", sep="")
  }
  # Red-to-blue diverging palette for the z-scored heatmap.
  mycol <- vector(length=512, mode = "numeric")
  for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
  for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
  mycol <- rev(mycol)
  cex.axis = 1
  ncolors <- length(mycol)
  # Top strip: target phenotype; bottom: row-standardized data heatmap.
  nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(4, 10), FALSE)
  par(mar = c(1, 15, 5, 15))
  max.v <- max(max(target), -min(target))
  V1 <- target
  image(1:length(target), 1:1, as.matrix(V1), zlim = c(0, 1), col=c("yellow", "purple"), axes=FALSE, main="", sub = "", xlab= "", ylab="")
  axis(2, at=1:1, labels=paste(phenotype, target.class), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
  axis(4, at=1:1, labels=paste("NMI AUC (p-val) t-test (p-val)", sep=""), adj= 0.5, tick=FALSE,
       las = 1, cex.axis=0.80, font.axis=1, line=-1)
  par(mar = c(5, 15, 1, 15))
  V1 <- m
  # Row-standardize, then map symmetrically about 0 into the color range.
  for (i in 1:dim(V1)[1]) V1[i,] <- (V1[i,] - mean(V1[i,]))/sd(V1[i,])
  max.v <- max(max(V1), -min(V1))
  V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
  V1 <- apply(V1, MARGIN=2, FUN=rev)
  image(1:dim(V1)[2], 1:dim(V1)[1], t(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
  axis(2, at=1:dim(V1)[1], labels=row.names(V1), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
  axis(4, at=1:dim(V1)[1], labels=rev(annot), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
  axis(1, at=1:dim(V1)[2], labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  dev.off()
  # Tab-delimited stats table, one row per gene set.
  annot2 <- data.frame(cbind(MI, AUC, AUC.pval, t.stat, t.pval))
  row.names(annot2) <- row.names(m)
  write(paste(c("gene set ", noquote(colnames(annot2))), collapse="\t"), file = output.txt, append = F, ncolumns = length(colnames(annot2)))
  write.table(annot2, file=output.txt, append=T, quote=F, sep="\t", eol="\n", col.names=F, row.names=T)
}
OPAM.Evaluate.Results.2 <- function(
  input.ds,
  input.cls,
  phenotype = NULL,
  target.class = NULL,
  target.type = "discrete",
  sort.results = T,
  display.top.n = 20,
  output.txt,
  output.pdf) {
  # Like OPAM.Evaluate.Results, but handles single-row datasets, optionally
  # sorts rows by decreasing NMI (sort.results) and displays only the top
  # display.top.n rows in the heatmap. Writes output.pdf and output.txt.
  #
  # FIX: when reordering results by NMI, `t.pval <= t.pval[MI.order]` was a
  # comparison (its result discarded), not an assignment, so t-test p-values
  # stayed in the original row order and were reported against the wrong gene
  # sets. It is now `t.pval <- t.pval[MI.order]`.
  pdf(file=output.pdf, height=8.5, width=11)
  dataset <- MSIG.Gct2Frame(filename = input.ds)  # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  N <- dim(m)[1]
  model.names <- dataset$row.names
  model.descs <- dataset$descs
  Ns <- length(as.matrix(m)[1,])
  for (i in 1:N) {
    if (sd(as.matrix(m)[i,]) == 0) {
      val <- as.matrix(m)[i, 1]
      m[i,] <- as.matrix(m)[i,] + runif(n=Ns, min= val - 0.001, max=val + 0.001) # add small noise to flat profiles
    }
  }
  dim(m)
  sample.names <- dataset$names
  CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
  cls.labels <- CLS$class.v
  cls.phen <- CLS$phen
  cls.list <- CLS$class.list
  library(verification)
  # Locate the requested phenotype row (defaults to the first one).
  if (is.null(phenotype)) {
    phen.loc <- 1
  } else {
    phen.loc <- match(phenotype, CLS$phen.names)
  }
  if (is.vector(CLS$class.list)) {
    target.vec <- CLS$class.list
  } else {
    target.vec <- CLS$class.list[phen.loc,]
  }
  # Discrete targets become a 0/1 membership vector for target.class.
  if (target.type == "continuous") {
    target <- target.vec
  } else if (target.type == "discrete") {
    target <- ifelse(target.vec == target.class, 1, 0)
  }
  # Sort samples by target value so the phenotype forms a block in the heatmap.
  ind <- order(target)
  target <- target[ind]
  target.vec <- target.vec[ind]
  m <- as.matrix(m)[, ind]
  sample.names <- sample.names[ind]
  class.v <- CLS$class.v
  if (is.vector(class.v)) {
    class.v <- class.v[ind]
  } else {
    class.v <- class.v[, ind]
  }
  annot <- MI <- AUC <- AUC.pval <- t.stat <- t.pval <- vector(length=N, mode="numeric")
  # Self-NMI of the target: used to rescale each row's NMI so a perfect match = 1.
  NMI.ref <- mutual.inf.P(x = target, y = target, n.grid=100)$NMI
  for (i in 1:N) {
    if (N == 1) {
      feature <- m
    } else {
      feature <- m[i,]
    }
    MI[i] <- signif(mutual.inf.P(target, feature, n.grid=100)$NMI/NMI.ref, 4)
    if (target.type == "continuous") {
      AUC[i] <- AUC.pval[i] <- t.stat[i] <- t.pval[i] <- "-"
    } else if (target.type == "discrete") {
      feature.norm <- (feature - min(feature))/(max(feature) - min(feature))
      perf.auc <- roc.area(target, feature.norm)
      # Report anti-correlated AUC as a negative value.
      AUC[i] <- ifelse(perf.auc$A < 0.5, -(1 - perf.auc$A), perf.auc$A)
      AUC[i] <- signif(AUC[i], digits=4)
      p.val <- perf.auc$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      AUC.pval[i] <- signif(p.val, digits=4)
      temp <- split(feature, target)
      x <- temp$'1'
      y <- temp$'0'
      t.stat[i] <- signif(t.test(x=x, y=y)$statistic, digits=4)
      p.val <- t.test(x=x, y=y)$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      t.pval[i] <- signif(p.val, digits=4)
    }
    annot[i] <- paste(MI[i], " ", AUC[i], " (", AUC.pval[i], ") ", t.stat[i], " (", t.pval[i], ")", sep="")
  }
  # Reorder all per-row statistics together by decreasing NMI.
  if ((N > 1) & (sort.results == T)) {
    MI.order <- order(MI, decreasing=T)
    MI <- MI[MI.order]
    AUC <- AUC[MI.order]
    AUC.pval <- AUC.pval[MI.order]
    t.stat <- t.stat[MI.order]
    t.pval <- t.pval[MI.order]
    m <- as.matrix(m)[MI.order,]
    annot <- annot[MI.order]
  }
  # Red-to-blue diverging palette for the z-scored heatmap.
  mycol <- vector(length=512, mode = "numeric")
  for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
  for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
  mycol <- rev(mycol)
  cex.axis = 1
  ncolors <- length(mycol)
  # Top strip: target phenotype; bottom: row-standardized data heatmap.
  nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(4, 10), FALSE)
  par(mar = c(1, 15, 5, 15))
  max.v <- max(max(target), -min(target))
  V1 <- target
  image(1:length(target), 1:1, as.matrix(V1), zlim = c(0, 1), col=c("yellow", "purple"), axes=FALSE, main="", sub = "", xlab= "", ylab="")
  axis(2, at=1:1, labels=paste(phenotype, target.class), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
  axis(4, at=1:1, labels=paste("NMI AUC (p-val) t-test (p-val)", sep=""), adj= 0.5, tick=FALSE,
       las = 1, cex.axis=0.80, font.axis=1, line=-1)
  par(mar = c(5, 15, 1, 15))
  if (display.top.n > N) display.top.n <- N
  if (N == 1) {
    V1 <- m
    V1 <- (V1 - mean(V1))/sd(V1)
  } else {
    V1 <- m[1:display.top.n, ]
    for (i in 1:display.top.n) V1[i,] <- (V1[i,] - mean(V1[i,]))/sd(V1[i,])
  }
  # Map symmetrically about 0 into the color range.
  max.v <- max(max(V1), -min(V1))
  V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
  if (N > 1) {
    V1 <- apply(V1, MARGIN=2, FUN=rev)
    image(1:Ns, 1:display.top.n, t(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:display.top.n, labels=row.names(V1), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(4, at=1:display.top.n, labels=rev(annot[1:display.top.n]), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  } else {
    image(1:Ns, 1:1, as.matrix(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:1, labels=model.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(4, at=1:1, labels=annot[1], adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  }
  dev.off()
  # Tab-delimited stats table, one row per gene set.
  annot2 <- data.frame(cbind(MI, AUC, AUC.pval, t.stat, t.pval))
  row.names(annot2) <- row.names(m)
  write(paste(c("gene set ", noquote(colnames(annot2))), collapse="\t"), file = output.txt, append = F,
        ncolumns = length(colnames(annot2)))
  write.table(annot2, file=output.txt, append=T, quote=F, sep="\t", eol="\n", col.names=F, row.names=T)
}
OPAM.Evaluate.Results.2.1 <- function(
  input.ds,
  input.cls,
  phenotype = NULL,
  target.class = NULL,
  target.type = "discrete",
  sort.results = T,
  display.top.n = 20,
  output.txt,
  output.tiff) {
  # TIFF variant of OPAM.Evaluate.Results.2: same analysis (per-row NMI, AUC,
  # t-test vs a target phenotype), but renders to a TIFF device, rounds to 2
  # significant digits and uses a gray/black phenotype strip.
  #
  # FIX: when reordering results by NMI, `t.pval <= t.pval[MI.order]` was a
  # comparison (its result discarded), not an assignment, so t-test p-values
  # stayed in the original row order and were reported against the wrong gene
  # sets. It is now `t.pval <- t.pval[MI.order]`.
  tiff(file=output.tiff, width = 1200, height = 1000, units = "px", pointsize = 17)
  dataset <- MSIG.Gct2Frame(filename = input.ds)  # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  N <- dim(m)[1]
  model.names <- dataset$row.names
  model.descs <- dataset$descs
  Ns <- length(as.matrix(m)[1,])
  for (i in 1:N) {
    if (sd(as.matrix(m)[i,]) == 0) {
      val <- as.matrix(m)[i, 1]
      m[i,] <- as.matrix(m)[i,] + runif(n=Ns, min= val - 0.001, max=val + 0.001) # add small noise to flat profiles
    }
  }
  dim(m)
  sample.names <- dataset$names
  CLS <- MSIG.ReadPhenFile.2(file = input.cls) # Read phenotype file (CLS format)
  cls.labels <- CLS$class.v
  cls.phen <- CLS$phen
  cls.list <- CLS$class.list
  library(verification)
  # Locate the requested phenotype row (defaults to the first one).
  if (is.null(phenotype)) {
    phen.loc <- 1
  } else {
    phen.loc <- match(phenotype, CLS$phen.names)
  }
  if (is.vector(CLS$class.list)) {
    target.vec <- CLS$class.list
  } else {
    target.vec <- CLS$class.list[phen.loc,]
  }
  # Discrete targets become a 0/1 membership vector for target.class.
  if (target.type == "continuous") {
    target <- target.vec
  } else if (target.type == "discrete") {
    target <- ifelse(target.vec == target.class, 1, 0)
  }
  # Sort samples by target value so the phenotype forms a block in the heatmap.
  ind <- order(target)
  target <- target[ind]
  target.vec <- target.vec[ind]
  m <- as.matrix(m)[, ind]
  sample.names <- sample.names[ind]
  class.v <- CLS$class.v
  if (is.vector(class.v)) {
    class.v <- class.v[ind]
  } else {
    class.v <- class.v[, ind]
  }
  annot <- MI <- AUC <- AUC.pval <- t.stat <- t.pval <- vector(length=N, mode="numeric")
  # Self-NMI of the target: used to rescale each row's NMI so a perfect match = 1.
  NMI.ref <- mutual.inf.P(x = target, y = target, n.grid=100)$NMI
  for (i in 1:N) {
    if (N == 1) {
      feature <- m
    } else {
      feature <- m[i,]
    }
    MI[i] <- signif(mutual.inf.P(target, feature, n.grid=100)$NMI/NMI.ref, 2)
    if (target.type == "continuous") {
      AUC[i] <- AUC.pval[i] <- t.stat[i] <- t.pval[i] <- "-"
    } else if (target.type == "discrete") {
      feature.norm <- (feature - min(feature))/(max(feature) - min(feature))
      perf.auc <- roc.area(target, feature.norm)
      # Report anti-correlated AUC as a negative value.
      AUC[i] <- ifelse(perf.auc$A < 0.5, -(1 - perf.auc$A), perf.auc$A)
      AUC[i] <- signif(AUC[i], digits=2)
      p.val <- perf.auc$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      AUC.pval[i] <- signif(p.val, digits=2)
      temp <- split(feature, target)
      x <- temp$'1'
      y <- temp$'0'
      t.stat[i] <- signif(t.test(x=x, y=y)$statistic, digits=2)
      p.val <- t.test(x=x, y=y)$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      t.pval[i] <- signif(p.val, digits=2)
    }
    annot[i] <- paste(MI[i], " ", AUC[i], " (", AUC.pval[i], ") ", t.stat[i], " (", t.pval[i], ")", sep="")
  }
  # Reorder all per-row statistics together by decreasing NMI.
  if ((N > 1) & (sort.results == T)) {
    MI.order <- order(MI, decreasing=T)
    MI <- MI[MI.order]
    AUC <- AUC[MI.order]
    AUC.pval <- AUC.pval[MI.order]
    t.stat <- t.stat[MI.order]
    t.pval <- t.pval[MI.order]
    m <- as.matrix(m)[MI.order,]
    annot <- annot[MI.order]
  }
  # Red-to-blue diverging palette for the z-scored heatmap.
  mycol <- vector(length=512, mode = "numeric")
  for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue=255)
  for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue=255)
  mycol <- rev(mycol)
  cex.axis = 1
  ncolors <- length(mycol)
  # Top strip: target phenotype; bottom: row-standardized data heatmap.
  nf <- layout(matrix(c(1, 2), 2, 1, byrow=T), 1, c(2, 10), FALSE)
  par(mar = c(1, 19, 4, 11))
  max.v <- max(max(target), -min(target))
  V1 <- target
  image(1:length(target), 1:1, as.matrix(V1), zlim = c(0, 1), col=c("lightgray", "black"), axes=FALSE, main="", sub = "", xlab= "", ylab="")
  axis(2, at=1:1, labels=paste(phenotype, target.class), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
  axis(4, at=1:1, labels=paste("NMI AUC (p-val) t-test (p-val)", sep=""), adj= 0.5, tick=FALSE,
       las = 1, cex.axis=0.80, font.axis=1, line=-1)
  par(mar = c(4, 19, 1, 11))
  if (display.top.n > N) display.top.n <- N
  if (N == 1) {
    V1 <- m
    V1 <- (V1 - mean(V1))/sd(V1)
  } else {
    V1 <- m[1:display.top.n, ]
    for (i in 1:display.top.n) V1[i,] <- (V1[i,] - mean(V1[i,]))/sd(V1[i,])
  }
  # Map symmetrically about 0 into the color range.
  max.v <- max(max(V1), -min(V1))
  V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))
  if (N > 1) {
    V1 <- apply(V1, MARGIN=2, FUN=rev)
    image(1:Ns, 1:display.top.n, t(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:display.top.n, labels=row.names(V1), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(4, at=1:display.top.n, labels=rev(annot[1:display.top.n]), adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  } else {
    image(1:Ns, 1:1, as.matrix(V1), zlim = c(0, ncolors), col=mycol, axes=FALSE, main="", sub = "", xlab= "", ylab="")
    axis(2, at=1:1, labels=model.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(4, at=1:1, labels=annot[1], adj= 0.5, tick=FALSE, las = 1, cex.axis=0.70, font.axis=1, line=-1)
    axis(1, at=1:Ns, labels=sample.names, adj= 0.5, tick=FALSE, las = 3, cex.axis=0.60, font.axis=1, line=-1)
  }
  dev.off()
  # Tab-delimited stats table, one row per gene set.
  annot2 <- data.frame(cbind(MI, AUC, AUC.pval, t.stat, t.pval))
  row.names(annot2) <- row.names(m)
  write(paste(c("gene set ", noquote(colnames(annot2))), collapse="\t"), file = output.txt, append = F,
        ncolumns = length(colnames(annot2)))
  write.table(annot2, file=output.txt, append=T, quote=F, sep="\t", eol="\n", col.names=F, row.names=T)
}
# Evaluate how well each row of a GCT dataset (gene-set scores / model
# projections) discriminates a target phenotype read from a CLS file.
# For every row it computes:
#   * NMI    -- normalized mutual information vs. the target (mutual.inf.P),
#               divided by the target's self-NMI;
#   * AUC    -- signed ROC area (negative when < 0.5) plus p-value
#               (discrete targets only);
#   * t-test -- two-sample t statistic and p-value (discrete targets only).
# A phenotype bar + row-standardized heatmap is drawn to output.pdf and the
# per-row statistics table is written tab-delimited to output.txt.
#
# Args:
#   input.ds        path to the dataset (GCT format).
#   input.cls       path to the phenotype file (CLS format).
#   phenotype       phenotype row name in the CLS file (NULL = first row).
#   target.class    label treated as the positive ("1") class.
#   target.type     "discrete" or "continuous".
#   sort.phenotype  order samples by target value before plotting?
#   sort.results    order rows by decreasing NMI?
#   display.top.n   number of top rows shown in the heatmap.
#   output.txt      output path for the statistics table.
#   output.pdf      output path for the figure.
#   cex.axis        axis-label magnification used throughout the figure.
OPAM.Evaluate.Results.4 <- function(
  input.ds,
  input.cls,
  phenotype = NULL,
  target.class = NULL,
  target.type = "discrete",
  sort.phenotype = T,
  sort.results = T,
  display.top.n = 20,
  output.txt,
  output.pdf,
  cex.axis = 0.7) {
  pdf(file = output.pdf, height = 8.5, width = 11)
  dataset <- MSIG.Gct2Frame(filename = input.ds)  # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  N <- dim(m)[1]
  model.names <- dataset$row.names
  model.descs <- dataset$descs
  Ns <- length(as.matrix(m)[1,])
  for (i in 1:N) {
    if (sd(as.matrix(m)[i,]) == 0) {
      val <- as.matrix(m)[i, 1]
      m[i,] <- as.matrix(m)[i,] + runif(n = Ns, min = val - 0.001, max = val + 0.001)  # add small noise to flat profiles
    }
  }
  dim(m)
  sample.names <- dataset$names
  CLS <- MSIG.ReadPhenFile.2(file = input.cls)  # Read phenotype file (CLS format)
  cls.labels <- CLS$class.v
  cls.phen <- CLS$phen
  cls.list <- CLS$class.list
  library(verification)
  # Locate the requested phenotype row (default: first row of the CLS file).
  if (is.null(phenotype)) {
    phen.loc <- 1
  } else {
    phen.loc <- match(phenotype, CLS$phen.names)
  }
  # Build the numeric target: 0/1 membership for discrete targets, or the
  # numeric labels plus a tiny jitter (to break ties) for continuous ones.
  if (is.vector(CLS$class.list)) {
    if (target.type == "discrete") {
      target <- ifelse(CLS$class.list == target.class, 1, 0)
    } else {
      target <- as.numeric(CLS$class.v) + runif(length(CLS$class.v), min = 0, max = 10 * .Machine$double.eps)
    }
  } else {
    if (target.type == "discrete") {
      target <- ifelse(CLS$class.list[phen.loc,] == target.class, 1, 0)
    } else {
      target <- as.numeric(CLS$class.v[phen.loc,]) + runif(length(CLS$class.v[phen.loc,]), min = 0, max = 10 * .Machine$double.eps)
    }
  }
  print(paste("target:", target))
  class.v <- CLS$class.v
  # Optionally order samples by increasing target value so the phenotype bar
  # in the figure is contiguous.
  if (sort.phenotype == T) {
    ind <- order(target)
    target <- target[ind]
    m <- as.matrix(m)[, ind]
    sample.names <- sample.names[ind]
    if (is.vector(class.v)) {
      class.v <- class.v[ind]
    } else {
      class.v <- class.v[, ind]
    }
  }
  annot <- MI <- AUC <- AUC.pval <- t.stat <- t.pval <- vector(length = N, mode = "numeric")
  # Self-NMI of the target, used to normalize every row's NMI score.
  NMI.ref <- mutual.inf.P(x = target, y = target, n.grid = 100)$NMI
  for (i in 1:N) {
    if (N == 1) {
      feature <- m
    } else {
      feature <- m[i,]
    }
    MI[i] <- signif(mutual.inf.P(target, feature, n.grid = 100)$NMI/NMI.ref, 4)
    if (target.type == "continuous") {
      AUC[i] <- AUC.pval[i] <- t.stat[i] <- t.pval[i] <- "-"
    } else if (target.type == "discrete") {
      # ROC area on the min-max normalized feature; sign encodes direction.
      feature.norm <- (feature - min(feature))/(max(feature) - min(feature))
      perf.auc <- roc.area(target, feature.norm)
      AUC[i] <- ifelse(perf.auc$A < 0.5, -(1 - perf.auc$A), perf.auc$A)
      AUC[i] <- signif(AUC[i], digits = 4)
      p.val <- perf.auc$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)  # fold p-value to its smaller tail
      AUC.pval[i] <- signif(p.val, digits = 4)
      # Two-sample t-test: positive class ("1") vs. rest ("0").
      temp <- split(feature, target)
      x <- temp$'1'
      y <- temp$'0'
      t.stat[i] <- signif(t.test(x = x, y = y)$statistic, digits = 4)
      p.val <- t.test(x = x, y = y)$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      t.pval[i] <- signif(p.val, digits = 4)
    }
    annot[i] <- paste(MI[i], " ", AUC[i], " (", AUC.pval[i], ") ", t.stat[i], " (", t.pval[i], ")", sep = "")
  }
  # Optionally order rows by decreasing NMI; all parallel vectors must be
  # permuted together.
  if ((N > 1) & (sort.results == T)) {
    MI.order <- order(MI, decreasing = T)
    MI <- MI[MI.order]
    AUC <- AUC[MI.order]
    AUC.pval <- AUC.pval[MI.order]
    t.stat <- t.stat[MI.order]
    t.pval <- t.pval[MI.order]  # BUG FIX: was `t.pval <= ...` (a comparison), leaving t.pval unsorted
    m <- as.matrix(m)[MI.order,]
    annot <- annot[MI.order]
  }
  # 512-level blue-white-red palette, reversed so low values map to blue.
  mycol <- vector(length = 512, mode = "numeric")
  for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue = 255)
  for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue = 255)
  mycol <- rev(mycol)
  ncolors <- length(mycol)
  # Two-panel layout: thin phenotype bar on top, heatmap below.
  nf <- layout(matrix(c(1, 2), 2, 1, byrow = T), 1, c(4, 10), FALSE)
  par(mar = c(1, 18, 3, 11))
  max.v <- max(max(target), -min(target))
  V1 <- target
  image(1:length(target), 1:1, as.matrix(V1), zlim = c(0, 1), col = c("yellow", "purple"), axes = FALSE, main = "", sub = "", xlab = "", ylab = "")
  axis(2, at = 1:1, labels = paste(phenotype, target.class), adj = 0.5, tick = FALSE, las = 1, cex.axis = cex.axis, font.axis = 1, line = -1)
  axis(4, at = 1:1, labels = paste("NMI AUC (p-val) t-test (p-val)", sep = ""), adj = 0.5, tick = FALSE,
       las = 1, cex.axis = cex.axis, font.axis = 1, line = -1)
  par(mar = c(5, 18, 1, 11))
  if (display.top.n > N) display.top.n <- N
  # Row-standardize (z-score) the displayed rows before color mapping.
  if (N == 1) {
    V1 <- m
    V1 <- (V1 - mean(V1))/sd(V1)
  } else {
    V1 <- m[1:display.top.n, ]
    for (i in 1:display.top.n) V1[i,] <- (V1[i,] - mean(V1[i,]))/sd(V1[i,])
  }
  max.v <- max(max(V1), -min(V1))
  V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))  # map z-scores to palette indices
  if (N > 1) {
    V1 <- apply(V1, MARGIN = 2, FUN = rev)  # image() draws bottom-up; flip rows
    image(1:Ns, 1:display.top.n, t(V1), zlim = c(0, ncolors), col = mycol, axes = FALSE, main = "", sub = "", xlab = "", ylab = "")
    axis(2, at = 1:display.top.n, labels = row.names(V1), adj = 0.5, tick = FALSE, las = 1, cex.axis = cex.axis, font.axis = 1, line = -1)
    axis(4, at = 1:display.top.n, labels = rev(annot[1:display.top.n]), adj = 0.5, tick = FALSE, las = 1, cex.axis = cex.axis, font.axis = 1, line = -1)
    axis(1, at = 1:Ns, labels = sample.names, adj = 0.5, tick = FALSE, las = 3, cex.axis = cex.axis, font.axis = 1, line = -1)
  } else {
    image(1:Ns, 1:1, as.matrix(V1), zlim = c(0, ncolors), col = mycol, axes = FALSE, main = "", sub = "", xlab = "", ylab = "")
    axis(2, at = 1:1, labels = model.names, adj = 0.5, tick = FALSE, las = 1, cex.axis = cex.axis, font.axis = 1, line = -1)
    axis(4, at = 1:1, labels = annot[1], adj = 0.5, tick = FALSE, las = 1, cex.axis = cex.axis, font.axis = 1, line = -1)
    axis(1, at = 1:Ns, labels = sample.names, adj = 0.5, tick = FALSE, las = 3, cex.axis = cex.axis, font.axis = 1, line = -1)
  }
  dev.off()
  # Emit the per-row statistics table (header line, then one row per gene set).
  annot2 <- data.frame(cbind(MI, AUC, AUC.pval, t.stat, t.pval))
  row.names(annot2) <- row.names(m)
  write(paste(c("gene set ", noquote(colnames(annot2))), collapse = "\t"), file = output.txt, append = F,
        ncolumns = length(colnames(annot2)))
  write.table(annot2, file = output.txt, append = T, quote = F, sep = "\t", eol = "\n", col.names = F, row.names = T)
}
# Variant of OPAM.Evaluate.Results that lets the caller choose the mutual
# information estimator: the kernel method ("kernel_I"), its
# correlation-adjusted-bandwidth variant ("kernel_Adj"), or the
# k-nearest-neighbor estimator from the parmigene package ("knn").
# For each dataset row it reports NMI (normalized by the target's
# self-score), and for discrete targets the signed ROC AUC and two-sample
# t statistic with p-values; results go to a heatmap (output.pdf) and a
# tab-delimited table (output.txt).
#
# Args:
#   input.ds       path to the dataset (GCT format).
#   input.cls      path to the phenotype file (CLS format).
#   phenotype      phenotype row name in the CLS file (NULL = first row).
#   target.class   label treated as the positive ("1") class.
#   target.type    "discrete" or "continuous".
#   MI.method      "kernel_I", "kernel_Adj" or "knn".
#   sort.results   order rows by decreasing NMI?
#   display.top.n  number of top rows shown in the heatmap.
#   output.txt     output path for the statistics table.
#   output.pdf     output path for the figure.
OPAM.Evaluate.Results.Yan <- function(
  # this version has the option of using the Kernel method, its adjusted variant and the knn approach from the parmigene package
  input.ds,
  input.cls,
  phenotype = NULL,
  target.class = NULL,
  target.type = "discrete",
  MI.method = "kernel_I", # kernel_I, kernel_Adj or knn
  sort.results = T,
  display.top.n = 20,
  output.txt,
  output.pdf) {
  library(parmigene)
  pdf(file = output.pdf, height = 8.5, width = 11)
  dataset <- MSIG.Gct2Frame(filename = input.ds)  # Read gene expression dataset (GCT format)
  m <- data.matrix(dataset$ds)
  N <- dim(m)[1]
  model.names <- dataset$row.names
  model.descs <- dataset$descs
  Ns <- length(as.matrix(m)[1,])
  for (i in 1:N) {
    if (sd(as.matrix(m)[i,]) == 0) {
      val <- as.matrix(m)[i, 1]
      m[i,] <- as.matrix(m)[i,] + runif(n = Ns, min = val - 0.001, max = val + 0.001)  # add small noise to flat profiles
    }
  }
  dim(m)
  sample.names <- dataset$names
  CLS <- MSIG.ReadPhenFile.2(file = input.cls)  # Read phenotype file (CLS format)
  cls.labels <- CLS$class.v
  cls.phen <- CLS$phen
  cls.list <- CLS$class.list
  library(verification)
  # Locate the requested phenotype row (default: first row of the CLS file).
  if (is.null(phenotype)) {
    phen.loc <- 1
  } else {
    phen.loc <- match(phenotype, CLS$phen.names)
  }
  if (is.vector(CLS$class.list)) {
    target.vec <- CLS$class.list
  } else {
    target.vec <- CLS$class.list[phen.loc,]
  }
  # Numeric target: raw labels for continuous, 0/1 membership for discrete.
  if (target.type == "continuous") {
    target <- target.vec
  } else if (target.type == "discrete") {
    target <- ifelse(target.vec == target.class, 1, 0)
  }
  # Always order samples by increasing target value (contiguous phenotype bar).
  ind <- order(target)
  target <- target[ind]
  target.vec <- target.vec[ind]
  m <- as.matrix(m)[, ind]
  sample.names <- sample.names[ind]
  class.v <- CLS$class.v
  if (is.vector(class.v)) {
    class.v <- class.v[ind]
  } else {
    class.v <- class.v[, ind]
  }
  annot <- MI <- AUC <- AUC.pval <- t.stat <- t.pval <- vector(length = N, mode = "numeric")
  # Reference value: the target's self mutual information under the chosen
  # estimator, used to normalize every row's score.
  if (MI.method == "kernel_I") {
    NMI.ref <- NMI(x = target, y = target, n.grid = 50, make.plot = F)$NMI
  } else if (MI.method == "kernel_Adj") {
    # Bandwidth widened as |correlation| drops (adj >= ~0.75 at rho = 1).
    rho <- cor(target, target)
    adj <- log(1/(abs(rho) + 0.25)) + 0.75
    delta.param <- 1
    delta <- c(delta.param * adj * bcv(target), delta.param * adj * bcv(target))
    NMI.ref <- NMI(x = target, y = target, n.grid = 50, delta = delta, make.plot = F)$NMI
  } else { # knn method
    NMI.ref <- cor(target, target) * knnmi(target, target, k = 3, noise = 1e-12)/NMI(x = target, y = target, n.grid = 50, make.plot = F)$HXY
  }
  for (i in 1:N) {
    if (N == 1) {
      feature <- m
    } else {
      feature <- m[i,]
    }
    if (MI.method == "kernel_I") {
      NMI.val <- NMI(x = target, y = feature, n.grid = 50, make.plot = F)$NMI # MI according to our kernel method
    } else if (MI.method == "kernel_Adj") {
      rho <- cor(target, feature)
      adj <- log(1/(abs(rho) + 0.25)) + 0.75
      delta.param <- 1
      delta <- c(delta.param * adj * bcv(target), delta.param * adj * bcv(feature))
      NMI.val <- NMI(x = target, y = feature, n.grid = 50, delta = delta, make.plot = F)$NMI # MI according to our kernel method
    } else { # knn method: signed by pearson correlation, entropy-normalized
      NMI.val <- cor(target, feature) * knnmi(target, feature, k = 3, noise = 1e-12)/NMI(x = target, y = feature, n.grid = 50, make.plot = F)$HXY
    }
    MI[i] <- signif(NMI.val/NMI.ref, 4)
    if (target.type == "continuous") {
      AUC[i] <- AUC.pval[i] <- t.stat[i] <- t.pval[i] <- "-"
    } else if (target.type == "discrete") {
      # ROC area on the min-max normalized feature; sign encodes direction.
      feature.norm <- (feature - min(feature))/(max(feature) - min(feature))
      perf.auc <- roc.area(target, feature.norm)
      AUC[i] <- ifelse(perf.auc$A < 0.5, -(1 - perf.auc$A), perf.auc$A)
      AUC[i] <- signif(AUC[i], digits = 4)
      p.val <- perf.auc$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)  # fold p-value to its smaller tail
      AUC.pval[i] <- signif(p.val, digits = 4)
      # Two-sample t-test: positive class ("1") vs. rest ("0").
      temp <- split(feature, target)
      x <- temp$'1'
      y <- temp$'0'
      t.stat[i] <- signif(t.test(x = x, y = y)$statistic, digits = 4)
      p.val <- t.test(x = x, y = y)$p.value
      p.val <- ifelse(p.val > 0.5, 1 - p.val, p.val)
      t.pval[i] <- signif(p.val, digits = 4)
    }
    annot[i] <- paste(MI[i], " ", AUC[i], " (", AUC.pval[i], ") ", t.stat[i], " (", t.pval[i], ")", sep = "")
  }
  # Optionally order rows by decreasing NMI; all parallel vectors must be
  # permuted together.
  if ((N > 1) & (sort.results == T)) {
    MI.order <- order(MI, decreasing = T)
    MI <- MI[MI.order]
    AUC <- AUC[MI.order]
    AUC.pval <- AUC.pval[MI.order]
    t.stat <- t.stat[MI.order]
    t.pval <- t.pval[MI.order]  # BUG FIX: was `t.pval <= ...` (a comparison), leaving t.pval unsorted
    m <- as.matrix(m)[MI.order,]
    annot <- annot[MI.order]
  }
  # 512-level blue-white-red palette, reversed so low values map to blue.
  mycol <- vector(length = 512, mode = "numeric")
  for (k in 1:256) mycol[k] <- rgb(255, k - 1, k - 1, maxColorValue = 255)
  for (k in 257:512) mycol[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue = 255)
  mycol <- rev(mycol)
  cex.axis = 1
  ncolors <- length(mycol)
  # Two-panel layout: thin phenotype bar on top, heatmap below.
  nf <- layout(matrix(c(1, 2), 2, 1, byrow = T), 1, c(4, 10), FALSE)
  par(mar = c(1, 15, 5, 15))
  max.v <- max(max(target), -min(target))
  V1 <- target
  image(1:length(target), 1:1, as.matrix(V1), zlim = c(0, 1), col = c("yellow", "purple"), axes = FALSE, main = "", sub = "", xlab = "", ylab = "")
  axis(2, at = 1:1, labels = paste(phenotype, target.class), adj = 0.5, tick = FALSE, las = 1, cex.axis = 0.70, font.axis = 1, line = -1)
  axis(4, at = 1:1, labels = paste("NMI AUC (p-val) t-test (p-val)", sep = ""), adj = 0.5, tick = FALSE,
       las = 1, cex.axis = 0.80, font.axis = 1, line = -1)
  par(mar = c(5, 15, 1, 15))
  if (display.top.n > N) display.top.n <- N
  # Row-standardize (z-score) the displayed rows before color mapping.
  if (N == 1) {
    V1 <- m
    V1 <- (V1 - mean(V1))/sd(V1)
  } else {
    V1 <- m[1:display.top.n, ]
    for (i in 1:display.top.n) V1[i,] <- (V1[i,] - mean(V1[i,]))/sd(V1[i,])
  }
  max.v <- max(max(V1), -min(V1))
  V1 <- ceiling(ncolors * (V1 - (- max.v))/(1.001*(max.v - (- max.v))))  # map z-scores to palette indices
  if (N > 1) {
    V1 <- apply(V1, MARGIN = 2, FUN = rev)  # image() draws bottom-up; flip rows
    image(1:Ns, 1:display.top.n, t(V1), zlim = c(0, ncolors), col = mycol, axes = FALSE, main = "", sub = "", xlab = "", ylab = "")
    axis(2, at = 1:display.top.n, labels = row.names(V1), adj = 0.5, tick = FALSE, las = 1, cex.axis = 0.70, font.axis = 1, line = -1)
    axis(4, at = 1:display.top.n, labels = rev(annot[1:display.top.n]), adj = 0.5, tick = FALSE, las = 1, cex.axis = 0.70, font.axis = 1, line = -1)
    axis(1, at = 1:Ns, labels = sample.names, adj = 0.5, tick = FALSE, las = 3, cex.axis = 0.60, font.axis = 1, line = -1)
  } else {
    image(1:Ns, 1:1, as.matrix(V1), zlim = c(0, ncolors), col = mycol, axes = FALSE, main = "", sub = "", xlab = "", ylab = "")
    axis(2, at = 1:1, labels = model.names, adj = 0.5, tick = FALSE, las = 1, cex.axis = 0.70, font.axis = 1, line = -1)
    axis(4, at = 1:1, labels = annot[1], adj = 0.5, tick = FALSE, las = 1, cex.axis = 0.70, font.axis = 1, line = -1)
    axis(1, at = 1:Ns, labels = sample.names, adj = 0.5, tick = FALSE, las = 3, cex.axis = 0.60, font.axis = 1, line = -1)
  }
  dev.off()
  # Emit the per-row statistics table (header line, then one row per gene set).
  annot2 <- data.frame(cbind(MI, AUC, AUC.pval, t.stat, t.pval))
  row.names(annot2) <- row.names(m)
  write(paste(c("gene set ", noquote(colnames(annot2))), collapse = "\t"), file = output.txt, append = F,
        ncolumns = length(colnames(annot2)))
  write.table(annot2, file = output.txt, append = T, quote = F, sep = "\t", eol = "\n", col.names = F, row.names = T)
}
# Kernel-density estimate of the (signed, normalized) mutual information
# between two numeric vectors.
#
# A 2-D kernel density (MASS::kde2d) on an n.grid x n.grid lattice stands in
# for the joint distribution P(X, Y); marginals and entropies are computed
# from it in bits (log2). The returned NMI is the redundancy
# (HX + HY)/HXY - 1, signed by the Pearson correlation of x and y to carry
# directionality. See "Mutual Information" (Wikipedia) and Cover & Thomas
# for the underlying definitions.
#
# Args:
#   x, y       numeric vectors of equal length.
#   n.grid     number of grid points per axis for the density estimate.
#   make.plot  FALSE for no plot; TRUE or a subtitle string to draw
#              diagnostic panels (uses quartz(), i.e. macOS).
#   delta      bandwidth pair for kde2d (defaults to bcv() of each input).
#
# Returns: list(NMI, MI, HXY, HX, HY).
NMI <- function(x, y, n.grid=100, make.plot=F, delta = c(bcv(x), bcv(y))) {
  kd <- kde2d(x, y, n = n.grid, h = delta)
  grid.x <- kd$x
  grid.y <- kd$y
  # Joint density, floored by machine epsilon so log2() never sees zero.
  joint <- kd$z + .Machine$double.eps
  joint <- joint/sum(joint)
  # Marginal over x (rows) and its entropy.
  marg.x <- rowSums(joint)
  marg.x <- marg.x/sum(marg.x)
  HX <- -sum(marg.x * log2(marg.x))
  # Marginal over y (columns) and its entropy.
  marg.y <- colSums(joint)
  marg.y <- marg.y/sum(marg.y)
  HY <- -sum(marg.y * log2(marg.y))
  # Joint entropy.
  HXY <- -sum(joint * log2(joint))
  # Redundancy-based NMI, signed by pearson correlation (directionality).
  NMI <- sign(cor(x, y)) * ((HX + HY)/HXY - 1)
  # Expand marginals to full grids so the MI integrand is elementwise.
  px.grid <- matrix(marg.x, nrow = n.grid, ncol = n.grid)
  py.grid <- matrix(marg.y, byrow = TRUE, nrow = n.grid, ncol = n.grid)
  MI <- sum(joint * log2(joint/(px.grid * py.grid)))
  if (make.plot != FALSE) {
    # Diagnostic panels: marginals, joint density and MI integrand, drawn
    # with a 512-level blue-white-red palette (low -> blue).
    pal <- vector(length = 512, mode = "numeric")
    for (k in 1:256) pal[k] <- rgb(255, k - 1, k - 1, maxColorValue = 255)
    for (k in 257:512) pal[k] <- rgb(511 - (k - 1), 511 - (k - 1), 255, maxColorValue = 255)
    pal <- rev(pal)
    quartz(width = 12, height = 8)
    nf <- layout(matrix(c(1, 2, 3, 4), 2, 2, byrow = TRUE), c(1, 1), c(1, 1), TRUE)
    plot(grid.x, px.grid[, 1], type = "l", main = "X")
    plot(grid.y, py.grid[1, ], type = "l", main = "Y")
    mi.grid <- joint * log2(joint/(px.grid * py.grid))
    image(joint, main = paste("P(x, y)", " pearson cor=", signif(cor(x, y), 3)), col = pal)
    # A non-TRUE truthy make.plot value doubles as the subtitle text.
    sub <- ifelse(make.plot != TRUE, make.plot, " ")
    image(mi.grid, main = paste("MI:", signif(MI, 3)), col = pal, sub = sub)
  }
  return(list(NMI = NMI, MI = MI, HXY = HXY, HX = HX, HY = HY))
}
|
# Data-preparation script: builds all_data.csv, a country-level panel that
# joins female/male population projections with fertility, life-expectancy
# and GDP-per-capita series, plus derived metrics (total population,
# density, female/male ratio, total GDP).
# NOTE(review): reads CSVs from absolute paths and renames merge output
# columns by position -- any change to the input schemas breaks it; confirm
# the files before running.
library(countrycode)
library(ggplot2)
library(treemap)
# Read the csv file
popfem <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/tot_pop_female_2050.csv',
header=TRUE, stringsAsFactors=FALSE)
# extract Continent
popfem$continent <- as.factor(countrycode(popfem$country, "country.name", "continent"))
# Extract Region
popfem$region <- as.factor(countrycode(popfem$country, "country.name", "region"))
names(popfem)
# Rows countrycode() could not resolve (aggregate areas, not countries).
contidx <- which(is.na(popfem$region))
# Patch region/continent by hand for those aggregate rows.
popfem$region[which(popfem$country=='Caribbean')] = "Caribbean"
popfem$continent[which(popfem$country=='Caribbean')] = "Americas"
popfem$region[which(popfem$country=='Channel Islands')] = "Western Europe"
popfem$continent[which(popfem$country=='Channel Islands')] = "Europe"
popfem$region[which(popfem$country=='Melanesia')] = "Melanesia"
popfem$continent[which(popfem$country=='Melanesia')] = "Oceania"
popfem$region[which(popfem$country=='Polynesia')] = "Polynesia"
popfem$continent[which(popfem$country=='Polynesia')] = "Oceania"
# gsub(" ", "", paste("p", seq(1:5)))
# aggregate(cbind(X2099, X2100) ~ continent, data = popfem, sum)
# plot time
# Stacked Area plot
library(reshape)
# Wide year columns (X1950, X1951, ...) -> long rows; strip the "X" prefix
# read.csv adds to numeric column names.
melted_df <- melt(popfem, id=c("country", "region", "continent", "land_sqkm"))
melted_df$variable <- gsub("X","", melted_df$variable)
head(melted_df)
# Same pipeline for the male population file.
popmale <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/tot_pop_male_2050.csv',
header=TRUE, stringsAsFactors=FALSE)
# extract Continent
popmale$continent <- as.factor(countrycode(popmale$country, "country.name", "continent"))
# Extract Region
popmale$region <- as.factor(countrycode(popmale$country, "country.name", "region"))
names(popmale)
popmale$region[which(popmale$country=='Caribbean')] = "Caribbean"
popmale$continent[which(popmale$country=='Caribbean')] = "Americas"
popmale$region[which(popmale$country=='Channel Islands')] = "Western Europe"
popmale$continent[which(popmale$country=='Channel Islands')] = "Europe"
popmale$region[which(popmale$country=='Melanesia')] = "Melanesia"
popmale$continent[which(popmale$country=='Melanesia')] = "Oceania"
popmale$region[which(popmale$country=='Polynesia')] = "Polynesia"
popmale$continent[which(popmale$country=='Polynesia')] = "Oceania"
# Note: no land_sqkm id here, so only the female side contributes that column.
melted_dfm <- melt(popmale, id=c("country", "region", "continent"))
melted_dfm$variable <- gsub("X","", melted_dfm$variable)
# MERGE THE DATA TO A SINGLE DATAFRAME
all_data <- merge(melted_df, melted_dfm,
by=c( "variable", "country", "region", "continent"))
# Positional renames of the merge output: col 1 = variable (year),
# col 5 = land_sqkm, col 6 = value.x (female), col 7 = value.y (male).
colnames(all_data)[1] <- "Year"
colnames(all_data)[5] <- "areaSqkm"
colnames(all_data)[6] <- "femPop"
colnames(all_data)[7] <- "malePop"
# Read aggregated data
popagg <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/continent_pop_2050.csv',
header=TRUE)
popagg <- melt(popagg, id=c("Region"))
popagg$variable <- gsub("X","", popagg$variable)
head(popagg)
# fertility estimates
fert <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/tot_fertitlity_2099.csv',
header=TRUE)
fert <- melt(fert, id=c("Country"))
fert$variable <- gsub("X","", fert$variable)
colnames(fert)[3] <- "fert"
# Life expectancy estimates
lifexp <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/life_exp_at_birth_2099.csv',
header=TRUE)
lifexp <- melt(lifexp, id=c("Country"))
lifexp$variable <- gsub("X","", lifexp$variable)
colnames(lifexp)[3] <- "lifexp"
# Left-join fertility and life expectancy on country + year.
merged <- merge(fert, lifexp, by=c("Country", "variable"), all.x=TRUE)
# GDP per capita
gdpall <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/gdp_per_capita_2012.csv',
header=TRUE)
gdpall <- melt(gdpall, id=c("Country"))
gdpall$variable <- gsub("X","", gdpall$variable)
colnames(gdpall)[3] <- "gdppc"
merged <- merge(merged, gdpall, by=c("Country", "variable"), all.x=TRUE)
# Align key names with all_data before the final join.
colnames(merged)[2] <- "Year"
colnames(merged)[1] <- "country"
all_data <- merge(all_data, merged, by=c('country', 'Year'), all.x=TRUE)
all_data$country <- as.factor(all_data$country)
all_data$Year <- as.numeric(all_data$Year)
# Derived metrics.
all_data$totalpop <- all_data$femPop + all_data$malePop
all_data$popdens <- all_data$totalpop / all_data$areaSqkm
# Female-to-male ratio.
all_data$fmr <- all_data$femPop / all_data$malePop
# Total GDP in billions (gdppc * population / 1e9); currency units follow
# the input file -- presumably USD, TODO confirm.
all_data$totgdpb <- all_data$gdppc * all_data$totalpop / 1000000000
write.csv(all_data, file='all_data.csv', row.names=FALSE)
|
/final-project/dataprep.R
|
no_license
|
manoj-v/msan622
|
R
| false
| false
| 4,439
|
r
|
# Data-preparation script: builds all_data.csv, a country-level panel that
# joins female/male population projections with fertility, life-expectancy
# and GDP-per-capita series, plus derived metrics (total population,
# density, female/male ratio, total GDP).
# NOTE(review): reads CSVs from absolute paths and renames merge output
# columns by position -- any change to the input schemas breaks it; confirm
# the files before running.
library(countrycode)
library(ggplot2)
library(treemap)
# Read the csv file
popfem <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/tot_pop_female_2050.csv',
header=TRUE, stringsAsFactors=FALSE)
# extract Continent
popfem$continent <- as.factor(countrycode(popfem$country, "country.name", "continent"))
# Extract Region
popfem$region <- as.factor(countrycode(popfem$country, "country.name", "region"))
names(popfem)
# Rows countrycode() could not resolve (aggregate areas, not countries).
contidx <- which(is.na(popfem$region))
# Patch region/continent by hand for those aggregate rows.
popfem$region[which(popfem$country=='Caribbean')] = "Caribbean"
popfem$continent[which(popfem$country=='Caribbean')] = "Americas"
popfem$region[which(popfem$country=='Channel Islands')] = "Western Europe"
popfem$continent[which(popfem$country=='Channel Islands')] = "Europe"
popfem$region[which(popfem$country=='Melanesia')] = "Melanesia"
popfem$continent[which(popfem$country=='Melanesia')] = "Oceania"
popfem$region[which(popfem$country=='Polynesia')] = "Polynesia"
popfem$continent[which(popfem$country=='Polynesia')] = "Oceania"
# gsub(" ", "", paste("p", seq(1:5)))
# aggregate(cbind(X2099, X2100) ~ continent, data = popfem, sum)
# plot time
# Stacked Area plot
library(reshape)
# Wide year columns (X1950, X1951, ...) -> long rows; strip the "X" prefix
# read.csv adds to numeric column names.
melted_df <- melt(popfem, id=c("country", "region", "continent", "land_sqkm"))
melted_df$variable <- gsub("X","", melted_df$variable)
head(melted_df)
# Same pipeline for the male population file.
popmale <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/tot_pop_male_2050.csv',
header=TRUE, stringsAsFactors=FALSE)
# extract Continent
popmale$continent <- as.factor(countrycode(popmale$country, "country.name", "continent"))
# Extract Region
popmale$region <- as.factor(countrycode(popmale$country, "country.name", "region"))
names(popmale)
popmale$region[which(popmale$country=='Caribbean')] = "Caribbean"
popmale$continent[which(popmale$country=='Caribbean')] = "Americas"
popmale$region[which(popmale$country=='Channel Islands')] = "Western Europe"
popmale$continent[which(popmale$country=='Channel Islands')] = "Europe"
popmale$region[which(popmale$country=='Melanesia')] = "Melanesia"
popmale$continent[which(popmale$country=='Melanesia')] = "Oceania"
popmale$region[which(popmale$country=='Polynesia')] = "Polynesia"
popmale$continent[which(popmale$country=='Polynesia')] = "Oceania"
# Note: no land_sqkm id here, so only the female side contributes that column.
melted_dfm <- melt(popmale, id=c("country", "region", "continent"))
melted_dfm$variable <- gsub("X","", melted_dfm$variable)
# MERGE THE DATA TO A SINGLE DATAFRAME
all_data <- merge(melted_df, melted_dfm,
by=c( "variable", "country", "region", "continent"))
# Positional renames of the merge output: col 1 = variable (year),
# col 5 = land_sqkm, col 6 = value.x (female), col 7 = value.y (male).
colnames(all_data)[1] <- "Year"
colnames(all_data)[5] <- "areaSqkm"
colnames(all_data)[6] <- "femPop"
colnames(all_data)[7] <- "malePop"
# Read aggregated data
popagg <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/continent_pop_2050.csv',
header=TRUE)
popagg <- melt(popagg, id=c("Region"))
popagg$variable <- gsub("X","", popagg$variable)
head(popagg)
# fertility estimates
fert <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/tot_fertitlity_2099.csv',
header=TRUE)
fert <- melt(fert, id=c("Country"))
fert$variable <- gsub("X","", fert$variable)
colnames(fert)[3] <- "fert"
# Life expectancy estimates
lifexp <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/life_exp_at_birth_2099.csv',
header=TRUE)
lifexp <- melt(lifexp, id=c("Country"))
lifexp$variable <- gsub("X","", lifexp$variable)
colnames(lifexp)[3] <- "lifexp"
# Left-join fertility and life expectancy on country + year.
merged <- merge(fert, lifexp, by=c("Country", "variable"), all.x=TRUE)
# GDP per capita
gdpall <- read.csv('/home/ubuntu/Dropbox/Manoj/MSAN622_DataViz_Sophie/data/gdp_per_capita_2012.csv',
header=TRUE)
gdpall <- melt(gdpall, id=c("Country"))
gdpall$variable <- gsub("X","", gdpall$variable)
colnames(gdpall)[3] <- "gdppc"
merged <- merge(merged, gdpall, by=c("Country", "variable"), all.x=TRUE)
# Align key names with all_data before the final join.
colnames(merged)[2] <- "Year"
colnames(merged)[1] <- "country"
all_data <- merge(all_data, merged, by=c('country', 'Year'), all.x=TRUE)
all_data$country <- as.factor(all_data$country)
all_data$Year <- as.numeric(all_data$Year)
# Derived metrics.
all_data$totalpop <- all_data$femPop + all_data$malePop
all_data$popdens <- all_data$totalpop / all_data$areaSqkm
# Female-to-male ratio.
all_data$fmr <- all_data$femPop / all_data$malePop
# Total GDP in billions (gdppc * population / 1e9); currency units follow
# the input file -- presumably USD, TODO confirm.
all_data$totgdpb <- all_data$gdppc * all_data$totalpop / 1000000000
write.csv(all_data, file='all_data.csv', row.names=FALSE)
|
"SI" "VC" "IN" "CO" "BD" "MR" "VP" "FW" "PCm" "DS" "AR" "LN" "SS" "CD" "CA"
1.00 0.74 0.64 0.71 0.49 0.51 0.44 0.53 0.44 0.48 0.54 0.45 0.35 0.41 0.23
0.74 1.00 0.73 0.74 0.45 0.51 0.42 0.53 0.39 0.50 0.57 0.48 0.34 0.41 0.24
0.64 0.73 1.00 0.66 0.44 0.49 0.43 0.51 0.41 0.43 0.57 0.43 0.34 0.34 0.22
0.71 0.74 0.66 1.00 0.44 0.49 0.43 0.53 0.40 0.48 0.55 0.47 0.32 0.39 0.21
0.49 0.45 0.44 0.44 1.00 0.54 0.64 0.56 0.49 0.45 0.50 0.42 0.41 0.40 0.34
0.51 0.51 0.49 0.49 0.54 1.00 0.53 0.57 0.42 0.47 0.52 0.45 0.39 0.45 0.26
0.44 0.42 0.43 0.43 0.64 0.53 1.00 0.58 0.48 0.40 0.48 0.41 0.38 0.37 0.32
0.53 0.53 0.51 0.53 0.56 0.57 0.58 1.00 0.41 0.50 0.61 0.48 0.34 0.36 0.29
0.44 0.39 0.41 0.40 0.49 0.42 0.48 0.41 1.00 0.39 0.37 0.37 0.41 0.38 0.33
0.48 0.50 0.43 0.48 0.45 0.47 0.40 0.50 0.39 1.00 0.60 0.69 0.40 0.45 0.34
0.54 0.57 0.57 0.55 0.50 0.52 0.48 0.61 0.37 0.60 1.00 0.46 0.37 0.43 0.31
0.45 0.48 0.43 0.47 0.42 0.45 0.41 0.48 0.37 0.69 0.46 1.00 0.37 0.38 0.30
0.35 0.34 0.34 0.32 0.41 0.39 0.38 0.34 0.41 0.40 0.37 0.37 1.00 0.65 0.46
0.41 0.41 0.34 0.39 0.40 0.45 0.37 0.36 0.38 0.45 0.43 0.38 0.65 1.00 0.42
0.23 0.24 0.22 0.21 0.34 0.26 0.32 0.29 0.33 0.34 0.31 0.30 0.46 0.42 1.00
|
/WAISData.R
|
no_license
|
KJKan/nwsem
|
R
| false
| false
| 1,221
|
r
|
"SI" "VC" "IN" "CO" "BD" "MR" "VP" "FW" "PCm" "DS" "AR" "LN" "SS" "CD" "CA"
1.00 0.74 0.64 0.71 0.49 0.51 0.44 0.53 0.44 0.48 0.54 0.45 0.35 0.41 0.23
0.74 1.00 0.73 0.74 0.45 0.51 0.42 0.53 0.39 0.50 0.57 0.48 0.34 0.41 0.24
0.64 0.73 1.00 0.66 0.44 0.49 0.43 0.51 0.41 0.43 0.57 0.43 0.34 0.34 0.22
0.71 0.74 0.66 1.00 0.44 0.49 0.43 0.53 0.40 0.48 0.55 0.47 0.32 0.39 0.21
0.49 0.45 0.44 0.44 1.00 0.54 0.64 0.56 0.49 0.45 0.50 0.42 0.41 0.40 0.34
0.51 0.51 0.49 0.49 0.54 1.00 0.53 0.57 0.42 0.47 0.52 0.45 0.39 0.45 0.26
0.44 0.42 0.43 0.43 0.64 0.53 1.00 0.58 0.48 0.40 0.48 0.41 0.38 0.37 0.32
0.53 0.53 0.51 0.53 0.56 0.57 0.58 1.00 0.41 0.50 0.61 0.48 0.34 0.36 0.29
0.44 0.39 0.41 0.40 0.49 0.42 0.48 0.41 1.00 0.39 0.37 0.37 0.41 0.38 0.33
0.48 0.50 0.43 0.48 0.45 0.47 0.40 0.50 0.39 1.00 0.60 0.69 0.40 0.45 0.34
0.54 0.57 0.57 0.55 0.50 0.52 0.48 0.61 0.37 0.60 1.00 0.46 0.37 0.43 0.31
0.45 0.48 0.43 0.47 0.42 0.45 0.41 0.48 0.37 0.69 0.46 1.00 0.37 0.38 0.30
0.35 0.34 0.34 0.32 0.41 0.39 0.38 0.34 0.41 0.40 0.37 0.37 1.00 0.65 0.46
0.41 0.41 0.34 0.39 0.40 0.45 0.37 0.36 0.38 0.45 0.43 0.38 0.65 1.00 0.42
0.23 0.24 0.22 0.21 0.34 0.26 0.32 0.29 0.33 0.34 0.31 0.30 0.46 0.42 1.00
|
###############
# Build an HTML gallery (test.html) pairing each compound image with its
# nearest structural neighbors (atom-pair similarity via ChemmineR), plus a
# per-compound detail page, written in parallel.
# NOTE(review): relies on `sdfset` already existing in the workspace (the
# data() loaders below are commented out) and on pre-rendered
# "<name>.smi.png" images in the working directory -- confirm before running.
library(ChemmineR)
library(hwriter)
library(parallel)
###############
#data(apset)
#data(sdfsample)
#sdfset<-sdfsample
#smiset<-sdf2smiles(sdfset)
apset<-sdf2ap(sdfset)
#fpset<-desc2fp(apset)
##
# Number of nearest neighbors to report per compound (column 1 of the
# nnm index is the query compound itself).
naebors<-8
nnm <- nearestNeighbors(apset,numNbrs=naebors)
###############
# Pre-rendered structure images, one per compound.
png<-list.files(pattern="smi.png",recursive=F)
###############
#himg=hwriteImage(sort(png), table=FALSE)
#DrawList<-matrix(paste(nnm$names, ".smi.png", sep="")[matrix(nnm$index, byrow=T)])
#himg=hwriteImage(matrix(paste(nnm$names, ".smi.png", sep="")[matrix(nnm$index, byrow=T)]), table=FALSE)
#himg=hwriteImage(matrix(paste(nnm$names[matrix(nnm$index, byrow=F)], ".smi.png", sep="")), table=FALSE)
# One image-file name per neighbor rank: column k holds the k-th nearest
# neighbor's "<name>.smi.png" for every query compound.
Draw<-cbind(paste(nnm$names[nnm$index[,1]], ".smi.png", sep=""), paste(nnm$names[nnm$index[,2]], ".smi.png", sep=""), paste(nnm$names[nnm$index[,3]], ".smi.png", sep=""), paste(nnm$names[nnm$index[,4]], ".smi.png", sep=""),paste(nnm$names[nnm$index[,5]], ".smi.png", sep=""),paste(nnm$names[nnm$index[,6]], ".smi.png", sep=""),paste(nnm$names[nnm$index[,7]], ".smi.png", sep=""),paste(nnm$names[nnm$index[,8]], ".smi.png", sep=""))
###################################
# Each image links to the per-compound detail page generated below
# (hard-coded local file:// prefix).
himg=hwriteImage(Draw, link=paste("file:///home/debisbad/Desktop/NewGit/hwriter4ChemmineR/DataBlockDump/WorkHere/", Draw, ".html", sep=""), table=FALSE)
###################################
#mat=cbind(1:length(png), substring(sort(names(as.list(as.character(smiset)))),1,25), himg, himg)# as.character(smiset))
# Gallery table: row number, compound name, query image, then 7 neighbors.
mat=cbind(1:length(png), nnm$names, himg)
##
colnames(mat)=c('#', 'name', 'Query compound', 'nnm1', 'nnm2','nnm3', 'nnm4','nnm5', 'nnm6','nnm7')
##
hwrite(mat, 'test.html', br=TRUE, center=TRUE, row.bgcolor=list(Species=c('#ffaacc', '#ff88aa', '#ff6688')), col.bgcolor='#ffffaa', row.style=list(Species='text-align:center'))
##############
#Look at the html
##############
##############
##############
#######################
# Detail-page inputs: one image/name per query compound (self-neighbor col 1).
homepage<-paste(nnm$names[nnm$index[,1]], ".smi.png", sep="")
name<-nnm$names[nnm$index[,1]]
# Write the detail page for compound `a`: ID header, structure image, and
# the compound's SDF data block with "__" markers stripped.
WriteIt<-function(a){
#######################
p=openPage(paste(homepage[a], ".html", sep=""))
hwrite(paste("ID", name[a], sep=""), p, br=TRUE)
hwriteImage(homepage[a], p, br=TRUE)
hwrite('',p, br=TRUE)
hwrite(blockmatrix <-gsub("\\__", "", datablock2ma(datablocklist=datablock(sdfset[a]))), p)
closePage(p)
#######################
}
a<-1:length(name)
# Generate all detail pages on 4 cores.
mclapply(a, WriteIt, mc.cores=4)
#img=hwriteImage('iris3.jpg', center=TRUE)
#cap=hwrite(c('Plantae', hwrite('Monocots', link='http://mc.org'), 'Iris'))
#hwrite(c(img, cap), 'test.html', dim=c(2,1), center=TRUE)
|
/Figure3/ToxCast_nnm/WorkHere/hwriteMore.R
|
permissive
|
andrewdefries/ToxCast
|
R
| false
| false
| 2,554
|
r
|
###############
# Build an HTML gallery (test.html) pairing each compound image with its
# nearest structural neighbors (atom-pair similarity via ChemmineR), plus a
# per-compound detail page, written in parallel.
# NOTE(review): relies on `sdfset` already existing in the workspace (the
# data() loaders below are commented out) and on pre-rendered
# "<name>.smi.png" images in the working directory -- confirm before running.
library(ChemmineR)
library(hwriter)
library(parallel)
###############
#data(apset)
#data(sdfsample)
#sdfset<-sdfsample
#smiset<-sdf2smiles(sdfset)
apset<-sdf2ap(sdfset)
#fpset<-desc2fp(apset)
##
# Number of nearest neighbors to report per compound (column 1 of the
# nnm index is the query compound itself).
naebors<-8
nnm <- nearestNeighbors(apset,numNbrs=naebors)
###############
# Pre-rendered structure images, one per compound.
png<-list.files(pattern="smi.png",recursive=F)
###############
#himg=hwriteImage(sort(png), table=FALSE)
#DrawList<-matrix(paste(nnm$names, ".smi.png", sep="")[matrix(nnm$index, byrow=T)])
#himg=hwriteImage(matrix(paste(nnm$names, ".smi.png", sep="")[matrix(nnm$index, byrow=T)]), table=FALSE)
#himg=hwriteImage(matrix(paste(nnm$names[matrix(nnm$index, byrow=F)], ".smi.png", sep="")), table=FALSE)
# One image-file name per neighbor rank: column k holds the k-th nearest
# neighbor's "<name>.smi.png" for every query compound.
Draw<-cbind(paste(nnm$names[nnm$index[,1]], ".smi.png", sep=""), paste(nnm$names[nnm$index[,2]], ".smi.png", sep=""), paste(nnm$names[nnm$index[,3]], ".smi.png", sep=""), paste(nnm$names[nnm$index[,4]], ".smi.png", sep=""),paste(nnm$names[nnm$index[,5]], ".smi.png", sep=""),paste(nnm$names[nnm$index[,6]], ".smi.png", sep=""),paste(nnm$names[nnm$index[,7]], ".smi.png", sep=""),paste(nnm$names[nnm$index[,8]], ".smi.png", sep=""))
###################################
# Each image links to the per-compound detail page generated below
# (hard-coded local file:// prefix).
himg=hwriteImage(Draw, link=paste("file:///home/debisbad/Desktop/NewGit/hwriter4ChemmineR/DataBlockDump/WorkHere/", Draw, ".html", sep=""), table=FALSE)
###################################
#mat=cbind(1:length(png), substring(sort(names(as.list(as.character(smiset)))),1,25), himg, himg)# as.character(smiset))
# Gallery table: row number, compound name, query image, then 7 neighbors.
mat=cbind(1:length(png), nnm$names, himg)
##
colnames(mat)=c('#', 'name', 'Query compound', 'nnm1', 'nnm2','nnm3', 'nnm4','nnm5', 'nnm6','nnm7')
##
hwrite(mat, 'test.html', br=TRUE, center=TRUE, row.bgcolor=list(Species=c('#ffaacc', '#ff88aa', '#ff6688')), col.bgcolor='#ffffaa', row.style=list(Species='text-align:center'))
##############
#Look at the html
##############
##############
##############
#######################
# Detail-page inputs: one image/name per query compound (self-neighbor col 1).
homepage<-paste(nnm$names[nnm$index[,1]], ".smi.png", sep="")
name<-nnm$names[nnm$index[,1]]
# Write the detail page for compound `a`: ID header, structure image, and
# the compound's SDF data block with "__" markers stripped.
WriteIt<-function(a){
#######################
p=openPage(paste(homepage[a], ".html", sep=""))
hwrite(paste("ID", name[a], sep=""), p, br=TRUE)
hwriteImage(homepage[a], p, br=TRUE)
hwrite('',p, br=TRUE)
hwrite(blockmatrix <-gsub("\\__", "", datablock2ma(datablocklist=datablock(sdfset[a]))), p)
closePage(p)
#######################
}
a<-1:length(name)
# Generate all detail pages on 4 cores.
mclapply(a, WriteIt, mc.cores=4)
#img=hwriteImage('iris3.jpg', center=TRUE)
#cap=hwrite(c('Plantae', hwrite('Monocots', link='http://mc.org'), 'Iris'))
#hwrite(c(img, cap), 'test.html', dim=c(2,1), center=TRUE)
|
# Auto-generated fuzzing harness (libFuzzer/valgrind) for the internal
# metafolio:::est_beta_params. The inputs are intentionally degenerate
# (denormal mu, zero variance) to probe for crashes, not meaningful values.
testlist <- list(mu = 5.43230922490124e-311, var = 0)
result <- do.call(metafolio:::est_beta_params,testlist)
str(result)
|
/metafolio/inst/testfiles/est_beta_params/libFuzzer_est_beta_params/est_beta_params_valgrind_files/1612988126-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 121
|
r
|
# Auto-generated fuzzing harness (libFuzzer/valgrind) for the internal
# metafolio:::est_beta_params. The inputs are intentionally degenerate
# (denormal mu, zero variance) to probe for crashes, not meaningful values.
testlist <- list(mu = 5.43230922490124e-311, var = 0)
result <- do.call(metafolio:::est_beta_params,testlist)
str(result)
|
library(sp)
# Build one convex-hull polygon per cluster of points and return them as an
# sp::SpatialPolygonsDataFrame whose rows carry the cluster index.
# NOTE(review): st_coordinates() is from the sf package, which is not loaded
# here -- presumably attached by the calling platform; verify.
function(points_with_cluster_id, subject){
  # Get CHULL for each cluster of points
  get_chull_poly <- function(points_to_chull){
    # chull() returns the indices of the hull vertices, in hull order
    chull_coords <- points_to_chull[chull(points_to_chull),]
    # Placeholder "ID"; overwritten with the cluster index in the loop below
    polys <- Polygons(list(Polygon(chull_coords)), "ID")
  }
  # by() splits the subject coordinates (looked up via custom_id) on
  # cluster_id and hulls each group
  chull_polys <- by(st_coordinates(subject[points_with_cluster_id$custom_id,]),
                    points_with_cluster_id$cluster_id, get_chull_poly)
  # Force chull_polys to be a list class not by class
  class(chull_polys) <- "list"
  # SpatialPolygons requires unique Polygons IDs; use 1..n
  for (i in 1:length(chull_polys)){
    slot(chull_polys[[i]], "ID") = as.character(i)
  }
  # Row order of `data` matches the polygon IDs assigned above
  SpatialPolygonsDataFrame(SpatialPolygons(chull_polys),
                           data = data.frame(cluster_id = 1:length(chull_polys)))
}
|
/function/get_cluster_chulls.R
|
permissive
|
disarm-platform/fn-dbscan-clusterer
|
R
| false
| false
| 865
|
r
|
library(sp)
# Build one convex-hull polygon per cluster of points and return them as an
# sp::SpatialPolygonsDataFrame whose rows carry the cluster index.
# NOTE(review): st_coordinates() is from the sf package, which is not loaded
# here -- presumably attached by the calling platform; verify.
function(points_with_cluster_id, subject){
  # Get CHULL for each cluster of points
  get_chull_poly <- function(points_to_chull){
    # chull() returns the indices of the hull vertices, in hull order
    chull_coords <- points_to_chull[chull(points_to_chull),]
    # Placeholder "ID"; overwritten with the cluster index in the loop below
    polys <- Polygons(list(Polygon(chull_coords)), "ID")
  }
  # by() splits the subject coordinates (looked up via custom_id) on
  # cluster_id and hulls each group
  chull_polys <- by(st_coordinates(subject[points_with_cluster_id$custom_id,]),
                    points_with_cluster_id$cluster_id, get_chull_poly)
  # Force chull_polys to be a list class not by class
  class(chull_polys) <- "list"
  # SpatialPolygons requires unique Polygons IDs; use 1..n
  for (i in 1:length(chull_polys)){
    slot(chull_polys[[i]], "ID") = as.character(i)
  }
  # Row order of `data` matches the polygon IDs assigned above
  SpatialPolygonsDataFrame(SpatialPolygons(chull_polys),
                           data = data.frame(cluster_id = 1:length(chull_polys)))
}
|
#' @include globals.R
NULL
#' Create a logger
#'
#' The logger emits log events in a format that is
#' consistent with [`logga`](https://github.com/stencila/logga):
#' newline-delimited JSON streamed to `stderr`, with the `tag`,
#' a `message` string and an integer `level`:
#'
#' - 0: error
#' - 1: warning
#' - 2: info
#' - 3: debug
#'
#' This consistency allows for log events to be merged with
#' those from other Stencila executors, e.g. Encoda, Pyla
#' for easier cross application and cross language tracing
#' and debugging.
#'
#' @param tag The tag for all log events emitted
#' @return A list of logging functions: `error` (optionally with a
#'   `stack`), `warn`, `info` and `debug`.
logger <- function(tag) {
  # Build the event data and dispatch it to the currently registered
  # handler, falling back to the default handler if none is set.
  log_event <- function(level, message, stack) {
    data <- list(
      tag = tag,
      # UTC timestamp so events from different processes can be merged
      time = as.POSIXlt(Sys.time(), "UTC"),
      level = level,
      message = message
    )
    # FIX: use [[ rather than single-bracket [ for element assignment on
    # a list; data["stack"] <- stack relies on list-replacement coercion.
    if (!missing(stack)) data[["stack"]] <- stack
    handler <- globals$log_handler
    if (!is.function(handler)) handler <- default_log_handler
    handler(data)
  }
  list(
    error = function(message, stack) log_event(0, message, stack),
    warn = function(message) log_event(1, message),
    info = function(message) log_event(2, message),
    debug = function(message) log_event(3, message)
  )
}
#' Get the log handler
#'
#' @return The currently registered log event handler function,
#'   or `NULL` if none has been registered.
get_log_handler <- function() {
  globals[["log_handler"]]
}
#' Set the log handler
#'
#' Registers the function that will receive all log event data
#' produced by loggers created with [logger()].
#'
#' @param handler The log event handler
set_log_handler <- function(handler) {
  globals[["log_handler"]] <- handler
}
#' Default log handler
#'
#' Serializes the log event data as a single line of JSON and streams it
#' to `stderr`, flushing so each event is emitted immediately.
#' Replace this by registering your own handler with [set_log_handler()].
#'
#' @param data The log event data to handle
default_log_handler <- function(data) {
  stream <- stderr()
  write(jsonlite::toJSON(data, auto_unbox = TRUE, force = TRUE), stream)
  flush(stream)
}
# Register the default handler at load time
set_log_handler(default_log_handler)
|
/R/logger.R
|
permissive
|
stencila/rasta
|
R
| false
| false
| 1,743
|
r
|
#' @include globals.R
NULL
#' Create a logger
#'
#' The logger emits log events in a format that is
#' consistent with [`logga`](https://github.com/stencila/logga):
#' newline-delimited JSON streamed to `stderr`, with the `tag`,
#' a `message` string and an integer `level`:
#'
#' - 0: error
#' - 1: warning
#' - 2: info
#' - 3: debug
#'
#' This consistency allows for log events to be merged with
#' those from other Stencila executors, e.g. Encoda, Pyla
#' for easier cross application and cross language tracing
#' and debugging.
#'
#' @param tag The tag for all log events emitted
#' @return A list of logging functions: `error` (optionally with a
#'   `stack`), `warn`, `info` and `debug`.
logger <- function(tag) {
  # Build the event data and dispatch it to the currently registered
  # handler, falling back to the default handler if none is set.
  log_event <- function(level, message, stack) {
    data <- list(
      tag = tag,
      # UTC timestamp so events from different processes can be merged
      time = as.POSIXlt(Sys.time(), "UTC"),
      level = level,
      message = message
    )
    # FIX: use [[ rather than single-bracket [ for element assignment on
    # a list; data["stack"] <- stack relies on list-replacement coercion.
    if (!missing(stack)) data[["stack"]] <- stack
    handler <- globals$log_handler
    if (!is.function(handler)) handler <- default_log_handler
    handler(data)
  }
  list(
    error = function(message, stack) log_event(0, message, stack),
    warn = function(message) log_event(1, message),
    info = function(message) log_event(2, message),
    debug = function(message) log_event(3, message)
  )
}
#' Get the log handler
#'
#' @return The currently registered log event handler function,
#'   or `NULL` if none has been registered.
get_log_handler <- function() {
  globals[["log_handler"]]
}
#' Set the log handler
#'
#' Registers the function that will receive all log event data
#' produced by loggers created with [logger()].
#'
#' @param handler The log event handler
set_log_handler <- function(handler) {
  globals[["log_handler"]] <- handler
}
#' Default log handler
#'
#' Serializes the log event data as a single line of JSON and streams it
#' to `stderr`, flushing so each event is emitted immediately.
#' Replace this by registering your own handler with [set_log_handler()].
#'
#' @param data The log event data to handle
default_log_handler <- function(data) {
  stream <- stderr()
  write(jsonlite::toJSON(data, auto_unbox = TRUE, force = TRUE), stream)
  flush(stream)
}
# Register the default handler at load time
set_log_handler(default_log_handler)
|
#' Load and select auxiliary plot trees-VRI specific
#'
#' @description This function loads and selects auxiliary data (\code{vi_i}, cardi) based on cluster/plot header.
#'
#'
#' @param clusterplotHeader data.table, Cluster and plot level attributes, an output from \code{\link{VRIInit_clusterplot}}.
#'   Note: modified by reference -- a \code{clusterplot} key column is added.
#'
#' @param dataSourcePath character, Specifies the path that directs to the VRI original data source, i.e.,
#' \code{//Mayhem/GIS_TIB/RDW/RDW_Data2/Work_Areas/VRI_ASCII_PROD/vri_sa}.
#'
#'
#' @return A data table that contains auxiliary plot tree data.
#'
#'
#' @importFrom data.table data.table ':=' set rbindlist setnames setkey
#' @importFrom dplyr '%>%'
#' @importFrom FAIBBase merge_dupUpdate PHFCalculator
#'
#' @export
#' @docType methods
#' @rdname VRIInit_auxTree
#'
#' @author Yong Luo
VRIInit_auxTree<- function(clusterplotHeader,
                           dataSourcePath){
  vi_i <- readRDS(file.path(dataSourcePath, "vi_i.rds")) %>% data.table
  # Normalize column names so the data.table expressions below match
  names(vi_i) <- toupper(names(vi_i))
  # Composite key used to restrict trees to the supplied clusters/plots;
  # note := adds this column to clusterplotHeader in place (by reference)
  clusterplotHeader[, clusterplot := paste(CLSTR_ID, PLOT, sep = "_")]
  vi_i[, clusterplot := paste(CLSTR_ID, PLOT, sep = "_")]
  vi_i <- vi_i[clusterplot %in% clusterplotHeader$clusterplot,]
  if(nrow(vi_i) > 0){
    # De-duplicate trees on cluster/plot/tree number
    vi_i <- unique(vi_i, by = c("CLSTR_ID", "PLOT", "TREE_NO"))
    # Missing LV_D defaults to "L" (presumably live/dead status,
    # defaulting to live -- TODO confirm coding)
    vi_i[is.na(LV_D), LV_D := "L"]
    vi_i[, TREE_WT := 1]
    # Basal area per tree: pi * (DBH/200)^2 -- assumes DBH is in cm, so
    # DBH/200 is the radius in metres and BA_TREE is in m^2 (verify units)
    vi_i[DBH != 0, BA_TREE := pi * ((DBH/200)^2)]
    # Attach sampling-design and BEC attributes from the header
    vi_i <- FAIBBase::merge_dupUpdate(vi_i, clusterplotHeader[, .(clusterplot, SAMP_TYP,
                                                                  BLOWUP, PLOT_WT,
                                                                  BEC_ZONE,
                                                                  BEC_SBZ)],
                                      by = "clusterplot", all.x = TRUE)
    # Keep the original species code and apply the BEC-based correction
    vi_i[, SPECIES_ORG := SPECIES]
    vi_i[, SPECIES := speciesCorrection(SPECIES,
                                        BEC_ZONE,
                                        BEC_SBZ)]
    # Per-hectare factor for each tree from the sampling design
    vi_i[, PHF_TREE := FAIBBase::PHFCalculator(sampleType = SAMP_TYP, blowUp = BLOWUP, treeWeight = TREE_WT,
                                               plotWeight = PLOT_WT, treeBasalArea = BA_TREE)]
    return(vi_i[,.(CLSTR_ID, PLOT, TREE_NO, SPECIES_ORG, SPECIES,
                   DBH, BA_TREE,
                   PHF_TREE, LV_D)])
  } else {
    # No matching trees: return the (empty) table with a minimal column set
    return(vi_i[,.(CLSTR_ID, PLOT, SPECIES, TREE_NO)])
  }
  ## please note that for auxiliary plots, they are part of VRI design, therefore, there no small tree plot (DBH<9),
  ## no need for adjusting tree per ha factor
}
|
/R/VRIInit_auxTree.R
|
permissive
|
bcgov/FAIBCompiler
|
R
| false
| false
| 2,560
|
r
|
#' Load and select auxiliary plot trees-VRI specific
#'
#' @description This function loads and selects auxiliary data (\code{vi_i}, cardi) based on cluster/plot header.
#'
#'
#' @param clusterplotHeader data.table, Cluster and plot level attributes, an output from \code{\link{VRIInit_clusterplot}}.
#'   Note: modified by reference -- a \code{clusterplot} key column is added.
#'
#' @param dataSourcePath character, Specifies the path that directs to the VRI original data source, i.e.,
#' \code{//Mayhem/GIS_TIB/RDW/RDW_Data2/Work_Areas/VRI_ASCII_PROD/vri_sa}.
#'
#'
#' @return A data table that contains auxiliary plot tree data.
#'
#'
#' @importFrom data.table data.table ':=' set rbindlist setnames setkey
#' @importFrom dplyr '%>%'
#' @importFrom FAIBBase merge_dupUpdate PHFCalculator
#'
#' @export
#' @docType methods
#' @rdname VRIInit_auxTree
#'
#' @author Yong Luo
VRIInit_auxTree<- function(clusterplotHeader,
                           dataSourcePath){
  vi_i <- readRDS(file.path(dataSourcePath, "vi_i.rds")) %>% data.table
  # Normalize column names so the data.table expressions below match
  names(vi_i) <- toupper(names(vi_i))
  # Composite key used to restrict trees to the supplied clusters/plots;
  # note := adds this column to clusterplotHeader in place (by reference)
  clusterplotHeader[, clusterplot := paste(CLSTR_ID, PLOT, sep = "_")]
  vi_i[, clusterplot := paste(CLSTR_ID, PLOT, sep = "_")]
  vi_i <- vi_i[clusterplot %in% clusterplotHeader$clusterplot,]
  if(nrow(vi_i) > 0){
    # De-duplicate trees on cluster/plot/tree number
    vi_i <- unique(vi_i, by = c("CLSTR_ID", "PLOT", "TREE_NO"))
    # Missing LV_D defaults to "L" (presumably live/dead status,
    # defaulting to live -- TODO confirm coding)
    vi_i[is.na(LV_D), LV_D := "L"]
    vi_i[, TREE_WT := 1]
    # Basal area per tree: pi * (DBH/200)^2 -- assumes DBH is in cm, so
    # DBH/200 is the radius in metres and BA_TREE is in m^2 (verify units)
    vi_i[DBH != 0, BA_TREE := pi * ((DBH/200)^2)]
    # Attach sampling-design and BEC attributes from the header
    vi_i <- FAIBBase::merge_dupUpdate(vi_i, clusterplotHeader[, .(clusterplot, SAMP_TYP,
                                                                  BLOWUP, PLOT_WT,
                                                                  BEC_ZONE,
                                                                  BEC_SBZ)],
                                      by = "clusterplot", all.x = TRUE)
    # Keep the original species code and apply the BEC-based correction
    vi_i[, SPECIES_ORG := SPECIES]
    vi_i[, SPECIES := speciesCorrection(SPECIES,
                                        BEC_ZONE,
                                        BEC_SBZ)]
    # Per-hectare factor for each tree from the sampling design
    vi_i[, PHF_TREE := FAIBBase::PHFCalculator(sampleType = SAMP_TYP, blowUp = BLOWUP, treeWeight = TREE_WT,
                                               plotWeight = PLOT_WT, treeBasalArea = BA_TREE)]
    return(vi_i[,.(CLSTR_ID, PLOT, TREE_NO, SPECIES_ORG, SPECIES,
                   DBH, BA_TREE,
                   PHF_TREE, LV_D)])
  } else {
    # No matching trees: return the (empty) table with a minimal column set
    return(vi_i[,.(CLSTR_ID, PLOT, SPECIES, TREE_NO)])
  }
  ## please note that for auxiliary plots, they are part of VRI design, therefore, there no small tree plot (DBH<9),
  ## no need for adjusting tree per ha factor
}
|
#' Build a tidy summary of the UCI HAR dataset.
#'
#' Assumes the working directory is the root of the unzipped
#' "UCI HAR Dataset" folder. Reads the test and train sets, keeps only the
#' mean and standard-deviation features, labels subjects and activities,
#' and writes the per-subject, per-activity feature means to "tidyData.txt".
#'
#' Depends on `extract_names()` defined in this file.
#'
#' @return The value of `write.table()` (NULL); called for its side effect
#'   of writing "tidyData.txt" in the working directory.
run_analysis <- function() {
  ## load libraries
  library(dplyr)
  library(reshape2)
  ## read in test data
  test_data <- read.table("./test/X_test.txt", header = FALSE)
  test_subject <- read.table("./test/subject_test.txt", header = FALSE)
  test_activity <- read.table("./test/y_test.txt", header = FALSE)
  ## Feature names live in "features.txt"; keep the columns whose names
  ## mention "mean" or "std".
  ## NOTE: grep("mean", ...) also matches "meanFreq" features -- kept as-is
  ## to preserve the original column selection.
  feature <- read.table("features.txt", header = FALSE)
  featureC <- as.character(feature$V2)
  with_mean <- grep("mean", featureC)
  with_std <- grep("std", featureC)
  ## the columns that are of interest to us: mean and std values
  columns <- c(with_mean, with_std)
  ## subset and label the test measurements
  subset_test_data <- test_data[columns]
  names <- extract_names(featureC, columns)
  colnames(subset_test_data) <- names
  colnames(test_subject) <- c("subjectId")
  colnames(test_activity) <- c("activity")
  ## merge test_data with subjectId, activity
  merged_test_data <- cbind(test_subject, test_activity, subset_test_data)
  ## read in train data and apply the same steps
  train_data <- read.table("./train/X_train.txt", header = FALSE)
  train_subject <- read.table("./train/subject_train.txt", header = FALSE)
  train_activity <- read.table("./train/y_train.txt", header = FALSE)
  subset_train_data <- train_data[columns]
  colnames(subset_train_data) <- names
  colnames(train_subject) <- c("subjectId")
  colnames(train_activity) <- c("activity")
  merged_train_data <- cbind(train_subject, train_activity, subset_train_data)
  ## combine both sets and order by subject, then activity
  cleaned_data <- rbind(merged_test_data, merged_train_data)
  cleaned_data <- arrange(cleaned_data, subjectId, activity)
  ## replace the numeric activity codes (1-6) with descriptive names
  cleaned_data$activity[cleaned_data$activity == 1] <- "walking"
  cleaned_data$activity[cleaned_data$activity == 2] <- "walking_upstairs"
  cleaned_data$activity[cleaned_data$activity == 3] <- "walking_downstairs"
  cleaned_data$activity[cleaned_data$activity == 4] <- "sitting"
  cleaned_data$activity[cleaned_data$activity == 5] <- "standing"
  cleaned_data$activity[cleaned_data$activity == 6] <- "laying"
  ## melt to long format, then average each feature per subject/activity
  melted_data <- melt(cleaned_data, id = c("subjectId", "activity"))
  final_data <- dcast(melted_data, subjectId + activity ~ variable, mean)
  ## write the tidy data set to the working directory.
  ## FIX: was `row.name = FALSE`, which only works via partial argument
  ## matching; the argument is `row.names`.
  write.table(final_data, "tidyData.txt", row.names = FALSE)
}
#' Extract feature description names by column index.
#'
#' @param data Character vector of feature names (from features.txt).
#' @param columns Integer vector of positions to extract from `data`.
#' @return Character vector of the names at positions `columns`
#'   (`character(0)` when `columns` is empty).
extract_names <- function(data, columns) {
  ## Vectorized subsetting replaces the original element-by-element loop,
  ## which also misbehaved on empty `columns` (1:length(columns) == c(1, 0)).
  data[columns]
}
|
/run_analysis.R
|
no_license
|
bbqsatay/GettingAndCleaningDataRepo
|
R
| false
| false
| 4,060
|
r
|
#' Build a tidy summary of the UCI HAR dataset.
#'
#' Assumes the working directory is the root of the unzipped
#' "UCI HAR Dataset" folder. Reads the test and train sets, keeps only the
#' mean and standard-deviation features, labels subjects and activities,
#' and writes the per-subject, per-activity feature means to "tidyData.txt".
#'
#' Depends on `extract_names()` defined in this file.
#'
#' @return The value of `write.table()` (NULL); called for its side effect
#'   of writing "tidyData.txt" in the working directory.
run_analysis <- function() {
  ## load libraries
  library(dplyr)
  library(reshape2)
  ## read in test data
  test_data <- read.table("./test/X_test.txt", header = FALSE)
  test_subject <- read.table("./test/subject_test.txt", header = FALSE)
  test_activity <- read.table("./test/y_test.txt", header = FALSE)
  ## Feature names live in "features.txt"; keep the columns whose names
  ## mention "mean" or "std".
  ## NOTE: grep("mean", ...) also matches "meanFreq" features -- kept as-is
  ## to preserve the original column selection.
  feature <- read.table("features.txt", header = FALSE)
  featureC <- as.character(feature$V2)
  with_mean <- grep("mean", featureC)
  with_std <- grep("std", featureC)
  ## the columns that are of interest to us: mean and std values
  columns <- c(with_mean, with_std)
  ## subset and label the test measurements
  subset_test_data <- test_data[columns]
  names <- extract_names(featureC, columns)
  colnames(subset_test_data) <- names
  colnames(test_subject) <- c("subjectId")
  colnames(test_activity) <- c("activity")
  ## merge test_data with subjectId, activity
  merged_test_data <- cbind(test_subject, test_activity, subset_test_data)
  ## read in train data and apply the same steps
  train_data <- read.table("./train/X_train.txt", header = FALSE)
  train_subject <- read.table("./train/subject_train.txt", header = FALSE)
  train_activity <- read.table("./train/y_train.txt", header = FALSE)
  subset_train_data <- train_data[columns]
  colnames(subset_train_data) <- names
  colnames(train_subject) <- c("subjectId")
  colnames(train_activity) <- c("activity")
  merged_train_data <- cbind(train_subject, train_activity, subset_train_data)
  ## combine both sets and order by subject, then activity
  cleaned_data <- rbind(merged_test_data, merged_train_data)
  cleaned_data <- arrange(cleaned_data, subjectId, activity)
  ## replace the numeric activity codes (1-6) with descriptive names
  cleaned_data$activity[cleaned_data$activity == 1] <- "walking"
  cleaned_data$activity[cleaned_data$activity == 2] <- "walking_upstairs"
  cleaned_data$activity[cleaned_data$activity == 3] <- "walking_downstairs"
  cleaned_data$activity[cleaned_data$activity == 4] <- "sitting"
  cleaned_data$activity[cleaned_data$activity == 5] <- "standing"
  cleaned_data$activity[cleaned_data$activity == 6] <- "laying"
  ## melt to long format, then average each feature per subject/activity
  melted_data <- melt(cleaned_data, id = c("subjectId", "activity"))
  final_data <- dcast(melted_data, subjectId + activity ~ variable, mean)
  ## write the tidy data set to the working directory.
  ## FIX: was `row.name = FALSE`, which only works via partial argument
  ## matching; the argument is `row.names`.
  write.table(final_data, "tidyData.txt", row.names = FALSE)
}
#' Extract feature description names by column index.
#'
#' @param data Character vector of feature names (from features.txt).
#' @param columns Integer vector of positions to extract from `data`.
#' @return Character vector of the names at positions `columns`
#'   (`character(0)` when `columns` is empty).
extract_names <- function(data, columns) {
  ## Vectorized subsetting replaces the original element-by-element loop,
  ## which also misbehaved on empty `columns` (1:length(columns) == c(1, 0)).
  data[columns]
}
|
# Unit tests for HostHttpServer: lifecycle (start/stop), request routing,
# request handling (ticket/token authorization, CORS), and the individual
# endpoint handlers (options/home/static/post/get/put).
# NOTE(review): relies on a `host` object and on stringr/jsonlite functions
# provided by the testthat setup files -- not visible in this file.
test_that("HostHttpServer$stop+start", {
  s1 = HostHttpServer$new(NULL)
  s2 = HostHttpServer$new(NULL)
  expect_equal(s1$url, NULL)
  s1$start()
  expect_true(str_detect(s1$url, '^http://127.0.0.1'))
  s2$start()
  # Successive servers take ports spaced 10 apart
  p1 <- as.integer(str_match(s1$url, '^http://127.0.0.1:(\\d+)')[1, 2])
  p2 <- as.integer(str_match(s2$url, '^http://127.0.0.1:(\\d+)')[1, 2])
  expect_equal(p2, p1+10)
  s2$stop()
  # Unfortunately this timesout here. But will work from a separate R process.
  #r = GET(s$origin, timeout(10))
  s1$stop()
  # url is cleared once the server has stopped
  expect_equal(s1$url, NULL)
})
test_that("HostHttpServer.route", {
  s = HostHttpServer$new(NULL)
  # route() maps (method, path) to a handler plus its arguments
  expect_equal(s$route('OPTIONS', NULL), list(s$options))
  expect_equal(s$route('GET', '/'), list(s$home))
  expect_equal(s$route('GET', '/static/some/file.js'), list(s$static, 'some/file.js'))
  expect_equal(s$route('GET', '/favicon.ico'), list(s$static, 'favicon.ico'))
  expect_equal(s$route('POST', '/type'), list(s$post, 'type'))
  expect_equal(s$route('GET', '/id'), list(s$get, 'id'))
  expect_equal(s$route('PUT', '/id!method'), list(s$put, 'id', 'method'))
  expect_equal(s$route('DELETE', '/id'), list(s$delete, 'id'))
})
test_that("HostHttpServer.handle", {
  s = HostHttpServer$new(host)
  # Unauthenticated request is refused
  r <- s$handle(list(
    PATH_INFO = '/',
    REQUEST_METHOD = 'GET',
    HTTP_ACCEPT = '',
    rook.input = list(read_lines = function() NULL)
  ))
  expect_equal(r$status, 403)
  # Authorization using a ticket
  r <- s$handle(list(
    PATH_INFO = '/',
    QUERY_STRING = paste0('?ticket=', s$ticket_create()),
    REQUEST_METHOD = 'GET',
    HTTP_ACCEPT = '',
    rook.input = list(read_lines = function() NULL)
  ))
  expect_equal(r$status, 200)
  # A successful ticket login sets a session token cookie
  expect_equal(str_sub(r$headers['Set-Cookie'], 1, 6), 'token=')
  token <- str_match(r$headers['Set-Cookie'], 'token=([a-zA-Z0-9]+);')[1,2]
  expect_equal(str_sub(r$body, 1, 23), '<!doctype html>\n<html>\n')
  # Authorization using a token
  r <- s$handle(list(
    PATH_INFO = '/',
    REQUEST_METHOD = 'GET',
    HTTP_ACCEPT = 'application/json',
    HTTP_COOKIE = paste0('token=', token),
    rook.input = list(read_lines = function() NULL)
  ))
  expect_equal(r$status, 200)
  expect_equal(str_sub(r$body, 1, 22), '{"stencila":{"package"')
  # Browser-based CORS request
  for (origin in c('http://127.0.0.1:2000', 'http://localhost:2010', 'https://open.stenci.la')) {
    r <- s$handle(list(
      PATH_INFO = '/',
      REQUEST_METHOD = 'GET',
      HTTP_COOKIE = paste0('token=', token),
      HTTP_REFERER = sprintf('%s/some/file/path', origin),
      rook.input = list(read_lines = function() NULL)
    ))
    expect_equal(r$headers[['Access-Control-Allow-Origin']], origin)
    expect_equal(r$headers[['Access-Control-Allow-Credentials']], 'true')
  }
  # Browser-based CORS pre-flight request
  for (origin in c('http://127.0.0.1:2000', 'http://localhost:2010', 'https://open.stenci.la')) {
    r <- s$handle(list(
      PATH_INFO = '/',
      REQUEST_METHOD = 'OPTIONS',
      HTTP_ORIGIN = origin,
      rook.input = list(read_lines = function() NULL)
    ))
    expect_equal(r$headers[['Access-Control-Allow-Origin']], origin)
    expect_equal(r$headers[['Access-Control-Allow-Credentials']], 'true')
    expect_equal(r$headers[['Access-Control-Allow-Methods']], 'GET, POST, PUT, DELETE, OPTIONS')
    expect_equal(r$headers[['Access-Control-Max-Age']], '86400')
  }
  # Browser-based CORS pre-flight request from third party site:
  # no Allow-Origin header is returned for unrecognized origins
  r <- s$handle(list(
    PATH_INFO = '/',
    REQUEST_METHOD = 'OPTIONS',
    HTTP_ORIGIN = 'http://evil.hackers.com',
    rook.input = list(read_lines = function() NULL)
  ))
  expect_equal(r$headers[['Access-Control-Allow-Origin']], NULL)
})
test_that("HostHttpServer.options", {
  s = HostHttpServer$new(host)
  r = s$options()
  expect_equal(r$status, 200)
  expect_equal(r$body, '')
})
test_that("HostHttpServer.home", {
  s = HostHttpServer$new(host)
  # Content negotiation: JSON manifest when Accept asks for it...
  r = s$home(list(headers=list('Accept'='application/json')))
  expect_equal(r$status, 200)
  expect_equal(fromJSON(r$body)$stencila, host$manifest()$stencila)
  # ...HTML otherwise
  r = s$home(list())
  expect_equal(r$status, 200)
  expect_equal(r$headers[['Content-Type']], 'text/html')
})
test_that("HostHttpServer.static", {
  s = HostHttpServer$new(host)
  r = s$static(list(), 'logo-name-beta.svg')
  expect_equal(r$status, 200)
  expect_equal(r$headers[['Content-Type']], 'image/svg+xml')
  expect_equal(str_sub(r$body, 1, 54), '<?xml version="1.0" encoding="UTF-8" standalone="no"?>')
  # Unknown file -> 404
  r = s$static(list(), 'foo.bar')
  expect_equal(r$status, 404)
  # Path traversal outside the static dir is forbidden
  r = s$static(list(), '../DESCRIPTION')
  expect_equal(r$status, 403)
})
test_that("HostHttpServer.post", {
  s = HostHttpServer$new(host)
  r = s$post(list(), 'RContext')
  expect_equal(r$status, 200)
  expect_equal(r$headers[['Content-Type']], 'application/json')
})
test_that("HostHttpServer.get", {
  s = HostHttpServer$new(host)
  # post() creates an instance and returns its id; get() fetches it
  r1 = s$post(list(), 'RContext')
  r2 = s$get(list(), fromJSON(r1$body))
  expect_equal(r2$status, 200)
  expect_equal(r2$headers[['Content-Type']], 'application/json')
  expect_equal(r2$body, '{}')
})
test_that("HostHttpServer.put", {
  s = HostHttpServer$new(host)
  # put() calls a method ('executeCode') on the instance created by post()
  r1 = s$post(list(), 'RContext')
  id = fromJSON(r1$body)
  r2 = s$put(list(body='{"code":"6*7"}'), id, 'executeCode')
  expect_equal(r2$status, 200)
  expect_equal(r2$headers[['Content-Type']], 'application/json')
  expect_equal(fromJSON(r2$body)$value$data, 42)
})
|
/tests/testthat/test-host-http-server.R
|
permissive
|
RaoOfPhysics/r
|
R
| false
| false
| 5,420
|
r
|
# Unit tests for HostHttpServer: lifecycle (start/stop), request routing,
# request handling (ticket/token authorization, CORS), and the individual
# endpoint handlers (options/home/static/post/get/put).
# NOTE(review): relies on a `host` object and on stringr/jsonlite functions
# provided by the testthat setup files -- not visible in this file.
test_that("HostHttpServer$stop+start", {
  s1 = HostHttpServer$new(NULL)
  s2 = HostHttpServer$new(NULL)
  expect_equal(s1$url, NULL)
  s1$start()
  expect_true(str_detect(s1$url, '^http://127.0.0.1'))
  s2$start()
  # Successive servers take ports spaced 10 apart
  p1 <- as.integer(str_match(s1$url, '^http://127.0.0.1:(\\d+)')[1, 2])
  p2 <- as.integer(str_match(s2$url, '^http://127.0.0.1:(\\d+)')[1, 2])
  expect_equal(p2, p1+10)
  s2$stop()
  # Unfortunately this timesout here. But will work from a separate R process.
  #r = GET(s$origin, timeout(10))
  s1$stop()
  # url is cleared once the server has stopped
  expect_equal(s1$url, NULL)
})
test_that("HostHttpServer.route", {
  s = HostHttpServer$new(NULL)
  # route() maps (method, path) to a handler plus its arguments
  expect_equal(s$route('OPTIONS', NULL), list(s$options))
  expect_equal(s$route('GET', '/'), list(s$home))
  expect_equal(s$route('GET', '/static/some/file.js'), list(s$static, 'some/file.js'))
  expect_equal(s$route('GET', '/favicon.ico'), list(s$static, 'favicon.ico'))
  expect_equal(s$route('POST', '/type'), list(s$post, 'type'))
  expect_equal(s$route('GET', '/id'), list(s$get, 'id'))
  expect_equal(s$route('PUT', '/id!method'), list(s$put, 'id', 'method'))
  expect_equal(s$route('DELETE', '/id'), list(s$delete, 'id'))
})
test_that("HostHttpServer.handle", {
  s = HostHttpServer$new(host)
  # Unauthenticated request is refused
  r <- s$handle(list(
    PATH_INFO = '/',
    REQUEST_METHOD = 'GET',
    HTTP_ACCEPT = '',
    rook.input = list(read_lines = function() NULL)
  ))
  expect_equal(r$status, 403)
  # Authorization using a ticket
  r <- s$handle(list(
    PATH_INFO = '/',
    QUERY_STRING = paste0('?ticket=', s$ticket_create()),
    REQUEST_METHOD = 'GET',
    HTTP_ACCEPT = '',
    rook.input = list(read_lines = function() NULL)
  ))
  expect_equal(r$status, 200)
  # A successful ticket login sets a session token cookie
  expect_equal(str_sub(r$headers['Set-Cookie'], 1, 6), 'token=')
  token <- str_match(r$headers['Set-Cookie'], 'token=([a-zA-Z0-9]+);')[1,2]
  expect_equal(str_sub(r$body, 1, 23), '<!doctype html>\n<html>\n')
  # Authorization using a token
  r <- s$handle(list(
    PATH_INFO = '/',
    REQUEST_METHOD = 'GET',
    HTTP_ACCEPT = 'application/json',
    HTTP_COOKIE = paste0('token=', token),
    rook.input = list(read_lines = function() NULL)
  ))
  expect_equal(r$status, 200)
  expect_equal(str_sub(r$body, 1, 22), '{"stencila":{"package"')
  # Browser-based CORS request
  for (origin in c('http://127.0.0.1:2000', 'http://localhost:2010', 'https://open.stenci.la')) {
    r <- s$handle(list(
      PATH_INFO = '/',
      REQUEST_METHOD = 'GET',
      HTTP_COOKIE = paste0('token=', token),
      HTTP_REFERER = sprintf('%s/some/file/path', origin),
      rook.input = list(read_lines = function() NULL)
    ))
    expect_equal(r$headers[['Access-Control-Allow-Origin']], origin)
    expect_equal(r$headers[['Access-Control-Allow-Credentials']], 'true')
  }
  # Browser-based CORS pre-flight request
  for (origin in c('http://127.0.0.1:2000', 'http://localhost:2010', 'https://open.stenci.la')) {
    r <- s$handle(list(
      PATH_INFO = '/',
      REQUEST_METHOD = 'OPTIONS',
      HTTP_ORIGIN = origin,
      rook.input = list(read_lines = function() NULL)
    ))
    expect_equal(r$headers[['Access-Control-Allow-Origin']], origin)
    expect_equal(r$headers[['Access-Control-Allow-Credentials']], 'true')
    expect_equal(r$headers[['Access-Control-Allow-Methods']], 'GET, POST, PUT, DELETE, OPTIONS')
    expect_equal(r$headers[['Access-Control-Max-Age']], '86400')
  }
  # Browser-based CORS pre-flight request from third party site:
  # no Allow-Origin header is returned for unrecognized origins
  r <- s$handle(list(
    PATH_INFO = '/',
    REQUEST_METHOD = 'OPTIONS',
    HTTP_ORIGIN = 'http://evil.hackers.com',
    rook.input = list(read_lines = function() NULL)
  ))
  expect_equal(r$headers[['Access-Control-Allow-Origin']], NULL)
})
test_that("HostHttpServer.options", {
  s = HostHttpServer$new(host)
  r = s$options()
  expect_equal(r$status, 200)
  expect_equal(r$body, '')
})
test_that("HostHttpServer.home", {
  s = HostHttpServer$new(host)
  # Content negotiation: JSON manifest when Accept asks for it...
  r = s$home(list(headers=list('Accept'='application/json')))
  expect_equal(r$status, 200)
  expect_equal(fromJSON(r$body)$stencila, host$manifest()$stencila)
  # ...HTML otherwise
  r = s$home(list())
  expect_equal(r$status, 200)
  expect_equal(r$headers[['Content-Type']], 'text/html')
})
test_that("HostHttpServer.static", {
  s = HostHttpServer$new(host)
  r = s$static(list(), 'logo-name-beta.svg')
  expect_equal(r$status, 200)
  expect_equal(r$headers[['Content-Type']], 'image/svg+xml')
  expect_equal(str_sub(r$body, 1, 54), '<?xml version="1.0" encoding="UTF-8" standalone="no"?>')
  # Unknown file -> 404
  r = s$static(list(), 'foo.bar')
  expect_equal(r$status, 404)
  # Path traversal outside the static dir is forbidden
  r = s$static(list(), '../DESCRIPTION')
  expect_equal(r$status, 403)
})
test_that("HostHttpServer.post", {
  s = HostHttpServer$new(host)
  r = s$post(list(), 'RContext')
  expect_equal(r$status, 200)
  expect_equal(r$headers[['Content-Type']], 'application/json')
})
test_that("HostHttpServer.get", {
  s = HostHttpServer$new(host)
  # post() creates an instance and returns its id; get() fetches it
  r1 = s$post(list(), 'RContext')
  r2 = s$get(list(), fromJSON(r1$body))
  expect_equal(r2$status, 200)
  expect_equal(r2$headers[['Content-Type']], 'application/json')
  expect_equal(r2$body, '{}')
})
test_that("HostHttpServer.put", {
  s = HostHttpServer$new(host)
  # put() calls a method ('executeCode') on the instance created by post()
  r1 = s$post(list(), 'RContext')
  id = fromJSON(r1$body)
  r2 = s$put(list(body='{"code":"6*7"}'), id, 'executeCode')
  expect_equal(r2$status, 200)
  expect_equal(r2$headers[['Content-Type']], 'application/json')
  expect_equal(fromJSON(r2$body)$value$data, 42)
})
|
# Tests for super::super(): invoking the nearest enclosing (shadowed)
# definition of the calling function, across nested scopes, with argument
# forwarding and non-standard evaluation (NSE) preserved.
context("super")
test_that("it should call the parent method once", {
  calls <- integer(0)
  function1 <- function() { calls <<- c(calls, 1L) }
  local({
    # Shadow function1 and delegate to the outer definition via super()
    function1 <- function() {
      calls <<- c(calls, 2L)
      super::super()
    }
    function1()
  })
  # Child runs first, then the parent it delegated to
  expect_equal(calls, c(2L, 1L))
})
test_that("it should call the parent method twice removed", {
  calls <- integer(0)
  function1 <- function() { calls <<- c(calls, 1L) }
  local({
    local({
      # super() must find the parent two scopes up
      function1 <- function() {
        calls <<- c(calls, 2L)
        super::super()
      }
      function1()
    })
  })
  expect_equal(calls, c(2L, 1L))
})
test_that("it should call the parent method twice removed with another super call", {
  calls <- integer(0)
  local({
    level1 <- TRUE
    function1 <- function() {
      calls <<- c(calls, 1L)
    }
    local({
      level2 <- TRUE
      function1 <- function() {
        calls <<- c(calls, 2L)
        super::super()
      }
      local({
        level3 <- TRUE
        # Chained delegation: level3 -> level2 -> level1
        function1 <- function() {
          calls <<- c(calls, 3L)
          super::super()
        }
        function1()
      })
    })
  })
  expect_equal(calls, c(3L, 2L, 1L))
})
test_that("it calls the parent method in a single example chain", {
  calls <- integer(0)
  function1 <- function() {
    calls <<- c(calls, 1L)
    invisible(NULL)
  }
  local({
    function2 <- function() {
      function1 <- function() {
        calls <<- c(calls, 2L)
        super()
      }
      function3 <- function() {
        # Shadowing inside sibling function scopes, not just local()
        function1 <- function() {
          calls <<- c(calls, 3L)
          super()
        }
        function1()
      }
      function3()
    }
    function2()
  })
  expect_equal(calls, 3L:1L)
})
test_that("it errors when no super method exists", {
  # No enclosing definition of not_a_function exists to delegate to
  not_a_function <- function() { super() }
  expect_error(not_a_function(), "No parent function")
})
test_that("it can call with different super arguments", {
  calls <- integer(0)
  function1 <- function(x) {
    calls <<- c(calls, x)
  }
  function2 <- function() {
    function1 <- function(y) {
      calls <<- c(calls, y)
      # The parent is called with an explicit new argument, not the original
      super(2)
    }
    function1(1)
  }
  function2()
  expect_equal(calls, c(1L, 2L))
})
test_that("it can call without executing twice from a non-base call", {
  calls <- integer(0)
  function1 <- function(x) {
    calls <<- c(calls, x)
  }
  function2 <- function() {
    function1 <- function(y) {
      calls <<- c(calls, y)
      super(2)
    }
    # Calling from a nested local() must not re-run the chain
    local({
      function1(1)
    })
  }
  function2()
  expect_equal(calls, c(1L, 2L))
})
test_that("it can execute a simple local call", {
  expect_output({
    out <- function(x) print(x)
    local({
      out <- function(x) { super::super(x) }
      out("hi")
    })
  }, "hi")
})
test_that("it passes on non-standard evaluation", {
  # The unevaluated symbol `hi` must reach the parent's substitute()
  expect_output({
    out <- function(x) deparse(substitute(x))
    local({
      out <- function(x) { super::super(x) }
      out(hi)
    })
  }, "hi")
})
test_that("it passes on non-standard evaluation and scope", {
  # Both the value (from the child's scope) and the original expression
  # must be visible to the parent
  expect_equal({
    out <- function(x) list(x, deparse(substitute(x)))
    local({
      out <- function(x) { super::super(x) }
      val <- 1
      out(val)
    })
  }, list(1, "val"))
})
test_that("it passes on non-standard evaluation and scope with tweaks", {
  # A modified expression (val + 1) is evaluated in the child's scope and
  # deparsed as written
  expect_equal({
    out <- function(x) list(x, deparse(substitute(x)))
    local({
      out <- function(x) { super::super(x + 1) }
      val <- 1
      out(val)
    })
  }, list(2, "val + 1"))
})
test_that("it is smart about translating NSE through name changes", {
  # TODO: (RK) Is this really the correct behavior?
  # options(super.debug=T);on.exit(options(super.debug=F))
  # Child parameter `y` maps onto parent parameter `x`; the original
  # expression `val` is what the parent's substitute() sees
  expect_equal({
    out <- function(x) list(x, deparse(substitute(x)))
    local({
      out <- function(y) { super::super(y) }
      val <- 1
      out(y = val)
    })
  }, list(1, "val"))
})
test_that("it is smart about translating NSE through name swaps", {
  # Positional forwarding with the child's parameters declared in the
  # opposite order to the parent's
  expect_equal({
    out <- function(x, y) list(x, y, deparse(substitute(x)), deparse(substitute(y)))
    local({
      out <- function(y, x) { super::super(x, y) }
      val <- 1
      val2 <- 2
      out(y = val, x = val2)
    })
  }, list(2, 1, "val2", "val"))
})
test_that("it is smart about translating NSE through named name swaps", {
  # Named forwarding overrides the positional swap above
  expect_equal({
    out <- function(x, y) list(x, y, deparse(substitute(x)), deparse(substitute(y)))
    local({
      out <- function(y, x) { super::super(y = x, x = y) }
      val <- 1
      val2 <- 2
      out(y = val, x = val2)
    })
  }, list(1, 2, "val", "val2"))
})
test_that("it does not allow get calls in super (for now)", {
  # Shadowing base::get is explicitly unsupported and should error
  get <- function() { cat("hi") }
  local({
    get <- function() { super::super() }
    local({
      get <- function() { super::super() }
      expect_error(get(), "super::super does not")
    })
  })
})
|
/tests/testthat/test-super.R
|
no_license
|
robertzk/super
|
R
| false
| false
| 4,799
|
r
|
# Tests for super::super(): calling the same-named function defined in an
# enclosing (parent) scope, analogous to a `super` call in OO languages.
# NOTE(review): local variable and argument names in these tests are
# load-bearing -- several expectations assert on deparse(substitute(...))
# output, so renaming would change the expected strings.
context("super")

# A shadowed function1 delegates upward exactly once: child logs 2, parent 1.
test_that("it should call the parent method once", {
  calls <- integer(0)
  function1 <- function() { calls <<- c(calls, 1L) }
  local({
    function1 <- function() {
      calls <<- c(calls, 2L)
      super::super()
    }
    function1()
  })
  expect_equal(calls, c(2L, 1L))
})

# The parent definition may live two environments up the scope chain.
test_that("it should call the parent method twice removed", {
  calls <- integer(0)
  function1 <- function() { calls <<- c(calls, 1L) }
  local({
    local({
      function1 <- function() {
        calls <<- c(calls, 2L)
        super::super()
      }
      function1()
    })
  })
  expect_equal(calls, c(2L, 1L))
})

# Chained super calls walk the whole shadowing chain: 3 -> 2 -> 1.
test_that("it should call the parent method twice removed with another super call", {
  calls <- integer(0)
  local({
    level1 <- TRUE
    function1 <- function() {
      calls <<- c(calls, 1L)
    }
    local({
      level2 <- TRUE
      function1 <- function() {
        calls <<- c(calls, 2L)
        super::super()
      }
      local({
        level3 <- TRUE
        function1 <- function() {
          calls <<- c(calls, 3L)
          super::super()
        }
        function1()
      })
    })
  })
  expect_equal(calls, c(3L, 2L, 1L))
})

# super() also works unqualified (without the super:: prefix) inside nested
# function definitions.
test_that("it calls the parent method in a single example chain", {
  calls <- integer(0)
  function1 <- function() {
    calls <<- c(calls, 1L)
    invisible(NULL)
  }
  local({
    function2 <- function() {
      function1 <- function() {
        calls <<- c(calls, 2L)
        super()
      }
      function3 <- function() {
        function1 <- function() {
          calls <<- c(calls, 3L)
          super()
        }
        function1()
      }
      function3()
    }
    function2()
  })
  expect_equal(calls, 3L:1L)
})

# With no same-named function in any parent scope, super() must error.
test_that("it errors when no super method exists", {
  not_a_function <- function() { super() }
  expect_error(not_a_function(), "No parent function")
})

# Arguments passed to super() replace those of the current call.
test_that("it can call with different super arguments", {
  calls <- integer(0)
  function1 <- function(x) {
    calls <<- c(calls, x)
  }
  function2 <- function() {
    function1 <- function(y) {
      calls <<- c(calls, y)
      super(2)
    }
    function1(1)
  }
  function2()
  expect_equal(calls, c(1L, 2L))
})

# Invoking the shadowing function from a deeper frame (inside local) must not
# execute the chain twice.
test_that("it can call without executing twice from a non-base call", {
  calls <- integer(0)
  function1 <- function(x) {
    calls <<- c(calls, x)
  }
  function2 <- function() {
    function1 <- function(y) {
      calls <<- c(calls, y)
      super(2)
    }
    local({
      function1(1)
    })
  }
  function2()
  expect_equal(calls, c(1L, 2L))
})

# Arguments are forwarded through to the parent implementation.
test_that("it can execute a simple local call", {
  expect_output({
    out <- function(x) print(x)
    local({
      out <- function(x) { super::super(x) }
      out("hi")
    })
  }, "hi")
})

# The parent sees the caller's original unevaluated expression (`hi`).
test_that("it passes on non-standard evaluation", {
  expect_output({
    out <- function(x) deparse(substitute(x))
    local({
      out <- function(x) { super::super(x) }
      out(hi)
    })
  }, "hi")
})

# Scope + NSE: the value comes from the caller's environment, the text from
# the caller's unevaluated expression.
test_that("it passes on non-standard evaluation and scope", {
  expect_equal({
    out <- function(x) list(x, deparse(substitute(x)))
    local({
      out <- function(x) { super::super(x) }
      val <- 1
      out(val)
    })
  }, list(1, "val"))
})

# Modified argument expressions are forwarded literally (`val + 1`).
test_that("it passes on non-standard evaluation and scope with tweaks", {
  expect_equal({
    out <- function(x) list(x, deparse(substitute(x)))
    local({
      out <- function(x) { super::super(x + 1) }
      val <- 1
      out(val)
    })
  }, list(2, "val + 1"))
})

# When the overriding function renames the parameter (x -> y), the original
# caller expression is still recovered.
test_that("it is smart about translating NSE through name changes", {
  # TODO: (RK) Is this really the correct behavior?
  # options(super.debug=T);on.exit(options(super.debug=F))
  expect_equal({
    out <- function(x) list(x, deparse(substitute(x)))
    local({
      out <- function(y) { super::super(y) }
      val <- 1
      out(y = val)
    })
  }, list(1, "val"))
})

# Positional swap: child takes (y, x) and forwards (x, y); the parent still
# receives the right values and expressions.
test_that("it is smart about translating NSE through name swaps", {
  expect_equal({
    out <- function(x, y) list(x, y, deparse(substitute(x)), deparse(substitute(y)))
    local({
      out <- function(y, x) { super::super(x, y) }
      val <- 1
      val2 <- 2
      out(y = val, x = val2)
    })
  }, list(2, 1, "val2", "val"))
})

# Named swap: forwarding with explicit names (y = x, x = y) re-maps the
# arguments back onto the parent's parameter names.
test_that("it is smart about translating NSE through named name swaps", {
  expect_equal({
    out <- function(x, y) list(x, y, deparse(substitute(x)), deparse(substitute(y)))
    local({
      out <- function(y, x) { super::super(y = x, x = y) }
      val <- 1
      val2 <- 2
      out(y = val, x = val2)
    })
  }, list(1, 2, "val", "val2"))
})

# Shadowing `get` with super calls is explicitly unsupported and must error.
test_that("it does not allow get calls in super (for now)", {
  get <- function() { cat("hi") }
  local({
    get <- function() { super::super() }
    local({
      get <- function() { super::super() }
      expect_error(get(), "super::super does not")
    })
  })
})
|
install.packages("dplyr")
install.packages("ggplot2")
install.packages("ggmap")
install.packages("parallel")
install.packages("doParallel")
install.packages("caret")
install.packages("dplyr")
install.packages("randomForest")
library("randomForest")
library("dplyr")
library("caret")
library("doParallel")
library("parallel")
library("dplyr")
library("ggplot2")
library("ggmap")
library("lubridate")
setwd("C:/Users/Lenovo/Desktop/Ubiqum_data/Wi-fi")
library(readr)
trainingData <- read_csv("C:/Users/Lenovo/Desktop/Ubiqum_data/Wi-fi/trainingData.csv")
ValidationData <- read_csv("C:/Users/Lenovo/Desktop/Ubiqum_data/Wi-fi/validationData.csv")
td<-trainingData
vd<-ValidationData
td$TIMESTAMP<- as_datetime(td$TIMESTAMP)
vd$TIMESTAMP<- as_datetime(vd$TIMESTAMP)
str(td)
summary(td[,1:52])
#### Changing the data types ####
# FLOOR/BUILDINGID/SPACEID/RELATIVEPOSITION/USERID/PHONEID are categorical
# identifiers, so convert them from integers to factors before modelling.
td$FLOOR <- as.factor(td$FLOOR)
td$BUILDINGID <- as.factor(td$BUILDINGID)
# BUG FIX: SPACEID was overwritten with USERID (as.factor(td$USERID));
# convert the SPACEID column itself. SPACEID is the internal ID of the space
# (office, corridor, classroom) where the capture was taken.
td$SPACEID <- as.factor(td$SPACEID)
td$RELATIVEPOSITION <- as.factor(td$RELATIVEPOSITION) # inside vs outside
td$USERID <- as.factor(td$USERID)
td$PHONEID <- as.factor(td$PHONEID)
# The same for the validation data:
vd$FLOOR <- as.factor(vd$FLOOR)
vd$BUILDINGID <- as.factor(vd$BUILDINGID)
vd$SPACEID <- as.factor(vd$SPACEID) # BUG FIX: was as.factor(vd$USERID)
vd$RELATIVEPOSITION <- as.factor(vd$RELATIVEPOSITION)
vd$USERID <- as.factor(vd$USERID)
vd$PHONEID <- as.factor(vd$PHONEID)
####CHANGING THE 100 TO -105 AND REMOVING THE REPEATED ROWS ####
td<-distinct(td)
td[td==100]<--105
vd<-distinct(vd)
vd[vd==100]<--105
#### counting the values which are not equal to 100####
td$count <- apply(td[,1:520], 1, function(x) length(which(x!=-105)))
td$max <- apply(td[,1:520], 1, max)
td$max_2<-apply(td[,1:520],1,function(x) names(td[,1:520])[which(x==max(x))])
td$max3<-apply(td[1:520],1,function(x) names(which.max(x)))
#vd #
vd$count <- apply(vd[,1:520], 1, function(x) length(which(x!=-105)))
vd$max <- apply(vd[,1:520], 1, max)
vd$max_2<-apply(vd[,1:520],1,function(x) names(vd[,1:520])[which(x==max(x))])
vd$max3<-apply(vd[1:520],1,function(x) names(which.max(x)))
#removing the rows ###
td<-subset(td,td$max!=0 & td$count!=0)
td<-subset(td,td$max<=-30 & td$max>=-80 )
#training dataset remove 1 value columns
waps_td<-td[,c(1:520)]
useless_waps<-apply(waps_td, 2, function(x) length(unique(x))==1)
td_new<-td[,-c(which(useless_waps==TRUE))]
waps_vd<-vd[,c(1:520)]
useless_waps_vd<-apply(waps_vd, 2, function(x) length(unique(x))==1)
vd_new<-vd[,-c(which(useless_waps_vd==TRUE))]
#identifying WAPS training
Waps_td_names <- grep("WAP", names(td_new), value = TRUE)
#identifying WAPS VALIDATION
Waps__vd_names <- grep("WAP", names(vd_new), value = TRUE)
Waps_tdvd <- intersect(Waps_td_names, Waps__vd_names)
x <- names(td_new[Waps_td_names]) %in% Waps_tdvd == FALSE
td_new_2 <- td_new[-which(x)]
#remove columns
y <- names(vd_new[Waps__vd_names]) %in% Waps_tdvd == FALSE
vd_new_2 <- vd_new[-which(y)]
####BUILDING PREDICTION ####
detectCores()
clusterF1 <- makeCluster(detectCores()-1)
registerDoParallel(clusterF1)
td_build<- select(td_new_2,-c(TIMESTAMP,USERID,PHONEID,max_2,max,count,SPACEID,FLOOR,
RELATIVEPOSITION,LATITUDE,LONGITUDE,max3))
## Model
set.seed(124)
fitControl <- trainControl(method = "repeatedcv", number=3,repeats = 3,
verboseIter = TRUE, allowParallel = TRUE)
## knn
knnFit <- train(BUILDINGID~.,data = td_build,method = "knn",
metric = "Accuracy",trControl = fitControl,preProcess = c("zv", "center", "scale"))
plot(knnFit)
knnFit
predict.knn <- predict(knnFit ,vd_new_2)
postResample(predict.knn , vd_new_2$BUILDINGID)
ConfusionMatrix<-confusionMatrix(vd_new_2$BUILDINGID , predict.knn)
ConfusionMatrix
save(knnFit, file = "knnFit.rda")
load("knnFit.rda")
rm(ConfusionMatrix)
#svm
SvmFit<-caret::train(BUILDINGID~., data= td_build, method="svmLinear",
trControl=fitControl,preProcess= c("center", "scale"))
SvmFit
predict.svm <- predict(SvmFit ,vd_new_2)
predict.svm2 <- predict(SvmFit ,td_new_2)
postResample(predict.svm , vd_new_2$BUILDINGID)
confusionMatrix(vd_new_2$BUILDINGID , predict.svm)
rm(confusionMatrix)
confusionMatrix
save(SvmFit, file = "SvmFit.rda")
load("SvmFit.rda")
#add prediction column in training dataset
td_new_2$build_prediction<-predict.svm2
vd_new_2$build_prediction<-predict.svm
td_new_2$B_fID<-as.factor(paste(td_new_2$BUILDINGID,td_new_2$FLOOR))
vd_new_2$B_fID<-as.factor(paste(vd_new_2$BUILDINGID,vd_new_2$FLOOR))
####FLOOR ####
td_floor<- select(td_new_2,-c(TIMESTAMP,USERID,PHONEID,max_2,count,SPACEID,BUILDINGID,FLOOR,
RELATIVEPOSITION,LATITUDE,LONGITUDE,max,max3,build_prediction))
#build.0 <- filter(td_floor, build_prediction == 0)
#build.1 <- filter(td_floor, build_prediction == 1)
#build.2 <- filter(td_floor, build_prediction== 2)
#vdbuild.0 <- filter(vd_new_2, build_prediction == 0)
#vdbuild.1 <- filter(vd_new_2, build_prediction == 1)
#vdbuild.2 <- filter(vd_new_2, build_prediction== 2)
#knn
fitControl <- trainControl(method = "repeatedcv", number=3,repeats = 3,
verboseIter = TRUE, allowParallel = TRUE)
#build.0$FLOOR<- factor(build.0$FLOOR)
#vdbuild.0$FLOOR<-factor(vdbuild.0$FLOOR)
## knn
knnFit_floor <- train(B_fID~.,data = td_floor,method = "knn",
metric = "Accuracy",trControl = fitControl,preProcess= c("center", "scale"))
knnFit_floor
#knnFit_floorb0
predict.knn_floor <- predict(knnFit_floor ,vd_new_2)
postResample(predict.knn_floor , vd_new_2$FLOOR)
ConfusionMatrix<-confusionMatrix(vd_new_2$FLOOR , predict.knn_floor)
ConfusionMatrix
save(knnFit_floor, file = "knnFit_floorb0.rda")
load("knnFit_floor.rda")
#svm
SvmFit_floor<-caret::train(B_fID~., data=td_floor, method="svmLinear",
trControl=fitControl,preProcess= c("center", "scale"))
SvmFit_floor
predict.svm_floor <- predict(SvmFit_floor ,vd_new_2)
predict.svm_floor2 <- predict(SvmFit_floor ,td_new_2)
postResample(predict.svm_floor , vd_new_2$B_fID)
confusionMatrix(vd_new_2$B_fID , predict.svm_floor)
rm(confusionMatrix)
save(SvmFit_floor, file = "SvmFit_floor.rda")
td_new_2$floor_prediction<-predict.svm_floor2
vd_new_2$floor_prediction<-predict.svm_floor
# Add dummy variable for BuildingID&floor id
DummyVar <- dummyVars("~BUILDINGID", data = td_new_2, fullRank=T)
DummyVarDF <- data.frame(predict(DummyVar, newdata = td_new_2))
td_new_2<-cbind(td_new_2, DummyVarDF)
DummyVar2 <- dummyVars("~BUILDINGID", data = vd_new_2, fullRank=T)
DummyVarDF2 <- data.frame(predict(DummyVar2, newdata = vd_new_2))
vd_new_2<-cbind(vd_new_2, DummyVarDF2)
#td_floor2<- select(td_new_2,-c(TIMESTAMP,USERID,PHONEID,max_2,count,SPACEID,BUILDINGID,
# RELATIVEPOSITION,LATITUDE,LONGITUDE,max,max3,build_prediction))
# Random forest for the combined building+floor label (B_fID)
WAP_floor <- grep("WAP", names(td_floor), value = TRUE)
# BUG FIX: tuneRF indexed with `WAP`, which is undefined at this point (it is
# only created later, for the latitude model); use WAP_floor instead.
bestmtry <- tuneRF(td_floor[WAP_floor], td_floor$B_fID, ntreeTry = 100, stepFactor = 2,
                   improve = 0.05, trace = TRUE, plot = TRUE)
# NOTE(review): `method`, `trControl` and the misspelled `allowParalel` are
# not randomForest() arguments; they are silently swallowed by `...`.
system.time(RF_floor <- randomForest(B_fID ~ .,
                                     data = td_floor,
                                     importance = TRUE, maximize = TRUE,
                                     method = "rf", trControl = fitControl,
                                     ntree = 100, mtry = 52, allowParalel = TRUE))
save(RF_floor, file = "RF_floor.rda")
RF_floor
# Predict on validation and training sets, then evaluate on validation.
predict.rf_floor <- predict(RF_floor, vd_new_2)
predict.rf_floor2 <- predict(RF_floor, td_new_2)
postResample(predict.rf_floor, vd_new_2$B_fID)
CF_B_fID <- confusionMatrix(vd_new_2$B_fID, predict.rf_floor)
CF_B_fID
#add prediction column in training dataset
td_new_2$floor_prediction<-predict.rf_floor2
vd_new_2$floor_prediction<-predict.rf_floor
####DUMMY FOR FLOOR ####
DummyVar3 <- dummyVars("~FLOOR", data = vd_new_2, fullRank=T)
DummyVarDF3 <- data.frame(predict(DummyVar3, newdata = vd_new_2))
vd_new_2<-cbind(vd_new_2, DummyVarDF3)
DummyVar4 <- dummyVars("~FLOOR", data = td_new_2, fullRank=T)
DummyVarDF4 <- data.frame(predict(DummyVar4, newdata = td_new_2))
td_new_2<-cbind(td_new_2, DummyVarDF4)
#dummy for predicted floors
DummyVar5 <- dummyVars("~floor_prediction", data = td_new_2, fullRank=T)
DummyVarDF5 <- data.frame(predict(DummyVar5, newdata = td_new_2))
td_new_2<-cbind(td_new_2, DummyVarDF5)
DummyVar6 <- dummyVars("~floor_prediction", data = vd_new_2, fullRank=T)
DummyVarDF6 <- data.frame(predict(DummyVar6, newdata = vd_new_2))
vd_new_2<-cbind(vd_new_2, DummyVarDF6)
#LONGITUTDE
td_lon<- select(td_new_2,-c(TIMESTAMP,USERID,PHONEID,max_2,count,SPACEID,floor_prediction,
RELATIVEPOSITION,build_prediction,BUILDINGID,LATITUDE,max,max3,FLOOR,
FLOOR.1,FLOOR.2,FLOOR.3,FLOOR.4,B_fID))
#Random Forest
WAP_lon <- grep("WAP", names(td_lon), value=T)
bestmtry<-tuneRF(td_lon[WAP_lon], td_lon$LONGITUDE, ntreeTry=100, stepFactor=2,
improve=0.05,trace=TRUE, plot=T)
system.time(RF_lon<-randomForest(LONGITUDE~.,
data= td_lon,
importance=T,maximize=T,
method="rf", trControl=fitControl,
ntree=100, mtry=52,allowParalel=TRUE))
save(RF_lon, file = "RF_lon.rda")
RF_lon
predict.rf_lon <- predict(RF_lon ,vd_new_2)
predict.rf_lon2 <- predict(RF_lon ,td_new_2)
postResample( predict.rf_lon , vd_new_2$LONGITUDE)
CF_LON<-confusionMatrix(vd_new_2$LONGITUDE , predict.rf_lon)
## SVM per predicted building
# BUG FIX: all three training subsets overwrote `build0lon`, losing the
# building-1 and building-2 subsets; keep one subset per building.
build0lon <- filter(td_new_2, build_prediction == 0)
build1lon <- filter(td_new_2, build_prediction == 1)
build2lon <- filter(td_new_2, build_prediction == 2)
vdbuild0lon <- filter(vd_new_2, build_prediction == 0)
vdbuild1lon <- filter(vd_new_2, build_prediction == 1)
vdbuild2lon <- filter(vd_new_2, build_prediction == 2)
# SVM for building 0: drop identifiers, targets other than LONGITUDE, and
# derived helper columns before training.
td_lon_b0 <- select(build0lon, -c(TIMESTAMP, USERID, PHONEID, max_2, count, SPACEID,
                                  RELATIVEPOSITION, BUILDINGID, LATITUDE, max, max3, FLOOR,
                                  FLOOR.1, FLOOR.2, FLOOR.3, FLOOR.4, B_fID,
                                  floor_prediction, build_prediction))
# SVM
SvmFit_lon_b0 <- caret::train(LONGITUDE ~ ., data = td_lon_b0, method = "svmLinear",
                              trControl = fitControl, preProcess = c("center", "scale"))
options(scipen = 999)
SvmFit_lon_b0
predict.svm_lonb0 <- predict(SvmFit_lon_b0, vdbuild0lon)
# BUG FIX: the model object was misspelled `SvmFit_lon_bo` (letter o, not 0).
predict.svm_lon_b02 <- predict(SvmFit_lon_b0, build0lon)
postResample(predict.svm_lonb0, vdbuild0lon$LONGITUDE)
# BUG FIX: `SvmFit_lon` does not exist; save the model that was trained.
save(SvmFit_lon_b0, file = "SvmFit_lon_b0.rda")
#### LATITUDE ####
# Drop identifiers, the other regression target (LONGITUDE) and derived
# helper columns; keep LATITUDE as the response.
td_lat <- select(td_new_2, -c(TIMESTAMP, USERID, PHONEID, max_2, count, SPACEID, floor_prediction,
                              RELATIVEPOSITION, build_prediction, BUILDINGID, LONGITUDE, max, max3, FLOOR))
# Random forest for LATITUDE
WAP <- grep("WAP", names(td_lat), value = TRUE)
# BUG FIX: tuneRF targeted td_lat$LONGITUDE, a column removed from td_lat
# just above (and the wrong response anyway); tune against LATITUDE.
bestmtry <- tuneRF(td_lat[WAP], td_lat$LATITUDE, ntreeTry = 100, stepFactor = 2,
                   improve = 0.05, trace = TRUE, plot = TRUE)
# NOTE(review): `method`, `trControl` and the misspelled `allowParalel` are
# not randomForest() arguments; they are silently swallowed by `...`.
system.time(RF_lat <- randomForest(LATITUDE ~ .,
                                   data = td_lat,
                                   importance = TRUE, maximize = TRUE,
                                   method = "rf", trControl = fitControl,
                                   ntree = 100, mtry = 52, allowParalel = TRUE))
save(RF_lat, file = "RF_lat.rda")
RF_lat
predict.rf_lat <- predict(RF_lat, vd_new_2)
postResample(predict.rf_lat, vd_new_2$LATITUDE)
####checking the errors of building prediction ###
td_err<- td_new_2 %>% select(WAP027,WAP028,SPACEID,USERID,PHONEID,
RELATIVEPOSITION,LATITUDE,LONGITUDE,FLOOR,BUILDINGID) %>%
filter(apply(td_new_2[,1:2],1,function(x) any(x!=-105)))
ggplot() +
geom_point(data = td_new_2, aes(x = LONGITUDE, y = LATITUDE, colour = "Training dataset")) +
geom_point(data = vd_new_2, aes(x = LONGITUDE, y = LATITUDE, colour = "Test dataset")) +
ggtitle("Locations (Training and Test sets)")
#sampling te dataset
td_lon_std<-td_lon
td_lon_std[,c(1:311)]<-td_lon_std[,c(1:311)] +105
td_lon_std<- td_lon_std%>%mutate_if(is.numeric,scale)
pca=princomp(td_lon_std[,c(1:311)], cor=TRUE)
summary(pca)
pca$scores
biplot(pca)
str(td_lon[,300:326])
|
/wifi_last_2.R
|
no_license
|
niluucar/wifi
|
R
| false
| false
| 12,137
|
r
|
install.packages("dplyr")
install.packages("ggplot2")
install.packages("ggmap")
install.packages("parallel")
install.packages("doParallel")
install.packages("caret")
install.packages("dplyr")
install.packages("randomForest")
library("randomForest")
library("dplyr")
library("caret")
library("doParallel")
library("parallel")
library("dplyr")
library("ggplot2")
library("ggmap")
library("lubridate")
setwd("C:/Users/Lenovo/Desktop/Ubiqum_data/Wi-fi")
library(readr)
trainingData <- read_csv("C:/Users/Lenovo/Desktop/Ubiqum_data/Wi-fi/trainingData.csv")
ValidationData <- read_csv("C:/Users/Lenovo/Desktop/Ubiqum_data/Wi-fi/validationData.csv")
td<-trainingData
vd<-ValidationData
td$TIMESTAMP<- as_datetime(td$TIMESTAMP)
vd$TIMESTAMP<- as_datetime(vd$TIMESTAMP)
str(td)
summary(td[,1:52])
#### Changing the data types ####
# FLOOR/BUILDINGID/SPACEID/RELATIVEPOSITION/USERID/PHONEID are categorical
# identifiers, so convert them from integers to factors before modelling.
td$FLOOR <- as.factor(td$FLOOR)
td$BUILDINGID <- as.factor(td$BUILDINGID)
# BUG FIX: SPACEID was overwritten with USERID (as.factor(td$USERID));
# convert the SPACEID column itself. SPACEID is the internal ID of the space
# (office, corridor, classroom) where the capture was taken.
td$SPACEID <- as.factor(td$SPACEID)
td$RELATIVEPOSITION <- as.factor(td$RELATIVEPOSITION) # inside vs outside
td$USERID <- as.factor(td$USERID)
td$PHONEID <- as.factor(td$PHONEID)
# The same for the validation data:
vd$FLOOR <- as.factor(vd$FLOOR)
vd$BUILDINGID <- as.factor(vd$BUILDINGID)
vd$SPACEID <- as.factor(vd$SPACEID) # BUG FIX: was as.factor(vd$USERID)
vd$RELATIVEPOSITION <- as.factor(vd$RELATIVEPOSITION)
vd$USERID <- as.factor(vd$USERID)
vd$PHONEID <- as.factor(vd$PHONEID)
####CHANGING THE 100 TO -105 AND REMOVING THE REPEATED ROWS ####
td<-distinct(td)
td[td==100]<--105
vd<-distinct(vd)
vd[vd==100]<--105
#### counting the values which are not equal to 100####
td$count <- apply(td[,1:520], 1, function(x) length(which(x!=-105)))
td$max <- apply(td[,1:520], 1, max)
td$max_2<-apply(td[,1:520],1,function(x) names(td[,1:520])[which(x==max(x))])
td$max3<-apply(td[1:520],1,function(x) names(which.max(x)))
#vd #
vd$count <- apply(vd[,1:520], 1, function(x) length(which(x!=-105)))
vd$max <- apply(vd[,1:520], 1, max)
vd$max_2<-apply(vd[,1:520],1,function(x) names(vd[,1:520])[which(x==max(x))])
vd$max3<-apply(vd[1:520],1,function(x) names(which.max(x)))
#removing the rows ###
td<-subset(td,td$max!=0 & td$count!=0)
td<-subset(td,td$max<=-30 & td$max>=-80 )
#training dataset remove 1 value columns
waps_td<-td[,c(1:520)]
useless_waps<-apply(waps_td, 2, function(x) length(unique(x))==1)
td_new<-td[,-c(which(useless_waps==TRUE))]
waps_vd<-vd[,c(1:520)]
useless_waps_vd<-apply(waps_vd, 2, function(x) length(unique(x))==1)
vd_new<-vd[,-c(which(useless_waps_vd==TRUE))]
#identifying WAPS training
Waps_td_names <- grep("WAP", names(td_new), value = TRUE)
#identifying WAPS VALIDATION
Waps__vd_names <- grep("WAP", names(vd_new), value = TRUE)
Waps_tdvd <- intersect(Waps_td_names, Waps__vd_names)
x <- names(td_new[Waps_td_names]) %in% Waps_tdvd == FALSE
td_new_2 <- td_new[-which(x)]
#remove columns
y <- names(vd_new[Waps__vd_names]) %in% Waps_tdvd == FALSE
vd_new_2 <- vd_new[-which(y)]
####BUILDING PREDICTION ####
detectCores()
clusterF1 <- makeCluster(detectCores()-1)
registerDoParallel(clusterF1)
td_build<- select(td_new_2,-c(TIMESTAMP,USERID,PHONEID,max_2,max,count,SPACEID,FLOOR,
RELATIVEPOSITION,LATITUDE,LONGITUDE,max3))
## Model
set.seed(124)
fitControl <- trainControl(method = "repeatedcv", number=3,repeats = 3,
verboseIter = TRUE, allowParallel = TRUE)
## knn
knnFit <- train(BUILDINGID~.,data = td_build,method = "knn",
metric = "Accuracy",trControl = fitControl,preProcess = c("zv", "center", "scale"))
plot(knnFit)
knnFit
predict.knn <- predict(knnFit ,vd_new_2)
postResample(predict.knn , vd_new_2$BUILDINGID)
ConfusionMatrix<-confusionMatrix(vd_new_2$BUILDINGID , predict.knn)
ConfusionMatrix
save(knnFit, file = "knnFit.rda")
load("knnFit.rda")
rm(ConfusionMatrix)
#svm
SvmFit<-caret::train(BUILDINGID~., data= td_build, method="svmLinear",
trControl=fitControl,preProcess= c("center", "scale"))
SvmFit
predict.svm <- predict(SvmFit ,vd_new_2)
predict.svm2 <- predict(SvmFit ,td_new_2)
postResample(predict.svm , vd_new_2$BUILDINGID)
confusionMatrix(vd_new_2$BUILDINGID , predict.svm)
rm(confusionMatrix)
confusionMatrix
save(SvmFit, file = "SvmFit.rda")
load("SvmFit.rda")
#add prediction column in training dataset
td_new_2$build_prediction<-predict.svm2
vd_new_2$build_prediction<-predict.svm
td_new_2$B_fID<-as.factor(paste(td_new_2$BUILDINGID,td_new_2$FLOOR))
vd_new_2$B_fID<-as.factor(paste(vd_new_2$BUILDINGID,vd_new_2$FLOOR))
####FLOOR ####
td_floor<- select(td_new_2,-c(TIMESTAMP,USERID,PHONEID,max_2,count,SPACEID,BUILDINGID,FLOOR,
RELATIVEPOSITION,LATITUDE,LONGITUDE,max,max3,build_prediction))
#build.0 <- filter(td_floor, build_prediction == 0)
#build.1 <- filter(td_floor, build_prediction == 1)
#build.2 <- filter(td_floor, build_prediction== 2)
#vdbuild.0 <- filter(vd_new_2, build_prediction == 0)
#vdbuild.1 <- filter(vd_new_2, build_prediction == 1)
#vdbuild.2 <- filter(vd_new_2, build_prediction== 2)
#knn
fitControl <- trainControl(method = "repeatedcv", number=3,repeats = 3,
verboseIter = TRUE, allowParallel = TRUE)
#build.0$FLOOR<- factor(build.0$FLOOR)
#vdbuild.0$FLOOR<-factor(vdbuild.0$FLOOR)
## knn
knnFit_floor <- train(B_fID~.,data = td_floor,method = "knn",
metric = "Accuracy",trControl = fitControl,preProcess= c("center", "scale"))
knnFit_floor
#knnFit_floorb0
predict.knn_floor <- predict(knnFit_floor ,vd_new_2)
postResample(predict.knn_floor , vd_new_2$FLOOR)
ConfusionMatrix<-confusionMatrix(vd_new_2$FLOOR , predict.knn_floor)
ConfusionMatrix
save(knnFit_floor, file = "knnFit_floorb0.rda")
load("knnFit_floor.rda")
#svm
SvmFit_floor<-caret::train(B_fID~., data=td_floor, method="svmLinear",
trControl=fitControl,preProcess= c("center", "scale"))
SvmFit_floor
predict.svm_floor <- predict(SvmFit_floor ,vd_new_2)
predict.svm_floor2 <- predict(SvmFit_floor ,td_new_2)
postResample(predict.svm_floor , vd_new_2$B_fID)
confusionMatrix(vd_new_2$B_fID , predict.svm_floor)
rm(confusionMatrix)
save(SvmFit_floor, file = "SvmFit_floor.rda")
td_new_2$floor_prediction<-predict.svm_floor2
vd_new_2$floor_prediction<-predict.svm_floor
# Add dummy variable for BuildingID&floor id
DummyVar <- dummyVars("~BUILDINGID", data = td_new_2, fullRank=T)
DummyVarDF <- data.frame(predict(DummyVar, newdata = td_new_2))
td_new_2<-cbind(td_new_2, DummyVarDF)
DummyVar2 <- dummyVars("~BUILDINGID", data = vd_new_2, fullRank=T)
DummyVarDF2 <- data.frame(predict(DummyVar2, newdata = vd_new_2))
vd_new_2<-cbind(vd_new_2, DummyVarDF2)
#td_floor2<- select(td_new_2,-c(TIMESTAMP,USERID,PHONEID,max_2,count,SPACEID,BUILDINGID,
# RELATIVEPOSITION,LATITUDE,LONGITUDE,max,max3,build_prediction))
# Random forest for the combined building+floor label (B_fID)
WAP_floor <- grep("WAP", names(td_floor), value = TRUE)
# BUG FIX: tuneRF indexed with `WAP`, which is undefined at this point (it is
# only created later, for the latitude model); use WAP_floor instead.
bestmtry <- tuneRF(td_floor[WAP_floor], td_floor$B_fID, ntreeTry = 100, stepFactor = 2,
                   improve = 0.05, trace = TRUE, plot = TRUE)
# NOTE(review): `method`, `trControl` and the misspelled `allowParalel` are
# not randomForest() arguments; they are silently swallowed by `...`.
system.time(RF_floor <- randomForest(B_fID ~ .,
                                     data = td_floor,
                                     importance = TRUE, maximize = TRUE,
                                     method = "rf", trControl = fitControl,
                                     ntree = 100, mtry = 52, allowParalel = TRUE))
save(RF_floor, file = "RF_floor.rda")
RF_floor
# Predict on validation and training sets, then evaluate on validation.
predict.rf_floor <- predict(RF_floor, vd_new_2)
predict.rf_floor2 <- predict(RF_floor, td_new_2)
postResample(predict.rf_floor, vd_new_2$B_fID)
CF_B_fID <- confusionMatrix(vd_new_2$B_fID, predict.rf_floor)
CF_B_fID
#add prediction column in training dataset
td_new_2$floor_prediction<-predict.rf_floor2
vd_new_2$floor_prediction<-predict.rf_floor
####DUMMY FOR FLOOR ####
DummyVar3 <- dummyVars("~FLOOR", data = vd_new_2, fullRank=T)
DummyVarDF3 <- data.frame(predict(DummyVar3, newdata = vd_new_2))
vd_new_2<-cbind(vd_new_2, DummyVarDF3)
DummyVar4 <- dummyVars("~FLOOR", data = td_new_2, fullRank=T)
DummyVarDF4 <- data.frame(predict(DummyVar4, newdata = td_new_2))
td_new_2<-cbind(td_new_2, DummyVarDF4)
#dummy for predicted floors
DummyVar5 <- dummyVars("~floor_prediction", data = td_new_2, fullRank=T)
DummyVarDF5 <- data.frame(predict(DummyVar5, newdata = td_new_2))
td_new_2<-cbind(td_new_2, DummyVarDF5)
DummyVar6 <- dummyVars("~floor_prediction", data = vd_new_2, fullRank=T)
DummyVarDF6 <- data.frame(predict(DummyVar6, newdata = vd_new_2))
vd_new_2<-cbind(vd_new_2, DummyVarDF6)
#LONGITUTDE
td_lon<- select(td_new_2,-c(TIMESTAMP,USERID,PHONEID,max_2,count,SPACEID,floor_prediction,
RELATIVEPOSITION,build_prediction,BUILDINGID,LATITUDE,max,max3,FLOOR,
FLOOR.1,FLOOR.2,FLOOR.3,FLOOR.4,B_fID))
#Random Forest
WAP_lon <- grep("WAP", names(td_lon), value=T)
bestmtry<-tuneRF(td_lon[WAP_lon], td_lon$LONGITUDE, ntreeTry=100, stepFactor=2,
improve=0.05,trace=TRUE, plot=T)
system.time(RF_lon<-randomForest(LONGITUDE~.,
data= td_lon,
importance=T,maximize=T,
method="rf", trControl=fitControl,
ntree=100, mtry=52,allowParalel=TRUE))
save(RF_lon, file = "RF_lon.rda")
RF_lon
predict.rf_lon <- predict(RF_lon ,vd_new_2)
predict.rf_lon2 <- predict(RF_lon ,td_new_2)
postResample( predict.rf_lon , vd_new_2$LONGITUDE)
CF_LON<-confusionMatrix(vd_new_2$LONGITUDE , predict.rf_lon)
## SVM per predicted building
# BUG FIX: all three training subsets overwrote `build0lon`, losing the
# building-1 and building-2 subsets; keep one subset per building.
build0lon <- filter(td_new_2, build_prediction == 0)
build1lon <- filter(td_new_2, build_prediction == 1)
build2lon <- filter(td_new_2, build_prediction == 2)
vdbuild0lon <- filter(vd_new_2, build_prediction == 0)
vdbuild1lon <- filter(vd_new_2, build_prediction == 1)
vdbuild2lon <- filter(vd_new_2, build_prediction == 2)
# SVM for building 0: drop identifiers, targets other than LONGITUDE, and
# derived helper columns before training.
td_lon_b0 <- select(build0lon, -c(TIMESTAMP, USERID, PHONEID, max_2, count, SPACEID,
                                  RELATIVEPOSITION, BUILDINGID, LATITUDE, max, max3, FLOOR,
                                  FLOOR.1, FLOOR.2, FLOOR.3, FLOOR.4, B_fID,
                                  floor_prediction, build_prediction))
# SVM
SvmFit_lon_b0 <- caret::train(LONGITUDE ~ ., data = td_lon_b0, method = "svmLinear",
                              trControl = fitControl, preProcess = c("center", "scale"))
options(scipen = 999)
SvmFit_lon_b0
predict.svm_lonb0 <- predict(SvmFit_lon_b0, vdbuild0lon)
# BUG FIX: the model object was misspelled `SvmFit_lon_bo` (letter o, not 0).
predict.svm_lon_b02 <- predict(SvmFit_lon_b0, build0lon)
postResample(predict.svm_lonb0, vdbuild0lon$LONGITUDE)
# BUG FIX: `SvmFit_lon` does not exist; save the model that was trained.
save(SvmFit_lon_b0, file = "SvmFit_lon_b0.rda")
#### LATITUDE ####
# Drop identifiers, the other regression target (LONGITUDE) and derived
# helper columns; keep LATITUDE as the response.
td_lat <- select(td_new_2, -c(TIMESTAMP, USERID, PHONEID, max_2, count, SPACEID, floor_prediction,
                              RELATIVEPOSITION, build_prediction, BUILDINGID, LONGITUDE, max, max3, FLOOR))
# Random forest for LATITUDE
WAP <- grep("WAP", names(td_lat), value = TRUE)
# BUG FIX: tuneRF targeted td_lat$LONGITUDE, a column removed from td_lat
# just above (and the wrong response anyway); tune against LATITUDE.
bestmtry <- tuneRF(td_lat[WAP], td_lat$LATITUDE, ntreeTry = 100, stepFactor = 2,
                   improve = 0.05, trace = TRUE, plot = TRUE)
# NOTE(review): `method`, `trControl` and the misspelled `allowParalel` are
# not randomForest() arguments; they are silently swallowed by `...`.
system.time(RF_lat <- randomForest(LATITUDE ~ .,
                                   data = td_lat,
                                   importance = TRUE, maximize = TRUE,
                                   method = "rf", trControl = fitControl,
                                   ntree = 100, mtry = 52, allowParalel = TRUE))
save(RF_lat, file = "RF_lat.rda")
RF_lat
predict.rf_lat <- predict(RF_lat, vd_new_2)
postResample(predict.rf_lat, vd_new_2$LATITUDE)
####checking the errors of building prediction ###
td_err<- td_new_2 %>% select(WAP027,WAP028,SPACEID,USERID,PHONEID,
RELATIVEPOSITION,LATITUDE,LONGITUDE,FLOOR,BUILDINGID) %>%
filter(apply(td_new_2[,1:2],1,function(x) any(x!=-105)))
ggplot() +
geom_point(data = td_new_2, aes(x = LONGITUDE, y = LATITUDE, colour = "Training dataset")) +
geom_point(data = vd_new_2, aes(x = LONGITUDE, y = LATITUDE, colour = "Test dataset")) +
ggtitle("Locations (Training and Test sets)")
#sampling te dataset
td_lon_std<-td_lon
td_lon_std[,c(1:311)]<-td_lon_std[,c(1:311)] +105
td_lon_std<- td_lon_std%>%mutate_if(is.numeric,scale)
pca=princomp(td_lon_std[,c(1:311)], cor=TRUE)
summary(pca)
pca$scores
biplot(pca)
str(td_lon[,300:326])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interface.R
\name{save.obj}
\alias{save.obj}
\title{Save an object to persistent storage.}
\usage{
save.obj(obj, name, rev = NULL, backend = NULL)
}
\arguments{
\item{obj}{object to save.}
\item{name}{object name on the storage.}
\item{rev}{Revision name of the object. This argument can be NULL, in which case the revision is determined automatically.}
\item{backend}{backend storage.}
}
\value{
character vector of length 2(name and rev)
}
\description{
Save an object to persistent storage.
}
|
/man/save.obj.Rd
|
no_license
|
hskksk/rstore
|
R
| false
| true
| 565
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interface.R
\name{save.obj}
\alias{save.obj}
\title{Save an object to persistent storage.}
\usage{
save.obj(obj, name, rev = NULL, backend = NULL)
}
\arguments{
\item{obj}{object to save.}
\item{name}{object name on the storage.}
\item{rev}{Revision name of the object. This argument can be NULL, in which case the revision is determined automatically.}
\item{backend}{backend storage.}
}
\value{
character vector of length 2(name and rev)
}
\description{
Save an object to persistent storage.
}
|
#'@name stackSp
#'
#'@title Join species files
#'
#'@description Join all independent files into a single multi-species file.
#'
#'@param data Vector of characters. Name of the input file.
#'
#'@param rd.frmt Vector of characters. The file format to read.
#'By default it will be read as a R object using the
#' \code{'readRDS'} argument, but it can be read as plain text using the
#' \code{'readTXT'} argument. See details.
#'
#'@param path Vector of characters. Path to the input file.
#'
#'@param save.name Vector of characters. Name of the output file.
#'
#'@param wrt.frmt Vector of characters. Format to save output
#'file. By default it will be written as a R object using
#' \code{'saveRDS'} argument, but it can be saved as plain text using
#' \code{'saveTXT'} argument. See details.
#'
#'@param save.staking.in Vector of characters. Path to the output
#'file.
#'
#'
#'@details
#'The headers of the input file must follow the Darwin Core standard [1].
#'The user can see the guide using data('ID_DarwinCore') command.
#'For more details about the formats to read and/or write, see
#'\code{\link{readAndWrite}} function.
#'
#'@return The output file will be saved with all species concatenated.
#'
#'@author R-Alarcon Viviana and Miranda-Esquivel Daniel R.
#'
#'@note See:
#'R-Alarcon V. and Miranda-Esquivel DR.(submitted) geocleaMT: An R package to
#'cleaning geographical data from electronic biodatabases.
#'
#'@seealso \code{\link{readAndWrite}}
#'
#'@references
#'[1] Wieczorek, J. et al. 2012. Darwin core: An evolving community-developed biodiversity data standard.
#' PloS One 7: e29715.
stackSp <- function(data = NULL,
                    rd.frmt = 'readRDS',
                    path = NULL,
                    save.name = NULL,
                    save.staking.in = NULL,
                    wrt.frmt = 'saveRDS'){
  # Accumulator for the stacked table; NULL until the first file is read.
  stack.temp0 <- NULL
  #! read the first species file
  for (i in 1:length(data)) {
    if (is.null(stack.temp0)) {
      stack.temp0 <- readAndWrite(action = 'read', frmt = rd.frmt ,
                                  path = path, name = data[i])
    } else {
      #! read the next species file
      stack.temp1 <- readAndWrite(action = 'read', frmt = rd.frmt ,
                                  path = path, name = data[i])
      #! append the new species rows onto the accumulated table
      #! (rbind.fill pads columns missing from either side with NA)
      stack.temp0 <- rbind.fill(stack.temp0, stack.temp1)
    }
    # Progress report, one line per input file.
    print(paste('Species',i,':',data[i],sep = ''))
  }
  # Summary table: number of distinct species and total occurrence records.
  tab.info <- as.data.frame(matrix(NA,1,2))
  colnames(tab.info) <- c('Total.Sp', 'Total.Occ')
  tab.info$Total.Sp <- length(unique(stack.temp0$species))
  tab.info$Total.Occ<- nrow(stack.temp0)
  # Persist the stacked multi-species table; only the summary is returned.
  readAndWrite(action = 'write', frmt = wrt.frmt , object = stack.temp0,
               path = save.staking.in, name = save.name)
  return(tab.info)
}
|
/R/stackSp.R
|
no_license
|
Dmirandae/geocleaMT
|
R
| false
| false
| 2,872
|
r
|
#'@name stackSp
#'
#'@title Join species files
#'
#'@description Join all independent files into a single multi-species file.
#'
#'@param data Vector of characters. Name of the input file.
#'
#'@param rd.frmt Vector of characters. The file format to read.
#'By default it will be read as a R object using the
#' \code{'readRDS'} argument, but it can be read as plain text using the
#' \code{'readTXT'} argument. See details.
#'
#'@param path Vector of characters. Path to the input file.
#'
#'@param save.name Vector of characters. Name of the output file.
#'
#'@param wrt.frmt Vector of characters. Format to save output
#'file. By default it will be written as a R object using
#' \code{'saveRDS'} argument, but it can be saved as plain text using
#' \code{'saveTXT'} argument. See details.
#'
#'@param save.staking.in Vector of characters. Path to the output
#'file.
#'
#'
#'@details
#'The headers of the input file must follow the Darwin Core standard [1].
#'The user can see the guide using data('ID_DarwinCore') command.
#'For more details about the formats to read and/or write, see
#'\code{\link{readAndWrite}} function.
#'
#'@return The output file will be saved with all species concatenated.
#'
#'@author R-Alarcon Viviana and Miranda-Esquivel Daniel R.
#'
#'@note See:
#'R-Alarcon V. and Miranda-Esquivel DR.(submitted) geocleaMT: An R package to
#'cleaning geographical data from electronic biodatabases.
#'
#'@seealso \code{\link{readAndWrite}}
#'
#'@references
#'[1] Wieczorek, J. et al. 2012. Darwin core: An evolving community-developed biodiversity data standard.
#' PloS One 7: e29715.
stackSp <- function(data = NULL,
                    rd.frmt = 'readRDS',
                    path = NULL,
                    save.name = NULL,
                    save.staking.in = NULL,
                    wrt.frmt = 'saveRDS'){
  # Accumulator for the stacked table; NULL until the first file is read.
  stack.temp0 <- NULL
  #! read the first species file
  for (i in 1:length(data)) {
    if (is.null(stack.temp0)) {
      stack.temp0 <- readAndWrite(action = 'read', frmt = rd.frmt ,
                                  path = path, name = data[i])
    } else {
      #! read the next species file
      stack.temp1 <- readAndWrite(action = 'read', frmt = rd.frmt ,
                                  path = path, name = data[i])
      #! append the new species rows onto the accumulated table
      #! (rbind.fill pads columns missing from either side with NA)
      stack.temp0 <- rbind.fill(stack.temp0, stack.temp1)
    }
    # Progress report, one line per input file.
    print(paste('Species',i,':',data[i],sep = ''))
  }
  # Summary table: number of distinct species and total occurrence records.
  tab.info <- as.data.frame(matrix(NA,1,2))
  colnames(tab.info) <- c('Total.Sp', 'Total.Occ')
  tab.info$Total.Sp <- length(unique(stack.temp0$species))
  tab.info$Total.Occ<- nrow(stack.temp0)
  # Persist the stacked multi-species table; only the summary is returned.
  readAndWrite(action = 'write', frmt = wrt.frmt , object = stack.temp0,
               path = save.staking.in, name = save.name)
  return(tab.info)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/state_fips.R
\docType{data}
\name{state_fips}
\alias{state_fips}
\title{Dataset with the lat. / long. of county FIPS codes used for mapping.}
\format{A data frame with 57 rows and 4 variables}
\usage{
data(state_fips)
}
\description{
Built-in dataset for use with the \code{bls_map} function.
To access the data directly, issue the command \code{data(state_fips)}.
\itemize{
\item \code{fips_state}: FIPS code for state
\item \code{state_abb}: State abbreviation
\item \code{state}: State name
\item \code{gnisid}: Geographic Names Information System ID
}
}
\note{
Last updated 2016-05-27
}
\keyword{internal}
|
/man/state_fips.Rd
|
no_license
|
xtmgah/blscrapeR
|
R
| false
| true
| 698
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/state_fips.R
\docType{data}
\name{state_fips}
\alias{state_fips}
\title{Dataset with the lat. / long. of county FIPS codes used for mapping.}
\format{A data frame with 57 rows and 4 variables}
\usage{
data(state_fips)
}
\description{
Built-in dataset for use with the \code{bls_map} function.
To access the data directly, issue the command \code{data(state_fips)}.
\itemize{
\item \code{fips_state}: FIPS code for state
\item \code{state_abb}: State abbreviation
\item \code{state}: State name
\item \code{gnisid}: Geographic Names Information System ID
}
}
\note{
Last updated 2016-05-27
}
\keyword{internal}
|
#!/usr/bin/env Rscript
# Copyright (c) 2018 ISciences, LLC.
# All rights reserved.
#
# WSIM is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Initialize structured logging for this command-line tool.
wsim.io::logging_init('wsim_flow')
# Load dependencies quietly; Rcpp backs the compiled accumulation routines.
suppressMessages({
  require(Rcpp)
  require(wsim.lsm)
  require(wsim.io)
})
# docopt-style usage text; parsed by parse_args() inside main() below.
'
Perform pixel-based flow accumulation
Usage: wsim_flow --input=<file> --flowdir=<file> --varname=<varname> --output=<file> [--wrapx --wrapy --invert]
Options:
--input <file> file containing values to accumulate (e.g., runoff)
--flowdir <file> file containing flow direction values.
When input is a gridded dataset, flowdir should be a grid of the same
extent and resolution, using D8 conventions.
When input is a feature dataset, flowdir should be a list of downstream
feature IDs.
--varname <varname> output variable name for accumulated values
--output <file> file to which accumulated values will be written/appended
--wrapx wrap flow in the x-dimension (during pixel-based accumulation)
--wrapy wrap flow in the y-dimension (during pixel-based accumulation)
--invert output flow originating downstream of each basin
'->usage
# Entry point for the wsim_flow tool.
#
# raw_args: character vector of command-line arguments (commandArgs output),
#           parsed against the docopt `usage` string defined above.
# Side effects: reads input/flowdir files, writes accumulated values to a
#               netCDF file, and logs progress via wsim.io.
main <- function(raw_args) {
  args <- parse_args(usage, raw_args)

  # Fail fast if the output path cannot be opened for writing.
  if (!is.null(args$output) && !can_write(args$output)) {
    # Fixed: added the missing space before "for writing." so the message
    # no longer renders as "...<path>for writing."
    die_with_message("Cannot open ", args$output, " for writing.")
  }

  # Values to accumulate (e.g., runoff); exactly one variable is expected.
  inputs <- wsim.io::read_vars(args$input, expect.nvars = 1)
  wsim.io::info("Read input values.")

  # Flow directions must match the input's dims/extent (grid case) or its
  # feature ids (basin case).
  flowdir <- wsim.io::read_vars(args$flowdir,
                                expect.nvars = 1,
                                expect.dims = dim(inputs$data[[1]]),
                                expect.extent = inputs$extent,
                                expect.ids = inputs$ids)

  # A dataset without feature ids is treated as pixel-based (gridded).
  pixel_based <- is.null(inputs$ids)

  if (pixel_based) {
    wsim.io::info("Read pixel-based flow directions.")
    if (args$invert) {
      die_with_message("--invert not yet supported.")
    }
  } else {
    wsim.io::info("Read downstream basin ids.")
    if (args$wrapx || args$wrapy) {
      die_with_message("--wrapx and --wrapy only supported for pixel-based accumulation.")
    }
  }

  results <- list()

  if (pixel_based) {
    # Pixel-based flow accumulation (D8 directions, optional x/y wrapping).
    results[[args$varname]] <- wsim.lsm::accumulate_flow(flowdir$data[[1]],
                                                         inputs$data[[1]],
                                                         args$wrapx,
                                                         args$wrapy)
  } else {
    # Downstream ID-based flow accumulation.
    if (args$invert) {
      # Flow originating downstream of each basin.
      results[[args$varname]] <- wsim.lsm::downstream_flow(inputs$ids,
                                                           flowdir$data[[1]],
                                                           inputs$data[[1]])
    } else {
      results[[args$varname]] <- wsim.lsm::accumulate(inputs$ids,
                                                      flowdir$data[[1]],
                                                      inputs$data[[1]])
    }
  }

  info('Flow accumulation complete')

  # Append accumulated values to the (possibly pre-existing) netCDF output.
  wsim.io::write_vars_to_cdf(
    vars = results,
    filename = args$output,
    extent = inputs$extent,
    ids = inputs$ids,
    prec = 'single',
    append = TRUE
  )

  info('Wrote results to', args$output)
}
# Run main() on the CLI arguments; any error is routed to die_with_message,
# which logs the failure and exits non-zero instead of printing a traceback.
tryCatch(
  main(commandArgs(trailingOnly=TRUE))
, error=wsim.io::die_with_message)
|
/wsim_flow.R
|
permissive
|
isciences/wsim
|
R
| false
| false
| 3,910
|
r
|
#!/usr/bin/env Rscript
# Copyright (c) 2018 ISciences, LLC.
# All rights reserved.
#
# WSIM is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Initialize structured logging for this command-line tool.
wsim.io::logging_init('wsim_flow')
# Load dependencies quietly; Rcpp backs the compiled accumulation routines.
suppressMessages({
  require(Rcpp)
  require(wsim.lsm)
  require(wsim.io)
})
# docopt-style usage text; parsed by parse_args() inside main() below.
'
Perform pixel-based flow accumulation
Usage: wsim_flow --input=<file> --flowdir=<file> --varname=<varname> --output=<file> [--wrapx --wrapy --invert]
Options:
--input <file> file containing values to accumulate (e.g., runoff)
--flowdir <file> file containing flow direction values.
When input is a gridded dataset, flowdir should be a grid of the same
extent and resolution, using D8 conventions.
When input is a feature dataset, flowdir should be a list of downstream
feature IDs.
--varname <varname> output variable name for accumulated values
--output <file> file to which accumulated values will be written/appended
--wrapx wrap flow in the x-dimension (during pixel-based accumulation)
--wrapy wrap flow in the y-dimension (during pixel-based accumulation)
--invert output flow originating downstream of each basin
'->usage
# Entry point for the wsim_flow tool.
#
# raw_args: character vector of command-line arguments (commandArgs output),
#           parsed against the docopt `usage` string defined above.
# Side effects: reads input/flowdir files, writes accumulated values to a
#               netCDF file, and logs progress via wsim.io.
main <- function(raw_args) {
  args <- parse_args(usage, raw_args)

  # Fail fast if the output path cannot be opened for writing.
  if (!is.null(args$output) && !can_write(args$output)) {
    # Fixed: added the missing space before "for writing." so the message
    # no longer renders as "...<path>for writing."
    die_with_message("Cannot open ", args$output, " for writing.")
  }

  # Values to accumulate (e.g., runoff); exactly one variable is expected.
  inputs <- wsim.io::read_vars(args$input, expect.nvars = 1)
  wsim.io::info("Read input values.")

  # Flow directions must match the input's dims/extent (grid case) or its
  # feature ids (basin case).
  flowdir <- wsim.io::read_vars(args$flowdir,
                                expect.nvars = 1,
                                expect.dims = dim(inputs$data[[1]]),
                                expect.extent = inputs$extent,
                                expect.ids = inputs$ids)

  # A dataset without feature ids is treated as pixel-based (gridded).
  pixel_based <- is.null(inputs$ids)

  if (pixel_based) {
    wsim.io::info("Read pixel-based flow directions.")
    if (args$invert) {
      die_with_message("--invert not yet supported.")
    }
  } else {
    wsim.io::info("Read downstream basin ids.")
    if (args$wrapx || args$wrapy) {
      die_with_message("--wrapx and --wrapy only supported for pixel-based accumulation.")
    }
  }

  results <- list()

  if (pixel_based) {
    # Pixel-based flow accumulation (D8 directions, optional x/y wrapping).
    results[[args$varname]] <- wsim.lsm::accumulate_flow(flowdir$data[[1]],
                                                         inputs$data[[1]],
                                                         args$wrapx,
                                                         args$wrapy)
  } else {
    # Downstream ID-based flow accumulation.
    if (args$invert) {
      # Flow originating downstream of each basin.
      results[[args$varname]] <- wsim.lsm::downstream_flow(inputs$ids,
                                                           flowdir$data[[1]],
                                                           inputs$data[[1]])
    } else {
      results[[args$varname]] <- wsim.lsm::accumulate(inputs$ids,
                                                      flowdir$data[[1]],
                                                      inputs$data[[1]])
    }
  }

  info('Flow accumulation complete')

  # Append accumulated values to the (possibly pre-existing) netCDF output.
  wsim.io::write_vars_to_cdf(
    vars = results,
    filename = args$output,
    extent = inputs$extent,
    ids = inputs$ids,
    prec = 'single',
    append = TRUE
  )

  info('Wrote results to', args$output)
}
# Run main() on the CLI arguments; any error is routed to die_with_message,
# which logs the failure and exits non-zero instead of printing a traceback.
tryCatch(
  main(commandArgs(trailingOnly=TRUE))
, error=wsim.io::die_with_message)
|
#' Easily summarize quantitative data
#'
#' @description \code{ezsummary_quantitative()} summarizes quantitative data.
#'
#' @param tbl A vector, a data.frame or a \code{dplyr} \code{tbl}.
#' @param total a T/F value; total counts of records including both missing
#' and read data records. Default is \code{FALSE}.
#' @param n A T/F value; total counts of records that is not missing. Default
#' is \code{FALSE}.
#' @param missing a T/F value; total counts of records that went missing(
#' \code{NA}). Default is \code{FALSE}.
#' @param mean A T/F value; the average of a set of data. Default value is
#' \code{TRUE}.
#' @param sd A T/F value; the standard deviation of a set of data. Default value
#' is \code{TRUE}.
#' @param sem A T/F value; the standard error of the mean of a set of data.
#' Default value is \code{FALSE}.
#' @param median A T/F value; the median of a set of data. Default value is
#' \code{FALSE}.
#' @param quantile A T/F value controlling 5 outputs; the 0\%, 25\%, 50\%, 75\%
#' and 100\% percentile of a set of data. Default value is \code{FALSE}.
#' @param extra A character vector offering extra customizability to this
#' function. Please see Details for detail.
#' @param digits A numeric value determining the rounding digits; Replacement
#' for \code{round.N}. Default setting is to read from \code{getOption()}.
#' @param rounding_type A character string determining the rounding method;
#' possible values are \code{round}, \code{signif}, \code{ceiling} and
#' \code{floor}. When \code{ceiling} or \code{floor} is selected, \code{digits}
#' won't have any effect.
#' @param flavor A character string with two possible inputs: "long" and "wide".
#' "Long" is the default setting which will put grouping information on the left
#' side of the table. It is more machine readable and is good to be passed into
#' the next analytical stage if needed. "Wide" is more print ready (except for
#' column names, which you can fix in the next step, or fix in LaTex or
#' packages like \code{htmlTable}). In the "wide" mode, the analyzed variable
#' will be the only "ID" variable and all the stats values will be presented
#' organized by the grouping variables (if any). If there is no grouping, the
#' outputs of "wide" and "long" will be the same.
#' @param fill If set, missing values created by the "wide" flavor will be
#' replaced with this value. Please check \code{\link[tidyr]{spread}} for
#' details. Default value is \code{0}
#' @param unit_markup When unit_markup is not NULL, it will call the ezmarkup
#' function and perform column combination here. To make everyone's life
#' easier, I'm using the term "unit" here. Each unit mean each group of
#' statistical summary results. If you want to know mean and standard deviation,
#' these two values are your units so you can put something like "[. (.)]" there
#'
#' @param P Deprecated; Will change the value of \code{p_type} if used in this
#' version.
#' @param round.N Deprecated; Will change the value of \code{rounding_type} if
#' used in this version.
#'
#' @examples
#' library(dplyr)
#' mtcars %>%
#' group_by(am) %>%
#' select(mpg, wt, qsec) %>%
#' ezsummary_quantitative()
#'
#' @importFrom stats na.omit sd median quantile
#' @export
ezsummary_quantitative <- function(
  tbl, total = FALSE, n = FALSE, missing = FALSE,
  mean = TRUE, sd = TRUE, sem = FALSE, median = FALSE, quantile = FALSE,
  extra = NULL,
  digits = 3,
  rounding_type = c("round", "signif", "ceiling", "floor"),
  round.N=3,
  flavor = c("long", "wide"), fill = 0, unit_markup = NULL
){
  # Define the following variable to avoid NOTE on RMD check
  variable = value = analysis = NULL
  # Honor the deprecated round.N argument by mapping it onto digits.
  # NOTE(review): a user passing round.N = 3 explicitly is indistinguishable
  # from the default and gets no deprecation warning.
  if(round.N != 3){
    warning("Option round.N has been deprecated. Please use 'digits' instead.")
    digits <- round.N
  }
  rounding_type <- match.arg(rounding_type)
  flavor <- match.arg(flavor)
  # Atomic vectors are coerced into a one-column tbl named "unknown".
  if(is.vector(tbl)){
    tbl <- as.tbl(as.data.frame(tbl))
    attributes(tbl)$names <- "unknown"
    warning('ezsummary cannot detect the naming information from an atomic ',
            'vector. If you want to have full naming information, please ',
            'pass the value in as a data frame using `select` from dplyr.')
  }
  # Split column names into grouping variables (from the grouped-tbl "vars"
  # attribute) vs. the variables to be analyzed.
  group_name <- attributes(tbl)$vars
  var_name <- attributes(tbl)$names
  if (!is.null(group_name)){
    group_name <- as.character(group_name)
    var_name <- var_name[!var_name %in% group_name]
  }
  n_group <- length(group_name)
  n_var <- length(var_name)
  # "wide" is meaningless without grouping; quietly fall back to "long".
  if(n_group == 0 & flavor == "wide"){flavor <- "long"}
  # Generate a list of tasks needed to be done.
  # Each task is a string expression later evaluated per column via funs_().
  available_tasks <- c(
    total = "length(.)",
    n = "length(stats::na.omit(.))",
    missing = "sum(is.na(.))",
    mean = "mean(., na.rm = TRUE)",
    sd = "stats::sd(., na.rm = TRUE)",
    sem = "stats::sd(., na.rm = TRUE) / sqrt(length(stats::na.omit(.)))",
    median = "stats::median(., na.rm = TRUE)",
    q0 = "stats::quantile(., na.rm = TRUE)[1]",
    q25 = "stats::quantile(., na.rm = TRUE)[2]",
    q50 = "stats::quantile(., na.rm = TRUE)[3]",
    q75 = "stats::quantile(., na.rm = TRUE)[4]",
    q100 = "stats::quantile(., na.rm = TRUE)[5]"
  )
  # Logical subsetting: the five repeated `quantile` flags select q0..q100
  # together; `extra` appends user-supplied task expressions.
  tasks_list <- c(
    available_tasks[
      c(total, n, missing, mean, sd, sem, median,
        quantile, quantile, quantile, quantile, quantile)
    ],
    extra
  )
  tasks_names <- names(tasks_list)
  # NOTE(review): summarise_each()/funs_() are deprecated dplyr APIs; kept
  # as-is because the downstream reshaping depends on their output naming.
  tbl_summary_raw <- tbl %>%
    summarise_each(funs_(tasks_list))
  # Reshape to long form: one row per (variable, analysis) combination.
  if(n_group == 0){
    tbl_summary <- suppressWarnings(gather(tbl_summary_raw, variable, value))
  }else{
    # Negative column sequence keeps the leading n_group grouping columns.
    tbl_summary <- tbl_summary_raw %>%
      gather(variable, value, seq(-1, -n_group))
  }
  # round/signif take a digits argument; ceiling/floor do not.
  tbl_summary <- tbl_summary %>%
    mutate(value = `if`(
      rounding_type %in% c("round", "signif"),
      eval(call(rounding_type, value, digits)),
      eval(call(rounding_type, value))
    ))
  # Recover (variable, analysis) labels from the "<var>_<analysis>" names
  # produced by summarise_each; special cases for single task / variable.
  if(length(tasks_list) == 1){
    tbl_summary["analysis"] <- tasks_names
  }else{
    if(n_var == 1){
      names(tbl_summary)[names(tbl_summary) == "variable"] <- "analysis"
      tbl_summary["variable"] <- var_name
    }else{
      # Split on the LAST underscore so variable names containing "_" survive.
      tbl_summary <- tbl_summary %>%
        separate(variable, into = c("variable", "analysis"),
                 sep = "_(?=[^_]+$)")
    }
  }
  # "wide": fold grouping values into the analysis names, e.g. "am.1_mean".
  if(flavor == "wide"){
    tbl_summary[group_name] <- sapply(
      group_name, function(x){paste(x, tbl_summary[x][[1]], sep = ".")}
    )
    tbl_summary <- tbl_summary %>%
      unite_("analysis", c(group_name, "analysis"))
    # Rebuild the expected column order from the grouped-tbl "labels" attr.
    group_reorder <- sapply(group_name, function(x){
      paste(x, attr(tbl, "labels")[x][[1]], sep = ".")
    }) %>%
      apply(1, paste, collapse = "_")
    tasks_names <- c(sapply(group_reorder, function(x){
      paste(x, tasks_names, sep = "_")
    }))
  }
  # Spread analyses back into columns and restore the original variable order.
  tbl_summary <- tbl_summary %>%
    spread(analysis, value, fill = fill) %>%
    ungroup() %>%
    mutate(variable = factor(variable, levels = setdiff(var_name, group_name))) %>%
    arrange_(c("variable", group_name))
  # Column selection: grouping columns lead only in the "long" flavor.
  tbl_summary <- tbl_summary[c(
    `if`(flavor == "long" & n_group != 0, group_name, NULL),
    "variable", tasks_names
  )]
  # Ezmarkup: optionally merge each unit of stat columns per the given pattern.
  if(!is.null(unit_markup)){
    if(flavor == "wide" & n_group != 0){
      ezmarkup_formula <- paste0(
        ".", paste0(rep(unit_markup, nrow(attr(tbl, "labels"))), collapse = ""))
    }else{
      ezmarkup_formula <- paste0(paste0(rep(".", n_group), collapse = ""),
                                 ".", unit_markup)
    }
    tbl_summary <- ezmarkup(tbl_summary, ezmarkup_formula)
  }
  attr(tbl_summary, "flavor") <- flavor
  return(tbl_summary)
}
# Short exported alias for ezsummary_quantitative().
#' @rdname ezsummary_quantitative
#' @export
ezsummary_q <- ezsummary_quantitative
|
/R/ezsummary_quantitative.r
|
no_license
|
cran/ezsummary
|
R
| false
| false
| 7,575
|
r
|
#' Easily summarize quantitative data
#'
#' @description \code{ezsummary_quantitative()} summarizes quantitative data.
#'
#' @param tbl A vector, a data.frame or a \code{dplyr} \code{tbl}.
#' @param total a T/F value; total counts of records including both missing
#' and read data records. Default is \code{FALSE}.
#' @param n A T/F value; total counts of records that is not missing. Default
#' is \code{FALSE}.
#' @param missing a T/F value; total counts of records that went missing(
#' \code{NA}). Default is \code{FALSE}.
#' @param mean A T/F value; the average of a set of data. Default value is
#' \code{TRUE}.
#' @param sd A T/F value; the standard deviation of a set of data. Default value
#' is \code{TRUE}.
#' @param sem A T/F value; the standard error of the mean of a set of data.
#' Default value is \code{FALSE}.
#' @param median A T/F value; the median of a set of data. Default value is
#' \code{FALSE}.
#' @param quantile A T/F value controlling 5 outputs; the 0\%, 25\%, 50\%, 75\%
#' and 100\% percentile of a set of data. Default value is \code{FALSE}.
#' @param extra A character vector offering extra customizability to this
#' function. Please see Details for detail.
#' @param digits A numeric value determining the rounding digits; Replacement
#' for \code{round.N}. Default setting is to read from \code{getOption()}.
#' @param rounding_type A character string determining the rounding method;
#' possible values are \code{round}, \code{signif}, \code{ceiling} and
#' \code{floor}. When \code{ceiling} or \code{floor} is selected, \code{digits}
#' won't have any effect.
#' @param flavor A character string with two possible inputs: "long" and "wide".
#' "Long" is the default setting which will put grouping information on the left
#' side of the table. It is more machine readable and is good to be passed into
#' the next analytical stage if needed. "Wide" is more print ready (except for
#' column names, which you can fix in the next step, or fix in LaTex or
#' packages like \code{htmlTable}). In the "wide" mode, the analyzed variable
#' will be the only "ID" variable and all the stats values will be presented
#' organized by the grouping variables (if any). If there is no grouping, the
#' outputs of "wide" and "long" will be the same.
#' @param fill If set, missing values created by the "wide" flavor will be
#' replaced with this value. Please check \code{\link[tidyr]{spread}} for
#' details. Default value is \code{0}
#' @param unit_markup When unit_markup is not NULL, it will call the ezmarkup
#' function and perform column combination here. To make everyone's life
#' easier, I'm using the term "unit" here. Each unit mean each group of
#' statistical summary results. If you want to know mean and standard deviation,
#' these two values are your units so you can put something like "[. (.)]" there
#'
#' @param P Deprecated; Will change the value of \code{p_type} if used in this
#' version.
#' @param round.N Deprecated; Will change the value of \code{rounding_type} if
#' used in this version.
#'
#' @examples
#' library(dplyr)
#' mtcars %>%
#' group_by(am) %>%
#' select(mpg, wt, qsec) %>%
#' ezsummary_quantitative()
#'
#' @importFrom stats na.omit sd median quantile
#' @export
ezsummary_quantitative <- function(
  tbl, total = FALSE, n = FALSE, missing = FALSE,
  mean = TRUE, sd = TRUE, sem = FALSE, median = FALSE, quantile = FALSE,
  extra = NULL,
  digits = 3,
  rounding_type = c("round", "signif", "ceiling", "floor"),
  round.N=3,
  flavor = c("long", "wide"), fill = 0, unit_markup = NULL
){
  # Define the following variable to avoid NOTE on RMD check
  variable = value = analysis = NULL
  # Honor the deprecated round.N argument by mapping it onto digits.
  # NOTE(review): a user passing round.N = 3 explicitly is indistinguishable
  # from the default and gets no deprecation warning.
  if(round.N != 3){
    warning("Option round.N has been deprecated. Please use 'digits' instead.")
    digits <- round.N
  }
  rounding_type <- match.arg(rounding_type)
  flavor <- match.arg(flavor)
  # Atomic vectors are coerced into a one-column tbl named "unknown".
  if(is.vector(tbl)){
    tbl <- as.tbl(as.data.frame(tbl))
    attributes(tbl)$names <- "unknown"
    warning('ezsummary cannot detect the naming information from an atomic ',
            'vector. If you want to have full naming information, please ',
            'pass the value in as a data frame using `select` from dplyr.')
  }
  # Split column names into grouping variables (from the grouped-tbl "vars"
  # attribute) vs. the variables to be analyzed.
  group_name <- attributes(tbl)$vars
  var_name <- attributes(tbl)$names
  if (!is.null(group_name)){
    group_name <- as.character(group_name)
    var_name <- var_name[!var_name %in% group_name]
  }
  n_group <- length(group_name)
  n_var <- length(var_name)
  # "wide" is meaningless without grouping; quietly fall back to "long".
  if(n_group == 0 & flavor == "wide"){flavor <- "long"}
  # Generate a list of tasks needed to be done.
  # Each task is a string expression later evaluated per column via funs_().
  available_tasks <- c(
    total = "length(.)",
    n = "length(stats::na.omit(.))",
    missing = "sum(is.na(.))",
    mean = "mean(., na.rm = TRUE)",
    sd = "stats::sd(., na.rm = TRUE)",
    sem = "stats::sd(., na.rm = TRUE) / sqrt(length(stats::na.omit(.)))",
    median = "stats::median(., na.rm = TRUE)",
    q0 = "stats::quantile(., na.rm = TRUE)[1]",
    q25 = "stats::quantile(., na.rm = TRUE)[2]",
    q50 = "stats::quantile(., na.rm = TRUE)[3]",
    q75 = "stats::quantile(., na.rm = TRUE)[4]",
    q100 = "stats::quantile(., na.rm = TRUE)[5]"
  )
  # Logical subsetting: the five repeated `quantile` flags select q0..q100
  # together; `extra` appends user-supplied task expressions.
  tasks_list <- c(
    available_tasks[
      c(total, n, missing, mean, sd, sem, median,
        quantile, quantile, quantile, quantile, quantile)
    ],
    extra
  )
  tasks_names <- names(tasks_list)
  # NOTE(review): summarise_each()/funs_() are deprecated dplyr APIs; kept
  # as-is because the downstream reshaping depends on their output naming.
  tbl_summary_raw <- tbl %>%
    summarise_each(funs_(tasks_list))
  # Reshape to long form: one row per (variable, analysis) combination.
  if(n_group == 0){
    tbl_summary <- suppressWarnings(gather(tbl_summary_raw, variable, value))
  }else{
    # Negative column sequence keeps the leading n_group grouping columns.
    tbl_summary <- tbl_summary_raw %>%
      gather(variable, value, seq(-1, -n_group))
  }
  # round/signif take a digits argument; ceiling/floor do not.
  tbl_summary <- tbl_summary %>%
    mutate(value = `if`(
      rounding_type %in% c("round", "signif"),
      eval(call(rounding_type, value, digits)),
      eval(call(rounding_type, value))
    ))
  # Recover (variable, analysis) labels from the "<var>_<analysis>" names
  # produced by summarise_each; special cases for single task / variable.
  if(length(tasks_list) == 1){
    tbl_summary["analysis"] <- tasks_names
  }else{
    if(n_var == 1){
      names(tbl_summary)[names(tbl_summary) == "variable"] <- "analysis"
      tbl_summary["variable"] <- var_name
    }else{
      # Split on the LAST underscore so variable names containing "_" survive.
      tbl_summary <- tbl_summary %>%
        separate(variable, into = c("variable", "analysis"),
                 sep = "_(?=[^_]+$)")
    }
  }
  # "wide": fold grouping values into the analysis names, e.g. "am.1_mean".
  if(flavor == "wide"){
    tbl_summary[group_name] <- sapply(
      group_name, function(x){paste(x, tbl_summary[x][[1]], sep = ".")}
    )
    tbl_summary <- tbl_summary %>%
      unite_("analysis", c(group_name, "analysis"))
    # Rebuild the expected column order from the grouped-tbl "labels" attr.
    group_reorder <- sapply(group_name, function(x){
      paste(x, attr(tbl, "labels")[x][[1]], sep = ".")
    }) %>%
      apply(1, paste, collapse = "_")
    tasks_names <- c(sapply(group_reorder, function(x){
      paste(x, tasks_names, sep = "_")
    }))
  }
  # Spread analyses back into columns and restore the original variable order.
  tbl_summary <- tbl_summary %>%
    spread(analysis, value, fill = fill) %>%
    ungroup() %>%
    mutate(variable = factor(variable, levels = setdiff(var_name, group_name))) %>%
    arrange_(c("variable", group_name))
  # Column selection: grouping columns lead only in the "long" flavor.
  tbl_summary <- tbl_summary[c(
    `if`(flavor == "long" & n_group != 0, group_name, NULL),
    "variable", tasks_names
  )]
  # Ezmarkup: optionally merge each unit of stat columns per the given pattern.
  if(!is.null(unit_markup)){
    if(flavor == "wide" & n_group != 0){
      ezmarkup_formula <- paste0(
        ".", paste0(rep(unit_markup, nrow(attr(tbl, "labels"))), collapse = ""))
    }else{
      ezmarkup_formula <- paste0(paste0(rep(".", n_group), collapse = ""),
                                 ".", unit_markup)
    }
    tbl_summary <- ezmarkup(tbl_summary, ezmarkup_formula)
  }
  attr(tbl_summary, "flavor") <- flavor
  return(tbl_summary)
}
# Short exported alias for ezsummary_quantitative().
#' @rdname ezsummary_quantitative
#' @export
ezsummary_q <- ezsummary_quantitative
|
# keras model used in ENDGAME LSTM 2016 paper + recurrent dropout

# Hyperparameter grid explored when tuning this model.
default_keras_model_lstm_endgame_recurrent_dropout_parameters_tune=list(
  lstm_size = c(128,64,32),
  embedingdim = c(128,50,32),
  dropout = c(0.5)
)
# Retained for reference: tuning grid of a CNN variant, currently disabled.
#default_keras_model_cnn_argencon_parameters_tune=list(
#  nb_filter = c(256,128),
#  kernel_size = c(8),
#  embedingdim = c(100),
#  hidden_size = c(1024)
#)
# Default hyperparameters used when no tuning is requested.
default_keras_model_lstm_endgame_recurrent_dropout_parameters=list(
  embedingdim = 128,
  lstm_size = 128,
  dropout = 0.2
)
# Build and compile the ENDGAME-2016-style LSTM classifier, applying dropout
# recurrently inside the LSTM cell (the standalone dropout layer stays off).
#
# x          integer-encoded sequence matrix; only dim(x)[2] (the sequence
#            length) is read here.
# parameters list with entries embedingdim, lstm_size and dropout.
# Returns the compiled keras model; prints its summary as a side effect.
# NOTE(review): relies on the global `valid_characters_vector` for the
# embedding vocabulary size.
keras_model_lstm_endgame_recurrent_dropout <- function(x, parameters = default_keras_model_lstm_endgame_recurrent_dropout_parameters) {
  sequence_length <- dim(x)[2]
  input_layer <- layer_input(shape = sequence_length)
  # Character embedding; mask_zero = TRUE treats index 0 as padding.
  embedding_layer <- layer_embedding(input_layer,
                                     length(valid_characters_vector),
                                     parameters$embedingdim,
                                     input_length = sequence_length,
                                     mask_zero = TRUE)
  # Dropout is applied to the recurrent connections within the LSTM cell.
  recurrent_layer <- layer_lstm(embedding_layer,
                                units = parameters$lstm_size,
                                recurrent_dropout = parameters$dropout)
  output_layer <- layer_dense(recurrent_layer, 1, activation = 'sigmoid')
  # Assemble and compile the binary classifier.
  model <- keras_model(inputs = input_layer, outputs = output_layer)
  compile(model,
          optimizer = 'adam',
          loss = 'binary_crossentropy',
          metrics = c('accuracy'))
  summary(model)
  return(model)
}
funcs[["lstm_endgame_recurrent_dropout"]]=keras_model_lstm_endgame_recurrent_dropout
|
/model_lstm_endgame_recurrent_dropout.R
|
permissive
|
harpomaxx/deepseq
|
R
| false
| false
| 1,478
|
r
|
# keras model used in ENDGAME LSTM 2016 paper + recurrent dropout

# Hyperparameter grid explored when tuning this model.
default_keras_model_lstm_endgame_recurrent_dropout_parameters_tune=list(
  lstm_size = c(128,64,32),
  embedingdim = c(128,50,32),
  dropout = c(0.5)
)
# Retained for reference: tuning grid of a CNN variant, currently disabled.
#default_keras_model_cnn_argencon_parameters_tune=list(
#  nb_filter = c(256,128),
#  kernel_size = c(8),
#  embedingdim = c(100),
#  hidden_size = c(1024)
#)
# Default hyperparameters used when no tuning is requested.
default_keras_model_lstm_endgame_recurrent_dropout_parameters=list(
  embedingdim = 128,
  lstm_size = 128,
  dropout = 0.2
)
# Build and compile the ENDGAME-2016-style LSTM classifier, applying dropout
# recurrently inside the LSTM cell (the standalone dropout layer stays off).
#
# x          integer-encoded sequence matrix; only dim(x)[2] (the sequence
#            length) is read here.
# parameters list with entries embedingdim, lstm_size and dropout.
# Returns the compiled keras model; prints its summary as a side effect.
# NOTE(review): relies on the global `valid_characters_vector` for the
# embedding vocabulary size.
keras_model_lstm_endgame_recurrent_dropout <- function(x, parameters = default_keras_model_lstm_endgame_recurrent_dropout_parameters) {
  sequence_length <- dim(x)[2]
  input_layer <- layer_input(shape = sequence_length)
  # Character embedding; mask_zero = TRUE treats index 0 as padding.
  embedding_layer <- layer_embedding(input_layer,
                                     length(valid_characters_vector),
                                     parameters$embedingdim,
                                     input_length = sequence_length,
                                     mask_zero = TRUE)
  # Dropout is applied to the recurrent connections within the LSTM cell.
  recurrent_layer <- layer_lstm(embedding_layer,
                                units = parameters$lstm_size,
                                recurrent_dropout = parameters$dropout)
  output_layer <- layer_dense(recurrent_layer, 1, activation = 'sigmoid')
  # Assemble and compile the binary classifier.
  model <- keras_model(inputs = input_layer, outputs = output_layer)
  compile(model,
          optimizer = 'adam',
          loss = 'binary_crossentropy',
          metrics = c('accuracy'))
  summary(model)
  return(model)
}
funcs[["lstm_endgame_recurrent_dropout"]]=keras_model_lstm_endgame_recurrent_dropout
|
#' @title Prepare GDC data
#' @description
#' Reads the data downloaded and prepare it into an R object
#' @param query A query for GDCquery function
#' @param save Save result as RData object?
#' @param save.filename Name of the file to be saved; if empty, an automatic name will be created
#' @param directory Directory/Folder where the data was downloaded. Default: GDCdata
#' @param summarizedExperiment Create a summarizedExperiment? Default TRUE (if possible)
#' @param remove.files.prepared Remove the files read? Default: FALSE
#' This argument will be considered only if save argument is set to true
#' @param add.gistic2.mut If a list of genes (gene symbols) is given, columns with GISTIC2 results from GDAC Firehose (hg19)
#' and a column indicating whether or not there is a mutation in that gene (hg38)
#' (TRUE or FALSE - use the MAF file for more information)
#' will be added to the sample matrix in the SummarizedExperiment object.
#' @param mut.pipeline If add.gistic2.mut is not NULL this field will be taken into consideration.
#' Four separate variant calling pipelines are implemented for GDC data harmonization.
#' Options: muse, varscan2, somaticsniper, MuTect2. For more information:
#' https://gdc-docs.nci.nih.gov/Data/Bioinformatics_Pipelines/DNA_Seq_Variant_Calling_Pipeline/
#' @param mutant_variant_classification List of variant classifications used to decide
#' whether a sample is considered mutant or not. Default: "Frame_Shift_Del", "Frame_Shift_Ins",
#' "Missense_Mutation", "Nonsense_Mutation", "Splice_Site", "In_Frame_Del",
#' "In_Frame_Ins", "Translation_Start_Site", "Nonstop_Mutation"
#' @export
#' @examples
#' query <- GDCquery(project = "TCGA-KIRP",
#' data.category = "Simple Nucleotide Variation",
#' data.type = "Masked Somatic Mutation",
#' workflow.type = "MuSE Variant Aggregation and Masking")
#' GDCdownload(query, method = "api", directory = "maf")
#' maf <- GDCprepare(query, directory = "maf")
#'
#' \dontrun{
#' # Get GISTIC values
#' gistic.query <- GDCquery(project = "TCGA-ACC",
#' data.category = "Copy Number Variation",
#' data.type = "Gene Level Copy Number Scores",
#' access="open")
#' GDCdownload(gistic.query)
#' gistic <- GDCprepare(gistic.query)
#' }
#' @return A summarizedExperiment or a data.frame
#' @importFrom S4Vectors DataFrame
#' @importFrom SummarizedExperiment metadata<-
#' @importFrom data.table setcolorder setnames
#' @importFrom GenomicRanges GRanges
#' @importFrom IRanges IRanges
# Read the files previously downloaded by GDCdownload() for the given query
# and assemble them into an R object (a SummarizedExperiment when possible,
# otherwise a data.frame / list). See the roxygen block above for parameters.
GDCprepare <- function(query,
                       save = FALSE,
                       save.filename,
                       directory = "GDCdata",
                       summarizedExperiment = TRUE,
                       remove.files.prepared = FALSE,
                       add.gistic2.mut = NULL,
                       mut.pipeline = "mutect2",
                       mutant_variant_classification = c("Frame_Shift_Del",
                                                         "Frame_Shift_Ins",
                                                         "Missense_Mutation",
                                                         "Nonsense_Mutation",
                                                         "Splice_Site",
                                                         "In_Frame_Del",
                                                         "In_Frame_Ins",
                                                         "Translation_Start_Site",
                                                         "Nonstop_Mutation")){
  isServeOK()
  if(missing(query)) stop("Please set query parameter")
  # Duplicated barcodes cannot be assembled into a single matrix, except for
  # data types where duplicates are expected and handled downstream.
  # FIX: these are scalar conditions — use short-circuit && instead of
  # elementwise & (same result here, but && is the correct operator for if()).
  if(any(duplicated(query$results[[1]]$cases)) && query$data.type != "Clinical data" &&
     query$data.type != "Protein expression quantification" &&
     query$data.type != "Raw intensities") {
    dup <- query$results[[1]]$cases[duplicated(query$results[[1]]$cases)]
    cols <- c("tags","cases","experimental_strategy","analysis_workflow_type")
    cols <- cols[cols %in% colnames(query$results[[1]])]
    dup <- query$results[[1]][query$results[[1]]$cases %in% dup,cols]
    dup <- dup[order(dup$cases),]
    print(knitr::kable(dup))
    stop("There are samples duplicated. We will not be able to prepare it")
  }
  if(!save && remove.files.prepared) {
    stop("To remove the files, please set save to TRUE. Otherwise, the data will be lost")
  }
  # We save the files in project/source/data.category/data.type/file_id/file_name
  source <- ifelse(query$legacy,"legacy","harmonized")
  files <- file.path(query$results[[1]]$project, source,
                     gsub(" ","_",query$results[[1]]$data_category),
                     gsub(" ","_",query$results[[1]]$data_type),
                     gsub(" ","_",query$results[[1]]$file_id),
                     gsub(" ","_",query$results[[1]]$file_name))
  files <- file.path(directory, files)
  if(!all(file.exists(files))) stop(paste0("I couldn't find all the files from the query. ",
                                           "Please check if the directory parameter is right or `GDCdownload` downloaded the samples."))
  # Dispatch to the specialized reader for each data category / data type.
  if(grepl("Transcriptome Profiling", query$data.category, ignore.case = TRUE)){
    data <- readTranscriptomeProfiling(files = files,
                                       data.type = ifelse(!is.na(query$data.type), as.character(query$data.type), unique(query$results[[1]]$data_type)),
                                       workflow.type = unique(query$results[[1]]$analysis_workflow_type),
                                       cases = query$results[[1]]$cases,
                                       summarizedExperiment)
  } else if(grepl("Copy Number Variation",query$data.category,ignore.case = TRUE)) {
    if(unique(query$results[[1]]$data_type) == "Gene Level Copy Number Scores") {
      data <- readGISTIC(files, query$results[[1]]$cases)
    } else {
      data <- readCopyNumberVariation(files, query$results[[1]]$cases)
    }
  } else if(grepl("DNA methylation",query$data.category, ignore.case = TRUE)) {
    data <- readDNAmethylation(files, query$results[[1]]$cases, summarizedExperiment, unique(query$platform))
  } else if(grepl("Raw intensities",query$data.type, ignore.case = TRUE)) {
    # preparing IDAT files
    data <- readIDATDNAmethylation(files, query$results[[1]]$cases, summarizedExperiment, unique(query$platform), query$legacy)
  } else if(grepl("Protein expression",query$data.category,ignore.case = TRUE)) {
    data <- readProteinExpression(files, query$results[[1]]$cases)
  } else if(grepl("Simple Nucleotide Variation",query$data.category,ignore.case = TRUE)) {
    if(grepl("Masked Somatic Mutation",query$results[[1]]$data_type,ignore.case = TRUE) | source == "legacy")
      suppressWarnings(data <- readSimpleNucleotideVariationMaf(files))
  } else if(grepl("Clinical|Biospecimen", query$data.category, ignore.case = TRUE)){
    data <- readClinical(files, query$data.type, query$results[[1]]$cases)
    # Clinical data is returned as a list of tables, never as an SE.
    summarizedExperiment <- FALSE
  } else if (grepl("Gene expression",query$data.category,ignore.case = TRUE)) {
    if(query$data.type == "Gene expression quantification")
      data <- readGeneExpressionQuantification(files = files,
                                               cases = query$results[[1]]$cases,
                                               summarizedExperiment = summarizedExperiment,
                                               genome = ifelse(query$legacy,"hg19","hg38"),
                                               experimental.strategy = unique(query$results[[1]]$experimental_strategy))
    if(query$data.type == "miRNA gene quantification")
      data <- readGeneExpressionQuantification(files = files,
                                               cases = query$results[[1]]$cases,
                                               summarizedExperiment = FALSE,
                                               genome = ifelse(query$legacy,"hg19","hg38"),
                                               experimental.strategy = unique(query$results[[1]]$experimental_strategy))
    if(query$data.type == "miRNA isoform quantification")
      data <- readmiRNAIsoformQuantification(files = files,
                                             cases = query$results[[1]]$cases)
    if(query$data.type == "Isoform expression quantification")
      data <- readIsoformExpressionQuantification(files = files, cases = query$results[[1]]$cases)
    if(query$data.type == "Exon quantification")
      data <- readExonQuantification(files = files,
                                     cases = query$results[[1]]$cases,
                                     summarizedExperiment = summarizedExperiment)
  }
  # Add data release to object
  if(summarizedExperiment && !is.data.frame(data)){
    metadata(data) <- list("data_release" = getGDCInfo()$data_release)
  }
  if((!is.null(add.gistic2.mut)) && summarizedExperiment) {
    message("=> Adding GISTIC2 and mutation information....")
    genes <- tolower(levels(EAGenes$Gene))
    if(!all(tolower(add.gistic2.mut) %in% genes)) message(paste("These genes were not found:\n",
                                                                paste(add.gistic2.mut[! tolower(add.gistic2.mut) %in% genes],collapse = "\n=> ")))
    add.gistic2.mut <- add.gistic2.mut[tolower(add.gistic2.mut) %in% tolower(genes)]
    if(length(add.gistic2.mut) > 0){
      info <- colData(data)
      for(i in unlist(query$project)){
        info <- get.mut.gistc.information(info,
                                          i,
                                          add.gistic2.mut,
                                          mut.pipeline = mut.pipeline,
                                          mutant_variant_classification = mutant_variant_classification)
      }
      colData(data) <- info
    }
  }
  # Warn about replicate samples / FFPE samples so the user can filter them.
  # NOTE(review): this checks for a column literally named "samples" but then
  # reads data$sample — looks inconsistent; confirm intended behavior.
  if("samples" %in% colnames(data)){
    if(any(duplicated(data$sample))) {
      message("Replicates found.")
      if(any(data$is_ffpe)) message("FFPE should be removed. You can modify the data with the following command:\ndata <- data[,!data$is_ffpe]")
      print(as.data.frame(colData(data)[data$sample %in% data$sample[duplicated(data$sample)],c("is_ffpe"),drop=F]))
    }
  }
  if(save){
    if(missing(save.filename) && !missing(query)) save.filename <- paste0(query$project,gsub(" ","_", query$data.category),gsub(" ","_",date()),".RData")
    message(paste0("Saving file:",save.filename))
    save(data, file = save.filename)
    message("File saved")
    # save is true, due to the check in the beginning of the code
    if(remove.files.prepared){
      # removes files and empty directories
      remove.files.recursively(files)
    }
  }
  return(data)
}
# Delete the downloaded files' directories, then walk up the tree removing
# each parent directory that became empty (stops at the first non-empty one).
remove.files.recursively <- function(files){
  target.dirs <- dirname(files)
  unlink(target.dirs, recursive = TRUE)
  # Move to the parent level (data category) and recurse while it is empty.
  parent <- dirname(target.dirs)
  if (length(list.files(parent)) == 0) {
    remove.files.recursively(parent)
  }
}
# Read downloaded clinical files into a named list of tables.
# files: paths to the downloaded clinical files.
# data.type: GDC data type; only "Clinical data" is handled by this reader.
# cases: currently unused — kept for signature consistency with the other readers.
# Returns a named list (one tibble per file), names derived from the file names.
readClinical <- function(files, data.type, cases){
  # FIX: the original only assigned `ret` inside the "Clinical data" branch,
  # so any other data.type crashed with an obscure "object 'ret' not found".
  # Fail early with an informative error instead.
  if(data.type != "Clinical data") {
    stop("readClinical can only handle data.type 'Clinical data', got: ",
         data.type, call. = FALSE)
  }
  suppressMessages({
    ret <- plyr::alply(files,.margins = 1,readr::read_tsv, .progress = "text")
  })
  names(ret) <- gsub("nationwidechildrens.org_","",gsub(".txt","",basename(files)))
  return(ret)
}
#' @importFrom tidyr separate
# Read exon quantification files and merge them into one table keyed on the
# exon id; optionally wrap the result in a RangedSummarizedExperiment.
# files: downloaded file paths (one per case).
# cases: sample barcodes, parallel to files; appended to column names so the
#        same measurement from different files stays distinguishable.
readExonQuantification <- function (files, cases, summarizedExperiment = TRUE){
  pb <- txtProgressBar(min = 0, max = length(files), style = 3)
  for (i in seq_along(files)) {
    data <- fread(files[i], header = TRUE, sep = "\t", stringsAsFactors = FALSE)
    if(!missing(cases)) {
      # Sanitized measurement names; used below to split the merged table
      # into one assay matrix per measurement type.
      assay.list <- gsub(" |\\(|\\)|\\/","_",colnames(data)[2:ncol(data)])
      # We will use this because there might be more than one col for each samples
      setnames(data,colnames(data)[2:ncol(data)],
               paste0(gsub(" |\\(|\\)|\\/","_",colnames(data)[2:ncol(data)]),"_",cases[i]))
    }
    if (i == 1) {
      df <- data
    } else {
      # Full outer join on the exon id column so no exon is dropped.
      df <- merge(df, data, by=colnames(data)[1], all = TRUE)
    }
    setTxtProgressBar(pb, i)
  }
  setDF(df)
  rownames(df) <- df[,1]
  # Exon ids encode coordinates ("chr:start-end:strand"); split them out.
  df <- df %>% separate(exon,into = c("seqnames","coordinates","strand"),sep = ":") %>%
    separate(coordinates,into = c("start","end"),sep = "-")
  if(summarizedExperiment) {
    suppressWarnings({
      # One assay matrix per measurement type, columns selected by name match.
      assays <- lapply(assay.list, function (x) {
        return(data.matrix(subset(df, select = grep(x,colnames(df),ignore.case = TRUE))))
      })
    })
    names(assays) <- assay.list
    # TCGA barcode pattern used to recover sample names from column names.
    regex <- paste0("[:alnum:]{4}-[:alnum:]{2}-[:alnum:]{4}",
                    "-[:alnum:]{3}-[:alnum:]{3}-[:alnum:]{4}-[:alnum:]{2}")
    samples <- na.omit(unique(str_match(colnames(df),regex)[,1]))
    colData <- colDataPrepare(samples)
    # Strip dimnames: the SE carries names via rowRanges/colData instead.
    assays <- lapply(assays, function(x){
      colnames(x) <- NULL
      rownames(x) <- NULL
      return(x)
    })
    rowRanges <- makeGRangesFromDataFrame(df)
    rse <- SummarizedExperiment(assays=assays,
                                rowRanges=rowRanges,
                                colData=colData)
    return(rse)
  }
  return(df)
}
# Read isoform expression quantification files and merge them into one
# data.frame keyed on the first (isoform id) column; value columns are
# suffixed with the case barcode so columns from different files stay unique.
readIsoformExpressionQuantification <- function (files, cases){
  progress <- txtProgressBar(min = 0, max = length(files), style = 3)
  for (idx in seq_along(files)) {
    current <- fread(files[idx], header = TRUE, sep = "\t", stringsAsFactors = FALSE)
    if(!missing(cases)) {
      value.cols <- colnames(current)[2:ncol(current)]
      sanitized <- gsub(" |\\(|\\)|\\/", "_", value.cols)
      # We will use this because there might be more than one col for each samples
      setnames(current, value.cols, paste0(sanitized, "_", cases[idx]))
    }
    if (idx == 1) {
      merged <- current
    } else {
      # Full outer join on the id column so no isoform is dropped.
      merged <- merge(merged, current, by = colnames(current)[1], all = TRUE)
    }
    setTxtProgressBar(progress, idx)
  }
  setDF(merged)
  rownames(merged) <- merged[, 1]
  merged[, 1] <- NULL
  return(merged)
}
# Read miRNA isoform quantification files and stack them row-wise into one
# data.frame, tagging each row with the case barcode it came from.
# files: downloaded file paths; cases: barcodes parallel to files.
readmiRNAIsoformQuantification <- function (files, cases){
  pb <- txtProgressBar(min = 0, max = length(files), style = 3)
  # FIX: collect per-file tables in a preallocated list and bind once at the
  # end, instead of growing the table with rbind() inside the loop (O(n^2)).
  chunks <- vector("list", length(files))
  for (i in seq_along(files)) {
    data <- fread(files[i], header = TRUE, sep = "\t", stringsAsFactors = FALSE)
    data$barcode <- cases[i]
    chunks[[i]] <- data
    setTxtProgressBar(pb, i)
  }
  df <- do.call(rbind, chunks)
  setDF(df)
  # FIX: return the result visibly — the original ended on setDF(df), whose
  # value is returned invisibly.
  return(df)
}
# Read a MAF (Mutation Annotation Format) file.
# MAF files are tab separated, but some files are comma separated: when the
# TSV parse collapses to a single column, re-read the file as CSV.
readSimpleNucleotideVariationMaf <- function(files){
  # FIX: the integer column specification was duplicated verbatim for the TSV
  # and CSV attempts; keep it in one place so the two parses stay in sync.
  maf.col.types <- cols(
    Entrez_Gene_Id = col_integer(),
    Start_Position = col_integer(),
    End_Position = col_integer(),
    t_depth = col_integer(),
    t_ref_count = col_integer(),
    t_alt_count = col_integer(),
    n_depth = col_integer(),
    ALLELE_NUM = col_integer(),
    TRANSCRIPT_STRAND = col_integer(),
    PICK = col_integer(),
    TSL = col_integer(),
    HGVS_OFFSET = col_integer(),
    MINIMISED = col_integer())
  ret <- read_tsv(files,
                  comment = "#",
                  col_types = maf.col.types,
                  progress = TRUE)
  # A single-column result means the separator was wrong; fall back to CSV.
  if(ncol(ret) == 1) ret <- read_csv(files,
                                     comment = "#",
                                     col_types = maf.col.types,
                                     progress = TRUE)
  return(ret)
}
# Read gene expression quantification files into one merged table.
# files/cases: downloaded file paths and their sample barcodes.
# genome: "hg19" or "hg38", forwarded to the SE builder for gene annotation.
# summarizedExperiment: TRUE -> SummarizedExperiment, FALSE -> data.frame.
# experimental.strategy: used to detect array data, whose files carry one
#   extra header line that must be skipped.
# platform: appears unused in this function — TODO confirm and drop upstream.
readGeneExpressionQuantification <- function(files,
                                             cases,
                                             genome = "hg19",
                                             summarizedExperiment = TRUE,
                                             experimental.strategy,
                                             platform){
  pb <- txtProgressBar(min = 0, max = length(files), style = 3)
  # Gene expression array files have one extra header line to skip.
  skip <- unique((ifelse(experimental.strategy == "Gene expression array",1,0)))
  if(length(skip) > 1) stop("It is not possible to handle those different platforms together")
  for (i in seq_along(files)) {
    suppressWarnings({
      data <- fread(files[i],
                    header = TRUE,
                    sep = "\t",
                    stringsAsFactors = FALSE,
                    skip = skip)
    })
    if(!missing(cases)) {
      # Sanitized measurement names; used by the SE builder to split assays.
      assay.list <- gsub(" |\\(|\\)|\\/","_",colnames(data)[2:ncol(data)])
      # We will use this because there might be more than one col for each samples
      setnames(data,colnames(data)[2:ncol(data)],
               paste0(gsub(" |\\(|\\)|\\/","_",colnames(data)[2:ncol(data)]),"_",cases[i]))
    }
    if (i == 1) {
      df <- data
    } else {
      # Full outer join on the gene id column so no gene is dropped.
      df <- merge(df, data, by = colnames(data)[1], all = TRUE)
    }
    setTxtProgressBar(pb, i)
  }
  setDF(df)
  if (summarizedExperiment) {
    df <- makeSEfromGeneExpressionQuantification(df,assay.list, genome = genome)
  } else {
    rownames(df) <- df$gene_id
    df$gene_id <- NULL
  }
  return(df)
}
# Build a SummarizedExperiment from a merged gene expression table.
# df: merged table (gene id column + one column per measurement per sample).
# assay.list: sanitized measurement names; one assay is created per entry.
# genome: genome build used to fetch gene coordinates from biomaRt.
makeSEfromGeneExpressionQuantification <- function(df, assay.list, genome="hg19"){
  gene.location <- get.GRCh.bioMart(genome)
  # Legacy ids look like "SYMBOL|ENTREZID"; match on entrez id in that case,
  # otherwise match on the gene symbol directly.
  if(all(grepl("\\|",df[,1]))){
    aux <- strsplit(df$gene_id,"\\|")
    GeneID <- unlist(lapply(aux,function(x) x[2]))
    df$entrezgene <- as.numeric(GeneID)
    df <- merge(df, gene.location, by="entrezgene")
  } else {
    df$external_gene_name <- as.character(df[,1])
    df <- merge(df, gene.location, by="external_gene_name")
  }
  if("transcript_id" %in% assay.list){
    # transcript_id is carried as row metadata, not as a numeric assay.
    rowRanges <- GRanges(seqnames = paste0("chr", df$chromosome_name),
                         ranges = IRanges(start = df$start_position,
                                          end = df$end_position),
                         strand = df$strand,
                         gene_id = df$external_gene_name,
                         entrezgene = df$entrezgene,
                         ensembl_gene_id = df$ensembl_gene_id,
                         transcript_id = subset(df, select = 5))
    names(rowRanges) <- as.character(df$gene_id)
    assay.list <- assay.list[which(assay.list != "transcript_id")]
  } else {
    rowRanges <- GRanges(seqnames = paste0("chr", df$chromosome_name),
                         ranges = IRanges(start = df$start_position,
                                          end = df$end_position),
                         strand = df$strand,
                         gene_id = df$external_gene_name,
                         entrezgene = df$entrezgene,
                         ensembl_gene_id = df$ensembl_gene_id)
    names(rowRanges) <- as.character(df$external_gene_name)
  }
  suppressWarnings({
    # One assay matrix per measurement type, columns selected by name match.
    assays <- lapply(assay.list, function (x) {
      return(data.matrix(subset(df, select = grep(x,colnames(df),ignore.case = TRUE))))
    })
  })
  names(assays) <- assay.list
  # TCGA barcode pattern used to recover sample names from column names.
  regex <- paste0("[:alnum:]{4}-[:alnum:]{2}-[:alnum:]{4}",
                  "-[:alnum:]{3}-[:alnum:]{3}-[:alnum:]{4}-[:alnum:]{2}")
  samples <- na.omit(unique(str_match(colnames(df),regex)[,1]))
  colData <- colDataPrepare(samples)
  # Strip dimnames: the SE carries names via rowRanges/colData instead.
  assays <- lapply(assays, function(x){
    colnames(x) <- NULL
    rownames(x) <- NULL
    return(x)
  })
  rse <- SummarizedExperiment(assays=assays,
                              rowRanges=rowRanges,
                              colData=colData)
  return(rse)
}
#' @importFrom downloader download
#' @importFrom S4Vectors DataFrame
# Wrap a beta-value matrix (probes x samples) in a SummarizedExperiment,
# attaching probe coordinates from the Infinium annotation for the given
# genome/platform. Probes absent from the annotation are dropped.
makeSEFromDNAMethylationMatrix <- function(betas, genome = "hg38", met.platform = "450K") {
  message("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-")
  message("Creating a SummarizedExperiment from DNA methylation input")
  # Instead of looking on the size, it is better to set it as a argument as the annotation is different
  annotation <- getInfiniumAnnotation(met.platform, genome)
  rowRanges <- annotation[names(annotation) %in% rownames(betas),,drop=FALSE]
  colData <- DataFrame(samples = colnames(betas))
  # Keep only annotated probes, then reorder rows to match the annotation.
  betas <- betas[rownames(betas) %in% names(rowRanges),,drop = FALSE]
  betas <- betas[names(rowRanges),,drop = FALSE]
  assay <- data.matrix(betas)
  betas <- SummarizedExperiment(assays=assay,
                                rowRanges=rowRanges,
                                colData=colData)
  return(betas)
}
# Download (and cache in the working directory) the Zhou et al. Infinium
# annotation RDS for the given platform ("EPIC", "450K", otherwise 27K) and
# genome ("hg19"/"hg38"). Returns the annotation object read from the RDS.
getInfiniumAnnotation <- function(platform, genome){
  base <- "http://zwdzwd.io/InfiniumAnnotation/current/"
  # FIX: removed an unused `path` variable (and its hg38 substitution) that
  # duplicated the URL logic below and was never read.
  if(platform == "EPIC") {
    annotation <- paste0(base,"EPIC/EPIC.hg19.manifest.rds")
  } else if(platform == "450K") {
    annotation <- paste0(base,"hm450/hm450.hg19.manifest.rds")
  } else {
    annotation <- paste0(base,"hm27/hm27.hg19.manifest.rds")
  }
  # Manifests are published per genome build; swap hg19 -> hg38 on request.
  if(grepl("hg38", genome)) annotation <- gsub("hg19","hg38",annotation)
  if(!file.exists(basename(annotation))) {
    # Windows needs binary mode, otherwise the RDS download is corrupted.
    if(Sys.info()["sysname"] == "Windows") mode <- "wb" else mode <- "w"
    downloader::download(annotation, basename(annotation), mode = mode)
  }
  readRDS(basename(annotation))
}
# Build a SummarizedExperiment from a DNA-methylation table.
# df: probe annotation columns followed by one beta-value column per sample
#     (TCGA barcodes as column names).
# probeInfo: optional probe annotation data.frame (hg38 format). When NULL,
#     the annotation is taken from df's first four columns (legacy format:
#     Composite.Element.REF, beta, Gene_Symbol, Chromosome, Genomic_Coordinate).
makeSEfromDNAmethylation <- function(df, probeInfo=NULL){
  if(is.null(probeInfo)) {
    # FIX: removed dead code — the original fetched gene locations via
    # get.GRCh.bioMart() and built a GRanges (gene.GR) here that was never
    # used, triggering a needless biomaRt download.
    rowRanges <- GRanges(seqnames = paste0("chr", df$Chromosome),
                         ranges = IRanges(start = df$Genomic_Coordinate,
                                          end = df$Genomic_Coordinate),
                         probeID = df$Composite.Element.REF,
                         Gene_Symbol = df$Gene_Symbol)
    names(rowRanges) <- as.character(df$Composite.Element.REF)
    # Sample columns start at column 5 in the legacy layout.
    colData <- colDataPrepare(colnames(df)[5:ncol(df)])
    assay <- data.matrix(subset(df,select = c(5:ncol(df))))
  } else {
    rowRanges <- makeGRangesFromDataFrame(probeInfo, keep.extra.columns = TRUE)
    # Sample columns are the ones after the probeInfo columns.
    colData <- colDataPrepare(colnames(df)[(ncol(probeInfo) + 1):ncol(df)])
    assay <- data.matrix(subset(df,select = c((ncol(probeInfo) + 1):ncol(df))))
  }
  colnames(assay) <- rownames(colData)
  rownames(assay) <- as.character(df$Composite.Element.REF)
  rse <- SummarizedExperiment(assays = assay, rowRanges = rowRanges, colData = colData)
  # FIX: return explicitly (the original ended on an assignment, which
  # returned the value invisibly).
  return(rse)
}
# Process raw IDAT methylation files with the sesame package (openSesame)
# and return a beta-value matrix, optionally as a SummarizedExperiment.
# files: downloaded *_Grn.idat / *_Red.idat paths.
# barcode: sample barcodes parallel to files, used to rename columns.
# platform: array platform string (matched for "450"/"27", else EPIC).
# legacy: TRUE -> hg19 annotation, FALSE -> hg38.
readIDATDNAmethylation <- function(files,
                                   barcode,
                                   summarizedExperiment,
                                   platform,
                                   legacy){
  # sesame is an optional (Suggests) dependency — fail with a clear message.
  if (!requireNamespace("sesame", quietly = TRUE)) {
    stop("sesame package is needed for this function to work. Please install it.",
         call. = FALSE)
  }
  # openSesame expects the Grn/Red pair in the same folder, so copy each
  # file up one level out of its per-file GDC folder.
  moved.files <- file.path(dirname(dirname(files)), basename(files))
  # for eah file move it to upper parent folder
  plyr::a_ply(files, 1,function(x){
    tryCatch(TCGAbiolinks:::move(x,file.path(dirname(dirname(x)), basename(x)),keep.copy = TRUE),error = function(e){})
  })
  # One sample prefix per Grn/Red pair.
  samples <- unique(gsub("_Grn.idat|_Red.idat","",moved.files))
  message("Processing IDATs with Sesame - http://bioconductor.org/packages/sesame/")
  message("Running opensesame - applying quality masking and nondetection masking (threshold P-value 0.05)")
  message("Please cite: doi: 10.1093/nar/gky691 and 10.1093/nar/gkt090")
  betas <- openSesame(samples)
  # Map file-name prefixes back to the sample barcodes for column names.
  barcode <- unique(data.frame("file" = gsub("_Grn.idat|_Red.idat","",basename(moved.files)), "barcode" = barcode))
  colnames(betas) <- barcode$barcode[match(basename(samples),barcode$file)]
  if(summarizedExperiment){
    met.platform <- "EPIC"
    if(grepl("450",platform)) met.platform <- "450K"
    if(grepl("27",platform)) met.platform <- "27K"
    betas <- makeSEFromDNAMethylationMatrix(betas,genome = ifelse(legacy,"hg19","hg38"),met.platform = met.platform)
    colData(betas) <- DataFrame(colDataPrepare(colnames(betas)))
  }
  return(betas)
}
# We will try to make it easier to use this function with the user's own data
# In case it is not TCGA I should not consider that there is a barcode in the header
# Instead the user should be able to add the names to his data
# The only problem is that the data from the user will not have all the columns
# TODO: Improve this function to be more generic as possible
#' @importFrom GenomicRanges makeGRangesFromDataFrame
#' @importFrom tibble as_data_frame
# Read per-sample DNA methylation text files and merge them into one matrix
# (probes x samples), optionally as a SummarizedExperiment.
# files/cases: downloaded file paths and their sample barcodes.
# platform: "OMA00*" platforms use a simpler two-column format.
readDNAmethylation <- function(files, cases, summarizedExperiment = TRUE, platform){
  if (grepl("OMA00",platform)){
    # Two-column format: probe id + beta value only.
    pb <- txtProgressBar(min = 0, max = length(files), style = 3)
    for (i in seq_along(files)) {
      data <- fread(files[i], header = TRUE, sep = "\t",
                    stringsAsFactors = FALSE,skip = 1,
                    na.strings="N/A",
                    colClasses=c("character", # Composite Element REF
                                 "numeric")) # beta value
      setnames(data,gsub(" ", "\\.", colnames(data)))
      # The beta column is renamed to the sample barcode.
      if(!missing(cases)) setnames(data,2,cases[i])
      if (i == 1) {
        df <- data
      } else {
        df <- merge(df, data, by = "Composite.Element.REF")
      }
      setTxtProgressBar(pb, i)
    }
    setDF(df)
    rownames(df) <- df$Composite.Element.REF
    df$Composite.Element.REF <- NULL
  } else {
    pb <- txtProgressBar(min = 0, max = length(files), style = 3)
    # hg38 files have no extra header line; legacy (hg19) files have one,
    # and carry probe annotation columns that need explicit column classes.
    skip <- ifelse(all(grepl("hg38",files)), 0,1)
    colClasses <- NULL
    if(!all(grepl("hg38",files))) colClasses <- c("character", # Composite Element REF
                                                  "numeric", # beta value
                                                  "character", # Gene symbol
                                                  "character", # Chromosome
                                                  "integer")
    for (i in seq_along(files)) {
      data <- fread(files[i], header = TRUE, sep = "\t",
                    stringsAsFactors = FALSE,skip = skip, colClasses = colClasses)
      setnames(data,gsub(" ", "\\.", colnames(data)))
      if(!missing(cases)) setnames(data,2,cases[i])
      if (i == 1) {
        # Move the beta column last so annotation columns stay in front.
        setcolorder(data,c(1, 3:ncol(data), 2))
        df <- data
      } else {
        # Later files contribute only probe id + beta value.
        data <- subset(data,select = c(1,2))
        df <- merge(df, data, by = "Composite.Element.REF")
      }
      setTxtProgressBar(pb, i)
    }
    if (summarizedExperiment) {
      if(skip == 0) {
        # hg38: the non-TCGA columns are the probe annotation.
        df <- makeSEfromDNAmethylation(df, probeInfo = as_data_frame(df)[,grep("TCGA",colnames(df),invert = TRUE)])
      } else {
        df <- makeSEfromDNAmethylation(df)
      }
    } else {
      setDF(df)
      rownames(df) <- df$Composite.Element.REF
      df$Composite.Element.REF <- NULL
    }
  }
  return(df)
}
# Decode TARGET barcodes into a sample-information DataFrame: tissue code,
# tumor code and nucleic-acid code are extracted positionally from the
# barcode and joined against lookup tables of their human-readable meanings.
# Returns one row per input barcode, in the input order.
colDataPrepareTARGET <- function(barcode){
  message("Adding description to TARGET samples")
  tissue.code <- c('01','02','03','04','05','06','07','08','09','10','11',
                   '12','13','14','15','16','17','20','40','41','42','50','60','61','99')
  definition <- c("Primary solid Tumor", # 01
                  "Recurrent Solid Tumor", # 02
                  "Primary Blood Derived Cancer - Peripheral Blood", # 03
                  "Recurrent Blood Derived Cancer - Bone Marrow", # 04
                  "Additional - New Primary", # 05
                  "Metastatic", # 06
                  "Additional Metastatic", # 07
                  "Tissue disease-specific post-adjuvant therapy", # 08
                  "Primary Blood Derived Cancer - Bone Marrow", # 09
                  "Blood Derived Normal", # 10
                  "Solid Tissue Normal", # 11
                  "Buccal Cell Normal", # 12
                  "EBV Immortalized Normal", # 13
                  "Bone Marrow Normal", # 14
                  "Fibroblasts from Bone Marrow Normal", # 15
                  "Mononuclear Cells from Bone Marrow Normal", # 16
                  "Lymphatic Tissue Normal (including centroblasts)", # 17
                  "Control Analyte", # 20
                  "Recurrent Blood Derived Cancer - Peripheral Blood", # 40
                  "Blood Derived Cancer- Bone Marrow, Post-treatment", # 41
                  "Blood Derived Cancer- Peripheral Blood, Post-treatment", # 42
                  "Cell line from patient tumor", # 50
                  "Xenograft from patient not grown as intermediate on plastic tissue culture dish", # 60
                  "Xenograft grown in mice from established cell lines", #61
                  "Granulocytes after a Ficoll separation") # 99
  aux <- DataFrame(tissue.code = tissue.code,definition)
  # in case multiple equal barcode
  regex <- paste0("[:alnum:]{5}-[:alnum:]{2}-[:alnum:]{6}",
                  "-[:alnum:]{3}-[:alnum:]{3}")
  samples <- str_match(barcode,regex)[,1]
  # Positional fields of the TARGET barcode layout.
  ret <- DataFrame(barcode = barcode,
                   sample = substr(barcode, 1, 20),
                   patient = substr(barcode, 1, 16),
                   tumor.code = substr(barcode, 8, 9),
                   case.unique.id = substr(barcode, 11, 16),
                   tissue.code = substr(barcode, 18, 19),
                   nucleic.acid.code = substr(barcode, 24, 24))
  ret <- merge(ret,aux, by = "tissue.code", sort = FALSE)
  tumor.code <- c('00','01','02','03','04','10','15','20','21','30','40',
                  '41','50','51','52','60','61','62','63','64','65','70','71','80','81')
  tumor.definition <- c("Non-cancerous tissue", # 00
                        "Diffuse Large B-Cell Lymphoma (DLBCL)", # 01
                        "Lung Cancer (all types)", # 02
                        "Cervical Cancer (all types)", # 03
                        "Anal Cancer (all types)", # 04
                        "Acute lymphoblastic leukemia (ALL)", # 10
                        "Mixed phenotype acute leukemia (MPAL)", # 15
                        "Acute myeloid leukemia (AML)", # 20
                        "Induction Failure AML (AML-IF)", # 21
                        "Neuroblastoma (NBL)", # 30
                        "Osteosarcoma (OS)", # 40
                        "Ewing sarcoma", # 41
                        "Wilms tumor (WT)", # 50
                        "Clear cell sarcoma of the kidney (CCSK)", # 51
                        "Rhabdoid tumor (kidney) (RT)", # 52
                        "CNS, ependymoma", # 60
                        "CNS, glioblastoma (GBM)", # 61
                        "CNS, rhabdoid tumor", # 62
                        "CNS, low grade glioma (LGG)", # 63
                        "CNS, medulloblastoma", # 64
                        "CNS, other", # 65
                        "NHL, anaplastic large cell lymphoma", # 70
                        "NHL, Burkitt lymphoma (BL)", # 71
                        "Rhabdomyosarcoma", #80
                        "Soft tissue sarcoma, non-rhabdomyosarcoma") # 81
  aux <- DataFrame(tumor.code = tumor.code,tumor.definition)
  ret <- merge(ret,aux, by = "tumor.code", sort = FALSE)
  nucleic.acid.code <- c('D','E','W','X','Y','R','S')
  nucleic.acid.description <- c("DNA, unamplified, from the first isolation of a tissue",
                                "DNA, unamplified, from the first isolation of a tissue embedded in FFPE",
                                "DNA, whole genome amplified by Qiagen (one independent reaction)",
                                "DNA, whole genome amplified by Qiagen (a second, separate independent reaction)",
                                "DNA, whole genome amplified by Qiagen (pool of 'W' and 'X' aliquots)",
                                "RNA, from the first isolation of a tissue",
                                "RNA, from the first isolation of a tissue embedded in FFPE")
  aux <- DataFrame(nucleic.acid.code = nucleic.acid.code,nucleic.acid.description)
  ret <- merge(ret,aux, by = "nucleic.acid.code", sort = FALSE)
  # Restore the input order (merge with sort = FALSE does not guarantee it)
  # and make row names unique for repeated barcodes.
  ret <- ret[match(barcode,ret$barcode),]
  rownames(ret) <- gsub("\\.","-",make.names(ret$barcode,unique=TRUE))
  ret$code <- NULL
  return(DataFrame(ret))
}
# Decode TCGA barcodes into a sample-information DataFrame: the sample-type
# code (positions 14-15) is joined against a lookup table of short letter
# codes and definitions. Returns one row per input barcode, in input order.
colDataPrepareTCGA <- function(barcode){
  # For the moment this will work only for TCGA Data
  # We should search what TARGET data means
  code <- c('01','02','03','04','05','06','07','08','09','10','11',
            '12','13','14','20','40','50','60','61')
  shortLetterCode <- c("TP","TR","TB","TRBM","TAP","TM","TAM","THOC",
                       "TBM","NB","NT","NBC","NEBV","NBM","CELLC","TRB",
                       "CELL","XP","XCL")
  definition <- c("Primary solid Tumor", # 01
                  "Recurrent Solid Tumor", # 02
                  "Primary Blood Derived Cancer - Peripheral Blood", # 03
                  "Recurrent Blood Derived Cancer - Bone Marrow", # 04
                  "Additional - New Primary", # 05
                  "Metastatic", # 06
                  "Additional Metastatic", # 07
                  "Human Tumor Original Cells", # 08
                  "Primary Blood Derived Cancer - Bone Marrow", # 09
                  "Blood Derived Normal", # 10
                  "Solid Tissue Normal", # 11
                  "Buccal Cell Normal", # 12
                  "EBV Immortalized Normal", # 13
                  "Bone Marrow Normal", # 14
                  "Control Analyte", # 20
                  "Recurrent Blood Derived Cancer - Peripheral Blood", # 40
                  "Cell Lines", # 50
                  "Primary Xenograft Tissue", # 60
                  "Cell Line Derived Xenograft Tissue") # 61
  aux <- DataFrame(code = code,shortLetterCode,definition)
  # in case multiple equal barcode
  regex <- paste0("[:alnum:]{4}-[:alnum:]{2}-[:alnum:]{4}",
                  "-[:alnum:]{3}-[:alnum:]{3}-[:alnum:]{4}-[:alnum:]{2}")
  samples <- str_match(barcode,regex)[,1]
  # Positional fields of the TCGA barcode layout.
  ret <- DataFrame(barcode = barcode,
                   patient = substr(barcode, 1, 12),
                   sample = substr(barcode, 1, 16),
                   code = substr(barcode, 14, 15))
  ret <- merge(ret,aux, by = "code", sort = FALSE)
  # Restore the input order and make row names unique for repeated barcodes.
  ret <- ret[match(barcode,ret$barcode),]
  rownames(ret) <- gsub("\\.","-",make.names(ret$barcode,unique=TRUE))
  ret$code <- NULL
  return(DataFrame(ret))
}
#' @title Create samples information matrix for GDC samples
#' @description Create samples information matrix for GDC samples add subtype information
#' @param barcode TCGA or TARGET barcode
#' @examples
#' \dontrun{
#' query.met <- GDCquery(project = c("TCGA-GBM","TCGA-LGG"),
#' legacy = TRUE,
#' data.category = "DNA methylation",
#' platform = c("Illumina Human Methylation 450",
#' "Illumina Human Methylation 27"))
#' colDataPrepare(getResults(query.met)$cases)
#' }
#' @export
colDataPrepare <- function(barcode){
  # For the moment this will work only for TCGA Data
  # We should search what TARGET data means
  message("Starting to add information to samples")
  # Decode the barcodes according to their project family.
  if(all(grepl("TARGET",barcode))) ret <- colDataPrepareTARGET(barcode)
  if(all(grepl("TCGA",barcode))) ret <- colDataPrepareTCGA(barcode)
  message(" => Add clinical information to samples")
  # There is a limitation on the size of the string, so this step will be splited in cases of 100
  patient.info <- NULL
  patient.info <- tryCatch({
    # Fetch clinical info in batches of `step` patients; the API fails on
    # larger request strings. On failure, retry with smaller batches below.
    step <- 20 # more than 50 gives a bug =/
    for(i in 0:(ceiling(length(ret$patient)/step) - 1)){
      start <- 1 + step * i
      end <- ifelse(((i + 1) * step) > length(ret$patient), length(ret$patient),((i + 1) * step))
      if(is.null(patient.info)) {
        patient.info <- getBarcodeInfo(ret$patient[start:end])
      } else {
        patient.info <- rbind(patient.info,getBarcodeInfo(ret$patient[start:end]))
      }
    }
    patient.info
  }, error = function(e) {
    # Fallback: same loop with a much smaller batch size.
    step <- 2 # more than 50 gives a bug =/
    for(i in 0:(ceiling(length(ret$patient)/step) - 1)){
      start <- 1 + step * i
      end <- ifelse(((i + 1) * step) > length(ret$patient), length(ret$patient),((i + 1) * step))
      if(is.null(patient.info)) {
        patient.info <- getBarcodeInfo(ret$patient[start:end])
      } else {
        patient.info <- rbind(patient.info,getBarcodeInfo(ret$patient[start:end]))
      }
    }
    patient.info
  })
  ret <- merge(ret,patient.info, by.x = "patient", by.y = "submitter_id", all.x = TRUE )
  # Add FFPE information to sample
  ret <- addFFPE(ret)
  # If the clinical info did not carry the project id, recover it from the
  # disease type via the GDC projects table.
  if(!"project_id" %in% colnames(ret)) {
    aux <- getGDCprojects()[,5:6]
    aux <- aux[aux$disease_type == unique(ret$disease_type),2]
    ret$project_id <- as.character(aux)
  }
  # There is no subtype info for target, return as it is
  if(all(grepl("TARGET",barcode))) {
    # Put data in the right order
    ret <- ret[match(barcode,ret$barcode),]
    rownames(ret) <- ret$barcode
    return(ret)
  }
  # remove letter from 01A 01B etc
  ret$sample.aux <- substr(ret$sample,1,15)
  # na.omit should not be here, exceptional case
  out <- NULL
  # Merge published molecular subtype tables per TCGA project, when available.
  for(proj in na.omit(unique(ret$project_id))){
    if(grepl("TCGA",proj,ignore.case = TRUE)) {
      message(" => Adding subtype information to samples")
      tumor <- gsub("TCGA-","",proj)
      available <- c("ACC",
                     "BRCA",
                     "BLCA",
                     "CESC",
                     "CHOL",
                     "COAD",
                     "ESCA",
                     "GBM",
                     "HNSC",
                     "KICH",
                     "KIRC",
                     "KIRP",
                     "LGG",
                     "LUAD",
                     "LUSC",
                     "PAAD",
                     "PCPG",
                     "PRAD",
                     "READ",
                     "SKCM",
                     "SARC",
                     "STAD",
                     "THCA",
                     "UCEC",
                     "UCS",
                     "UVM")
      if (grepl(paste(c(available,"all"),collapse = "|"),tumor,ignore.case = TRUE)) {
        subtype <- TCGAquery_subtype(tumor)
        colnames(subtype) <- paste0("subtype_", colnames(subtype))
        if(all(str_length(subtype$subtype_patient) == 12)){
          # Subtype information were to primary tumor in priority
          subtype$sample.aux <- paste0(subtype$subtype_patient,"-01")
        }
        ret.aux <- ret[ret$sample.aux %in% subtype$sample.aux,]
        ret.aux <- merge(ret.aux,subtype, by = "sample.aux", all.x = TRUE)
        out <- rbind.fill(as.data.frame(out),as.data.frame(ret.aux))
      }
    }
  }
  # We need to put together the samples with subtypes with samples without subytpes
  ret.aux <- ret[!ret$sample %in% out$sample,]
  ret <- rbind.fill(as.data.frame(out),as.data.frame(ret.aux))
  ret$sample.aux <- NULL
  # Add purity information from http://www.nature.com/articles/ncomms9971
  # purity <- getPurityinfo()
  # ret <- merge(ret, purity, by = "sample", all.x = TRUE, sort = FALSE)
  # Put data in the right order
  ret <- ret[match(barcode,ret$barcode),]
  rownames(ret) <- ret$barcode
  return(ret)
}
#' @title Get hg19 or hg38 information from biomaRt
#' @description Get hg19 or hg38 information from biomaRt
#' @param genome hg38 or hg19
#' @param as.granges Output as GRanges or data.frame
#' @importFrom biomaRt getBM useMart listDatasets useEnsembl
#' @export
# Fetch gene annotation (coordinates, ids, symbols) for hg19 or hg38 from
# Ensembl biomaRt, caching the result on disk. Retries up to three times on
# transient failures. Returns a data.frame, or a GRanges when as.granges=TRUE.
get.GRCh.bioMart <- function(genome = "hg19", as.granges = FALSE) {
  tries <- 0L
  msg <- character()
  while (tries < 3L) {
    gene.location <- tryCatch({
      # hg19 lives on the GRCh37 archive host; hg38 on the main host.
      host <- ifelse(genome == "hg19", "grch37.ensembl.org","www.ensembl.org")
      message("Accessing ", host, " to get gene information")
      ensembl <- tryCatch({
        useEnsembl("ensembl", dataset = "hsapiens_gene_ensembl", host = host)
      }, error = function(e) {
        message(e)
        # Main host unreachable: try the regional mirrors before giving up.
        for(mirror in c("asia","useast","uswest")){
          x <- useEnsembl("ensembl",
                          dataset = "hsapiens_gene_ensembl",
                          mirror = mirror,
                          host = host)
          # inherits() is the robust way to test the S4 Mart class.
          if(inherits(x, "Mart")) {
            return(x)
          }
        }
        return(NULL)
      })
      # BUG FIX: the original tested is.null(host), which is always non-NULL
      # (it is assigned by the ifelse above); the mart connection `ensembl`
      # is what can be NULL when every mirror failed.
      if(is.null(ensembl)) {
        message("Problems accessing ensembl database")
        return(NULL)
      }
      attributes <- c("chromosome_name",
                      "start_position",
                      "end_position", "strand",
                      "ensembl_gene_id",
                      "entrezgene",
                      "external_gene_name")
      db.datasets <- listDatasets(ensembl)
      description <- db.datasets[db.datasets$dataset=="hsapiens_gene_ensembl",]$description
      message(paste0("Downloading genome information (try:", tries,") Using: ", description))
      # Cache keyed on the dataset description (includes the genome release).
      filename <- paste0(gsub("[[:punct:]]| ", "_",description),".rda")
      if(!file.exists(filename)) {
        chrom <- c(1:22, "X", "Y")
        gene.location <- getBM(attributes = attributes,
                               filters = c("chromosome_name"),
                               values = list(chrom), mart = ensembl)
        save(gene.location, file = filename)
      } else {
        message("Loading from disk")
        gene.location <- get(load(filename))
      }
      gene.location
    }, error = function(e) {
      msg <<- conditionMessage(e)
      tries <<- tries + 1L
    })
    if(!is.null(gene.location)) break
  }
  if (tries == 3L) stop("failed to get URL after 3 tries:", "\n error: ", msg)
  if(as.granges) {
    # biomaRt encodes strand as +1/-1; GRanges expects "+"/"-".
    gene.location$strand[gene.location$strand == 1] <- "+"
    gene.location$strand[gene.location$strand == -1] <- "-"
    gene.location$chromosome_name <- paste0("chr",gene.location$chromosome_name)
    gene.location <- makeGRangesFromDataFrame(gene.location, seqnames.field = "chromosome_name",
                                              start.field = "start_position",
                                              end.field = "end_position",
                                              keep.extra.columns = TRUE) # considering the whole gene no their promoters
  }
  return(gene.location)
}
# Read RPPA protein expression files (one per case) and merge them into a
# single table keyed by "Composite Element REF", one value column per case.
readProteinExpression <- function(files, cases) {
    progress <- txtProgressBar(min = 0, max = length(files), style = 3)
    merged <- NULL
    for (idx in seq_along(files)) {
        # Two columns: composite element reference (character), value (numeric);
        # the first line of the file is a header to skip
        current <- read_tsv(file = files[idx], col_names = TRUE, skip = 1, col_types = c("cn"))
        if (!missing(cases)) {
            colnames(current)[2] <- cases[idx]
        }
        if (is.null(merged)) {
            merged <- current
        } else {
            merged <- merge(merged, current, all = TRUE, by = "Composite Element REF")
        }
        setTxtProgressBar(progress, idx)
    }
    close(progress)
    return(merged)
}
# Build a SummarizedExperiment from merged Transcriptome Profiling data.
# `data`: merged table whose first column (X1) holds the versioned ensembl
# gene id and whose remaining columns are one value column per case.
# `cases`: sample barcodes; `assay.list`: name(s) for the assay(s).
makeSEfromTranscriptomeProfiling <- function(data, cases, assay.list){
    # How many columns do we have?
    # We will consider col 1 is the ensembl gene id, other ones are data
    size <- ncol(data)
    # Prepare Patient table
    colData <- colDataPrepare(cases)
    # Harmonized data is aligned to hg38
    gene.location <- get.GRCh.bioMart("hg38")
    # Remove the version from the ensembl gene id (ENSGxxx.yy -> ENSGxxx)
    aux <- strsplit(data$X1,"\\.")
    data$ensembl_gene_id <- as.character(unlist(lapply(aux,function(x) x[1])))
    # Keep gene rows only (drops HTSeq summary rows such as __no_feature)
    data <- subset(data, grepl("ENSG", data$ensembl_gene_id))
    found.genes <- table(data$ensembl_gene_id %in% gene.location$ensembl_gene_id)
    if("FALSE" %in% names(found.genes))
        message(paste0("From the ", nrow(data), " genes we couldn't map ", found.genes[["FALSE"]]))
    data <- merge(data, gene.location, by="ensembl_gene_id")
    # Prepare data table
    # After the merge, ensembl_gene_id is column 1 and X1 is column 2, so the
    # case columns (originally 2:size) now sit at 3:(size+1). Note that
    # 2:size+1 parses as (2:size)+1 == 3:(size+1), which is what is wanted.
    assays <- list(data.matrix(data[,2:size+1]))
    names(assays) <- assay.list
    # Strip dimnames; SummarizedExperiment takes them from colData/rowRanges
    assays <- lapply(assays, function(x){
        colnames(x) <- NULL
        rownames(x) <- NULL
        return(x)
    })
    # Prepare rowRanges (one range per gene, hg38 coordinates)
    rowRanges <- GRanges(seqnames = paste0("chr", data$chromosome_name),
                         ranges = IRanges(start = data$start_position,
                                          end = data$end_position),
                         strand = data$strand,
                         ensembl_gene_id = data$ensembl_gene_id,
                         external_gene_name = data$external_gene_name,
                         original_ensembl_gene_id = data$X1)
    names(rowRanges) <- as.character(data$ensembl_gene_id)
    rse <- SummarizedExperiment(assays=assays,
                                rowRanges=rowRanges,
                                colData=colData)
    return(rse)
}
# Read Transcriptome Profiling data.
# Handles: HTSeq gene expression quantification (counts/FPKM/FPKM-UQ),
# miRNA expression quantification and isoform expression quantification.
# Returns a data.frame, or a SummarizedExperiment for HTSeq data when
# summarizedExperiment is TRUE.
readTranscriptomeProfiling <- function(files, data.type, workflow.type, cases, summarizedExperiment) {
    if(grepl("Gene Expression Quantification", data.type, ignore.case = TRUE)){
        # Status working for:
        # - htseq
        # - FPKM
        # - FPKM-UQ
        if(grepl("HTSeq",workflow.type)){
            pb <- txtProgressBar(min = 0, max = length(files), style = 3)
            for (i in seq_along(files)) {
                # Two unnamed columns: gene id (character), value (double)
                data <- read_tsv(file = files[i],
                                 col_names = FALSE,
                                 col_types = cols(
                                     X1 = col_character(),
                                     X2 = col_double()
                                 ))
                if(!missing(cases)) colnames(data)[2] <- cases[i]
                if(i == 1) df <- data
                if(i != 1) df <- merge(df, data, by = colnames(df)[1], all = TRUE)
                setTxtProgressBar(pb, i)
            }
            close(pb)
            if(summarizedExperiment) df <- makeSEfromTranscriptomeProfiling(df, cases, workflow.type)
        }
        # NOTE(review): for a non-HTSeq workflow nothing is read here and the
        # final return(df) fails with "object 'df' not found" — confirm
        # whether other workflows should be supported or rejected explicitly
    } else if(grepl("miRNA", workflow.type, ignore.case = TRUE) && grepl("miRNA", data.type, ignore.case = TRUE)) {
        # Fixed: scalar && instead of vectorized & inside if() — & warns
        # (and errors in recent R) when either side has length > 1
        pb <- txtProgressBar(min = 0, max = length(files), style = 3)
        for (i in seq_along(files)) {
            data <- read_tsv(file = files[i], col_names = TRUE, col_types = "cidc")
            # Suffix every value column with the case barcode
            if(!missing(cases))
                colnames(data)[2:ncol(data)] <- paste0(colnames(data)[2:ncol(data)], "_", cases[i])
            if(i == 1) df <- data
            if(i != 1) df <- merge(df, data, by = colnames(df)[1], all = TRUE)
            setTxtProgressBar(pb, i)
        }
        close(pb)
    } else if(grepl("Isoform Expression Quantification", data.type, ignore.case = TRUE)){
        pb <- txtProgressBar(min = 0, max = length(files), style = 3)
        for (i in seq_along(files)) {
            data <- read_tsv(file = files[i], col_names = TRUE, col_types = c("ccidcc"))
            # Isoform files are stacked row-wise, tagged with the case barcode
            # (or the file index when no cases were given)
            if(!missing(cases)) data$barcode <- cases[i] else data$file <- i
            if(i == 1) df <- data
            if(i != 1) df <- rbind(df, data)
            setTxtProgressBar(pb, i)
        }
        close(pb)
    }
    return(df)
}
#' @importFrom purrr reduce
# Read GISTIC2 "Gene Level Copy Number Scores" files and merge them into a
# single table (full join on the gene identifier columns). The aliquot-uuid
# column names are translated back to TCGA barcodes via the GDC API.
readGISTIC <- function(files, cases){
    message("Reading GISTIC file")
    gistic.df <- NULL
    gistic.list <- plyr::alply(files,1,.fun = function(file) {
        message("Reading file: ", file)
        data <- read_tsv(file = file, col_names = TRUE, progress = TRUE,col_types = readr::cols())
        # cases is a comma-separated list; the first 12 chars identify a patient
        patient <- substr(unlist(str_split(cases,",")),1,12)
        info <- NULL
        # Fetch the aliquot-uuid -> barcode mapping in chunks of 20 patients;
        # if that fails, retry with a much smaller chunk size
        info <- tryCatch({
            step <- 20 # more than 50 gives a bug =/
            for(i in 0:(ceiling(length(patient)/step) - 1)){
                start <- 1 + step * i
                end <- ifelse(((i + 1) * step) > length(patient), length(patient),((i + 1) * step))
                if(is.null(info)) {
                    info <- getAliquot_ids(patient[start:end])
                } else {
                    info <- rbind(info, getAliquot_ids(patient[start:end]))
                }
            }
            info
        }, error = function(e) {
            # NOTE(review): `info` here is read from the enclosing frame and may
            # already hold rows fetched before the error, so retried chunks
            # could be rbind-ed twice — verify
            step <- 2
            for(i in 0:(ceiling(length(patient)/step) - 1)){
                start <- 1 + step * i
                end <- ifelse(((i + 1) * step) > length(patient), length(patient),((i + 1) * step))
                if(is.null(info)) {
                    info <- getAliquot_ids(patient[start:end])
                } else {
                    info <- rbind(info, getAliquot_ids(patient[start:end]))
                }
            }
            info
        })
        # Rename the aliquot-uuid columns to their TCGA barcode
        barcode <- as.character(info$barcode)[match(colnames(data),as.character(info$aliquot_id))]
        idx <- which(!is.na(barcode))
        colnames(data)[idx] <- barcode[idx]
        return(data)
    })
    # Full join of all files on the gene identifier columns
    gistic.df <- gistic.list %>% purrr::reduce(dplyr::full_join, by = c("Gene Symbol","Gene ID","Cytoband"))
    return(gistic.df)
}
# Reads Copy Number Variation files to a data frame, basically it will rbind it
# Read copy-number segment files (one per case), tag each with its case
# barcode and stack them row-wise into one data frame.
readCopyNumberVariation <- function(files, cases){
    message("Reading copy number variation files")
    progress <- txtProgressBar(min = 0, max = length(files), style = 3)
    pieces <- vector("list", length(files))
    for (k in seq_along(files)) {
        # Columns: sample, chromosome, start, end, num probes, segment mean
        segment <- read_tsv(file = files[k], col_names = TRUE, col_types = "ccnnnd")
        if (!missing(cases)) segment$Sample <- cases[k]
        pieces[[k]] <- segment
        setTxtProgressBar(progress, k)
    }
    close(progress)
    return(do.call(rbind, pieces))
}
# getBarcodeInfo(c("TCGA-A6-6650-01B"))
# Add the is_ffpe flag (formalin-fixed paraffin-embedded sample) to the
# sample table, preserving the original barcode order of `df`.
addFFPE <- function(df) {
    message("Add FFPE information. More information at: \n=> https://cancergenome.nih.gov/cancersselected/biospeccriteria \n=> http://gdac.broadinstitute.org/runs/sampleReports/latest/FPPP_FFPE_Cases.html")
    barcode <- df$barcode
    # Query GDC in chunks of `step` patients. Each attempt starts from a
    # fresh accumulator, so a retry cannot rbind onto a half-filled table
    # (the original retried onto partially fetched results, which could
    # duplicate rows).
    fetch.ffpe <- function(patients, step) {
        info <- NULL
        for (i in 0:(ceiling(length(patients) / step) - 1)) {
            start <- 1 + step * i
            end <- ifelse(((i + 1) * step) > length(patients), length(patients), ((i + 1) * step))
            # rbind(NULL, x) is x, so no special case for the first chunk
            info <- rbind(info, getFFPE(patients[start:end]))
        }
        info
    }
    ffpe.info <- tryCatch({
        fetch.ffpe(df$patient, step = 20) # more than 50 gives a bug =/
    }, error = function(e) {
        # Retry the whole fetch with a much smaller chunk size
        fetch.ffpe(df$patient, step = 2)
    })
    df <- merge(df, ffpe.info, by.x = "sample", by.y = "submitter_id")
    # merge reorders rows; restore the original barcode order
    df <- df[match(barcode, df$barcode), ]
    return(df)
}
# getFFPE("TCGA-A6-6650")
#' @importFrom plyr rbind.fill
#' @importFrom httr content
# Query the GDC API for the is_ffpe flag of every sample of the given
# patients. Returns a data.frame with columns submitter_id and is_ffpe.
getFFPE <- function(patient){
    baseURL <- "https://api.gdc.cancer.gov/cases/?"
    options.pretty <- "pretty=true"
    # expand=samples returns the samples list, which carries is_ffpe
    options.expand <- "expand=samples"
    option.size <- paste0("size=",length(patient))
    # Filter: cases.submitter_id in (patient...). The JSON braces are
    # URL-encoded; the quoted id list is appended as-is.
    options.filter <- paste0("filters=",
                             URLencode('{"op":"and","content":[{"op":"in","content":{"field":"cases.submitter_id","value":['),
                             paste0('"',paste(patient,collapse = '","')),
                             URLencode('"]}}]}'))
    url <- paste0(baseURL,paste(options.pretty,options.expand, option.size, options.filter, sep = "&"))
    # One retry with a plain GET if the first JSON fetch fails
    json <- tryCatch(
        getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
        error = function(e) {
            message(paste("Error: ", e, sep = " "))
            message("We will retry to access GDC again! URL:")
            message(url)
            fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
        }
    )
    results <- json$data$hits
    # One row per sample; keep only the sample id and the FFPE flag
    results <- rbind.fill(results$samples)[,c("submitter_id","is_ffpe")]
    return(results)
}
# Map TCGA patient barcodes to their aliquot UUIDs via the GDC API.
# Returns a data.frame with columns aliquot_id and barcode.
getAliquot_ids <- function(barcode){
    baseURL <- "https://api.gdc.cancer.gov/cases/?"
    # Request only the aliquot uuid and its submitter barcode
    options.fields <- "fields=samples.portions.analytes.aliquots.aliquot_id,samples.portions.analytes.aliquots.submitter_id"
    options.pretty <- "pretty=true"
    option.size <- paste0("size=",length(barcode))
    #message(paste(barcode,collapse = '","'))
    #message(paste0('"',paste(barcode,collapse = '","')))
    # Filter: cases.submitter_id in (barcode...). The JSON braces are
    # URL-encoded; the quoted id list is appended as-is.
    options.filter <- paste0("filters=",
                             URLencode('{"op":"and","content":[{"op":"in","content":{"field":"cases.submitter_id","value":['),
                             paste0('"',paste(barcode,collapse = '","')),
                             URLencode('"]}}]}'))
    #message(paste0(baseURL,paste(options.pretty,options.expand, option.size, options.filter, sep = "&")))
    url <- paste0(baseURL,paste(options.pretty,options.fields, option.size, options.filter, sep = "&"))
    # One retry with a plain GET if the first JSON fetch fails
    json <- tryCatch(
        getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
        error = function(e) {
            message(paste("Error: ", e, sep = " "))
            message("We will retry to access GDC again! URL:")
            message(url)
            fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
        }
    )
    # Flattened hits mix barcodes (contain "TCGA") and aliquot uuids
    results <- unlist(json$data$hits$samples)
    results.barcode <- grep("TCGA",results,value = TRUE)
    results.aliquot <- grep("TCGA",results,value = TRUE,invert = TRUE)
    # NOTE(review): assumes barcodes and uuids unlist in matching order —
    # verify against the API response structure
    df <- data.frame(results.aliquot,results.barcode)
    colnames(df) <- c("aliquot_id","barcode")
    return(df)
}
# getBarcodeInfo(c("TCGA-A6-6650"))
# Fetch clinical/biospecimen metadata (diagnoses, exposures, demographic,
# treatments, project info) for the given case barcodes from the GDC API
# and return them merged into one data.frame (one row per submitter_id).
getBarcodeInfo <- function(barcode) {
    baseURL <- "https://api.gdc.cancer.gov/cases/?"
    options.pretty <- "pretty=true"
    options.expand <- "expand=project,diagnoses,diagnoses.treatments,annotations,family_histories,demographic,exposures"
    option.size <- paste0("size=",length(barcode))
    #message(paste(barcode,collapse = '","'))
    #message(paste0('"',paste(barcode,collapse = '","')))
    # Filter: cases.submitter_id in (barcode...)
    options.filter <- paste0("filters=",
                             URLencode('{"op":"and","content":[{"op":"in","content":{"field":"cases.submitter_id","value":['),
                             paste0('"',paste(barcode,collapse = '","')),
                             URLencode('"]}}]}'))
    #message(paste0(baseURL,paste(options.pretty,options.expand, option.size, options.filter, sep = "&")))
    url <- paste0(baseURL,paste(options.pretty,options.expand, option.size, options.filter, sep = "&"))
    # One retry with a plain GET if the first JSON fetch fails
    json <- tryCatch(
        getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
        error = function(e) {
            message(paste("Error: ", e, sep = " "))
            message("We will retry to access GDC again! URL:")
            message(url)
            fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
        }
    )
    results <- json$data$hits
    submitter_id <- results$submitter_id
    # We don't have the same cols for TCGA and TARGET so we need to check them
    if(!is.null(results$diagnoses)) {
        diagnoses <- rbindlist(results$diagnoses, fill = TRUE)
        # Diagnosis records use "<case>_diagnosis" submitter ids; strip suffix
        if(any(grepl("submitter_id", colnames(diagnoses)))) {
            diagnoses$submitter_id <- gsub("_diagnosis","", diagnoses$submitter_id)
        } else {
            diagnoses$submitter_id <- submitter_id
        }
        # this is required since the sample might not have a diagnosis
        df <- merge(data.frame("submitter_id" = submitter_id),
                    diagnoses,
                    by = "submitter_id",
                    all.x = T,
                    sort = FALSE)
    } else {
        df <- as.data.frame(submitter_id)
    }
    # NOTE(review): `!is.null(results$exposures) > 0` parses as
    # (!is.null(...)) > 0, i.e. just !is.null(...); the `> 0` is a no-op
    if(!is.null(results$exposures) > 0) {
        exposures <- rbindlist(results$exposures, fill = TRUE)
        if(any(grepl("submitter_id", colnames(exposures)))) {
            exposures$submitter_id <- gsub("_exposure","", exposures$submitter_id)
        } else {
            exposures$submitter_id <- submitter_id
        }
        df <- merge(df,exposures, by = "submitter_id", all = TRUE,sort = FALSE)
    }
    if(!is.null(results$demographic)) {
        demographic <- results$demographic
        if(any(grepl("submitter_id", colnames(demographic)))) {
            demographic$submitter_id <- gsub("_demographic","", results$demographic$submitter_id)
        } else {
            demographic$submitter_id <-submitter_id
        }
        demographic <- demographic[!is.na(demographic$submitter_id),]
        df <- merge(df,demographic, by="submitter_id", all = TRUE,sort = FALSE)
    }
    treatments <- rbindlist(results$treatments,fill = TRUE)
    if (nrow(treatments) > 0) {
        # NOTE(review): df is a data.frame at this point, so the data.table
        # `:=` idiom below likely errors — presumably intended to drop a
        # nested "treatments" list-column before merging; verify
        df[,treatments:=NULL]
        if (any(grepl("submitter_id", colnames(treatments)))) {
            treatments$submitter_id <- gsub("_treatment","", treatments$submitter_id)
        } else {
            treatments$submitter_id <-submitter_id
        }
        df <- merge(df,treatments, by="submitter_id", all = TRUE,sort = FALSE)
    }
    df$bcr_patient_barcode <- df$submitter_id
    # Add the project information, minus its "state" column
    projects.info <- results$project
    projects.info <- results$project[,grep("state",colnames(projects.info),invert = TRUE)]
    df <- merge(df,
                cbind("submitter_id" = submitter_id, projects.info),
                sort = FALSE, by = "submitter_id")
    # Adding in the same order as the barcode argument
    df <- df[match(barcode,df$submitter_id),]
    # This line should not exist, but some patients do not have clinical data
    # case: TCGA-R8-A6YH
    # this has been reported to GDC, waiting for answers
    # So we will remove these NA cases
    df <- df[!is.na(df$submitter_id),]
    return(df)
}
#' @title Prepare CEL files into an AffyBatch.
#' @description Prepare CEL files into an AffyBatch.
#' @param ClinData write
#' @param PathFolder write
#' @param TabCel write
#' @examples
#' \dontrun{
#' to add example
#' }
#' @export
#' @return Normalized expression data from Affy eSets
TCGAprepare_Affy <- function(ClinData, PathFolder, TabCel){
    # affy and Biobase are optional dependencies, checked at run time
    if (!requireNamespace("affy", quietly = TRUE)) {
        stop("affy package is needed for this function to work. Please install it.",
             call. = FALSE)
    }
    if (!requireNamespace("Biobase", quietly = TRUE)) {
        # Fixed: this message previously named "affy" although Biobase is checked
        stop("Biobase package is needed for this function to work. Please install it.",
             call. = FALSE)
    }
    # NOTE(review): ClinData and PathFolder are accepted but unused — confirm
    # Read the CEL files listed in TabCel$samples, RMA-normalize and return
    # the expression matrix (probes x samples)
    affy_batch <- affy::ReadAffy(filenames = as.character(paste(TabCel$samples, ".CEL", sep = "")))
    eset <- affy::rma(affy_batch)
    mat <- Biobase::exprs(eset)
    return(mat)
}
|
/R/prepare.R
|
no_license
|
Sergiollaneza/TCGAbiolinks
|
R
| false
| false
| 61,770
|
r
|
#' @title Prepare GDC data
#' @description
#' Reads the downloaded data and prepares it into an R object
#' @param query A query for GDCquery function
#' @param save Save result as RData object?
#' @param save.filename Name of the file to be save if empty an automatic will be created
#' @param directory Directory/Folder where the data was downloaded. Default: GDCdata
#' @param summarizedExperiment Create a summarizedExperiment? Default TRUE (if possible)
#' @param remove.files.prepared Remove the files read? Default: FALSE
#' This argument will be considered only if save argument is set to true
#' @param add.gistic2.mut If a list of genes (gene symbol) is given, columns with gistic2 results from GDAC firehose (hg19)
#' and a column indicating if there is or not mutation in that gene (hg38)
#' (TRUE or FALSE - use the MAF file for more information)
#' will be added to the sample matrix in the summarized Experiment object.
#' @param mut.pipeline If add.gistic2.mut is not NULL this field will be taken in consideration.
#' Four separate variant calling pipelines are implemented for GDC data harmonization.
#' Options: muse, varscan2, somaticsniper, MuTect2. For more information:
#' https://gdc-docs.nci.nih.gov/Data/Bioinformatics_Pipelines/DNA_Seq_Variant_Calling_Pipeline/
#' @param mutant_variant_classification List of mutant_variant_classification that will be
#' consider a sample mutant or not. Default: "Frame_Shift_Del", "Frame_Shift_Ins",
#' "Missense_Mutation", "Nonsense_Mutation", "Splice_Site", "In_Frame_Del",
#' "In_Frame_Ins", "Translation_Start_Site", "Nonstop_Mutation"
#' @export
#' @examples
#' query <- GDCquery(project = "TCGA-KIRP",
#' data.category = "Simple Nucleotide Variation",
#' data.type = "Masked Somatic Mutation",
#' workflow.type = "MuSE Variant Aggregation and Masking")
#' GDCdownload(query, method = "api", directory = "maf")
#' maf <- GDCprepare(query, directory = "maf")
#'
#' \dontrun{
#' # Get GISTIC values
#' gistic.query <- GDCquery(project = "TCGA-ACC",
#' data.category = "Copy Number Variation",
#' data.type = "Gene Level Copy Number Scores",
#' access="open")
#' GDCdownload(gistic.query)
#' gistic <- GDCprepare(gistic.query)
#' }
#' @return A summarizedExperiment or a data.frame
#' @importFrom S4Vectors DataFrame
#' @importFrom SummarizedExperiment metadata<-
#' @importFrom data.table setcolorder setnames
#' @importFrom GenomicRanges GRanges
#' @importFrom IRanges IRanges
GDCprepare <- function(query,
                       save = FALSE,
                       save.filename,
                       directory = "GDCdata",
                       summarizedExperiment = TRUE,
                       remove.files.prepared = FALSE,
                       add.gistic2.mut = NULL,
                       mut.pipeline = "mutect2",
                       mutant_variant_classification = c("Frame_Shift_Del",
                                                         "Frame_Shift_Ins",
                                                         "Missense_Mutation",
                                                         "Nonsense_Mutation",
                                                         "Splice_Site",
                                                         "In_Frame_Del",
                                                         "In_Frame_Ins",
                                                         "Translation_Start_Site",
                                                         "Nonstop_Mutation")){
    # Fail fast when the GDC server is unreachable
    isServeOK()
    if(missing(query)) stop("Please set query parameter")
    # Duplicated cases cannot be merged into one matrix, except for data
    # types where several entries per case are expected
    if(any(duplicated(query$results[[1]]$cases)) & query$data.type != "Clinical data" &
       query$data.type != "Protein expression quantification" &
       query$data.type != "Raw intensities") {
        dup <- query$results[[1]]$cases[duplicated(query$results[[1]]$cases)]
        cols <- c("tags","cases","experimental_strategy","analysis_workflow_type")
        cols <- cols[cols %in% colnames(query$results[[1]])]
        dup <- query$results[[1]][query$results[[1]]$cases %in% dup,cols]
        dup <- dup[order(dup$cases),]
        print(knitr::kable(dup))
        stop("There are samples duplicated. We will not be able to prepare it")
    }
    if(!save & remove.files.prepared) {
        stop("To remove the files, please set save to TRUE. Otherwise, the data will be lost")
    }
    # We save the files in project/source/data.category/data.type/file_id/file_name
    source <- ifelse(query$legacy,"legacy","harmonized")
    files <- file.path(query$results[[1]]$project, source,
                       gsub(" ","_",query$results[[1]]$data_category),
                       gsub(" ","_",query$results[[1]]$data_type),
                       gsub(" ","_",query$results[[1]]$file_id),
                       gsub(" ","_",query$results[[1]]$file_name))
    files <- file.path(directory, files)
    if(!all(file.exists(files))) stop(paste0("I couldn't find all the files from the query. ",
                                             "Please check if the directory parameter is right or `GDCdownload` downloaded the samples."))
    # Dispatch to the reader matching the queried data category / data type
    if(grepl("Transcriptome Profiling", query$data.category, ignore.case = TRUE)){
        data <- readTranscriptomeProfiling(files = files,
                                           data.type = ifelse(!is.na(query$data.type), as.character(query$data.type), unique(query$results[[1]]$data_type)),
                                           workflow.type = unique(query$results[[1]]$analysis_workflow_type),
                                           cases = query$results[[1]]$cases,
                                           summarizedExperiment)
    } else if(grepl("Copy Number Variation",query$data.category,ignore.case = TRUE)) {
        if(unique(query$results[[1]]$data_type) == "Gene Level Copy Number Scores") {
            data <- readGISTIC(files, query$results[[1]]$cases)
        } else {
            data <- readCopyNumberVariation(files, query$results[[1]]$cases)
        }
    } else if(grepl("DNA methylation",query$data.category, ignore.case = TRUE)) {
        data <- readDNAmethylation(files, query$results[[1]]$cases, summarizedExperiment, unique(query$platform))
    } else if(grepl("Raw intensities",query$data.type, ignore.case = TRUE)) {
        # preparing IDAT files
        data <- readIDATDNAmethylation(files, query$results[[1]]$cases, summarizedExperiment, unique(query$platform), query$legacy)
    } else if(grepl("Protein expression",query$data.category,ignore.case = TRUE)) {
        data <- readProteinExpression(files, query$results[[1]]$cases)
    } else if(grepl("Simple Nucleotide Variation",query$data.category,ignore.case = TRUE)) {
        if(grepl("Masked Somatic Mutation",query$results[[1]]$data_type,ignore.case = TRUE) | source == "legacy")
            suppressWarnings(data <- readSimpleNucleotideVariationMaf(files))
    } else if(grepl("Clinical|Biospecimen", query$data.category, ignore.case = TRUE)){
        data <- readClinical(files, query$data.type, query$results[[1]]$cases)
        # Clinical data is a list of tables, never a SummarizedExperiment
        summarizedExperiment <- FALSE
    } else if (grepl("Gene expression",query$data.category,ignore.case = TRUE)) {
        if(query$data.type == "Gene expression quantification")
            data <- readGeneExpressionQuantification(files = files,
                                                     cases = query$results[[1]]$cases,
                                                     summarizedExperiment = summarizedExperiment,
                                                     genome = ifelse(query$legacy,"hg19","hg38"),
                                                     experimental.strategy = unique(query$results[[1]]$experimental_strategy))
        if(query$data.type == "miRNA gene quantification")
            data <- readGeneExpressionQuantification(files = files,
                                                     cases = query$results[[1]]$cases,
                                                     summarizedExperiment = FALSE,
                                                     genome = ifelse(query$legacy,"hg19","hg38"),
                                                     experimental.strategy = unique(query$results[[1]]$experimental_strategy))
        if(query$data.type == "miRNA isoform quantification")
            data <- readmiRNAIsoformQuantification(files = files,
                                                  cases = query$results[[1]]$cases)
        if(query$data.type == "Isoform expression quantification")
            data <- readIsoformExpressionQuantification(files = files, cases = query$results[[1]]$cases)
        if(query$data.type == "Exon quantification")
            data <- readExonQuantification(files = files,
                                           cases = query$results[[1]]$cases,
                                           summarizedExperiment = summarizedExperiment)
    }
    # Add data release to object
    if(summarizedExperiment & !is.data.frame(data)){
        metadata(data) <- list("data_release" = getGDCInfo()$data_release)
    }
    # Optionally annotate samples with GISTIC2 scores and mutation status
    # for the requested genes
    if((!is.null(add.gistic2.mut)) & summarizedExperiment) {
        message("=> Adding GISTIC2 and mutation information....")
        genes <- tolower(levels(EAGenes$Gene))
        if(!all(tolower(add.gistic2.mut) %in% genes)) message(paste("These genes were not found:\n",
                                                                    paste(add.gistic2.mut[! tolower(add.gistic2.mut) %in% genes],collapse = "\n=> ")))
        add.gistic2.mut <- add.gistic2.mut[tolower(add.gistic2.mut) %in% tolower(genes)]
        if(length(add.gistic2.mut) > 0){
            info <- colData(data)
            for(i in unlist(query$project)){
                info <- get.mut.gistc.information(info,
                                                  i,
                                                  add.gistic2.mut,
                                                  mut.pipeline = mut.pipeline,
                                                  mutant_variant_classification = mutant_variant_classification)
            }
            colData(data) <- info
        }
    }
    # Warn about replicate samples (same sample, several aliquots)
    if("samples" %in% colnames(data)){
        # NOTE(review): the guard tests a "samples" column but the checks
        # below read data$sample (singular) — confirm which name colData uses
        if(any(duplicated(data$sample))) {
            message("Replicates found.")
            if(any(data$is_ffpe)) message("FFPE should be removed. You can modify the data with the following command:\ndata <- data[,!data$is_ffpe]")
            print(as.data.frame(colData(data)[data$sample %in% data$sample[duplicated(data$sample)],c("is_ffpe"),drop=F]))
        }
    }
    if(save){
        # Build an automatic file name if none was given
        if(missing(save.filename) & !missing(query)) save.filename <- paste0(query$project,gsub(" ","_", query$data.category),gsub(" ","_",date()),".RData")
        message(paste0("Saving file:",save.filename))
        save(data, file = save.filename)
        message("File saved")
        # save is true, due to the check in the beginning of the code
        if(remove.files.prepared){
            # removes files and empty directories
            remove.files.recursively(files)
        }
    }
    return(data)
}
# Remove the prepared files and any directories left empty by their removal,
# climbing up the directory tree.
remove.files.recursively <- function(files){
    # Delete the per-file folders (file_id level) that contained the data
    dirs2rm <- unique(dirname(files))
    unlink(dirs2rm, recursive = TRUE)
    # Climb up the tree, deleting a folder only once it is empty.
    # Fixed: the original recursion unlinked the PARENT of an empty folder
    # before checking that the parent itself was empty, which could delete
    # sibling folders (e.g. other data types) that still held files.
    parents <- unique(dirname(dirs2rm))
    for (d in parents) {
        # Stop at the filesystem root (dirname of root is itself)
        while (dir.exists(d) && length(list.files(d)) == 0 && d != dirname(d)) {
            unlink(d, recursive = TRUE)
            d <- dirname(d)
        }
    }
}
# Read legacy clinical biotab files into a named list of tibbles.
# Only data.type == "Clinical data" is handled here; other clinical data
# types are parsed elsewhere.
readClinical <- function(files, data.type, cases){
    # NOTE(review): `cases` is accepted but unused — kept for interface
    # compatibility with the other readers
    if(data.type == "Clinical data"){
        suppressMessages({
            ret <- plyr::alply(files,.margins = 1,readr::read_tsv, .progress = "text")
        })
        # Name each table after its file, stripping the center prefix
        names(ret) <- gsub("nationwidechildrens.org_","",gsub(".txt","",basename(files)))
        return(ret)
    }
    # Fixed: the original fell through to return(ret) with `ret` undefined,
    # producing an obscure "object 'ret' not found" error
    stop("readClinical can only handle data.type 'Clinical data', got: ", data.type,
         call. = FALSE)
}
#' @importFrom tidyr separate
# Read exon quantification files (one per case), merge them into one table
# and optionally assemble a SummarizedExperiment with exon coordinates
# parsed from the "exon" column ("chr:start-end:strand").
readExonQuantification <- function (files, cases, summarizedExperiment = TRUE){
    pb <- txtProgressBar(min = 0, max = length(files), style = 3)
    for (i in seq_along(files)) {
        data <- fread(files[i], header = TRUE, sep = "\t", stringsAsFactors = FALSE)
        if(!missing(cases)) {
            assay.list <- gsub(" |\\(|\\)|\\/","_",colnames(data)[2:ncol(data)])
            # We will use this because there might be more than one col for each sample
            setnames(data,colnames(data)[2:ncol(data)],
                     paste0(gsub(" |\\(|\\)|\\/","_",colnames(data)[2:ncol(data)]),"_",cases[i]))
        }
        if (i == 1) {
            df <- data
        } else {
            df <- merge(df, data, by=colnames(data)[1], all = TRUE)
        }
        setTxtProgressBar(pb, i)
    }
    # NOTE(review): pb is never closed here — confirm whether close(pb)
    # was intentionally omitted
    setDF(df)
    rownames(df) <- df[,1]
    # Split "exon" into seqnames / start / end / strand columns
    df <- df %>% separate(exon,into = c("seqnames","coordinates","strand"),sep = ":") %>%
        separate(coordinates,into = c("start","end"),sep = "-")
    if(summarizedExperiment) {
        suppressWarnings({
            # One assay matrix per measured quantity, columns matched by name
            assays <- lapply(assay.list, function (x) {
                return(data.matrix(subset(df, select = grep(x,colnames(df),ignore.case = TRUE))))
            })
        })
        names(assays) <- assay.list
        # Full TCGA aliquot barcode pattern
        regex <- paste0("[:alnum:]{4}-[:alnum:]{2}-[:alnum:]{4}",
                        "-[:alnum:]{3}-[:alnum:]{3}-[:alnum:]{4}-[:alnum:]{2}")
        samples <- na.omit(unique(str_match(colnames(df),regex)[,1]))
        colData <- colDataPrepare(samples)
        # Strip dimnames; SummarizedExperiment takes them from colData/rowRanges
        assays <- lapply(assays, function(x){
            colnames(x) <- NULL
            rownames(x) <- NULL
            return(x)
        })
        rowRanges <- makeGRangesFromDataFrame(df)
        rse <- SummarizedExperiment(assays=assays,
                                    rowRanges=rowRanges,
                                    colData=colData)
        return(rse)
    }
    return(df)
}
# Read isoform expression quantification files (one per case) and merge
# them into a single data.frame keyed by the first (isoform id) column,
# which becomes the rownames of the result.
readIsoformExpressionQuantification <- function (files, cases){
    pb <- txtProgressBar(min = 0, max = length(files), style = 3)
    for (i in seq_along(files)) {
        data <- fread(files[i], header = TRUE, sep = "\t", stringsAsFactors = FALSE)
        if(!missing(cases)) {
            # NOTE(review): assay.list is computed but never used here —
            # likely copied from the other readers; confirm
            assay.list <- gsub(" |\\(|\\)|\\/","_",colnames(data)[2:ncol(data)])
            # We will use this because there might be more than one col for each sample
            setnames(data,colnames(data)[2:ncol(data)],
                     paste0(gsub(" |\\(|\\)|\\/","_",colnames(data)[2:ncol(data)]),"_",cases[i]))
        }
        if (i == 1) {
            df <- data
        } else {
            df <- merge(df, data, by=colnames(data)[1], all = TRUE)
        }
        setTxtProgressBar(pb, i)
    }
    # Convert to data.frame and move the id column into the rownames
    setDF(df)
    rownames(df) <- df[,1]
    df[,1] <- NULL
    return(df)
}
# Read miRNA isoform quantification files (one per case), tag each row with
# its case barcode and stack them row-wise into one data.frame.
readmiRNAIsoformQuantification <- function (files, cases){
    pb <- txtProgressBar(min = 0, max = length(files), style = 3)
    for (i in seq_along(files)) {
        data <- fread(files[i], header = TRUE, sep = "\t", stringsAsFactors = FALSE)
        data$barcode <- cases[i]
        if (i == 1) {
            df <- data
        } else {
            df <- rbind(df, data)
        }
        setTxtProgressBar(pb, i)
    }
    # Fixed: the progress bar connection was never closed
    close(pb)
    # setDF converts the data.table to a data.frame (by reference) and
    # returns it; return explicitly (it used to be returned invisibly)
    return(setDF(df))
}
# Read a MAF (Mutation Annotation Format) file. Tries tab-separated first;
# if that yields a single column the file is comma-separated, so it is
# re-read with read_csv.
readSimpleNucleotideVariationMaf <- function(files){
    # Column spec shared by both attempts (the original duplicated it):
    # integer columns are declared, everything else is guessed by readr
    maf.col.types <- cols(
        Entrez_Gene_Id = col_integer(),
        Start_Position = col_integer(),
        End_Position = col_integer(),
        t_depth = col_integer(),
        t_ref_count = col_integer(),
        t_alt_count = col_integer(),
        n_depth = col_integer(),
        ALLELE_NUM = col_integer(),
        TRANSCRIPT_STRAND = col_integer(),
        PICK = col_integer(),
        TSL = col_integer(),
        HGVS_OFFSET = col_integer(),
        MINIMISED = col_integer())
    ret <- read_tsv(files,
                    comment = "#",
                    col_types = maf.col.types,
                    progress = TRUE)
    # A single parsed column means the separator was wrong: fall back to CSV
    if(ncol(ret) == 1) ret <- read_csv(files,
                                       comment = "#",
                                       col_types = maf.col.types,
                                       progress = TRUE)
    return(ret)
}
# Read legacy gene expression quantification files (RNA-Seq or expression
# array) and merge them into one table; optionally build a
# SummarizedExperiment with gene coordinates from biomaRt.
# NOTE(review): the `platform` argument is accepted but never used — confirm
readGeneExpressionQuantification <- function(files,
                                             cases,
                                             genome = "hg19",
                                             summarizedExperiment = TRUE,
                                             experimental.strategy,
                                             platform){
    pb <- txtProgressBar(min = 0, max = length(files), style = 3)
    # Expression-array files carry one extra header line to skip
    skip <- unique((ifelse(experimental.strategy == "Gene expression array",1,0)))
    if(length(skip) > 1) stop("It is not possible to handle those different platforms together")
    for (i in seq_along(files)) {
        suppressWarnings({
            data <- fread(files[i],
                          header = TRUE,
                          sep = "\t",
                          stringsAsFactors = FALSE,
                          skip = skip)
        })
        if(!missing(cases)) {
            assay.list <- gsub(" |\\(|\\)|\\/","_",colnames(data)[2:ncol(data)])
            # We will use this because there might be more than one col for each sample
            setnames(data,colnames(data)[2:ncol(data)],
                     paste0(gsub(" |\\(|\\)|\\/","_",colnames(data)[2:ncol(data)]),"_",cases[i]))
        }
        if (i == 1) {
            df <- data
        } else {
            df <- merge(df, data, by = colnames(data)[1], all = TRUE)
        }
        setTxtProgressBar(pb, i)
    }
    setDF(df)
    if (summarizedExperiment) {
        df <- makeSEfromGeneExpressionQuantification(df,assay.list, genome = genome)
    } else {
        # Plain data.frame output: gene ids become the rownames
        rownames(df) <- df$gene_id
        df$gene_id <- NULL
    }
    return(df)
}
# Build a SummarizedExperiment from merged gene expression data.
# Gene coordinates are fetched from biomaRt; rows are matched either via the
# entrez id (legacy "SYMBOL|ENTREZ" gene_id column) or via the gene symbol.
makeSEfromGeneExpressionQuantification <- function(df, assay.list, genome="hg19"){
    gene.location <- get.GRCh.bioMart(genome)
    if(all(grepl("\\|",df[,1]))){
        # gene_id looks like "SYMBOL|ENTREZ": match on the entrez part
        aux <- strsplit(df$gene_id,"\\|")
        GeneID <- unlist(lapply(aux,function(x) x[2]))
        df$entrezgene <- as.numeric(GeneID)
        df <- merge(df, gene.location, by="entrezgene")
    } else {
        # Otherwise the first column holds the gene symbol
        df$external_gene_name <- as.character(df[,1])
        df <- merge(df, gene.location, by="external_gene_name")
    }
    if("transcript_id" %in% assay.list){
        rowRanges <- GRanges(seqnames = paste0("chr", df$chromosome_name),
                             ranges = IRanges(start = df$start_position,
                                              end = df$end_position),
                             strand = df$strand,
                             gene_id = df$external_gene_name,
                             entrezgene = df$entrezgene,
                             ensembl_gene_id = df$ensembl_gene_id,
                             transcript_id = subset(df, select = 5))
        names(rowRanges) <- as.character(df$gene_id)
        # transcript_id becomes row metadata, not a separate assay
        assay.list <- assay.list[which(assay.list != "transcript_id")]
    } else {
        rowRanges <- GRanges(seqnames = paste0("chr", df$chromosome_name),
                             ranges = IRanges(start = df$start_position,
                                              end = df$end_position),
                             strand = df$strand,
                             gene_id = df$external_gene_name,
                             entrezgene = df$entrezgene,
                             ensembl_gene_id = df$ensembl_gene_id)
        names(rowRanges) <- as.character(df$external_gene_name)
    }
    suppressWarnings({
        # One assay matrix per measured quantity, columns matched by name
        assays <- lapply(assay.list, function (x) {
            return(data.matrix(subset(df, select = grep(x,colnames(df),ignore.case = TRUE))))
        })
    })
    names(assays) <- assay.list
    # Full TCGA aliquot barcode pattern
    regex <- paste0("[:alnum:]{4}-[:alnum:]{2}-[:alnum:]{4}",
                    "-[:alnum:]{3}-[:alnum:]{3}-[:alnum:]{4}-[:alnum:]{2}")
    samples <- na.omit(unique(str_match(colnames(df),regex)[,1]))
    colData <- colDataPrepare(samples)
    # Strip dimnames; SummarizedExperiment takes them from colData/rowRanges
    assays <- lapply(assays, function(x){
        colnames(x) <- NULL
        rownames(x) <- NULL
        return(x)
    })
    rse <- SummarizedExperiment(assays=assays,
                                rowRanges=rowRanges,
                                colData=colData)
    return(rse)
}
#' @importFrom downloader download
#' @importFrom S4Vectors DataFrame
# Build a SummarizedExperiment from a beta-value matrix (probes x samples),
# attaching Infinium probe coordinates for the requested genome/platform.
makeSEFromDNAMethylationMatrix <- function(betas, genome = "hg38", met.platform = "450K") {
    message("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-")
    message("Creating a SummarizedExperiment from DNA methylation input")
    # The annotation differs per platform, hence the explicit argument
    # rather than guessing from the matrix size
    probe.ranges <- getInfiniumAnnotation(met.platform, genome)
    # Keep only probes present in the beta matrix
    probe.ranges <- probe.ranges[names(probe.ranges) %in% rownames(betas), , drop = FALSE]
    sample.info <- DataFrame(samples = colnames(betas))
    # Restrict the betas to annotated probes and put them in annotation order
    betas <- betas[rownames(betas) %in% names(probe.ranges), , drop = FALSE]
    betas <- betas[names(probe.ranges), , drop = FALSE]
    se <- SummarizedExperiment(assays = data.matrix(betas),
                               rowRanges = probe.ranges,
                               colData = sample.info)
    return(se)
}
# Download (once, cached in the working directory) and load the Infinium
# probe manifest for the given platform ("EPIC", "450K" or 27k) and genome
# build ("hg19"/"hg38"), from Wanding Zhou's annotation resource.
getInfiniumAnnotation <- function(platform, genome){
    base <- "http://zwdzwd.io/InfiniumAnnotation/current/"
    # (A precomputed `path` variable existed here but was never used; removed)
    if(platform == "EPIC") {
        annotation <- paste0(base,"EPIC/EPIC.hg19.manifest.rds")
    } else if(platform == "450K") {
        annotation <- paste0(base,"hm450/hm450.hg19.manifest.rds")
    } else {
        annotation <- paste0(base,"hm27/hm27.hg19.manifest.rds")
    }
    if(grepl("hg38", genome)) annotation <- gsub("hg19","hg38",annotation)
    # Download once and cache in the working directory
    if(!file.exists(basename(annotation))) {
        # Binary mode on Windows, otherwise the .rds file gets corrupted
        if(Sys.info()["sysname"] == "Windows") mode <- "wb" else mode <- "w"
        downloader::download(annotation, basename(annotation), mode = mode)
    }
    readRDS(basename(annotation))
}
makeSEfromDNAmethylation <- function(df, probeInfo=NULL){
    # Build a SummarizedExperiment from a DNA methylation table.
    #
    # df: data frame whose leading columns are probe metadata
    #     (Composite.Element.REF, Gene_Symbol, Chromosome, Genomic_Coordinate
    #     for the legacy layout) followed by one beta-value column per sample.
    # probeInfo: optional probe annotation data.frame; when given, its column
    #     count determines where the sample columns start in df.
    # Returns a SummarizedExperiment with probe GRanges as rowRanges and
    # per-sample clinical annotation (colDataPrepare) as colData.
    #
    # NOTE: dead code removed -- the original fetched a biomaRt gene table
    # (gene.location/gene.GR) here that was never used afterwards, adding a
    # slow network call for nothing.
    if(is.null(probeInfo)) {
        # Legacy layout: probe metadata in columns 1-4, samples from column 5.
        rowRanges <- GRanges(seqnames = paste0("chr", df$Chromosome),
                             ranges = IRanges(start = df$Genomic_Coordinate,
                                              end = df$Genomic_Coordinate),
                             probeID = df$Composite.Element.REF,
                             Gene_Symbol = df$Gene_Symbol)
        names(rowRanges) <- as.character(df$Composite.Element.REF)
        colData <- colDataPrepare(colnames(df)[5:ncol(df)])
        assay <- data.matrix(subset(df, select = c(5:ncol(df))))
    } else {
        rowRanges <- makeGRangesFromDataFrame(probeInfo, keep.extra.columns = TRUE)
        colData <- colDataPrepare(colnames(df)[(ncol(probeInfo) + 1):ncol(df)])
        assay <- data.matrix(subset(df, select = c((ncol(probeInfo) + 1):ncol(df))))
    }
    colnames(assay) <- rownames(colData)
    # NOTE(review): assumes df carries a Composite.Element.REF column in both
    # branches (probeInfo is expected to contain it) -- confirm with callers.
    rownames(assay) <- as.character(df$Composite.Element.REF)
    rse <- SummarizedExperiment(assays = assay, rowRanges = rowRanges, colData = colData)
    # BUG FIX: the function previously ended on the assignment above, so the
    # SummarizedExperiment was returned invisibly; return it explicitly.
    return(rse)
}
# Process raw Illumina IDAT files into a beta-value matrix using sesame.
# files: paths to *_Grn.idat / *_Red.idat files (one pair per sample).
# barcode: sample barcodes, matched to the IDAT basenames by file name.
# summarizedExperiment: if TRUE, wrap the betas in a SummarizedExperiment.
# platform: platform string used to choose the EPIC/450K/27K annotation.
# legacy: if TRUE annotate against hg19, otherwise hg38.
readIDATDNAmethylation <- function(files,
barcode,
summarizedExperiment,
platform,
legacy){
# sesame is an optional dependency, only required for IDAT processing.
if (!requireNamespace("sesame", quietly = TRUE)) {
stop("sesame package is needed for this function to work. Please install it.",
call. = FALSE)
}
# GDC downloads place each IDAT in its own folder; openSesame expects the
# Grn/Red pair side by side, so copy each file to its grandparent folder.
moved.files <- file.path(dirname(dirname(files)), basename(files))
# for each file move it to upper parent folder (best-effort: errors ignored)
plyr::a_ply(files, 1,function(x){
tryCatch(TCGAbiolinks:::move(x,file.path(dirname(dirname(x)), basename(x)),keep.copy = TRUE),error = function(e){})
})
# One sample prefix per Grn/Red pair.
samples <- unique(gsub("_Grn.idat|_Red.idat","",moved.files))
message("Processing IDATs with Sesame - http://bioconductor.org/packages/sesame/")
message("Running opensesame - applying quality masking and nondetection masking (threshold P-value 0.05)")
message("Please cite: doi: 10.1093/nar/gky691 and 10.1093/nar/gkt090")
betas <- openSesame(samples)
# Map file-name prefixes back to the user-facing barcodes.
barcode <- unique(data.frame("file" = gsub("_Grn.idat|_Red.idat","",basename(moved.files)), "barcode" = barcode))
colnames(betas) <- barcode$barcode[match(basename(samples),barcode$file)]
if(summarizedExperiment){
# Derive the annotation platform name from the GDC platform string.
met.platform <- "EPIC"
if(grepl("450",platform)) met.platform <- "450K"
if(grepl("27",platform)) met.platform <- "27K"
betas <- makeSEFromDNAMethylationMatrix(betas,genome = ifelse(legacy,"hg19","hg38"),met.platform = met.platform)
# Replace the minimal colData with full clinical annotation.
colData(betas) <- DataFrame(colDataPrepare(colnames(betas)))
}
return(betas)
}
# We will try to make this function easier to use this function in its own data
# In case it is not TCGA I should not consider that there is a barcode in the header
# Instead the user should be able to add the names to his data
# The only problem is that the data from the user will not have all the columns
# TODO: Improve this function to be more generic as possible
#' @importFrom GenomicRanges makeGRangesFromDataFrame
#' @importFrom tibble as_data_frame
readDNAmethylation <- function(files, cases, summarizedExperiment = TRUE, platform){
# Read downloaded GDC DNA methylation text files and merge them by probe id
# (Composite.Element.REF) into one wide table, optionally wrapped as a
# SummarizedExperiment.
# files: per-sample file paths; cases: matching sample barcodes.
# platform: "OMA00*" identifies the legacy 27K two-column format.
if (grepl("OMA00",platform)){
pb <- txtProgressBar(min = 0, max = length(files), style = 3)
for (i in seq_along(files)) {
# Legacy format: probe id + beta value; the first line is a header to skip.
data <- fread(files[i], header = TRUE, sep = "\t",
stringsAsFactors = FALSE,skip = 1,
na.strings="N/A",
colClasses=c("character", # Composite Element REF
"numeric")) # beta value
setnames(data,gsub(" ", "\\.", colnames(data)))
# Name the beta column after the sample barcode.
if(!missing(cases)) setnames(data,2,cases[i])
if (i == 1) {
df <- data
} else {
df <- merge(df, data, by = "Composite.Element.REF")
}
setTxtProgressBar(pb, i)
}
setDF(df)
rownames(df) <- df$Composite.Element.REF
df$Composite.Element.REF <- NULL
} else {
pb <- txtProgressBar(min = 0, max = length(files), style = 3)
# hg38 files have no extra header line; legacy (hg19) files have one.
skip <- ifelse(all(grepl("hg38",files)), 0,1)
colClasses <- NULL
if(!all(grepl("hg38",files))) colClasses <- c("character", # Composite Element REF
"numeric", # beta value
"character", # Gene symbol
"character", # Chromosome
"integer")
for (i in seq_along(files)) {
data <- fread(files[i], header = TRUE, sep = "\t",
stringsAsFactors = FALSE,skip = skip, colClasses = colClasses)
setnames(data,gsub(" ", "\\.", colnames(data)))
if(!missing(cases)) setnames(data,2,cases[i])
if (i == 1) {
# Move the beta column to the end so probe metadata stays in front.
setcolorder(data,c(1, 3:ncol(data), 2))
df <- data
} else {
# Subsequent files contribute only probe id + beta value.
data <- subset(data,select = c(1,2))
df <- merge(df, data, by = "Composite.Element.REF")
}
setTxtProgressBar(pb, i)
}
if (summarizedExperiment) {
if(skip == 0) {
# hg38: the probe annotation lives in the non-barcode (non-TCGA) columns.
df <- makeSEfromDNAmethylation(df, probeInfo = as_data_frame(df)[,grep("TCGA",colnames(df),invert = TRUE)])
} else {
df <- makeSEfromDNAmethylation(df)
}
} else {
setDF(df)
rownames(df) <- df$Composite.Element.REF
df$Composite.Element.REF <- NULL
}
}
return(df)
}
# Build a per-sample annotation DataFrame for TARGET barcodes by decoding
# the fixed-position barcode fields (tissue code, tumor code, nucleic acid
# code) into human-readable descriptions.
# barcode: character vector of TARGET sample barcodes.
# Returns a DataFrame with one row per input barcode, in input order.
colDataPrepareTARGET <- function(barcode){
message("Adding description to TARGET samples")
# Tissue-type codes (barcode positions 18-19) and their definitions.
tissue.code <- c('01','02','03','04','05','06','07','08','09','10','11',
'12','13','14','15','16','17','20','40','41','42','50','60','61','99')
definition <- c("Primary solid Tumor", # 01
"Recurrent Solid Tumor", # 02
"Primary Blood Derived Cancer - Peripheral Blood", # 03
"Recurrent Blood Derived Cancer - Bone Marrow", # 04
"Additional - New Primary", # 05
"Metastatic", # 06
"Additional Metastatic", # 07
"Tissue disease-specific post-adjuvant therapy", # 08
"Primary Blood Derived Cancer - Bone Marrow", # 09
"Blood Derived Normal", # 10
"Solid Tissue Normal", # 11
"Buccal Cell Normal", # 12
"EBV Immortalized Normal", # 13
"Bone Marrow Normal", # 14
"Fibroblasts from Bone Marrow Normal", # 15
"Mononuclear Cells from Bone Marrow Normal", # 16
"Lymphatic Tissue Normal (including centroblasts)", # 17
"Control Analyte", # 20
"Recurrent Blood Derived Cancer - Peripheral Blood", # 40
"Blood Derived Cancer- Bone Marrow, Post-treatment", # 41
"Blood Derived Cancer- Peripheral Blood, Post-treatment", # 42
"Cell line from patient tumor", # 50
"Xenograft from patient not grown as intermediate on plastic tissue culture dish", # 60
"Xenograft grown in mice from established cell lines", #61
"Granulocytes after a Ficoll separation") # 99
aux <- DataFrame(tissue.code = tissue.code,definition)
# in case multiple equal barcode
# NOTE(review): `samples` is computed but never used below -- candidate for removal.
regex <- paste0("[:alnum:]{5}-[:alnum:]{2}-[:alnum:]{6}",
"-[:alnum:]{3}-[:alnum:]{3}")
samples <- str_match(barcode,regex)[,1]
# Decode the fixed-position fields of the TARGET barcode.
ret <- DataFrame(barcode = barcode,
sample = substr(barcode, 1, 20),
patient = substr(barcode, 1, 16),
tumor.code = substr(barcode, 8, 9),
case.unique.id = substr(barcode, 11, 16),
tissue.code = substr(barcode, 18, 19),
nucleic.acid.code = substr(barcode, 24, 24))
ret <- merge(ret,aux, by = "tissue.code", sort = FALSE)
# Tumor-type codes (barcode positions 8-9) and their definitions.
tumor.code <- c('00','01','02','03','04','10','15','20','21','30','40',
'41','50','51','52','60','61','62','63','64','65','70','71','80','81')
tumor.definition <- c("Non-cancerous tissue", # 00
"Diffuse Large B-Cell Lymphoma (DLBCL)", # 01
"Lung Cancer (all types)", # 02
"Cervical Cancer (all types)", # 03
"Anal Cancer (all types)", # 04
"Acute lymphoblastic leukemia (ALL)", # 10
"Mixed phenotype acute leukemia (MPAL)", # 15
"Acute myeloid leukemia (AML)", # 20
"Induction Failure AML (AML-IF)", # 21
"Neuroblastoma (NBL)", # 30
"Osteosarcoma (OS)", # 40
"Ewing sarcoma", # 41
"Wilms tumor (WT)", # 50
"Clear cell sarcoma of the kidney (CCSK)", # 51
"Rhabdoid tumor (kidney) (RT)", # 52
"CNS, ependymoma", # 60
"CNS, glioblastoma (GBM)", # 61
"CNS, rhabdoid tumor", # 62
"CNS, low grade glioma (LGG)", # 63
"CNS, medulloblastoma", # 64
"CNS, other", # 65
"NHL, anaplastic large cell lymphoma", # 70
"NHL, Burkitt lymphoma (BL)", # 71
"Rhabdomyosarcoma", #80
"Soft tissue sarcoma, non-rhabdomyosarcoma") # 81
aux <- DataFrame(tumor.code = tumor.code,tumor.definition)
ret <- merge(ret,aux, by = "tumor.code", sort = FALSE)
# Nucleic-acid codes (barcode position 24) and their descriptions.
nucleic.acid.code <- c('D','E','W','X','Y','R','S')
nucleic.acid.description <- c("DNA, unamplified, from the first isolation of a tissue",
"DNA, unamplified, from the first isolation of a tissue embedded in FFPE",
"DNA, whole genome amplified by Qiagen (one independent reaction)",
"DNA, whole genome amplified by Qiagen (a second, separate independent reaction)",
"DNA, whole genome amplified by Qiagen (pool of 'W' and 'X' aliquots)",
"RNA, from the first isolation of a tissue",
"RNA, from the first isolation of a tissue embedded in FFPE")
aux <- DataFrame(nucleic.acid.code = nucleic.acid.code,nucleic.acid.description)
ret <- merge(ret,aux, by = "nucleic.acid.code", sort = FALSE)
# merge() reorders rows; restore input order and build unique rownames.
ret <- ret[match(barcode,ret$barcode),]
rownames(ret) <- gsub("\\.","-",make.names(ret$barcode,unique=TRUE))
# NOTE(review): there is no `code` column in the TARGET table (the columns
# are tissue.code/tumor.code/...), so this line appears to be a no-op.
ret$code <- NULL
return(DataFrame(ret))
}
# Decode TCGA barcodes into per-sample annotation: patient id, sample id,
# sample-type code plus its short letter code and human-readable definition.
# barcode: character vector of TCGA sample barcodes.
# Returns a DataFrame with one row per input barcode, in input order.
colDataPrepareTCGA <- function(barcode){
# For the moment this will work only for TCGA Data
# We should search what TARGET data means
# Sample-type codes (barcode positions 14-15) with short codes and definitions.
code <- c('01','02','03','04','05','06','07','08','09','10','11',
'12','13','14','20','40','50','60','61')
shortLetterCode <- c("TP","TR","TB","TRBM","TAP","TM","TAM","THOC",
"TBM","NB","NT","NBC","NEBV","NBM","CELLC","TRB",
"CELL","XP","XCL")
definition <- c("Primary solid Tumor", # 01
"Recurrent Solid Tumor", # 02
"Primary Blood Derived Cancer - Peripheral Blood", # 03
"Recurrent Blood Derived Cancer - Bone Marrow", # 04
"Additional - New Primary", # 05
"Metastatic", # 06
"Additional Metastatic", # 07
"Human Tumor Original Cells", # 08
"Primary Blood Derived Cancer - Bone Marrow", # 09
"Blood Derived Normal", # 10
"Solid Tissue Normal", # 11
"Buccal Cell Normal", # 12
"EBV Immortalized Normal", # 13
"Bone Marrow Normal", # 14
"Control Analyte", # 20
"Recurrent Blood Derived Cancer - Peripheral Blood", # 40
"Cell Lines", # 50
"Primary Xenograft Tissue", # 60
"Cell Line Derived Xenograft Tissue") # 61
aux <- DataFrame(code = code,shortLetterCode,definition)
# in case multiple equal barcode
# NOTE(review): `samples` is computed but never used below -- candidate for removal.
regex <- paste0("[:alnum:]{4}-[:alnum:]{2}-[:alnum:]{4}",
"-[:alnum:]{3}-[:alnum:]{3}-[:alnum:]{4}-[:alnum:]{2}")
samples <- str_match(barcode,regex)[,1]
# Fixed-position barcode fields: patient (1-12), sample (1-16), type code (14-15).
ret <- DataFrame(barcode = barcode,
patient = substr(barcode, 1, 12),
sample = substr(barcode, 1, 16),
code = substr(barcode, 14, 15))
ret <- merge(ret,aux, by = "code", sort = FALSE)
# merge() reorders rows; restore input order and build unique rownames.
ret <- ret[match(barcode,ret$barcode),]
rownames(ret) <- gsub("\\.","-",make.names(ret$barcode,unique=TRUE))
ret$code <- NULL
return(DataFrame(ret))
}
#' @title Create samples information matrix for GDC samples
#' @description Create samples information matrix for GDC samples add subtype information
#' @param barcode TCGA or TARGET barcode
#' @examples
#' \dontrun{
#' query.met <- GDCquery(project = c("TCGA-GBM","TCGA-LGG"),
#' legacy = TRUE,
#' data.category = "DNA methylation",
#' platform = c("Illumina Human Methylation 450",
#' "Illumina Human Methylation 27"))
#' colDataPrepare(getResults(query.met)$cases)
#' }
#' @export
colDataPrepare <- function(barcode){
# For the moment this will work only for TCGA Data
# We should search what TARGET data means
message("Starting to add information to samples")
# Decode the barcode fields with the project-specific helper.
if(all(grepl("TARGET",barcode))) ret <- colDataPrepareTARGET(barcode)
if(all(grepl("TCGA",barcode))) ret <- colDataPrepareTCGA(barcode)
# NOTE(review): if the barcodes are neither all-TARGET nor all-TCGA, `ret`
# is never defined and the code below errors -- confirm callers guarantee this.
message(" => Add clinical information to samples")
# There is a limitation on the size of the string, so this step will be splited in cases of 100
# Fetch clinical info from the GDC API in batches (long URLs fail); on any
# error, retry the whole fetch with a much smaller batch size.
patient.info <- NULL
patient.info <- tryCatch({
step <- 20 # more than 50 gives a bug =/
for(i in 0:(ceiling(length(ret$patient)/step) - 1)){
start <- 1 + step * i
end <- ifelse(((i + 1) * step) > length(ret$patient), length(ret$patient),((i + 1) * step))
if(is.null(patient.info)) {
patient.info <- getBarcodeInfo(ret$patient[start:end])
} else {
patient.info <- rbind(patient.info,getBarcodeInfo(ret$patient[start:end]))
}
}
patient.info
}, error = function(e) {
step <- 2 # more than 50 gives a bug =/
for(i in 0:(ceiling(length(ret$patient)/step) - 1)){
start <- 1 + step * i
end <- ifelse(((i + 1) * step) > length(ret$patient), length(ret$patient),((i + 1) * step))
if(is.null(patient.info)) {
patient.info <- getBarcodeInfo(ret$patient[start:end])
} else {
patient.info <- rbind(patient.info,getBarcodeInfo(ret$patient[start:end]))
}
}
patient.info
})
ret <- merge(ret,patient.info, by.x = "patient", by.y = "submitter_id", all.x = TRUE )
# Add FFPE information to sample
ret <- addFFPE(ret)
# Derive project_id from the disease type when the API did not return it.
if(!"project_id" %in% colnames(ret)) {
aux <- getGDCprojects()[,5:6]
aux <- aux[aux$disease_type == unique(ret$disease_type),2]
ret$project_id <- as.character(aux)
}
# There is no subtype info for target, return as it is
if(all(grepl("TARGET",barcode))) {
# Put data in the right order
ret <- ret[match(barcode,ret$barcode),]
rownames(ret) <- ret$barcode
return(ret)
}
# remove letter from 01A 01B etc
ret$sample.aux <- substr(ret$sample,1,15)
# na.omit should not be here, exceptional case
out <- NULL
for(proj in na.omit(unique(ret$project_id))){
if(grepl("TCGA",proj,ignore.case = TRUE)) {
message(" => Adding subtype information to samples")
tumor <- gsub("TCGA-","",proj)
# Tumor types with a curated subtype table (see TCGAquery_subtype).
available <- c("ACC",
"BRCA",
"BLCA",
"CESC",
"CHOL",
"COAD",
"ESCA",
"GBM",
"HNSC",
"KICH",
"KIRC",
"KIRP",
"LGG",
"LUAD",
"LUSC",
"PAAD",
"PCPG",
"PRAD",
"READ",
"SKCM",
"SARC",
"STAD",
"THCA",
"UCEC",
"UCS",
"UVM")
if (grepl(paste(c(available,"all"),collapse = "|"),tumor,ignore.case = TRUE)) {
subtype <- TCGAquery_subtype(tumor)
# Prefix subtype columns so they do not clash with clinical columns.
colnames(subtype) <- paste0("subtype_", colnames(subtype))
if(all(str_length(subtype$subtype_patient) == 12)){
# Subtype information were to primary tumor in priority
subtype$sample.aux <- paste0(subtype$subtype_patient,"-01")
}
ret.aux <- ret[ret$sample.aux %in% subtype$sample.aux,]
ret.aux <- merge(ret.aux,subtype, by = "sample.aux", all.x = TRUE)
out <- rbind.fill(as.data.frame(out),as.data.frame(ret.aux))
}
}
}
# We need to put together the samples with subtypes with samples without subytpes
ret.aux <- ret[!ret$sample %in% out$sample,]
ret <- rbind.fill(as.data.frame(out),as.data.frame(ret.aux))
ret$sample.aux <- NULL
# Add purity information from http://www.nature.com/articles/ncomms9971
# purity <- getPurityinfo()
# ret <- merge(ret, purity, by = "sample", all.x = TRUE, sort = FALSE)
# Put data in the right order
ret <- ret[match(barcode,ret$barcode),]
rownames(ret) <- ret$barcode
return(ret)
}
#' @title Get hg19 or hg38 information from biomaRt
#' @description Get hg19 or hg38 information from biomaRt
#' @param genome hg38 or hg19
#' @param as.granges Output as GRanges or data.frame
#' @importFrom biomaRt getBM useMart listDatasets useEnsembl
#' @export
get.GRCh.bioMart <- function(genome = "hg19", as.granges = FALSE) {
    # Query Ensembl biomaRt for gene coordinates/identifiers on chr1-22, X, Y.
    # Results are cached on disk (one .rda per Ensembl release description).
    # genome: "hg19" (GRCh37 archive host) or anything else for the current build.
    # as.granges: return a GRanges instead of a data.frame.
    tries <- 0L
    msg <- character()
    # Retry the whole lookup up to 3 times (biomaRt servers are flaky).
    while (tries < 3L) {
        gene.location <- tryCatch({
            host <- ifelse(genome == "hg19", "grch37.ensembl.org","www.ensembl.org")
            message("Accessing ", host, " to get gene information")
            ensembl <- tryCatch({
                useEnsembl("ensembl", dataset = "hsapiens_gene_ensembl", host = host)
            }, error = function(e) {
                message(e)
                # Primary host unreachable: try the regional mirrors before giving up.
                for(mirror in c("asia","useast","uswest")){
                    x <- useEnsembl("ensembl",
                                    dataset = "hsapiens_gene_ensembl",
                                    mirror = mirror,
                                    host = host)
                    # inherits() is the robust (S4-aware) class test;
                    # the original compared class(x) == "Mart".
                    if(inherits(x, "Mart")) {
                        return(x)
                    }
                }
                return(NULL)
            })
            # BUG FIX: the original tested is.null(host), but host is always a
            # non-NULL string (ifelse above); the connection object is what can
            # be NULL when the host and all mirrors fail.
            if(is.null(ensembl)) {
                message("Problems accessing ensembl database")
                return(NULL)
            }
            attributes <- c("chromosome_name",
                            "start_position",
                            "end_position", "strand",
                            "ensembl_gene_id",
                            "entrezgene",
                            "external_gene_name")
            db.datasets <- listDatasets(ensembl)
            description <- db.datasets[db.datasets$dataset=="hsapiens_gene_ensembl",]$description
            message(paste0("Downloading genome information (try:", tries,") Using: ", description))
            # Cache file name derived from the Ensembl release description.
            filename <- paste0(gsub("[[:punct:]]| ", "_",description),".rda")
            if(!file.exists(filename)) {
                chrom <- c(1:22, "X", "Y")
                gene.location <- getBM(attributes = attributes,
                                       filters = c("chromosome_name"),
                                       values = list(chrom), mart = ensembl)
                save(gene.location, file = filename)
            } else {
                message("Loading from disk")
                gene.location <- get(load(filename))
            }
            gene.location
        }, error = function(e) {
            msg <<- conditionMessage(e)
            tries <<- tries + 1L
        })
        if(!is.null(gene.location)) break
    }
    if (tries == 3L) stop("failed to get URL after 3 tries:", "\n error: ", msg)
    if(as.granges) {
        # biomaRt encodes strand as 1/-1; GRanges expects "+"/"-".
        gene.location$strand[gene.location$strand == 1] <- "+"
        gene.location$strand[gene.location$strand == -1] <- "-"
        gene.location$chromosome_name <- paste0("chr",gene.location$chromosome_name)
        gene.location <- makeGRangesFromDataFrame(gene.location, seqnames.field = "chromosome_name",
                                                  start.field = "start_position",
                                                  end.field = "end_position",
                                                  keep.extra.columns = TRUE) # considering the whole gene no their promoters
    }
    return(gene.location)
}
readProteinExpression <- function(files,cases) {
  # Read per-sample protein expression (RPPA) tables and full-join them on
  # the "Composite Element REF" probe column, yielding one column per sample.
  progress <- txtProgressBar(min = 0, max = length(files), style = 3)
  for (idx in seq_along(files)) {
    current <- read_tsv(file = files[idx], col_names = TRUE, skip = 1, col_types = c("cn"))
    # When case barcodes are supplied, use them as the value-column names.
    if (!missing(cases)) colnames(current)[2] <- cases[idx]
    if (idx == 1) {
      merged <- current
    } else {
      merged <- merge(merged, current, all = TRUE, by = "Composite Element REF")
    }
    setTxtProgressBar(progress, idx)
  }
  close(progress)
  return(merged)
}
# Build a SummarizedExperiment from an HTSeq-style expression table.
# data: table whose first column (X1) is the versioned ensembl gene id and
#       whose remaining columns hold one sample each.
# cases: sample barcodes passed to colDataPrepare.
# assay.list: name(s) for the assay(s) (e.g. the workflow type).
makeSEfromTranscriptomeProfiling <- function(data, cases, assay.list){
# How many cases do we have?
# We wil consider col 1 is the ensemble gene id, other ones are data
size <- ncol(data)
# Prepare Patient table
colData <- colDataPrepare(cases)
gene.location <- get.GRCh.bioMart("hg38")
# Strip the version suffix from the gene id (ENSGxxx.N -> ENSGxxx).
aux <- strsplit(data$X1,"\\.")
data$ensembl_gene_id <- as.character(unlist(lapply(aux,function(x) x[1])))
# Drop summary rows (e.g. "__no_feature") that are not ENSG identifiers.
data <- subset(data, grepl("ENSG", data$ensembl_gene_id))
found.genes <- table(data$ensembl_gene_id %in% gene.location$ensembl_gene_id)
if("FALSE" %in% names(found.genes))
message(paste0("From the ", nrow(data), " genes we couldn't map ", found.genes[["FALSE"]]))
data <- merge(data, gene.location, by="ensembl_gene_id")
# Prepare data table
# Remove the version from the ensembl gene id
# After the merge, ensembl_gene_id is column 1 and X1 is column 2, so the
# sample columns are 3:(size+1). Note the precedence here: `2:size+1`
# parses as `(2:size) + 1`, i.e. exactly 3:(size+1) -- correct but fragile.
assays <- list(data.matrix(data[,2:size+1]))
names(assays) <- assay.list
# SummarizedExperiment requires bare matrices without dimnames here.
assays <- lapply(assays, function(x){
colnames(x) <- NULL
rownames(x) <- NULL
return(x)
})
# Prepare rowRanges
rowRanges <- GRanges(seqnames = paste0("chr", data$chromosome_name),
ranges = IRanges(start = data$start_position,
end = data$end_position),
strand = data$strand,
ensembl_gene_id = data$ensembl_gene_id,
external_gene_name = data$external_gene_name,
original_ensembl_gene_id = data$X1)
names(rowRanges) <- as.character(data$ensembl_gene_id)
rse <- SummarizedExperiment(assays=assays,
rowRanges=rowRanges,
colData=colData)
return(rse)
}
# Read downloaded transcriptome-profiling files and combine them into a
# single table (or SummarizedExperiment for HTSeq gene quantification).
# files: per-sample file paths; cases: matching sample barcodes.
# data.type / workflow.type: GDC descriptors that select the parser branch.
readTranscriptomeProfiling <- function(files, data.type, workflow.type, cases,summarizedExperiment) {
if(grepl("Gene Expression Quantification", data.type, ignore.case = TRUE)){
# Status working for:
# - htseq
# - FPKM
# - FPKM-UQ
if(grepl("HTSeq",workflow.type)){
pb <- txtProgressBar(min = 0, max = length(files), style = 3)
for (i in seq_along(files)) {
# Two columns, no header: gene id + count/FPKM value.
data <- read_tsv(file = files[i],
col_names = FALSE,
col_types = cols(
X1 = col_character(),
X2 = col_double()
))
# Name the value column after the sample barcode.
if(!missing(cases)) colnames(data)[2] <- cases[i]
if(i == 1) df <- data
if(i != 1) df <- merge(df, data, by=colnames(df)[1],all = TRUE)
setTxtProgressBar(pb, i)
}
close(pb)
if(summarizedExperiment) df <- makeSEfromTranscriptomeProfiling(df,cases,workflow.type)
}
} else if(grepl("miRNA", workflow.type, ignore.case = TRUE) & grepl("miRNA", data.type, ignore.case = TRUE)) {
# miRNA quantification: several value columns per file; suffix each with
# the sample barcode to keep them distinguishable after the merge.
pb <- txtProgressBar(min = 0, max = length(files), style = 3)
for (i in seq_along(files)) {
data <- read_tsv(file = files[i], col_names = TRUE,col_types = "cidc")
if(!missing(cases))
colnames(data)[2:ncol(data)] <- paste0(colnames(data)[2:ncol(data)],"_",cases[i])
if(i == 1) df <- data
if(i != 1) df <- merge(df, data, by=colnames(df)[1],all = TRUE)
setTxtProgressBar(pb, i)
}
close(pb)
} else if(grepl("Isoform Expression Quantification", data.type, ignore.case = TRUE)){
# Isoform files are stacked row-wise; tag rows with the sample barcode
# (or the file index when no cases were supplied).
pb <- txtProgressBar(min = 0, max = length(files), style = 3)
for (i in seq_along(files)) {
data <- read_tsv(file = files[i], col_names = TRUE, col_types = c("ccidcc"))
if(!missing(cases)) data$barcode <- cases[i] else data$file <- i
if(i == 1) df <- data
if(i != 1) df <- rbind(df,data)
setTxtProgressBar(pb, i)
}
close(pb)
}
return(df)
}
#' @importFrom purrr reduce
# Read GISTIC copy-number output files, translate aliquot-UUID column names
# into sample barcodes (via the GDC API) and full-join all files on the
# gene identifier columns.
readGISTIC <- function(files, cases){
message("Reading GISTIC file")
gistic.df <- NULL
gistic.list <- plyr::alply(files,1,.fun = function(file) {
message("Reading file: ", file)
data <- read_tsv(file = file, col_names = TRUE, progress = TRUE,col_types = readr::cols())
patient <- substr(unlist(str_split(cases,",")),1,12)
# Query aliquot-id -> barcode mappings in batches (long URLs fail);
# on error, retry the whole fetch with a much smaller batch size.
info <- NULL
info <- tryCatch({
step <- 20 # more than 50 gives a bug =/
for(i in 0:(ceiling(length(patient)/step) - 1)){
start <- 1 + step * i
end <- ifelse(((i + 1) * step) > length(patient), length(patient),((i + 1) * step))
if(is.null(info)) {
info <- getAliquot_ids(patient[start:end])
} else {
info <- rbind(info, getAliquot_ids(patient[start:end]))
}
}
info
}, error = function(e) {
step <- 2
for(i in 0:(ceiling(length(patient)/step) - 1)){
start <- 1 + step * i
end <- ifelse(((i + 1) * step) > length(patient), length(patient),((i + 1) * step))
if(is.null(info)) {
info <- getAliquot_ids(patient[start:end])
} else {
info <- rbind(info, getAliquot_ids(patient[start:end]))
}
}
info
})
# Rename aliquot-id columns to their barcodes where a mapping was found.
barcode <- as.character(info$barcode)[match(colnames(data),as.character(info$aliquot_id))]
idx <- which(!is.na(barcode))
colnames(data)[idx] <- barcode[idx]
return(data)
})
gistic.df <- gistic.list %>% purrr::reduce(dplyr::full_join, by = c("Gene Symbol","Gene ID","Cytoband"))
return(gistic.df)
}
# Reads Copy Number Variation files to a data frame, basically it will rbind it
readCopyNumberVariation <- function(files, cases){
  # Stack per-sample copy-number segment files row-wise into one data frame,
  # tagging each row with its sample barcode when `cases` is provided.
  message("Reading copy number variation files")
  progress <- txtProgressBar(min = 0, max = length(files), style = 3)
  for (idx in seq_along(files)) {
    segments <- read_tsv(file = files[idx], col_names = TRUE, col_types = "ccnnnd")
    if (!missing(cases)) segments$Sample <- cases[idx]
    if (idx == 1) {
      combined <- segments
    } else {
      combined <- rbind(combined, segments)
    }
    setTxtProgressBar(progress, idx)
  }
  close(progress)
  return(combined)
}
# getBarcodeInfo(c("TCGA-A6-6650-01B"))
addFFPE <- function(df) {
    # Annotate each sample row of df with its FFPE status queried from the
    # GDC API. Queries are batched because the API URL length limits how many
    # patients can be requested at once.
    # df: DataFrame with at least `barcode`, `patient` and `sample` columns.
    # Returns df with the `is_ffpe` column merged in, rows in original order.
    message("Add FFPE information. More information at: \n=> https://cancergenome.nih.gov/cancersselected/biospeccriteria \n=> http://gdac.broadinstitute.org/runs/sampleReports/latest/FPPP_FFPE_Cases.html")
    barcode <- df$barcode
    # Fetch FFPE flags for all patients, `step` patients per API request.
    # (Factored out of the original copy-pasted step-20/step-2 loops.)
    fetch.ffpe <- function(step) {
        info <- NULL
        for(i in 0:(ceiling(length(df$patient)/step) - 1)){
            start <- 1 + step * i
            end <- ifelse(((i + 1) * step) > length(df$patient), length(df$patient),((i + 1) * step))
            if(is.null(info)) {
                info <- getFFPE(df$patient[start:end])
            } else {
                info <- rbind(info, getFFPE(df$patient[start:end]))
            }
        }
        info
    }
    # Batches of 20 usually work (more than 50 triggers a server-side bug);
    # fall back to very small batches if the larger request fails.
    ffpe.info <- tryCatch(fetch.ffpe(20), error = function(e) fetch.ffpe(2))
    df <- merge(df, ffpe.info, by.x = "sample", by.y = "submitter_id")
    # merge() reorders rows; restore the original barcode order.
    df <- df[match(barcode, df$barcode),]
    return(df)
}
# getFFPE("TCGA-A6-6650")
#' @importFrom plyr rbind.fill
#' @importFrom httr content
# Query the GDC /cases endpoint for the is_ffpe flag of every sample
# belonging to the given patient barcodes.
# patient: character vector of patient barcodes (e.g. "TCGA-A6-6650").
# Returns a data.frame with columns submitter_id (sample id) and is_ffpe.
getFFPE <- function(patient){
baseURL <- "https://api.gdc.cancer.gov/cases/?"
options.pretty <- "pretty=true"
options.expand <- "expand=samples"
option.size <- paste0("size=",length(patient))
# JSON filter restricting the query to the requested submitter ids.
options.filter <- paste0("filters=",
URLencode('{"op":"and","content":[{"op":"in","content":{"field":"cases.submitter_id","value":['),
paste0('"',paste(patient,collapse = '","')),
URLencode('"]}}]}'))
url <- paste0(baseURL,paste(options.pretty,options.expand, option.size, options.filter, sep = "&"))
# Retry once through httr::GET if the direct JSON fetch fails.
json <- tryCatch(
getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
error = function(e) {
message(paste("Error: ", e, sep = " "))
message("We will retry to access GDC again! URL:")
message(url)
fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
}
)
results <- json$data$hits
# One row per sample: flatten the nested per-case sample lists.
results <- rbind.fill(results$samples)[,c("submitter_id","is_ffpe")]
return(results)
}
# Query the GDC /cases endpoint for the aliquot UUID <-> barcode pairs of
# the given patient barcodes.
# barcode: character vector of patient barcodes.
# Returns a data.frame with columns aliquot_id and barcode.
getAliquot_ids <- function(barcode){
baseURL <- "https://api.gdc.cancer.gov/cases/?"
options.fields <- "fields=samples.portions.analytes.aliquots.aliquot_id,samples.portions.analytes.aliquots.submitter_id"
options.pretty <- "pretty=true"
option.size <- paste0("size=",length(barcode))
#message(paste(barcode,collapse = '","'))
#message(paste0('"',paste(barcode,collapse = '","')))
# JSON filter restricting the query to the requested submitter ids.
options.filter <- paste0("filters=",
URLencode('{"op":"and","content":[{"op":"in","content":{"field":"cases.submitter_id","value":['),
paste0('"',paste(barcode,collapse = '","')),
URLencode('"]}}]}'))
#message(paste0(baseURL,paste(options.pretty,options.expand, option.size, options.filter, sep = "&")))
url <- paste0(baseURL,paste(options.pretty,options.fields, option.size, options.filter, sep = "&"))
# Retry once through httr::GET if the direct JSON fetch fails.
json <- tryCatch(
getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
error = function(e) {
message(paste("Error: ", e, sep = " "))
message("We will retry to access GDC again! URL:")
message(url)
fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
}
)
# The flattened result interleaves aliquot UUIDs and TCGA barcodes;
# separate them by the "TCGA" prefix.
results <- unlist(json$data$hits$samples)
results.barcode <- grep("TCGA",results,value = TRUE)
results.aliquot <- grep("TCGA",results,value = TRUE,invert = TRUE)
df <- data.frame(results.aliquot,results.barcode)
colnames(df) <- c("aliquot_id","barcode")
return(df)
}
# getBarcodeInfo(c("TCGA-A6-6650"))
getBarcodeInfo <- function(barcode) {
    # Fetch clinical annotation (diagnoses, exposures, demographic,
    # treatments, project) for a set of patient barcodes from the GDC
    # /cases endpoint and flatten it into one row per barcode.
    # Returns a data.frame keyed by submitter_id / bcr_patient_barcode.
    baseURL <- "https://api.gdc.cancer.gov/cases/?"
    options.pretty <- "pretty=true"
    options.expand <- "expand=project,diagnoses,diagnoses.treatments,annotations,family_histories,demographic,exposures"
    option.size <- paste0("size=",length(barcode))
    # JSON filter restricting the query to the requested submitter ids.
    options.filter <- paste0("filters=",
                             URLencode('{"op":"and","content":[{"op":"in","content":{"field":"cases.submitter_id","value":['),
                             paste0('"',paste(barcode,collapse = '","')),
                             URLencode('"]}}]}'))
    url <- paste0(baseURL,paste(options.pretty,options.expand, option.size, options.filter, sep = "&"))
    # Retry once through httr::GET if the direct JSON fetch fails.
    json <- tryCatch(
        getURL(url,fromJSON,timeout(600),simplifyDataFrame = TRUE),
        error = function(e) {
            message(paste("Error: ", e, sep = " "))
            message("We will retry to access GDC again! URL:")
            message(url)
            fromJSON(content(getURL(url,GET,timeout(600)), as = "text", encoding = "UTF-8"), simplifyDataFrame = TRUE)
        }
    )
    results <- json$data$hits
    submitter_id <- results$submitter_id
    # We dont have the same cols for TCGA and TARGET so we need to check them
    if(!is.null(results$diagnoses)) {
        diagnoses <- rbindlist(results$diagnoses, fill = TRUE)
        if(any(grepl("submitter_id", colnames(diagnoses)))) {
            diagnoses$submitter_id <- gsub("_diagnosis","", diagnoses$submitter_id)
        } else {
            diagnoses$submitter_id <- submitter_id
        }
        # this is required since the sample might not have a diagnosis
        df <- merge(data.frame("submitter_id" = submitter_id),
                    diagnoses,
                    by = "submitter_id",
                    all.x = TRUE,
                    sort = FALSE)
    } else {
        df <- as.data.frame(submitter_id)
    }
    # BUG FIX: was `if(!is.null(results$exposures) > 0)`, which compares a
    # logical with 0 and only worked by accident (TRUE > 0 is TRUE).
    if(!is.null(results$exposures)) {
        exposures <- rbindlist(results$exposures, fill = TRUE)
        if(any(grepl("submitter_id", colnames(exposures)))) {
            exposures$submitter_id <- gsub("_exposure","", exposures$submitter_id)
        } else {
            exposures$submitter_id <- submitter_id
        }
        df <- merge(df,exposures, by = "submitter_id", all = TRUE,sort = FALSE)
    }
    if(!is.null(results$demographic)) {
        demographic <- results$demographic
        if(any(grepl("submitter_id", colnames(demographic)))) {
            demographic$submitter_id <- gsub("_demographic","", results$demographic$submitter_id)
        } else {
            demographic$submitter_id <-submitter_id
        }
        demographic <- demographic[!is.na(demographic$submitter_id),]
        df <- merge(df,demographic, by="submitter_id", all = TRUE,sort = FALSE)
    }
    treatments <- rbindlist(results$treatments,fill = TRUE)
    if (nrow(treatments) > 0) {
        # BUG FIX: was `df[,treatments:=NULL]` (data.table syntax), which
        # errors when df is a plain data.frame (the merges above return
        # data.frames). Drop the nested list-column, if present, in a way
        # that works for both data.frame and data.table.
        df$treatments <- NULL
        if (any(grepl("submitter_id", colnames(treatments)))) {
            treatments$submitter_id <- gsub("_treatment","", treatments$submitter_id)
        } else {
            treatments$submitter_id <-submitter_id
        }
        df <- merge(df,treatments, by="submitter_id", all = TRUE,sort = FALSE)
    }
    df$bcr_patient_barcode <- df$submitter_id
    projects.info <- results$project
    projects.info <- results$project[,grep("state",colnames(projects.info),invert = TRUE)]
    df <- merge(df,
                cbind("submitter_id" = submitter_id, projects.info),
                sort = FALSE, by = "submitter_id")
    # Adding in the same order
    df <- df[match(barcode,df$submitter_id),]
    # This line should not exists, but some patients does not have clinical data
    # case: TCGA-R8-A6YH"
    # this has been reported to GDC, waiting answers
    # So we will remove this NA cases
    df <- df[!is.na(df$submitter_id),]
    return(df)
}
#' @title Prepare CEL files into an AffyBatch.
#' @description Prepare CEL files into an AffyBatch.
#' @param ClinData write
#' @param PathFolder write
#' @param TabCel write
#' @examples
#' \dontrun{
#' to add example
#' }
#' @export
#' @return Normalizd Expression data from Affy eSets
TCGAprepare_Affy <- function(ClinData, PathFolder, TabCel){
    # Read the Affymetrix CEL files listed in TabCel$samples, RMA-normalize
    # them and return the normalized expression matrix (probes x samples).
    # ClinData, PathFolder: currently unused by this implementation; kept to
    # preserve the exported signature.
    if (!requireNamespace("affy", quietly = TRUE)) {
        stop("affy package is needed for this function to work. Please install it.",
             call. = FALSE)
    }
    if (!requireNamespace("Biobase", quietly = TRUE)) {
        # BUG FIX: this message previously named the affy package.
        stop("Biobase package is needed for this function to work. Please install it.",
             call. = FALSE)
    }
    affy_batch <- affy::ReadAffy(filenames = as.character(paste(TabCel$samples, ".CEL", sep = "")))
    eset <- affy::rma(affy_batch)
    mat <- Biobase::exprs(eset)
    return(mat)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/overlapping.R
\name{overlapping}
\alias{overlapping}
\alias{overlapping.default}
\alias{overlapping.formula}
\title{Measures of overlapping}
\usage{
overlapping(...)
\method{overlapping}{default}(x, y, measures = "all",
summary = c("mean", "sd"), ...)
\method{overlapping}{formula}(formula, data, measures = "all",
summary = c("mean", "sd"), ...)
}
\arguments{
\item{...}{Not used.}
\item{x}{A data.frame containing only the input attributes.}
\item{y}{A factor response vector with one label for each row/component of x.}
\item{measures}{A list of measure names, or \code{"all"} to include all of them.}
\item{summary}{A list of summarization functions, or empty for all values. See
the \link{summarization} method for more information. (Default:
\code{c("mean", "sd")})}
\item{formula}{A formula to define the class column.}
\item{data}{A data.frame dataset containing the input attributes and the class.}
}
\value{
A list named by the requested overlapping measure.
}
\description{
Classification task. The overlapping measures evaluate how informative the
available features are to separate the classes. If there is at least one very
discriminative feature in the dataset, the problem can be considered simpler
than if there is no such an attribute.
}
\details{
The following measures are allowed for this method:
\describe{
\item{"F1"}{Maximum Fisher's Discriminant Ratio (F1) measures the overlap
between the values of the features and takes the value of the largest
discriminant ratio among all the available features.}
\item{"F1v"}{Directional-vector maximum Fisher's discriminant ratio (F1v)
complements F1 by searching for a vector able to separate two classes
after the training examples have been projected into it.}
\item{"F2"}{Volume of the overlapping region (F2) computes the overlap of
the distributions of the feature values within the classes. F2 can be
determined by finding, for each feature, its minimum and maximum values
in the classes.}
\item{"F3"}{The maximum individual feature efficiency (F3) of each
feature is given by the ratio between the number of examples that are
not in the overlapping region of two classes and the total number of
examples. This measure returns the maximum of the values found among
the input features.}
\item{"F4"}{Collective feature efficiency (F4) gets an overview of how
various features may work together in data separation. First the most
discriminative feature according to F3 is selected and all examples that
can be separated by this feature are removed from the dataset. The
previous step is repeated on the remaining dataset until all the
features have been considered or no example remains. F4 returns the
ratio of examples that have been discriminated.}
}
}
\examples{
## Extract all overlapping measures for classification task
data(iris)
overlapping(Species ~ ., iris)
}
\references{
Albert Orriols-Puig, Nuria Macia and Tin K Ho. (2010). Documentation for the
data complexity library in C++. Technical Report. La Salle - Universitat
Ramon Llull.
}
\seealso{
Other complexity-measures: \code{\link{balance}},
\code{\link{correlation}}, \code{\link{dimensionality}},
\code{\link{linearity}}, \code{\link{neighborhood}},
\code{\link{network}}, \code{\link{smoothness}}
}
\concept{complexity-measures}
|
/man/overlapping.Rd
|
no_license
|
cran/ECoL
|
R
| false
| true
| 3,474
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/overlapping.R
\name{overlapping}
\alias{overlapping}
\alias{overlapping.default}
\alias{overlapping.formula}
\title{Measures of overlapping}
\usage{
overlapping(...)
\method{overlapping}{default}(x, y, measures = "all",
summary = c("mean", "sd"), ...)
\method{overlapping}{formula}(formula, data, measures = "all",
summary = c("mean", "sd"), ...)
}
\arguments{
\item{...}{Not used.}
\item{x}{A data.frame contained only the input attributes.}
\item{y}{A factor response vector with one label for each row/component of x.}
\item{measures}{A list of measures names or \code{"all"} to include all them.}
\item{summary}{A list of summarization functions or empty for all values. See
\link{summarization} method to more information. (Default:
\code{c("mean", "sd")})}
\item{formula}{A formula to define the class column.}
\item{data}{A data.frame dataset contained the input attributes and class.}
}
\value{
A list named by the requested overlapping measure.
}
\description{
Classification task. The overlapping measures evaluate how informative the
available features are to separate the classes. If there is at least one very
discriminative feature in the dataset, the problem can be considered simpler
than if there is no such an attribute.
}
\details{
The following measures are allowed for this method:
\describe{
\item{"F1"}{Maximum Fisher's Discriminant Ratio (F1) measures the overlap
between the values of the features and takes the value of the largest
discriminant ratio among all the available features.}
\item{"F1v"}{Directional-vector maximum Fisher's discriminant ratio (F1v)
complements F1 by searching for a vector able to separate two classes
after the training examples have been projected into it.}
\item{"F2"}{Volume of the overlapping region (F2) computes the overlap of
the distributions of the features values within the classes. F2 can be
determined by finding, for each feature its minimum and maximum values
in the classes.}
\item{"F3"}{The maximum individual feature efficiency (F3) of each
feature is given by the ratio between the number of examples that are
not in the overlapping region of two classes and the total number of
examples. This measure returns the maximum of the values found among
the input features.}
\item{"F4"}{Collective feature efficiency (F4) get an overview on how
various features may work together in data separation. First the most
discriminative feature according to F3 is selected and all examples that
can be separated by this feature are removed from the dataset. The
previous step is repeated on the remaining dataset until all the
features have been considered or no example remains. F4 returns the
ratio of examples that have been discriminated.}
}
}
\examples{
## Extract all overlapping measures for classification task
data(iris)
overlapping(Species ~ ., iris)
}
\references{
Albert Orriols-Puig, Nuria Macia and Tin K Ho. (2010). Documentation for the
data complexity library in C++. Technical Report. La Salle - Universitat
Ramon Llull.
}
\seealso{
Other complexity-measures: \code{\link{balance}},
\code{\link{correlation}}, \code{\link{dimensionality}},
\code{\link{linearity}}, \code{\link{neighborhood}},
\code{\link{network}}, \code{\link{smoothness}}
}
\concept{complexity-measures}
|
neural<-function()
#This script, so far, only uses synthesized data to train the neural network. The goal is to use this function to create a neural network that will then be used to
#create our prices for our warehouses.
#This script works, but we need to find a way to create better training data
#Train neural network to price based on fake data
#https://www.youtube.com/watch?v=LTg-qP9iGFY
require("neuralnet")
library(neuralnet)
n<-100#number of samples we want to use to train the neural network
populations<-sort(populations,decreasing=TRUE) #sort populations largest to smallest
prices<-sort(prices,decreasing=TRUE) #sort prices largest to smallest
rating<-sort(rating,decreasing=TRUE) #sort ratings largest to smallest
storagetype<-sort(storagetype,decreasing=TRUE) #sort storage types 1-4 largest to smallest
#Create data fame that is split into training/testing data
df<-data.frame(populations,rating,storagetype,prices) #it's important that prices is last
ind<-sample(1:nrow(df),70)
trainDF<-df[ind,]
testDF<-df[-ind,]
#creating formula for neuralnet function
allVars<-colnames(df)
predictorVars<-allVars[!allVars%in%"prices"] #price is target variable
predictorVars<-paste(predictorVars,collapse = "+")
form<-as.formula(paste("prices~",predictorVars,collapse="+")) #writes formula how its needed for neuralnet
#Train neural net --
#Input Hidden1 Output
#3 2 1
neural<-neuralnet(formula=form,hidden=2, linear.output=T, data=trainDF)
#plot(neural)
#This is how you would test/use neural net
#predictions<-compute(neural,testDF[,1:3]) #[,1:3] ensures that we use all variables used for prediction, excluding price (bc price is the target)
#predicted_prices<-round(predictions[[2]],digits=2)
|
/simulation/neural.R
|
no_license
|
awiddy/WARIE
|
R
| false
| false
| 1,764
|
r
|
#' Train a demonstration neural network for warehouse pricing.
#'
#' This script, so far, only uses synthesized data to train the neural
#' network. The goal is to use this function to create a neural network
#' that will then be used to create our prices for our warehouses.
#' Based on https://www.youtube.com/watch?v=LTg-qP9iGFY
#'
#' Fixes over the previous version:
#' - `neural <- function()` had no braced body, so only the first following
#'   expression became the function body and everything else ran at top level.
#' - populations/prices/rating/storagetype were used before being defined;
#'   they are now synthesized from `n`.
#'
#' @param n Number of synthetic samples to generate. (Default: 100)
#' @param trainSize Number of samples used for training. (Default: 70)
#'
#' @return A list with the fitted neuralnet model (`model`), the training
#' data (`trainDF`) and the held-out test data (`testDF`).
neural <- function(n = 100, trainSize = 70) {
  library(neuralnet)
  ## Synthesize sorted (largest to smallest) training attributes.
  ## NOTE(review): the value ranges below are placeholders; replace with
  ## real warehouse data once available.
  populations <- sort(runif(n, min = 1e4, max = 1e6), decreasing = TRUE)
  prices <- sort(runif(n, min = 50, max = 500), decreasing = TRUE)
  rating <- sort(runif(n, min = 1, max = 5), decreasing = TRUE)
  storagetype <- sort(sample(1:4, size = n, replace = TRUE),
                      decreasing = TRUE)
  ## Create data frame that is split into training/testing data.
  ## It's important that prices is the last column.
  df <- data.frame(populations, rating, storagetype, prices)
  ind <- sample(seq_len(nrow(df)), trainSize)
  trainDF <- df[ind, ]
  testDF <- df[-ind, ]
  ## Build the formula the way neuralnet needs it: prices ~ p1 + p2 + ...
  allVars <- colnames(df)
  predictorVars <- allVars[!allVars %in% "prices"]
  form <- as.formula(paste("prices ~", paste(predictorVars, collapse = "+")))
  ## Train neural net --
  ## Input  Hidden1  Output
  ##   3       2       1
  model <- neuralnet(formula = form, hidden = 2, linear.output = TRUE,
                     data = trainDF)
  ## To use the net on the held-out data:
  ## predictions <- compute(model, testDF[, 1:3])
  ## predicted_prices <- round(predictions$net.result, digits = 2)
  list(model = model, trainDF = trainDF, testDF = testDF)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input.R
\docType{data}
\name{input}
\alias{input}
\title{Dane wejsciowe dla funkcji \code{emisjadk()}}
\format{
Ramka danych zawiera:
\describe{
\item{Nat}{Natezenie ruchu}
\item{Segment}{Rodzaj nadwozia samochodu}
\item{Fuel}{Rodzaj uzywanego paliwa}
\item{Technology}{Technologia pracy silnika}
}
}
\usage{
input
}
\description{
Dane wejsciowe dla funkcji \code{emisjadk()}
}
\examples{
\dontrun{
input
}
}
\keyword{datasets}
|
/man/input.Rd
|
permissive
|
Gryzzle/emisjaspal
|
R
| false
| true
| 506
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input.R
\docType{data}
\name{input}
\alias{input}
\title{Dane wejsciowe dla funkcji \code{emisjadk()}}
\format{
Ramka danych zawiera:
\describe{
\item{Nat}{Natezenie ruchu}
\item{Segment}{Rodzaj nadwozia samochodu}
\item{Fuel}{Rodzaj uzywanego paliwa}
\item{Technology}{Technologia pracy silnika}
}
}
\usage{
input
}
\description{
Dane wejsciowe dla funkcji \code{emisjadk()}
}
\examples{
\dontrun{
input
}
}
\keyword{datasets}
|
#' Evaluate an expression and capture its value together with any errors
#' and warnings signalled during evaluation.
#' Based on http://stackoverflow.com/a/24569739/2271856.
#'
#' @noRd
#' @keywords internal
tryCatchExt <- function(expr) {
  wrns <- NULL
  errs <- NULL
  ## tryCatch captures the (single) error; withCallingHandlers collects
  ## every warning while letting evaluation continue.
  res <- withCallingHandlers(
    tryCatch(expr,
             error = function(e) {
               errs <<- conditionMessage(e)
               NULL
             }),
    warning = function(w) {
      wrns <<- c(wrns, conditionMessage(w))
      invokeRestart("muffleWarning")
    })
  list(value = res, warning = wrns, error = errs)
}
#' Helper function for suppressing a single warning message.
#'
#' Evaluates `expression`; any warning whose text matches the regular
#' expression `message` is muffled. All other warnings propagate as usual.
#'
#' @noRd
#' @keywords internal
supprWarn <- function(expression,
                      message) {
  muffleMatching <- function(w) {
    ## Only muffle warnings matching the requested pattern.
    if (grepl(message, w$message)) {
      invokeRestart("muffleWarning")
    }
  }
  withCallingHandlers(expression, warning = muffleMatching)
}
#' Helper function for checking whether error message about 1% change on
#' last iteration for asreml is worth mentioning as a warning.
#' If the corresponding parameter is close to zero then changes of 1%
#' or more can be expected and are ok.
#'
#' @param model A list as produced by tryCatchExt, with the fitted asreml
#' object in model$value and captured warning messages in model$warning.
#'
#' @return The input list, with the "changed by more than 1%" warning
#' removed from model$warning when the change is small or the parameter
#' value is close to zero.
#'
#' @noRd
#' @keywords internal
chkLastIter <- function(model) {
  wrnMsg <- "changed by more than 1%"
  if (any(grepl(pattern = wrnMsg, x = model$warning))) {
    if (asreml4()) {
      ## Extract trace df from model object (asreml >= 4 layout).
      mon <- model$value$trace
      ## Extract values for parameters for last 2 iterations.
      ## First 3 rows give general model info.
      lastIt <- mon[-(1:3), c(ncol(mon) - 1, ncol(mon))]
    } else {
      ## Extract monitor df from model object (asreml 3 layout).
      mon <- model$value$monitor
      ## Extract values for parameters for last 2 iterations.
      ## First 3 rows give general model info. Last col a summary.
      lastIt <- mon[-(1:3), c(ncol(mon) - 2, ncol(mon) - 1)]
    }
    ## Compute change of parameters in last iteration as a percentage.
    ## A previous value of exactly 0 counts as no change, avoiding a
    ## division by zero.
    change <- ifelse(lastIt[, 1] == 0, 0, abs((lastIt[, 2] - lastIt[, 1]) /
                                                lastIt[, 1]) * 100)
    ## Suppress warning if the change was less than 5% or the param value less
    ## than 0.1.
    if (all(change <= 5) || all(lastIt[change > 5, 1] < 0.1)) {
      model$warning <- model$warning[!grepl(pattern = wrnMsg,
                                            x = model$warning)]
    }
  }
  return(model)
}
#' Helper function for converting certain asreml warnings to errors.
#'
#' Warnings matching any of the known fatal patterns are moved from
#' model$warning to model$error.
#'
#' @noRd
#' @keywords internal
wrnToErr <- function(model) {
  promote <- c("Abnormal termination", "returning -Inf")
  for (pattern in promote) {
    hits <- grepl(pattern = pattern, x = model$warning)
    if (any(hits)) {
      ## Remove from warnings and add to errors.
      model$error <- c(model$error, model$warning[hits])
      model$warning <- model$warning[!hits]
    }
  }
  return(model)
}
#' Extended version of asreml.predict
#'
#' Asreml has a bug that may throw a warning message:
#' Abnormal termination
#' Insufficient workspace - (reset workspace or pworkspace arguments)
#' This may be avoided by increasing pworkspace, but this doesn't
#' always work.
#' If this happens pworkspace is increased in 'small' steps.
#'
#' @param model A fitted asreml model object.
#' @param classify The term over which predictions are made.
#' @param associate A formula with associated terms, passed on to predict.
#' @param vcov Should the variance-covariance matrix of the predictions be
#' computed?
#' @param TD The data used for predicting.
#' @param ... Further arguments passed on to predict.
#'
#' @return On success the predict result (modelP$value); otherwise an error
#' is raised containing the asreml error and warning messages.
#'
#' @noRd
#' @keywords internal
predictAsreml <- function(model,
                          classify = "genotype",
                          associate = as.formula("~ NULL"),
                          vcov = TRUE,
                          TD,
                          ...) {
  wrnMsg <- "reset workspace or pworkspace arguments"
  ## Predict using default settings, i.e. pworkspace = 8e6
  modelP <- tryCatchExt(predict(model, classify = classify,
                                vcov = vcov, associate = associate,
                                data = TD, maxiter = 20, trace = FALSE, ...))
  pWorkSpace <- 8e6
  ## While there is a workspace warning, increase pWorkSpace and predict
  ## again. Capped at 160e6 so the loop always terminates.
  while (!is.null(modelP$warning) &&
         any(grepl(pattern = wrnMsg, x = modelP$warning))
         && pWorkSpace < 160e6) {
    pWorkSpace <- pWorkSpace + 8e6
    modelP <- tryCatchExt(predict(model, classify = classify,
                                  vcov = vcov, associate = associate, data = TD,
                                  maxiter = 20, pworkspace = pWorkSpace,
                                  trace = FALSE, ...))
  }
  ## Re-raise remaining non-workspace warnings, after filtering the
  ## ignorable "changed by more than 1%" message via chkLastIter.
  if (!is.null(modelP$warning) && !all(grepl(pattern = wrnMsg,
                                             x = modelP$warning))) {
    modelP <- chkLastIter(modelP)
    if (length(modelP$warning) != 0) {
      warning(modelP$warning, call. = FALSE)
    }
  }
  if ((length(modelP$warning) == 0 ||
       !all(grepl(pattern = wrnMsg, x = modelP$warning))) &&
      is.null(modelP$error)) {
    return(modelP$value)
  } else {
    stop("Error in asreml when running predict. Asreml message:\n",
         modelP$error, "\n",
         modelP$warning, "\n", call. = FALSE)
  }
}
#' Helper function for computing the standard error of the variance.
#'
#' For matrix or data.frame input the computation is applied per column.
#' Other non-vector input is coerced with as.vector first.
#'
#' @noRd
#' @keywords internal
seVar <- function(x,
                  na.rm = FALSE) {
  if (inherits(x, c("matrix", "data.frame"))) {
    ## Column-wise application for tabular input.
    return(apply(X = x, MARGIN = 2, FUN = seVar, na.rm = na.rm))
  }
  if (!is.vector(x)) {
    ## Coerce anything else to a plain vector and retry.
    return(seVar(x = as.vector(x), na.rm = na.rm))
  }
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  n <- length(x)
  ## Raw moments of order 1 to 4.
  m1 <- sum(x) / n
  m2 <- sum(x ^ 2) / n
  m3 <- sum(x ^ 3) / n
  m4 <- sum(x ^ 4) / n
  sqrt((n * (m4 - 4 * m1 * m3 + 6 * m1 ^ 2 * m2 - 3 * m1 ^ 4) /
          (n - 1) - (n * (m2 - m1 ^ 2) / (n - 1)) ^ 2) / n)
}
#' Helper function for computing the skewness.
#' This and following formulas taken from
#' https://brownmath.com/stat/shape.htm#Normal.
#'
#' For matrix or data.frame input the computation is applied per column.
#'
#' @noRd
#' @keywords internal
skewness <- function(x,
                     na.rm = FALSE) {
  if (inherits(x, c("matrix", "data.frame"))) {
    ## Column-wise application for tabular input.
    return(apply(X = x, MARGIN = 2, FUN = skewness, na.rm = na.rm))
  }
  if (!is.vector(x)) {
    ## Coerce anything else to a plain vector and retry.
    return(skewness(x = as.vector(x), na.rm = na.rm))
  }
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  n <- length(x)
  centered <- x - mean(x)
  ## Third central moment over the 3/2 power of the second.
  (sum(centered ^ 3) / n) / (sum(centered ^ 2) / n) ^ (3 / 2)
}
#' Helper function for computing the standard error of the skewness.
#'
#' @param n The number of observations.
#'
#' @return The standard error of the skewness, or NA (with a warning)
#' when n <= 2, for which the formula is undefined.
#'
#' @noRd
#' @keywords internal
seSkewness <- function(n) {
  if (n <= 2) {
    ## The formula below divides by (n - 2), so it is undefined for n <= 2.
    ## Fix: the warning used to say "less than 2" although the guard is
    ## n <= 2, i.e. less than 3.
    warning("For n less than 3 the standard error of skewness cannot be ",
            "calculated", call. = FALSE)
    return(NA)
  }
  return(sqrt((6 * n * (n - 1)) / ((n - 2) * (n + 1) * (n + 3))))
}
#' Helper function for computing kurtosis.
#' Rescaled by subtracting 3 from the result to give the normal distribution
#' a kurtosis of 0, so basically the excess kurtosis.
#'
#' For matrix or data.frame input the computation is applied per column.
#'
#' @noRd
#' @keywords internal
kurtosis <- function(x,
                     na.rm = FALSE) {
  if (inherits(x, c("matrix", "data.frame"))) {
    ## Column-wise application for tabular input.
    return(apply(X = x, MARGIN = 2, FUN = kurtosis, na.rm = na.rm))
  }
  if (!is.vector(x)) {
    ## Coerce anything else to a plain vector and retry.
    return(kurtosis(x = as.vector(x), na.rm = na.rm))
  }
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  n <- length(x)
  centered <- x - mean(x)
  ## Excess kurtosis: 3 is subtracted so a normal distribution scores 0.
  n * sum(centered ^ 4) / (sum(centered ^ 2) ^ 2) - 3
}
#' Helper function for computing the standard error of the kurtosis.
#'
#' @param n The number of observations.
#'
#' @return The standard error of the kurtosis, or NA (with a warning)
#' when n <= 3, for which the formula is undefined.
#'
#' @noRd
#' @keywords internal
seKurtosis <- function(n) {
  if (n <= 3) {
    ## The formula below divides by (n - 3), so it is undefined for n <= 3.
    ## Fix: the warning used to say "less than 2" although the guard is
    ## n <= 3, i.e. less than 4 (copy-paste from seSkewness).
    warning("For n less than 4 the standard error of kurtosis cannot be ",
            "calculated", call. = FALSE)
    return(NA)
  }
  return(sqrt((24 * n * (n - 1) ^ 2) / ((n - 2) * (n - 3) * (n + 3) * (n + 5))))
}
#' Base method for creating a report
#'
#' Base method for creating a .pdf and .tex report from an \code{R} object.
#'
#' @param x An \code{R} object
#' @param ... Further arguments to be passed on to specific report functions.
#'
#' @return The result of the class-specific report method dispatched on x.
#'
#' @seealso \code{\link{report.STA}}
#'
#' @export
report <- function(x,
                   ...) {
  ## S3 generic; dispatch happens on class(x).
  UseMethod("report")
}
#' Helper function for creating the actual report
#'
#' Knits a report template to .tex and runs pdflatex on it twice (the first
#' run in draft mode, the second to resolve references), then removes the
#' auxiliary files pdflatex produced.
#'
#' @param x An R object; presumably made available to the knitted report
#' template -- TODO confirm against the templates in inst/reports.
#' @param reportName The file name of the report template inside the
#' package's "reports" directory.
#' @param reportPackage The package in which the report template is
#' installed.
#' @param outfile The output .pdf file, or NULL for a generated name
#' (reportName + timestamp) in the current working directory.
#' @param ... Further arguments; not used directly in this function.
#'
#' @noRd
#' @keywords internal
createReport <- function(x,
                         reportName,
                         reportPackage,
                         outfile,
                         ...) {
  ## Check provided outfile
  if (!is.null(outfile)) {
    if (!is.character(outfile) || length(outfile) > 1 ||
        tools::file_ext(outfile) != "pdf") {
      stop("Invalid output filename provided.\n")
    }
    ## Since latex cannot handle spaces in figure paths knitr converts those
    ## to pathnames with _. To prevent this spaces are not allowed.
    if (grepl(pattern = " ", x = outfile)) {
      stop("outfile path cannot contain spaces. Provide a path without spaces or
           a relative path.\n")
    }
  } else {
    ## Create a generic output filenane from the name of the report and
    ## the current date/time. The file will be placed in the current
    ## working directory.
    timeStamp <- format(Sys.time(), "%Y%m%d%H%M%S")
    outfile <- paste0("./" , substring(reportName, first = 1,
                                       last = nchar(reportName) - 4),
                      "_", timeStamp, ".pdf")
  }
  ## Extract output directory from outfile.
  outDir <- dirname(outfile)
  ## If output directory doesn't exist, create it.
  if (!dir.exists(outDir)) {
    dir.create(outDir, recursive = TRUE)
  }
  ## When the output need to be written to a top level directory on windows
  ## there may be an extra / at the end of the filename.
  ## This is removed here so file.path works properly further on.
  if (tolower(Sys.info()[["sysname"]]) == "windows") {
    if (substring(text = outDir,
                  first = nchar(outDir)) == .Platform$file.sep) {
      outDir <- substring(text = outDir, first = 1,
                          last = nchar(outDir) - 1)
    }
  }
  ## Extract the name of the outputfile, so without path and extension.
  ## Note: keeps the trailing dot, so extensions are appended without ".".
  outBase <- substring(basename(outfile), first = 1,
                       last = nchar(basename(outfile)) - 3)
  ## Construct the output name of the .tex file
  outTex <- file.path(outDir, paste0(outBase, "tex"))
  ## Get the report file from the directory where the package is installed.
  reportFile <- system.file("reports", reportName, package = reportPackage)
  ## Save knitr options for reset when exiting function.
  knitrOptsOrig <- knitr::opts_chunk$get()
  on.exit(knitr::opts_chunk$set(knitrOptsOrig))
  ## Run knitr with chunk options set to produce proper ppt.
  ## Figures get a timestamp prefix so repeated runs don't clash.
  figPrefix <- paste0(format(Sys.time(), "%m%d%y%H%M%S"), "-")
  knitr::opts_chunk$set(fig.show = "hold",
                        fig.path = file.path(outDir, "figures", figPrefix),
                        fig.process = function(x) {
                          paste0("./figures/", basename(x))
                        })
  knitr::knit(input = reportFile, output = outTex, quiet = TRUE)
  ## Construct shell commands for calling pdf latex.
  ## First only draftmode for speed.
  cmdRun1 <- paste0(Sys.which("pdflatex"), " -interaction=nonstopmode -draftmode ",
                    outBase, "tex")
  cmdRun2 <- paste0(Sys.which("pdflatex"), " -interaction=nonstopmode ",
                    outBase, "tex")
  ## Run shell commands. System doesn't work for windows.
  ## Two runs needed to get references right.
  switch(tolower(Sys.info()[["sysname"]]),
         windows = {
           ## Construct shell command for changing directory.
           ## cd /d is used instead of cd to account for changing drives on windows.
           ## Note that here dirname(outfile) is needed instead of outDir.
           cmdDir <- paste0("cd /d ", dirname(outfile))
           shell(cmd = paste(cmdDir, "&", cmdRun1, "> nul 2>&1"))
           shell(cmd = paste(cmdDir, "&", cmdRun2, "> nul"))
         }, linux = {
           ## Construct shell command for changing directory.
           cmdDir <- paste("cd", outDir)
           system(command = paste(cmdDir, ";", cmdRun1, "> /dev/null 2>&1"))
           system(command = paste(cmdDir, ";", cmdRun2, "> /dev/null"))
         }, darwin = {
           ## Construct shell command for changing directory.
           cmdDir <- paste("cd", outDir)
           system(command = paste(cmdDir, ";", cmdRun1, "> /dev/null 2>&1"))
           system(command = paste(cmdDir, ";", cmdRun2, "> /dev/null"))
         })
  ## Remove extra files generated by pdflatex.
  for (extension in c("aux", "log", "out", "toc", "xwm")) {
    unlink(file.path(outDir, paste0(outBase, extension)))
  }
}
#' Function for escaping special LaTeX characters
#'
#' Taken from knitr package. Copied since it is an internal knitr function.
#' Fix: replaced `=` assignments with `<-` for consistency with the rest
#' of the file; logic is unchanged.
#'
#' @param x A character vector to escape.
#' @param newlines Should single newlines be converted to LaTeX line breaks?
#' @param spaces Should spaces be converted to explicit LaTeX spaces?
#'
#' @return The escaped character vector.
#'
#' @noRd
#' @keywords internal
escapeLatex <- function(x, newlines = FALSE, spaces = FALSE) {
  ## Escape backslashes first; the {} for \textbackslash is appended in a
  ## separate step so the brace-escaping step doesn't mangle it.
  x <- gsub('\\\\', '\\\\textbackslash', x)
  x <- gsub('([#$%&_{}])', '\\\\\\1', x)
  x <- gsub('\\\\textbackslash', '\\\\textbackslash{}', x)
  x <- gsub('~', '\\\\textasciitilde{}', x)
  x <- gsub('\\^', '\\\\textasciicircum{}', x)
  ## A single newline (not part of a blank line) becomes a LaTeX \\ break.
  if (newlines) x <- gsub('(?<!\n)\n(?!\n)', '\\\\\\\\', x, perl = TRUE)
  if (spaces) x <- gsub(' ', '\\\\ \\\\ ', x)
  x
}
#' Helper function for renaming rows to a more user readable output.
#'
#' Row names of `dat` that match an internal term (as used in varcomp and
#' effdim tables) are replaced by a readable label; other rows are left
#' untouched.
#'
#' @noRd
#' @keywords internal
renameRows <- function(dat) {
  ## Named lookup vector: names are the internal terms, values the user
  ## readable labels they are replaced by.
  renameMap <- c(
    "genotype" = "Genotype",
    "repId" = "Replicate",
    "rowId" = "Row",
    "colId" = "Col",
    "subBlock" = "Block",
    "repId:rowId" = "Row(replicate)",
    "repId:colId" = "Col(replicate)",
    "repId:subBlock" = "Block(replicate)",
    "colCoord" = "Linear trend along cols",
    "rowCoord" = "Linear trend along rows",
    "rowCoordcolCoord" = "Linear trend along rows and cols",
    "f(colCoord)" = "Smooth trend along cols",
    "f(rowCoord)" = "Smooth trend along rows",
    "f(colCoord):rowCoord" = "Linear trend in rows changing smoothly along cols",
    "colCoord:f(rowCoord)" = "Linear trend in cols changing smoothly along rows",
    "f(colCoord):f(rowCoord)" = "Smooth-by-smooth interaction trend over rows and cols",
    "Nobs" = "Number of observations",
    "rowId:colId" = "Residual",
    "R" = "Residual",
    "variance" = "Residual",
    "pow" = "Power",
    "units" = "Units")
  for (from in names(renameMap)) {
    rownames(dat)[rownames(dat) == from] <- renameMap[[from]]
  }
  return(dat)
}
#' Function for extracting the table with variance components from a model in
#' a nicely printable format.
#'
#' @param model A fitted model object produced by one of the supported
#' engines.
#' @param engine The engine used to fit the model: "SpATS", "lme4" or
#' "asreml".
#'
#' @return A matrix with a "Variance" column (and an "SE" column for
#' asreml), with user readable row names, genotype first and an empty
#' spacer row before the residual.
#'
#' @noRd
#' @keywords internal
extractVarComp <- function(model,
                           engine) {
  if (engine == "SpATS") {
    ## Extract variance components directly from model since using summary
    ## creates a matrix with values already rounded restricting flexibility.
    varComp <- matrix(data = c(model$var.comp, model$psi[1]),
                      dimnames = list(c(names(model$var.comp), "Residual"),
                                      "Variance"))
  } else if (engine == "lme4") {
    if (inherits(model, "lm")) {
      ## In this case there is only residual variance since there are no
      ## random effects.
      varComp <- matrix(data = summary(model)$sigma ^ 2,
                        dimnames = list("Residual", "Variance"))
    } else {
      varComp <- as.data.frame(lme4::VarCorr(model))
      varComp <- matrix(data = varComp$vcov,
                        dimnames = list(varComp$grp, "Variance"))
    }
  } else if (engine == "asreml") {
    ## asreml provides the SE of the variance components as standard output.
    ## This is included in varComp.
    varComp <- as.matrix(summary(model)$varcomp[c("component", "std.error")])
    ## Remove correlations from output. These are present for spatials models.
    varComp <- varComp[!grepl(pattern = ".cor", x = rownames(varComp)), ,
                       drop = FALSE]
    ## To extract user readable names similar to the other engines from
    ## asreml output split the rownames on "." and "!" Depending on the first
    ## part of the split use the appropriate part as row name.
    rownames(varComp) <- sapply(X = strsplit(x = rownames(varComp),
                                             split = "[!.]+"),
                                FUN = function(split) {
                                  if (split[[1]] == "R") {
                                    return(split[[2]])
                                  } else {
                                    return(split[[1]])
                                  }
                                })
    colnames(varComp) <- c("Variance", "SE")
  }
  ## Rename rows for more user readable output.
  varComp <- renameRows(varComp)
  ## Always put genotype as first row.
  if ("Genotype" %in% rownames(varComp)) {
    varComp <- rbind(varComp["Genotype", , drop = FALSE],
                     varComp[rownames(varComp) != "Genotype", , drop = FALSE])
  }
  ## Add an empty row before residuals if it is not already there.
  ## Only done if there is more than 1 row.
  resRow <- which(rownames(varComp) == "Residual")
  if (length(resRow) && nrow(varComp) > 1 && rownames(varComp)[resRow - 1] != "") {
    varComp <- rbind(varComp[1:(resRow - 1), , drop = FALSE],
                     rep(NA, times = ncol(varComp)),
                     varComp[resRow:nrow(varComp), , drop = FALSE])
  }
  return(varComp)
}
#' Helper function for constructing two data.frames containing the coordinates
#' that can be used for plotting a border around parts of a raster plot using
#' geom_path in ggplot2. This can be used to construct an outside border
#' around each replicate in a plot. ggplot2 itself doesn't have this
#' functionality.
#'
#' @param trDat A data.frame with at least numeric columns rowCoord and
#' colCoord and a factor column named by bordVar.
#' @param bordVar The name of the column in trDat whose value changes mark
#' where a border segment is drawn.
#'
#' @return A list of two data.frames, horW and vertW, with the x/y
#' positions of the horizontal and vertical border segments.
#'
#' @noRd
#' @keywords internal
calcPlotBorders <- function(trDat,
                            bordVar) {
  yMin <- min(trDat$rowCoord)
  yMax <- max(trDat$rowCoord)
  xMin <- min(trDat$colCoord)
  xMax <- max(trDat$colCoord)
  ## Create matrix containing replicates.
  ## First create an empty matrix containing all row/column values
  ## between min and max to assure complete missing rows/columns
  ## are added.
  M <- matrix(nrow = yMax - yMin + 1, ncol = xMax - xMin + 1,
              dimnames = list(yMin:yMax, xMin:xMax))
  for (i in 1:nrow(trDat)) {
    M[as.character(trDat[i, "rowCoord"]),
      as.character(trDat[i, "colCoord"])] <- trDat[i, bordVar]
  }
  ## Create an imputed version of M for plotting borders around NA values.
  ## NA cells get a sentinel level one past the existing factor levels.
  MImp <- M
  MImp[is.na(MImp)] <- nlevels(trDat[[bordVar]]) + 1
  ## A wall table is only usable when it has an x and a y column and at
  ## least one row.
  has.breaks <- function(x) {
    ncol(x) == 2 & nrow(x) > 0
  }
  ## Create a data.frame with positions where the value of rep in the
  ## data changes in vertical direction.
  vertW <- do.call(rbind.data.frame,
                   Filter(f = has.breaks, x = Map(function(i, x) {
                     cbind(y = i, x = which(diff(c(0, x, 0)) != 0))
                   }, 1:nrow(MImp), split(MImp, 1:nrow(MImp)))))
  ## Remove vertical walls that are on the outside bordering an NA value
  ## to prevent drawing of unneeded lines.
  vertW <- vertW[!(vertW$x == 1 & is.na(M[vertW$y, 1])) &
                   !(vertW$x == ncol(M) + 1 &
                       is.na(M[vertW$y, ncol(M)])), ]
  ## Add min row value for plotting in the correct position.
  vertW$y <- vertW$y + yMin - 1
  vertW$x <- vertW$x + xMin - 1
  ## For horizontal walls follow the same procedure as above.
  horW <- do.call(rbind.data.frame,
                  Filter(f = has.breaks, x = Map(function(i, y) {
                    cbind(x = i, y = which(diff(c(0, y, 0)) != 0))
                  }, 1:ncol(MImp), as.data.frame(MImp))))
  horW <- horW[!(horW$y == 1 & is.na(M[1, horW$x])) &
                 !(horW$y == nrow(M) + 1 & is.na(M[nrow(M), horW$x])), ]
  horW$y <- horW$y + yMin - 1
  horW$x <- horW$x + xMin - 1
  return(list(horW = horW, vertW = vertW))
}
#' This function is a slightly modified copy of map_data from ggplot2 combined
#' with map.fortify also from ggplot2.
#' Using the normal function is not possible because both packages qtl and maps
#' have a class map and when building the vignette this gives an error.
#'
#' @param xLim Longitude limits passed to maps::map as xlim.
#' @param yLim Latitude limits passed to maps::map as ylim.
#'
#' @return A data.frame with columns long, lat, group, order, region and
#' subregion; rows with incomplete coordinates are removed.
#'
#' @noRd
#' @keywords internal
mapData <- function(xLim,
                    yLim) {
  mapObj <- maps::map("world", exact = FALSE, plot = FALSE,
                      fill = TRUE, xlim = xLim, ylim = yLim)
  df <- data.frame(long = mapObj$x, lat = mapObj$y)
  ## NA coordinate pairs separate polygons; the running count of
  ## separators serves as the polygon group id.
  df$group <- cumsum(is.na(df$long) & is.na(df$lat)) + 1
  df$order <- 1:nrow(df)
  ## Split names like "region:subregion" into two parts per group.
  names <- do.call("rbind", lapply(strsplit(mapObj$names, "[:,]"),
                                   "[", 1:2))
  df$region <- names[df$group, 1]
  df$subregion <- names[df$group, 2]
  return(df[stats::complete.cases(df$lat, df$long), ])
}
#' Helper function for selecting the results that can be extracted, given
#' the requested result names, the fitted model types and the engine.
#'
#' NOTE(review): extractOptions is assumed to be a package-internal
#' data.frame with columns "model", "result" and one column per engine --
#' it is not defined in this file; confirm against the package data.
#'
#' @param what A character vector of requested result names, or "all".
#' @param fixed Logical; include results for the "F" model rows.
#' @param random Logical; include results for the "R" model rows.
#' @param engine The engine column of extractOptions to filter on.
#'
#' @return A character vector with the selected result names.
#'
#' @noRd
#' @keywords internal
extractOptSel <- function(what,
                          fixed,
                          random,
                          engine) {
  ## Select the model codes to keep: "F" when fixed, "R" when random.
  models <- c("F", "R")[c(fixed, random)]
  extractSel <- extractOptions[extractOptions[[engine]] == 1 &
                                 extractOptions[["model"]] %in% models, ]
  if (what[1] != "all") {
    ## Restrict to the explicitly requested results; match.arg errors on
    ## names that are not available for this engine/model combination.
    what <- match.arg(arg = what, choices = extractSel[["result"]],
                      several.ok = TRUE)
    extractSel <- extractSel[extractSel[["result"]] %in% what, ]
  }
  return(extractSel[["result"]])
}
#' Helper function for detecting the version of asreml installed.
#' This is used wherever the syntax for asreml4 differs from asreml3.
#'
#' @return TRUE if asreml version 4 or higher is installed and licensed,
#' FALSE if an older version is installed or asreml is not available.
#'
#' @noRd
#' @importFrom utils packageVersion
#' @keywords internal
asreml4 <- function() {
  if (!requireNamespace("asreml", quietly = TRUE)) {
    ## Fix: previously the function fell through and (invisibly) returned
    ## NULL when asreml was not installed, which breaks callers doing
    ## if (asreml4()). Return an explicit FALSE instead.
    return(FALSE)
  }
  if (packageVersion("asreml")[1] >= 4) {
    ## Calling license status apparently also activates the license if this
    ## was done once before.
    licenceStatus <- asreml::asreml.license.status(quiet = TRUE)
    if (licenceStatus$status != 0) {
      stop("Error checking asreml licence status:\n",
           licenceStatus$statusMessage)
    }
    return(TRUE)
  }
  return(FALSE)
}
#' Helper function for row binding data.frames with different columns.
#'
#' Missing columns are added as NA so all data.frames share the full set
#' of column names before binding.
#'
#' @param dfList A list of data.frames.
#'
#' @noRd
#' @keywords internal
dfBind <- function(dfList) {
  ## Drop data.frames without rows so they contribute no columns.
  dfList <- Filter(f = function(d) nrow(d) > 0, x = dfList)
  if (length(dfList) == 0) {
    return(data.frame())
  }
  ## Union of all column names over the remaining data.frames.
  allNms <- unique(unlist(lapply(dfList, names)))
  ## Pad each data.frame with NA columns for the names it lacks.
  padded <- lapply(X = dfList, FUN = function(d) {
    nwDat <- sapply(X = setdiff(allNms, names(d)), FUN = function(nm) {
      NA
    })
    data.frame(c(d, nwDat), check.names = FALSE,
               stringsAsFactors = FALSE)
  })
  do.call(rbind, c(padded, make.row.names = FALSE))
}
|
/R/utils.R
|
no_license
|
ntduc11/statgenSTA
|
R
| false
| false
| 23,815
|
r
|
#' Evaluate an expression and capture its value together with any errors
#' and warnings signalled during evaluation.
#' Based on http://stackoverflow.com/a/24569739/2271856.
#'
#' @noRd
#' @keywords internal
tryCatchExt <- function(expr) {
  wrns <- NULL
  errs <- NULL
  ## tryCatch captures the (single) error; withCallingHandlers collects
  ## every warning while letting evaluation continue.
  res <- withCallingHandlers(
    tryCatch(expr,
             error = function(e) {
               errs <<- conditionMessage(e)
               NULL
             }),
    warning = function(w) {
      wrns <<- c(wrns, conditionMessage(w))
      invokeRestart("muffleWarning")
    })
  list(value = res, warning = wrns, error = errs)
}
#' Helper function for suppressing a single warning message.
#'
#' Evaluates `expression`; any warning whose text matches the regular
#' expression `message` is muffled. All other warnings propagate as usual.
#'
#' @noRd
#' @keywords internal
supprWarn <- function(expression,
                      message) {
  muffleMatching <- function(w) {
    ## Only muffle warnings matching the requested pattern.
    if (grepl(message, w$message)) {
      invokeRestart("muffleWarning")
    }
  }
  withCallingHandlers(expression, warning = muffleMatching)
}
#' Helper function for checking whether error message about 1% change on
#' last iteration for asreml is worth mentioning as a warning.
#' If the corresponding parameter is close to zero then changes of 1%
#' or more can be expected and are ok.
#'
#' @param model A list as produced by tryCatchExt, with the fitted asreml
#' object in model$value and captured warning messages in model$warning.
#'
#' @return The input list, with the "changed by more than 1%" warning
#' removed from model$warning when the change is small or the parameter
#' value is close to zero.
#'
#' @noRd
#' @keywords internal
chkLastIter <- function(model) {
  wrnMsg <- "changed by more than 1%"
  if (any(grepl(pattern = wrnMsg, x = model$warning))) {
    if (asreml4()) {
      ## Extract trace df from model object (asreml >= 4 layout).
      mon <- model$value$trace
      ## Extract values for parameters for last 2 iterations.
      ## First 3 rows give general model info.
      lastIt <- mon[-(1:3), c(ncol(mon) - 1, ncol(mon))]
    } else {
      ## Extract monitor df from model object (asreml 3 layout).
      mon <- model$value$monitor
      ## Extract values for parameters for last 2 iterations.
      ## First 3 rows give general model info. Last col a summary.
      lastIt <- mon[-(1:3), c(ncol(mon) - 2, ncol(mon) - 1)]
    }
    ## Compute change of parameters in last iteration as a percentage.
    ## A previous value of exactly 0 counts as no change, avoiding a
    ## division by zero.
    change <- ifelse(lastIt[, 1] == 0, 0, abs((lastIt[, 2] - lastIt[, 1]) /
                                                lastIt[, 1]) * 100)
    ## Suppress warning if the change was less than 5% or the param value less
    ## than 0.1.
    if (all(change <= 5) || all(lastIt[change > 5, 1] < 0.1)) {
      model$warning <- model$warning[!grepl(pattern = wrnMsg,
                                            x = model$warning)]
    }
  }
  return(model)
}
#' Helper function for converting certain asreml warnings to errors.
#'
#' Warnings matching any of the known fatal patterns are moved from
#' model$warning to model$error.
#'
#' @noRd
#' @keywords internal
wrnToErr <- function(model) {
  promote <- c("Abnormal termination", "returning -Inf")
  for (pattern in promote) {
    hits <- grepl(pattern = pattern, x = model$warning)
    if (any(hits)) {
      ## Remove from warnings and add to errors.
      model$error <- c(model$error, model$warning[hits])
      model$warning <- model$warning[!hits]
    }
  }
  return(model)
}
#' Extended version of asreml.predict
#'
#' Asreml has a bug that may throw a warning message:
#' Abnormal termination
#' Insufficient workspace - (reset workspace or pworkspace arguments)
#' This may be avoided by increasing pworkspace, but this doesn't
#' always work.
#' If this happens pworkspace is increased in 'small' steps.
#'
#' @param model A fitted asreml model object.
#' @param classify The term over which predictions are made.
#' @param associate A formula with associated terms, passed on to predict.
#' @param vcov Should the variance-covariance matrix of the predictions be
#' computed?
#' @param TD The data used for predicting.
#' @param ... Further arguments passed on to predict.
#'
#' @return On success the predict result (modelP$value); otherwise an error
#' is raised containing the asreml error and warning messages.
#'
#' @noRd
#' @keywords internal
predictAsreml <- function(model,
                          classify = "genotype",
                          associate = as.formula("~ NULL"),
                          vcov = TRUE,
                          TD,
                          ...) {
  wrnMsg <- "reset workspace or pworkspace arguments"
  ## Predict using default settings, i.e. pworkspace = 8e6
  modelP <- tryCatchExt(predict(model, classify = classify,
                                vcov = vcov, associate = associate,
                                data = TD, maxiter = 20, trace = FALSE, ...))
  pWorkSpace <- 8e6
  ## While there is a workspace warning, increase pWorkSpace and predict
  ## again. Capped at 160e6 so the loop always terminates.
  while (!is.null(modelP$warning) &&
         any(grepl(pattern = wrnMsg, x = modelP$warning))
         && pWorkSpace < 160e6) {
    pWorkSpace <- pWorkSpace + 8e6
    modelP <- tryCatchExt(predict(model, classify = classify,
                                  vcov = vcov, associate = associate, data = TD,
                                  maxiter = 20, pworkspace = pWorkSpace,
                                  trace = FALSE, ...))
  }
  ## Re-raise remaining non-workspace warnings, after filtering the
  ## ignorable "changed by more than 1%" message via chkLastIter.
  if (!is.null(modelP$warning) && !all(grepl(pattern = wrnMsg,
                                             x = modelP$warning))) {
    modelP <- chkLastIter(modelP)
    if (length(modelP$warning) != 0) {
      warning(modelP$warning, call. = FALSE)
    }
  }
  if ((length(modelP$warning) == 0 ||
       !all(grepl(pattern = wrnMsg, x = modelP$warning))) &&
      is.null(modelP$error)) {
    return(modelP$value)
  } else {
    stop("Error in asreml when running predict. Asreml message:\n",
         modelP$error, "\n",
         modelP$warning, "\n", call. = FALSE)
  }
}
#' Helper function for computing the standard error of the variance.
#'
#' Matrix-like input is handled column by column. For a vector the standard
#' error is computed from the first four raw moments of the data.
#'
#' @param x A numeric vector, matrix or data.frame.
#' @param na.rm Should missing values be removed before computing?
#'
#' @noRd
#' @keywords internal
seVar <- function(x,
                  na.rm = FALSE) {
  if (inherits(x, c("matrix", "data.frame"))) {
    ## Compute the standard error per column.
    res <- apply(X = x, MARGIN = 2, FUN = seVar, na.rm = na.rm)
  } else if (is.vector(x)) {
    if (na.rm) {
      x <- x[!is.na(x)]
    }
    nObs <- length(x)
    ## Raw moments of order 1 to 4.
    rawMom <- vapply(X = 1:4, FUN = function(p) sum(x ^ p) / nObs,
                     FUN.VALUE = numeric(1))
    ## Bias-corrected fourth central moment term.
    m4Term <- nObs * (rawMom[4] - 4 * rawMom[1] * rawMom[3] +
                        6 * rawMom[1] ^ 2 * rawMom[2] -
                        3 * rawMom[1] ^ 4) / (nObs - 1)
    ## Squared (bias-corrected) variance term.
    varTerm <- (nObs * (rawMom[2] - rawMom[1] ^ 2) / (nObs - 1)) ^ 2
    res <- sqrt((m4Term - varTerm) / nObs)
  } else {
    res <- seVar(x = as.vector(x), na.rm = na.rm)
  }
  return(res)
}
#' Helper function for computing the skewness.
#' This and following formulas taken from
#' https://brownmath.com/stat/shape.htm#Normal.
#'
#' Matrix-like input is handled column by column.
#'
#' @param x A numeric vector, matrix or data.frame.
#' @param na.rm Should missing values be removed before computing?
#'
#' @noRd
#' @keywords internal
skewness <- function(x,
                     na.rm = FALSE) {
  if (inherits(x, c("matrix", "data.frame"))) {
    ## Compute the skewness per column.
    res <- apply(X = x, MARGIN = 2, FUN = skewness, na.rm = na.rm)
  } else if (is.vector(x)) {
    if (na.rm) {
      x <- x[!is.na(x)]
    }
    nObs <- length(x)
    centered <- x - mean(x)
    ## g1: third central moment over the 3/2 power of the second.
    res <- (sum(centered ^ 3) / nObs) /
      (sum(centered ^ 2) / nObs) ^ (3 / 2)
  } else {
    res <- skewness(x = as.vector(x), na.rm = na.rm)
  }
  return(res)
}
#' Helper function for computing the standard error of the skewness.
#'
#' @param n The number of observations.
#'
#' @return The standard error of the skewness; NA (with a warning) when
#' n is 2 or less, since the formula divides by (n - 2).
#'
#' @noRd
#' @keywords internal
seSkewness <- function(n) {
  ## The formula divides by (n - 2), so it is only defined for n > 2.
  ## Fix: the warning message previously said "less than 2" although the
  ## condition also triggers for n == 2.
  if (n <= 2) {
    warning("For n of 2 or less the standard error of skewness cannot be ",
            "calculated", call. = FALSE)
    return(NA)
  }
  return(sqrt((6 * n * (n - 1)) / ((n - 2) * (n + 1) * (n + 3))))
}
#' Helper function for computing kurtosis.
#' Rescaled by subtracting 3 from the result to give the normal distribution
#' a kurtosis of 0, so basically the excess kurtosis.
#'
#' Matrix-like input is handled column by column.
#'
#' @param x A numeric vector, matrix or data.frame.
#' @param na.rm Should missing values be removed before computing?
#'
#' @noRd
#' @keywords internal
kurtosis <- function(x,
                     na.rm = FALSE) {
  if (inherits(x, c("matrix", "data.frame"))) {
    ## Compute the kurtosis per column.
    res <- apply(X = x, MARGIN = 2, FUN = kurtosis, na.rm = na.rm)
  } else if (is.vector(x)) {
    if (na.rm) {
      x <- x[!is.na(x)]
    }
    nObs <- length(x)
    centered <- x - mean(x)
    ## Excess kurtosis: fourth central moment over squared second, minus 3.
    res <- nObs * sum(centered ^ 4) / (sum(centered ^ 2) ^ 2) - 3
  } else {
    res <- kurtosis(x = as.vector(x), na.rm = na.rm)
  }
  return(res)
}
#' Helper function for computing the standard error of the kurtosis.
#'
#' @param n The number of observations.
#'
#' @return The standard error of the kurtosis; NA (with a warning) when
#' n is 3 or less, since the formula divides by (n - 2) and (n - 3).
#'
#' @noRd
#' @keywords internal
seKurtosis <- function(n) {
  ## The formula divides by (n - 2) and (n - 3), so it is only defined for
  ## n > 3. Fix: the warning message previously said "less than 2" (copied
  ## from seSkewness) although the condition triggers for n <= 3.
  if (n <= 3) {
    warning("For n of 3 or less the standard error of kurtosis cannot be ",
            "calculated", call. = FALSE)
    return(NA)
  }
  return(sqrt((24 * n * (n - 1) ^ 2) / ((n - 2) * (n - 3) * (n + 3) * (n + 5))))
}
#' Base method for creating a report
#'
#' Base method for creating a .pdf and .tex report from an \code{R} object.
#'
#' @param x An \code{R} object
#' @param ... Further arguments to be passed on to specific report functions.
#'
#' @seealso \code{\link{report.STA}}
#'
#' @export
report <- function(x,
                   ...) {
  ## S3 generic; dispatches to report.<class> methods such as report.STA.
  UseMethod("report")
}
#' Helper function for creating the actual report
#'
#' The report template is knitted to a .tex file which is then compiled to
#' .pdf with pdflatex (run twice to resolve references). Auxiliary latex
#' files are removed afterwards.
#'
#' @param x An R object, available to the knitted report template.
#' @param reportName A character string, the file name of the report template
#' inside the package's reports directory.
#' @param reportPackage A character string, the package containing the
#' report template.
#' @param outfile A character string, the name of the output .pdf file
#' (no spaces allowed). If \code{NULL} a name is generated from the report
#' name and a timestamp in the current working directory.
#' @param ... Further arguments, available to the report template.
#'
#' @noRd
#' @keywords internal
createReport <- function(x,
                         reportName,
                         reportPackage,
                         outfile,
                         ...) {
  ## Check provided outfile
  if (!is.null(outfile)) {
    if (!is.character(outfile) || length(outfile) > 1 ||
        tools::file_ext(outfile) != "pdf") {
      stop("Invalid output filename provided.\n")
    }
    ## Since latex cannot handle spaces in figure paths knitr converts those
    ## to pathnames with _. To prevent this spaces are not allowed.
    if (grepl(pattern = " ", x = outfile)) {
      stop("outfile path cannot contain spaces. Provide a path without spaces or
           a relative path.\n")
    }
  } else {
    ## Create a generic output filename from the name of the report and
    ## the current date/time. The file will be placed in the current
    ## working directory.
    timeStamp <- format(Sys.time(), "%Y%m%d%H%M%S")
    outfile <- paste0("./" , substring(reportName, first = 1,
                                       last = nchar(reportName) - 4),
                      "_", timeStamp, ".pdf")
  }
  ## Extract output directory from outfile.
  outDir <- dirname(outfile)
  ## If output directory doesn't exist, create it.
  if (!dir.exists(outDir)) {
    dir.create(outDir, recursive = TRUE)
  }
  ## When the output need to be written to a top level directory on windows
  ## there may be an extra / at the end of the filename.
  ## This is removed here so file.path works properly further on.
  if (tolower(Sys.info()[["sysname"]]) == "windows") {
    if (substring(text = outDir,
                  first = nchar(outDir)) == .Platform$file.sep) {
      outDir <- substring(text = outDir, first = 1,
                          last = nchar(outDir) - 1)
    }
  }
  ## Extract the name of the outputfile, so without path and extension.
  ## Note: the trailing "." is kept, so "tex" can be appended directly.
  outBase <- substring(basename(outfile), first = 1,
                       last = nchar(basename(outfile)) - 3)
  ## Construct the output name of the .tex file
  outTex <- file.path(outDir, paste0(outBase, "tex"))
  ## Get the report file from the directory where the package is installed.
  reportFile <- system.file("reports", reportName, package = reportPackage)
  ## Save knitr options for reset when exiting function.
  knitrOptsOrig <- knitr::opts_chunk$get()
  on.exit(knitr::opts_chunk$set(knitrOptsOrig))
  ## Run knitr with chunk options set to produce proper ppt.
  ## Figure paths are rewritten to be relative so the .tex file is portable.
  figPrefix <- paste0(format(Sys.time(), "%m%d%y%H%M%S"), "-")
  knitr::opts_chunk$set(fig.show = "hold",
                        fig.path = file.path(outDir, "figures", figPrefix),
                        fig.process = function(x) {
                          paste0("./figures/", basename(x))
                        })
  knitr::knit(input = reportFile, output = outTex, quiet = TRUE)
  ## Construct shell commands for calling pdf latex.
  ## First only draftmode for speed.
  cmdRun1 <- paste0(Sys.which("pdflatex"), " -interaction=nonstopmode -draftmode ",
                    outBase, "tex")
  cmdRun2 <- paste0(Sys.which("pdflatex"), " -interaction=nonstopmode ",
                    outBase, "tex")
  ## Run shell commands. System doesn't work for windows.
  ## Two runs needed to get references right.
  switch(tolower(Sys.info()[["sysname"]]),
         windows = {
           ## Construct shell command for changing directory.
           ## cd /d is used instead of cd to account for changing drives on windows.
           ## Note that here dirname(outfile) is needed instead of outDir.
           cmdDir <- paste0("cd /d ", dirname(outfile))
           shell(cmd = paste(cmdDir, "&", cmdRun1, "> nul 2>&1"))
           shell(cmd = paste(cmdDir, "&", cmdRun2, "> nul"))
         }, linux = {
           ## Construct shell command for changing directory.
           cmdDir <- paste("cd", outDir)
           system(command = paste(cmdDir, ";", cmdRun1, "> /dev/null 2>&1"))
           system(command = paste(cmdDir, ";", cmdRun2, "> /dev/null"))
         }, darwin = {
           ## Construct shell command for changing directory.
           cmdDir <- paste("cd", outDir)
           system(command = paste(cmdDir, ";", cmdRun1, "> /dev/null 2>&1"))
           system(command = paste(cmdDir, ";", cmdRun2, "> /dev/null"))
         })
  ## Remove extra files generated by pdflatex.
  for (extension in c("aux", "log", "out", "toc", "xwm")) {
    unlink(file.path(outDir, paste0(outBase, extension)))
  }
}
#' Function for escaping special LaTeX characters
#'
#' Taken from knitr package. Copied since it is an internal knitr function.
#'
#' @param x A character vector to escape.
#' @param newlines Should single newlines be converted to LaTeX line breaks?
#' @param spaces Should spaces be made explicit?
#'
#' @noRd
#' @keywords internal
escapeLatex <- function(x, newlines = FALSE, spaces = FALSE) {
  ## Mark backslashes first so that backslashes inserted by the later
  ## substitutions are not escaped again.
  x <- gsub("\\\\", "\\\\textbackslash", x)
  ## Escape the characters LaTeX treats as special.
  x <- gsub("([#$%&_{}])", "\\\\\\1", x)
  ## Finish the backslash replacement started above.
  x <- gsub("\\\\textbackslash", "\\\\textbackslash{}", x)
  x <- gsub("~", "\\\\textasciitilde{}", x)
  x <- gsub("\\^", "\\\\textasciicircum{}", x)
  ## Optionally convert isolated newlines to LaTeX line breaks.
  if (newlines) {
    x <- gsub("(?<!\n)\n(?!\n)", "\\\\\\\\", x, perl = TRUE)
  }
  ## Optionally make spaces explicit.
  if (spaces) {
    x <- gsub(" ", "\\\\ \\\\ ", x)
  }
  x
}
#' Helper function for renaming rows to a more user readable output.
#'
#' @param dat A matrix or data.frame whose row names should be renamed.
#'
#' @return The input with row names known in the lookup table replaced by
#' their readable counterparts; other row names are left untouched.
#'
#' @noRd
#' @keywords internal
renameRows <- function(dat) {
  ## Lookup table mapping internal model term names to readable labels.
  lookup <- c(
    "genotype" = "Genotype",
    "repId" = "Replicate",
    "rowId" = "Row",
    "colId" = "Col",
    "subBlock" = "Block",
    "repId:rowId" = "Row(replicate)",
    "repId:colId" = "Col(replicate)",
    "repId:subBlock" = "Block(replicate)",
    "colCoord" = "Linear trend along cols",
    "rowCoord" = "Linear trend along rows",
    "rowCoordcolCoord" = "Linear trend along rows and cols",
    "f(colCoord)" = "Smooth trend along cols",
    "f(rowCoord)" = "Smooth trend along rows",
    "f(colCoord):rowCoord" = "Linear trend in rows changing smoothly along cols",
    "colCoord:f(rowCoord)" = "Linear trend in cols changing smoothly along rows",
    "f(colCoord):f(rowCoord)" = "Smooth-by-smooth interaction trend over rows and cols",
    "Nobs" = "Number of observations",
    "rowId:colId" = "Residual",
    "R" = "Residual",
    "variance" = "Residual",
    "pow" = "Power",
    "units" = "Units")
  ## Replace only those row names that occur in the lookup table.
  hits <- match(rownames(dat), names(lookup))
  rownames(dat)[!is.na(hits)] <- unname(lookup[hits[!is.na(hits)]])
  return(dat)
}
#' Function for extracting the table with variance components from a model in
#' a nicely printable format.
#'
#' @param model A fitted model object; a SpATS, lm/lme4 or asreml model
#' matching \code{engine}.
#' @param engine A character string, the engine used for fitting the model;
#' one of "SpATS", "lme4" or "asreml".
#'
#' @return A matrix with a Variance column (and for asreml also an SE
#' column), with readable row names, Genotype first and an empty (NA) row
#' inserted before the Residual row.
#'
#' @noRd
#' @keywords internal
extractVarComp <- function(model,
                           engine) {
  if (engine == "SpATS") {
    ## Extract variance components directly from model since using summary
    ## creates a matrix with values already rounded restricting flexibility.
    varComp <- matrix(data = c(model$var.comp, model$psi[1]),
                      dimnames = list(c(names(model$var.comp), "Residual"),
                                      "Variance"))
  } else if (engine == "lme4") {
    if (inherits(model, "lm")) {
      ## In this case there is only residual variance since there are no
      ## random effects.
      varComp <- matrix(data = summary(model)$sigma ^ 2,
                        dimnames = list("Residual", "Variance"))
    } else {
      varComp <- as.data.frame(lme4::VarCorr(model))
      varComp <- matrix(data = varComp$vcov,
                        dimnames = list(varComp$grp, "Variance"))
    }
  } else if (engine == "asreml") {
    ## asreml provides the SE of the variance components as standard output.
    ## This is included in varComp.
    varComp <- as.matrix(summary(model)$varcomp[c("component", "std.error")])
    ## Remove correlations from output. These are present for spatials models.
    varComp <- varComp[!grepl(pattern = ".cor", x = rownames(varComp)), ,
                       drop = FALSE]
    ## To extract user readable names similar to the other engines from
    ## asreml output split the rownames on "." and "!" Depending on the first
    ## part of the split use the appropriate part as row name.
    rownames(varComp) <- sapply(X = strsplit(x = rownames(varComp),
                                             split = "[!.]+"),
                                FUN = function(split) {
                                  if (split[[1]] == "R") {
                                    return(split[[2]])
                                  } else {
                                    return(split[[1]])
                                  }
                                })
    colnames(varComp) <- c("Variance", "SE")
  }
  ## Rename rows for more user readable output.
  varComp <- renameRows(varComp)
  ## Always put genotype as first row.
  if ("Genotype" %in% rownames(varComp)) {
    varComp <- rbind(varComp["Genotype", , drop = FALSE],
                     varComp[rownames(varComp) != "Genotype", , drop = FALSE])
  }
  ## Add an empty row before residuals if it is not already there.
  ## Only done if there is more than 1 row.
  resRow <- which(rownames(varComp) == "Residual")
  if (length(resRow) && nrow(varComp) > 1 && rownames(varComp)[resRow - 1] != "") {
    varComp <- rbind(varComp[1:(resRow - 1), , drop = FALSE],
                     rep(NA, times = ncol(varComp)),
                     varComp[resRow:nrow(varComp), , drop = FALSE])
  }
  return(varComp)
}
#' Helper function for constructing two data.frames containing the coordinates
#' that can be used for plotting a border around parts of a raster plot using
#' geom_path in ggplot2. This can be used to construct an outside border
#' around each replicate in a plot. ggplot2 itself doesn't have this
#' functionality.
#'
#' @param trDat A data.frame with at least the columns rowCoord, colCoord
#' and the factor column named by \code{bordVar}.
#' @param bordVar A character string, the name of the factor column in
#' \code{trDat} around whose levels borders should be drawn.
#'
#' @return A list of two data.frames, horW and vertW, with the x/y positions
#' of the horizontal and vertical border segments.
#'
#' @noRd
#' @keywords internal
calcPlotBorders <- function(trDat,
                            bordVar) {
  yMin <- min(trDat$rowCoord)
  yMax <- max(trDat$rowCoord)
  xMin <- min(trDat$colCoord)
  xMax <- max(trDat$colCoord)
  ## Create matrix containing replicates.
  ## First create an empty matrix containing all row/column values
  ## between min and max to assure complete missing rows/columns
  ## are added.
  M <- matrix(nrow = yMax - yMin + 1, ncol = xMax - xMin + 1,
              dimnames = list(yMin:yMax, xMin:xMax))
  for (i in 1:nrow(trDat)) {
    M[as.character(trDat[i, "rowCoord"]),
      as.character(trDat[i, "colCoord"])] <- trDat[i, bordVar]
  }
  ## Create an imputed version of M for plotting borders around NA values.
  ## NA cells get a value one above the number of levels so every change of
  ## value (including into/out of NA areas) produces a border.
  MImp <- M
  MImp[is.na(MImp)] <- nlevels(trDat[[bordVar]]) + 1
  ## Helper: keep only two-column results with at least one row.
  has.breaks <- function(x) {
    ncol(x) == 2 & nrow(x) > 0
  }
  ## Create a data.frame with positions where the value of rep in the
  ## data changes in vertical direction.
  vertW <- do.call(rbind.data.frame,
                   Filter(f = has.breaks, x = Map(function(i, x) {
                     cbind(y = i, x = which(diff(c(0, x, 0)) != 0))
                   }, 1:nrow(MImp), split(MImp, 1:nrow(MImp)))))
  ## Remove vertical walls that are on the outside bordering an NA value
  ## to prevent drawing of unneeded lines.
  vertW <- vertW[!(vertW$x == 1 & is.na(M[vertW$y, 1])) &
                   !(vertW$x == ncol(M) + 1 &
                       is.na(M[vertW$y, ncol(M)])), ]
  ## Add min row value for plotting in the correct position.
  vertW$y <- vertW$y + yMin - 1
  vertW$x <- vertW$x + xMin - 1
  ## For horizontal walls follow the same procedure as above.
  horW <- do.call(rbind.data.frame,
                  Filter(f = has.breaks, x = Map(function(i, y) {
                    cbind(x = i, y = which(diff(c(0, y, 0)) != 0))
                  }, 1:ncol(MImp), as.data.frame(MImp))))
  horW <- horW[!(horW$y == 1 & is.na(M[1, horW$x])) &
                 !(horW$y == nrow(M) + 1 & is.na(M[nrow(M), horW$x])), ]
  horW$y <- horW$y + yMin - 1
  horW$x <- horW$x + xMin - 1
  return(list(horW = horW, vertW = vertW))
}
#' This function is a slightly modified copy of map_data from ggplot2 combined
#' with map.fortify also from ggplot2.
#' Using the normal function is not possible because both packages qtl and maps
#' have a class map and when building the vignette this gives an error.
#'
#' @param xLim A numeric vector of length two, the longitude limits.
#' @param yLim A numeric vector of length two, the latitude limits.
#'
#' @return A data.frame with columns long, lat, group, order, region and
#' subregion, suitable for plotting with ggplot2.
#'
#' @noRd
#' @keywords internal
mapData <- function(xLim,
                    yLim) {
  ## Fetch world map polygons restricted to the requested window.
  worldMap <- maps::map("world", exact = FALSE, plot = FALSE,
                        fill = TRUE, xlim = xLim, ylim = yLim)
  mapDf <- data.frame(long = worldMap$x, lat = worldMap$y)
  ## Polygons are separated by NA coordinate pairs; count them to assign a
  ## group number to each polygon.
  mapDf$group <- cumsum(is.na(mapDf$long) & is.na(mapDf$lat)) + 1
  mapDf$order <- 1:nrow(mapDf)
  ## Split polygon names of the form "region:subregion" into two parts.
  regionInfo <- do.call("rbind", lapply(strsplit(worldMap$names, "[:,]"),
                                        "[", 1:2))
  mapDf$region <- regionInfo[mapDf$group, 1]
  mapDf$subregion <- regionInfo[mapDf$group, 2]
  ## Drop the NA separator rows.
  return(mapDf[stats::complete.cases(mapDf$lat, mapDf$long), ])
}
#' Select the extraction options valid for the requested models and engine.
#'
#' @noRd
#' @keywords internal
extractOptSel <- function(what,
                          fixed,
                          random,
                          engine) {
  ## Determine which model types (Fixed/Random) are requested.
  modelTypes <- c("F", "R")[c(fixed, random)]
  ## Restrict the options table to the current engine and model types.
  optsSel <- extractOptions[extractOptions[[engine]] == 1 &
                              extractOptions[["model"]] %in% modelTypes, ]
  if (what[1] != "all") {
    ## Validate the requested options against the available ones.
    what <- match.arg(arg = what, choices = optsSel[["result"]],
                      several.ok = TRUE)
    optsSel <- optsSel[optsSel[["result"]] %in% what, ]
  }
  return(optsSel[["result"]])
}
#' Helper function for detecting the version of asreml installed.
#' This is used wherever the syntax for asreml4 differs from asreml3.
#'
#' @return \code{TRUE} if asreml version 4 or higher is installed and its
#' license is valid, \code{FALSE} if an older version is installed or asreml
#' is not installed at all.
#'
#' @noRd
#' @importFrom utils packageVersion
#' @keywords internal
asreml4 <- function() {
  if (requireNamespace("asreml", quietly = TRUE)) {
    if (packageVersion("asreml")[1] >= 4) {
      ## Calling license status apparently also activates the license if this
      ## was done once before.
      licenceStatus <- asreml::asreml.license.status(quiet = TRUE)
      if (licenceStatus$status != 0) {
        stop("Error checking asreml licence status:\n",
             licenceStatus$statusMessage)
      }
      return(TRUE)
    }
    return(FALSE)
  }
  ## Fix: previously the function returned NULL (invisibly) when asreml was
  ## not installed, which breaks callers using the result in if() conditions.
  return(FALSE)
}
#' Helper function for row binding data.frames with different columns.
#'
#' Columns missing in an individual data.frame are filled with NA.
#'
#' @param dfList A list of data.frames.
#'
#' @return A single data.frame with the union of all columns; an empty
#' data.frame when all inputs are empty.
#'
#' @noRd
#' @keywords internal
dfBind <- function(dfList) {
  ## Drop data.frames without rows; they contribute nothing.
  nonEmpty <- Filter(f = function(d) nrow(d) > 0, x = dfList)
  if (length(nonEmpty) == 0) {
    return(data.frame())
  }
  ## Union of the column names over all remaining data.frames.
  allCols <- unique(unlist(lapply(nonEmpty, names)))
  ## Pad each data.frame with NA columns for the names it is missing.
  padded <- lapply(X = nonEmpty, FUN = function(d) {
    for (missingCol in setdiff(allCols, names(d))) {
      d[[missingCol]] <- NA
    }
    d
  })
  do.call(rbind, c(padded, make.row.names = FALSE))
}
|
## Shiny UI for the at7987 teaching app: a data table, a data summary,
## event plots, a CDF plot with distribution/confidence-band options, and a
## code viewer tab.
## Fix: use TRUE instead of T (T can be reassigned) and <- for assignment.
ui <- navbarPage(
  title = 'at7987',
  collapsible = TRUE,
  position = 'fixed-top',
  theme = teachingApps::add_theme(getShinyOption('theme')),
  header = teachingApps::add_css(),
  footer = teachingApps::add_logo(),
  tabPanel("Data Set", DT::dataTableOutput("table.at7987", height = "80%")),
  tabPanel("Summary", verbatimTextOutput("summary.at7987")),
  tabPanel("Event Plots",
           sidebarLayout(
             sidebarPanel(width = 3,
                          selectInput("PLOT_2", label = "Plot:",
                                      choices = c("Event Plot", "Histogram"),
                                      selected = "Event Plot")),
             mainPanel(plotOutput("eventplot.at7987", height = '650px'),
                       width = 9))),
  tabPanel("CDF Plot",
           sidebarLayout(
             sidebarPanel(width = 3,
                          selectInput("dist_2",
                                      label = "Distribution:",
                                      choices = c("None",
                                                  "Weibull",
                                                  "Exponential",
                                                  "Normal",
                                                  "Lognormal",
                                                  "Smallest Extreme Value",
                                                  "Largest Extreme Value",
                                                  "Frechet"),
                                      selected = "Weibull"),
                          selectInput("ci_2", label = "Confidence Level:",
                                      choices = c(0.99, 0.95, 0.90, 0.85,
                                                  0.80, 0.50),
                                      selected = 0.95),
                          selectInput("bt_2",
                                      label = "Band Type:",
                                      choices = c("Pointwise",
                                                  "Simultaneous",
                                                  "none"),
                                      selected = "Pointwise")),
             mainPanel(plotOutput("cdfplot.at7987", height = '650px'),
                       width = 9))),
  tabPanel('Code Mirror',
           mainPanel(codemirrorR::codemirrorOutput('figures', height = '650px'),
                     width = 12)))
|
/inst/apps/at7987_data/ui.R
|
no_license
|
Ammar-K/SMRD
|
R
| false
| false
| 1,720
|
r
|
## Shiny UI for the at7987 teaching app: a data table, a data summary,
## event plots, a CDF plot with distribution/confidence-band options, and a
## code viewer tab.
## Fix: use TRUE instead of T (T can be reassigned) and <- for assignment.
ui <- navbarPage(
  title = 'at7987',
  collapsible = TRUE,
  position = 'fixed-top',
  theme = teachingApps::add_theme(getShinyOption('theme')),
  header = teachingApps::add_css(),
  footer = teachingApps::add_logo(),
  tabPanel("Data Set", DT::dataTableOutput("table.at7987", height = "80%")),
  tabPanel("Summary", verbatimTextOutput("summary.at7987")),
  tabPanel("Event Plots",
           sidebarLayout(
             sidebarPanel(width = 3,
                          selectInput("PLOT_2", label = "Plot:",
                                      choices = c("Event Plot", "Histogram"),
                                      selected = "Event Plot")),
             mainPanel(plotOutput("eventplot.at7987", height = '650px'),
                       width = 9))),
  tabPanel("CDF Plot",
           sidebarLayout(
             sidebarPanel(width = 3,
                          selectInput("dist_2",
                                      label = "Distribution:",
                                      choices = c("None",
                                                  "Weibull",
                                                  "Exponential",
                                                  "Normal",
                                                  "Lognormal",
                                                  "Smallest Extreme Value",
                                                  "Largest Extreme Value",
                                                  "Frechet"),
                                      selected = "Weibull"),
                          selectInput("ci_2", label = "Confidence Level:",
                                      choices = c(0.99, 0.95, 0.90, 0.85,
                                                  0.80, 0.50),
                                      selected = 0.95),
                          selectInput("bt_2",
                                      label = "Band Type:",
                                      choices = c("Pointwise",
                                                  "Simultaneous",
                                                  "none"),
                                      selected = "Pointwise")),
             mainPanel(plotOutput("cdfplot.at7987", height = '650px'),
                       width = 9))),
  tabPanel('Code Mirror',
           mainPanel(codemirrorR::codemirrorOutput('figures', height = '650px'),
                     width = 12)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SMNN_data.R
\docType{data}
\name{data_SMNN}
\alias{data_SMNN}
\title{A list of two expression matrices for two batches. The first batch contains 400 cells of
three cell types, fibroblasts, macrophages and endothelial cells. And the second batch
has 500 cells of the same three cell types.}
\format{An object of class \code{list} of length 2.}
\usage{
data("data_SMNN")
}
\description{
A list of two expression matrices for two batches. The first batch contains 400 cells of
three cell types, fibroblasts, macrophages and endothelial cells. And the second batch
has 500 cells of the same three cell types.
}
\examples{
# Load the example data data_SMNN
data("data_SMNN")
# Provide the marker genes for cluster matching
markers <- c("Col1a1", "Pdgfra", "Ptprc", "Pecam1")
# Specify the cluster labels for each marker gene
cluster.info <- c(1, 1, 2, 3)
# Call function unifiedClusterLabelling to identify the corresponding clusters between two batches
matched_clusters <- unifiedClusterLabelling(data_SMNN$batch1.mat, data_SMNN$batch2.mat, features.use = markers, cluster.labels = cluster.info, min.perc = 0.3)
# Set python version to be compatible with SMNNcorrect implementation
library(reticulate)
use_python("/nas/longleaf/apps/python/3.5.1/bin/python3")
# Perform batch effect correction using SMNNcorrect
corrected.results <- SMNNcorrect(batches = list(data_SMNN$batch1.mat, data_SMNN$batch2.mat), batch.cluster.labels = matched_clusters, matched.clusters = c(1,2,3), k=20, sigma=1, cos.norm.in=TRUE, cos.norm.out=TRUE)
}
\keyword{datasets}
|
/man/data_SMNN.Rd
|
no_license
|
yycunc/SMNN
|
R
| false
| true
| 1,631
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SMNN_data.R
\docType{data}
\name{data_SMNN}
\alias{data_SMNN}
\title{A list of two expression matrices for two batches. The first batch contains 400 cells of
three cell types, fibroblasts, macrophages and endothelial cells. And the second batches
has 500 cells of the same three cell types.}
\format{An object of class \code{list} of length 2.}
\usage{
data("data_SMNN")
}
\description{
A list of two expression matrices for two batches. The first batch contains 400 cells of
three cell types, fibroblasts, macrophages and endothelial cells. And the second batches
has 500 cells of the same three cell types.
}
\examples{
# Load the example data data_SMNN
data("data_SMNN")
# Provide the marker genes for cluster matching
markers <- c("Col1a1", "Pdgfra", "Ptprc", "Pecam1")
# Specify the cluster labels for each marker gene
cluster.info <- c(1, 1, 2, 3)
# Call function unifiedClusterLabelling to identify the corresponding clusters between two batches
matched_clusters <- unifiedClusterLabelling(data_SMNN$batch1.mat, data_SMNN$batch2.mat, features.use = markers, cluster.labels = cluster.info, min.perc = 0.3)
# Set python version to be compatible with SMNNcorrect implementation
library(reticulate)
use_python("/nas/longleaf/apps/python/3.5.1/bin/python3")
# Perform batch effect correction using SMNNcorrect
corrected.results <- SMNNcorrect(batches = list(data_SMNN$batch1.mat, data_SMNN$batch2.mat), batch.cluster.labels = matched_clusters, matched.clusters = c(1,2,3), k=20, sigma=1, cos.norm.in=TRUE, cos.norm.out=TRUE)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_gifti.R
\name{write_surf_gifti}
\alias{write_surf_gifti}
\title{Write CIFTI surface data to GIFTI}
\usage{
write_surf_gifti(
x,
gifti_fname,
hemisphere = c("left", "right"),
encoding = NULL,
endian = c("LittleEndian", "BigEndian")
)
}
\arguments{
\item{x}{A \code{"surf"} object, an object from \code{gifti::readgii}, or a
list with elements "pointset" and "triangle".}
\item{gifti_fname}{Where to write the GIFTI file.}
\item{hemisphere}{"left" (default) or "right". Ignored if \code{data} is already
a "gifti" object, or if it is a \code{"surf"} object with the hemisphere metadata
already specified.}
\item{encoding}{A length-2 vector with elements chosen among "ASCII",
"Base64Binary", and "GZipBase64Binary". If \code{NULL} (default), will use
the metadata if \code{data} is a "gifti" object, or "GZipBase64Binary" for the
"pointset" and "ASCII" for the "traingles" if \code{data} is not already
a GIFTI.}
\item{endian}{"LittleEndian" (default) or "BigEndian".}
}
\value{
Whether the GIFTI was successfully written
}
\description{
Write the data for the left or right surface to a surface GIFTI file.
}
|
/man/write_surf_gifti.Rd
|
no_license
|
yoaman/r-cran-ciftiTools
|
R
| false
| true
| 1,247
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_gifti.R
\name{write_surf_gifti}
\alias{write_surf_gifti}
\title{Write CIFTI surface data to GIFTI}
\usage{
write_surf_gifti(
x,
gifti_fname,
hemisphere = c("left", "right"),
encoding = NULL,
endian = c("LittleEndian", "BigEndian")
)
}
\arguments{
\item{x}{A \code{"surf"} object, an object from \code{gifti::readgii}, or a
list with elements "pointset" and "triangle".}
\item{gifti_fname}{Where to write the GIFTI file.}
\item{hemisphere}{"left" (default) or "right". Ignored if \code{data} is already
a "gifti" object, or if it is a \code{"surf"} object with the hemisphere metadata
already specified.}
\item{encoding}{A length-2 vector with elements chosen among "ASCII",
"Base64Binary", and "GZipBase64Binary". If \code{NULL} (default), will use
the metadata if \code{data} is a "gifti" object, or "GZipBase64Binary" for the
"pointset" and "ASCII" for the "traingles" if \code{data} is not already
a GIFTI.}
\item{endian}{"LittleEndian" (default) or "BigEndian".}
}
\value{
Whether the GIFTI was successfully written
}
\description{
Write the data for the left or right surface to a surface GIFTI file.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ELISAtools_IO.R
\name{saveDataText}
\alias{saveDataText}
\title{Save elisa_batch analysis results}
\usage{
saveDataText(batches, file.name)
}
\arguments{
\item{batches}{list of elisa batch data to be serialized.}
\item{file.name}{character specifying name of the output file.}
}
\description{
Save the data analysis results to disk in text format.
}
\details{
The results are written to disk in the text format (tab-delimited) and is
easy to be used for other analysis.
}
\examples{
#'#R code to run 5-parameter logistic regression on ELISA data
#load the library
library(ELISAtools)
#get file folder
dir_file<-system.file("extdata", package="ELISAtools")
batches<-loadData(file.path(dir_file,"design.txt"))
#make a guess for the parameters, the other two parameters a and d
#will be estimated based on data.
model<-"5pl"
pars<-c(7.2,0.5, 0.015) #5pl inits
names(pars)<-c("xmid", "scal", "g")
#do fitting. model will be written into data set.
batches<-runFit(pars=pars, batches=batches, refBatch.ID=1, model=model )
#now call to do predictions based on the model.
batches<-predictAll(batches);
#now saving the data in text.
saveDataText(batches, file.path(tempdir(),"elisa_data.txt"));
}
|
/man/saveDataText.Rd
|
no_license
|
cran/ELISAtools
|
R
| false
| true
| 1,281
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ELISAtools_IO.R
\name{saveDataText}
\alias{saveDataText}
\title{Save elisa_batch analysis results}
\usage{
saveDataText(batches, file.name)
}
\arguments{
\item{batches}{list of elisa batch data to be serialized.}
\item{file.name}{character specifying name of the output file.}
}
\description{
Save the data analysis results to disk in text format.
}
\details{
The results are written to disk in the text format (tab-delimited) and is
easy to be used for other analysis.
}
\examples{
#'#R code to run 5-parameter logistic regression on ELISA data
#load the library
library(ELISAtools)
#get file folder
dir_file<-system.file("extdata", package="ELISAtools")
batches<-loadData(file.path(dir_file,"design.txt"))
#make a guess for the parameters, the other two parameters a and d
#will be estimated based on data.
model<-"5pl"
pars<-c(7.2,0.5, 0.015) #5pl inits
names(pars)<-c("xmid", "scal", "g")
#do fitting. model will be written into data set.
batches<-runFit(pars=pars, batches=batches, refBatch.ID=1, model=model )
#now call to do predictions based on the model.
batches<-predictAll(batches);
#now saving the data in text.
saveDataText(batches, file.path(tempdir(),"elisa_data.txt"));
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculations.R
\name{total_THMs}
\alias{total_THMs}
\title{Calculate the total THMs
Calculate the total THMs, equal to the sum of bromodichloromethane,
bromoform, chlorodibromomethane, chloroform}
\usage{
total_THMs(column_names, labdat)
}
\arguments{
\item{column_names}{string vector defined in apply_calculations. The desired final
columns for each calculated dataframe}
\item{labdat}{dataframe containing the parsed lab data}
}
\value{
dataframe containing all required columns, including the calculated
result. Note that the results of \code{determine_NA_NC()} are employed, and that
parameters for which a value cannot be calculated are assigned NA or NaN, as
defined in the function description
}
\description{
Calculate the total THMs
Calculate the total THMs, equal to the sum of bromodichloromethane,
bromoform, chlorodibromomethane, chloroform
}
|
/man/total_THMs.Rd
|
permissive
|
biogeochem/bpwtpR
|
R
| false
| true
| 936
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculations.R
\name{total_THMs}
\alias{total_THMs}
\title{Calculate the total THMs
Calculate the total THMs, equal to the sum of bromodichloromethane,
bromoform, chlorodibromomethane, chloroform}
\usage{
total_THMs(column_names, labdat)
}
\arguments{
\item{column_names}{string vector defined in apply_calculations. The desired final
columns for each calculated dataframe}
\item{labdat}{dataframe containing the parsed lab data}
}
\value{
dataframe containing all required columns, including the calculated
result. Note that the results of \code{determine_NA_NC()} are employed, and that
parameters for which a value cannot be calculated are assigned NA or NaN, as
defined in the function description
}
\description{
Calculate the total THMs
Calculate the total THMs, equal to the sum of bromodichloromethane,
bromoform, chlorodibromomethane, chloroform
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{fmi_stations}
\alias{fmi_stations}
\alias{fmi_weather_stations}
\title{Get a list of active FMI observation stations.}
\usage{
fmi_stations(groups = NULL, quiet = FALSE)
}
\arguments{
\item{groups}{a character vector of observation station groups to subset for}
\item{quiet}{whether to suppress printing of diagnostic messages}
}
\value{
a \code{data.frame} of active observation stations
}
\description{
A table of active observation stations is downloaded from the website of
Finnish Meteorological Institute, if package \pkg{rvest} or package \pkg{XML}
is installed. If neither is, or if the download fails for any other reason, a
local copy provided as a csv file within the \pkg{fmi} package is used.
}
\details{
\code{fmi_weather_stations()} is a deprecated alias for
\code{fmi_stations(groups="Weather stations")}.
}
\author{
Joona Lehtomaki \email{joona.lehtomaki@gmail.com},
Ilari Scheinin
}
\seealso{
\url{http://en.ilmatieteenlaitos.fi/observation-stations}
}
|
/man/fmi_stations.Rd
|
permissive
|
jlintusaari/fmi
|
R
| false
| true
| 1,065
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{fmi_stations}
\alias{fmi_stations}
\alias{fmi_weather_stations}
\title{Get a list of active FMI observation stations.}
\usage{
fmi_stations(groups = NULL, quiet = FALSE)
}
\arguments{
\item{groups}{a character vector of observation station groups to subset for}
\item{quiet}{whether to suppress printing of diagnostic messages}
}
\value{
a \code{data.frame} of active observation stations
}
\description{
A table of active observation stations is downloaded from the website of
Finnish Meteorological Institute, if package \pkg{rvest} or package \pkg{XML}
is installed. If neither is, or if the download fails for any other reason, a
local copy provided as a csv file within the \pkg{fmi} package is used.
}
\details{
\code{fmi_weather_stations()} is a deprecated alias for
\code{fmi_stations(groups="Weather stations")}.
}
\author{
Joona Lehtomaki \email{joona.lehtomaki@gmail.com},
Ilari Scheinin
}
\seealso{
\url{http://en.ilmatieteenlaitos.fi/observation-stations}
}
|
# ===========================================================================
# SIR model with disease-induced mortality (compartments S, I, R, D).
# Flat script: sets parameters, integrates the ODE system with deSolve::ode,
# plots the trajectories with ggplot2, then draws an overlaid plotly chart.
# NOTE(review): the commented-out function header below and the trailing
# "# getwd()}" suggest this was once wrapped in SIR.model(t, b, g, m); it now
# runs at top level and mutates global state (theme_set, View).
# ===========================================================================
#SIR.model <- function(t, b, g, m){
require(deSolve)
t=250                # simulation horizon (days)
deaths <- 3          # deaths used to derive the per-capita death rate
N <- 1000            # total population size
tinfec <- 14         # mean infectious period (days)
gamma <- 1/tinfec    # recovery rate
R0 <- 5 # average number of secondary infections caused by one infectious individual
beta <- (gamma*R0)/N # transmission rate, scaled by population size
mu=deaths/N          # disease-induced mortality rate
# Initial state: one infected individual, everyone else susceptible.
init <- c(S=N-1,I=1,R=0, D=0)
parameters <- c(bet=beta,gamm=gamma, mmu=mu)
time <- seq(0,t,by=1)
# Right-hand side of the SIR+D system, in the signature deSolve::ode expects;
# returns list(c(dS, dI, dR, dD)) with state and parameters in scope.
eqn <- function(time,state,parameters){
with(as.list(c(state,parameters)),{
dS <- -(bet*S*I)
dI <- (bet*S*I)-gamm*I-mmu*I
dR <- gamm*I
dD <- mmu*I
return(list(c(dS, dI, dR, dD)))})
}
out<-ode(y=init,times=time,eqn,parms=parameters)
out.df<-as.data.frame(out)
require(ggplot2)
# Grey-panel theme, installed globally for every subsequent ggplot.
mytheme4 <- theme_bw() +
theme(text=element_text(colour="black")) +
theme(panel.grid = element_line(colour = "white")) +
theme(panel.background = element_rect(fill = "#B2B2B2"))
theme_set(mytheme4)
title <- bquote("SIR Model with mortality") # NOTE(review): defined but never used below
# Subtitle shows the parameter values; parameters[1:3] are bet, gamm, mmu.
subtit <- bquote(list(beta==.(parameters[1]),~gamma==.(parameters[2]),~mu==.(parameters[3])))
res<-ggplot(out.df,aes(x=time))+
ggtitle(bquote(atop(bold(.(subtit)))))+
geom_line(aes(y=S,colour="Susceptible"))+
geom_line(aes(y=I,colour="Infected"))+
geom_line(aes(y=R,colour="Recovered"))+
geom_line(aes(y=D,colour="Deaths"))+
ylab(label="Proportion")+
xlab(label="Time (days)")+
theme(legend.justification=c(1,0), legend.position=c(1,0.5))+
theme(legend.title=element_text(size=12,face="bold"),
legend.background = element_rect(fill='#FFFFFF',
size=1,linetype="solid"),
legend.text=element_text(size=10),
legend.key=element_rect(colour="#FFFFFF",
fill='#C2C2C2',
size=0.5,
linetype="solid"),
plot.title = element_text(color = "black", size = 12, face = "bold"),
plot.subtitle = element_text(size = 10, hjust = 0.5))+
scale_colour_manual("Compartments",
breaks=c("Susceptible","Infected","Recovered", "Deaths"),
values=c("blue","red","darkgreen", "black"))
res
# Round compartment trajectories up to whole individuals for the plotly chart.
out.df$Day <- out.df$time
out.df$Infected <- ceiling(out.df$I)
out.df$Recovered <- ceiling(out.df$R)
out.df$Susceptible <- ceiling(out.df$S)
out.df$Deaths <- ceiling(out.df$D)
library(dplyr)
# Side effect: opens the data viewer on the reduced data frame.
View(out.df <- out.df %>% select(Day, Susceptible, Infected, Recovered, Deaths))
# ggsave(plot=res,
#        filename=paste0("SIRplot_","time",t,"beta",b,"gamma",g,"mu",m,".png"),
#        width=8,height=6,dpi=180)
# getwd()}
# Interactive overlaid bar chart of daily S/I/R counts.
confirmed_color <- rgb(234, 240, 67, maxColorValue = 255)
death_color <- rgb(220, 40, 40, maxColorValue = 255, alpha = 230)
recovered_color <- rgb(25, 209, 86, maxColorValue = 255, alpha = 230)
plotly::ggplotly(plotly::plot_ly(out.df,
x = ~ Day,
y = ~ Susceptible,
type = "bar",
name = "Susceptible",
marker = list(color = confirmed_color)) %>%
plotly::add_trace(y = ~ Infected,
name = "Infected",
marker = list(color = death_color)) %>%
plotly::add_trace(y = ~ Recovered,
name = "Recovered",
marker = list(color = recovered_color)) %>%
plotly::layout(barmode = 'overlay',
yaxis = list(title = "Population"),
xaxis = list(title = ""),
legend = list(x = 0.2, y = 1),
hovermode = "compare"))
### SIR with demography (vital dynamics)
# ===========================================================================
# SIR model with demography: constant per-capita birth inflow mu*N into S and
# death rate mu out of every compartment, so total population stays at N.
# ===========================================================================
#SIR.model <- function(t, b, g, m){
require(deSolve)
t=150              # simulation horizon (days)
deaths <- 3
N <- 1000
tinfec <- 14       # mean infectious period (days)
gamma <- 1/tinfec  # recovery rate
mu=deaths/N        # per-capita birth/death rate
R0 <- 10 # average number of secondary infections caused by one infectious individual
# With demography, R0 = beta*N/(gamma+mu), hence:
beta <- (R0*(gamma+mu))/N
init <- c(S=N-1,I=1,R=0)
parameters <- c(bet=beta,gamm=gamma, mmu=mu)
time <- seq(0,t,by=1)
# Right-hand side of the SIR-with-demography system for deSolve::ode.
eqn <- function(time,state,parameters){
with(as.list(c(state,parameters)),{
dS <- mmu*N-(bet*S*I)-mmu*S
dI <- (bet*S*I)-gamm*I-mmu*I
dR <- gamm*I-mmu*R
return(list(c(dS, dI, dR)))})
}
out<-ode(y=init,times=time,eqn,parms=parameters)
out.df<-as.data.frame(out)
require(ggplot2)
mytheme4 <- theme_bw() +
theme(text=element_text(colour="black")) +
theme(panel.grid = element_line(colour = "white")) +
theme(panel.background = element_rect(fill = "#B2B2B2"))
theme_set(mytheme4)
title <- bquote("SIR Model with mortality") # NOTE(review): defined but never used below
# Subtitle shows the parameter values; parameters[1:3] are bet, gamm, mmu.
subtit <- bquote(list(beta==.(parameters[1]),~gamma==.(parameters[2]),~mu==.(parameters[3])))
res<-ggplot(out.df,aes(x=time))+
ggtitle(bquote(atop(bold(.(subtit)))))+
geom_line(aes(y=S,colour="Susceptible"))+
geom_line(aes(y=I,colour="Infected"))+
geom_line(aes(y=R,colour="Recovered"))+
ylab(label="Proportion")+
xlab(label="Time (days)")+
theme(legend.justification=c(1,0), legend.position=c(1,0.5))+
theme(legend.title=element_text(size=12,face="bold"),
legend.background = element_rect(fill='#FFFFFF',
size=1,linetype="solid"),
legend.text=element_text(size=10),
legend.key=element_rect(colour="#FFFFFF",
fill='#C2C2C2',
size=0.5,
linetype="solid"),
plot.title = element_text(color = "black", size = 12, face = "bold"),
plot.subtitle = element_text(size = 10, hjust = 0.5))+
scale_colour_manual("Compartments",
breaks=c("Susceptible","Infected","Recovered"),
values=c("blue","red","darkgreen"))
res
####################################################
# SEIR model with disease-induced mortality (S, E, I, R, D).
# The exposed class E transmits at a reduced rate (1-q) relative to I.
require(deSolve)
t=250              # simulation horizon (days)
deaths <- 5
N <- 1000
tinfec <- 14       # mean infectious period (days)
gamma <- 1/tinfec  # recovery rate
R0 <- 20 # average number of secondary infections caused by one infectious individual
beta <- (gamma*R0)/N # transmission rate, scaled by population size
q <- 0.8           # reduction of infectiousness of the exposed class
delta=deaths/N     # per-capita disease-induced death rate
k=1                # progression rate E -> I (1 / latent period)
#0<=q<=1
init <- c(S=N-1,E=0, I=1, R=0, D=0)
parameters <- c(bet=beta,k=k, gamm=gamma, delt=delta, q=q)
time <- seq(0,t,by=1)
# Right-hand side of the SEIR+D system for deSolve::ode.
# FIX: the original used (1-q)*E in dS but q*E in dE, so new infections left
# S at a different rate than they entered E and total population was not
# conserved (dS+dE+dI+dR+dD != 0). Both terms now use (1-q)*E.
eqn <- function(time,state,parameters){
with(as.list(c(state,parameters)),{
dS <- -bet*S*(I+(1-q)*E)
dE <- bet*S*(I+(1-q)*E)-k*E
dI <- k*E-gamm*I-delt*I
dR <- gamm*I
dD <- delt*I
return(list(c(dS, dE, dI, dR, dD)))})
}
out<-ode(y=init,times=time,eqn,parms=parameters)
out.df<-as.data.frame(out)
require(ggplot2)
mytheme4 <- theme_bw() +
theme(text=element_text(colour="black")) +
theme(panel.grid = element_line(colour = "white")) +
theme(panel.background = element_rect(fill = "#B2B2B2"))
theme_set(mytheme4)
title <- bquote("SEIR Model with mortality") # FIX: was mislabelled "SIR"; still unused below
# FIX: the subtitle previously printed parameters[2] (k) as gamma and
# parameters[3] (gamma) as mu; parameters is c(bet, k, gamm, delt, q), so
# index the correct positions and label the death rate delta.
subtit <- bquote(list(beta==.(parameters[1]),~gamma==.(parameters[3]),~delta==.(parameters[4])))
res<-ggplot(out.df,aes(x=time))+
ggtitle(bquote(atop(bold(.(subtit)))))+
geom_line(aes(y=S,colour="Susceptible"))+
geom_line(aes(y=E,colour="Exposto"))+
geom_line(aes(y=I,colour="Infected"))+
geom_line(aes(y=R,colour="Recovered"))+
geom_line(aes(y=D,colour="Morto"))+
ylab(label="Proportion")+
xlab(label="Time (days)")+
theme(legend.justification=c(1,0), legend.position=c(1,0.5))+
theme(legend.title=element_text(size=12,face="bold"),
legend.background = element_rect(fill='#FFFFFF',
size=1,linetype="solid"),
legend.text=element_text(size=10),
legend.key=element_rect(colour="#FFFFFF",
fill='#C2C2C2',
size=0.5,
linetype="solid"),
plot.title = element_text(color = "black", size = 12, face = "bold"),
plot.subtitle = element_text(size = 10, hjust = 0.5))+
scale_colour_manual("Compartments",
breaks=c("Susceptible","Exposto", "Infected", "Recovered", "Morto"),
values=c("blue","yellow","red","green", "black"))
res
|
/blog_posts/26-03-2020/Corona/SIR_Model.R
|
no_license
|
fsbmat-ufv/fsbmat-ufv.github.io
|
R
| false
| false
| 8,016
|
r
|
# ===========================================================================
# SIR model with disease-induced mortality (compartments S, I, R, D).
# Flat script: sets parameters, integrates the ODE system with deSolve::ode,
# plots the trajectories with ggplot2, then draws an overlaid plotly chart.
# NOTE(review): the commented-out function header below and the trailing
# "# getwd()}" suggest this was once wrapped in SIR.model(t, b, g, m); it now
# runs at top level and mutates global state (theme_set, View).
# ===========================================================================
#SIR.model <- function(t, b, g, m){
require(deSolve)
t=250                # simulation horizon (days)
deaths <- 3          # deaths used to derive the per-capita death rate
N <- 1000            # total population size
tinfec <- 14         # mean infectious period (days)
gamma <- 1/tinfec    # recovery rate
R0 <- 5 # average number of secondary infections caused by one infectious individual
beta <- (gamma*R0)/N # transmission rate, scaled by population size
mu=deaths/N          # disease-induced mortality rate
# Initial state: one infected individual, everyone else susceptible.
init <- c(S=N-1,I=1,R=0, D=0)
parameters <- c(bet=beta,gamm=gamma, mmu=mu)
time <- seq(0,t,by=1)
# Right-hand side of the SIR+D system, in the signature deSolve::ode expects;
# returns list(c(dS, dI, dR, dD)) with state and parameters in scope.
eqn <- function(time,state,parameters){
with(as.list(c(state,parameters)),{
dS <- -(bet*S*I)
dI <- (bet*S*I)-gamm*I-mmu*I
dR <- gamm*I
dD <- mmu*I
return(list(c(dS, dI, dR, dD)))})
}
out<-ode(y=init,times=time,eqn,parms=parameters)
out.df<-as.data.frame(out)
require(ggplot2)
# Grey-panel theme, installed globally for every subsequent ggplot.
mytheme4 <- theme_bw() +
theme(text=element_text(colour="black")) +
theme(panel.grid = element_line(colour = "white")) +
theme(panel.background = element_rect(fill = "#B2B2B2"))
theme_set(mytheme4)
title <- bquote("SIR Model with mortality") # NOTE(review): defined but never used below
# Subtitle shows the parameter values; parameters[1:3] are bet, gamm, mmu.
subtit <- bquote(list(beta==.(parameters[1]),~gamma==.(parameters[2]),~mu==.(parameters[3])))
res<-ggplot(out.df,aes(x=time))+
ggtitle(bquote(atop(bold(.(subtit)))))+
geom_line(aes(y=S,colour="Susceptible"))+
geom_line(aes(y=I,colour="Infected"))+
geom_line(aes(y=R,colour="Recovered"))+
geom_line(aes(y=D,colour="Deaths"))+
ylab(label="Proportion")+
xlab(label="Time (days)")+
theme(legend.justification=c(1,0), legend.position=c(1,0.5))+
theme(legend.title=element_text(size=12,face="bold"),
legend.background = element_rect(fill='#FFFFFF',
size=1,linetype="solid"),
legend.text=element_text(size=10),
legend.key=element_rect(colour="#FFFFFF",
fill='#C2C2C2',
size=0.5,
linetype="solid"),
plot.title = element_text(color = "black", size = 12, face = "bold"),
plot.subtitle = element_text(size = 10, hjust = 0.5))+
scale_colour_manual("Compartments",
breaks=c("Susceptible","Infected","Recovered", "Deaths"),
values=c("blue","red","darkgreen", "black"))
res
# Round compartment trajectories up to whole individuals for the plotly chart.
out.df$Day <- out.df$time
out.df$Infected <- ceiling(out.df$I)
out.df$Recovered <- ceiling(out.df$R)
out.df$Susceptible <- ceiling(out.df$S)
out.df$Deaths <- ceiling(out.df$D)
library(dplyr)
# Side effect: opens the data viewer on the reduced data frame.
View(out.df <- out.df %>% select(Day, Susceptible, Infected, Recovered, Deaths))
# ggsave(plot=res,
#        filename=paste0("SIRplot_","time",t,"beta",b,"gamma",g,"mu",m,".png"),
#        width=8,height=6,dpi=180)
# getwd()}
# Interactive overlaid bar chart of daily S/I/R counts.
confirmed_color <- rgb(234, 240, 67, maxColorValue = 255)
death_color <- rgb(220, 40, 40, maxColorValue = 255, alpha = 230)
recovered_color <- rgb(25, 209, 86, maxColorValue = 255, alpha = 230)
plotly::ggplotly(plotly::plot_ly(out.df,
x = ~ Day,
y = ~ Susceptible,
type = "bar",
name = "Susceptible",
marker = list(color = confirmed_color)) %>%
plotly::add_trace(y = ~ Infected,
name = "Infected",
marker = list(color = death_color)) %>%
plotly::add_trace(y = ~ Recovered,
name = "Recovered",
marker = list(color = recovered_color)) %>%
plotly::layout(barmode = 'overlay',
yaxis = list(title = "Population"),
xaxis = list(title = ""),
legend = list(x = 0.2, y = 1),
hovermode = "compare"))
### SIR with demography (vital dynamics)
# ===========================================================================
# SIR model with demography: constant per-capita birth inflow mu*N into S and
# death rate mu out of every compartment, so total population stays at N.
# ===========================================================================
#SIR.model <- function(t, b, g, m){
require(deSolve)
t=150              # simulation horizon (days)
deaths <- 3
N <- 1000
tinfec <- 14       # mean infectious period (days)
gamma <- 1/tinfec  # recovery rate
mu=deaths/N        # per-capita birth/death rate
R0 <- 10 # average number of secondary infections caused by one infectious individual
# With demography, R0 = beta*N/(gamma+mu), hence:
beta <- (R0*(gamma+mu))/N
init <- c(S=N-1,I=1,R=0)
parameters <- c(bet=beta,gamm=gamma, mmu=mu)
time <- seq(0,t,by=1)
# Right-hand side of the SIR-with-demography system for deSolve::ode.
eqn <- function(time,state,parameters){
with(as.list(c(state,parameters)),{
dS <- mmu*N-(bet*S*I)-mmu*S
dI <- (bet*S*I)-gamm*I-mmu*I
dR <- gamm*I-mmu*R
return(list(c(dS, dI, dR)))})
}
out<-ode(y=init,times=time,eqn,parms=parameters)
out.df<-as.data.frame(out)
require(ggplot2)
mytheme4 <- theme_bw() +
theme(text=element_text(colour="black")) +
theme(panel.grid = element_line(colour = "white")) +
theme(panel.background = element_rect(fill = "#B2B2B2"))
theme_set(mytheme4)
title <- bquote("SIR Model with mortality") # NOTE(review): defined but never used below
# Subtitle shows the parameter values; parameters[1:3] are bet, gamm, mmu.
subtit <- bquote(list(beta==.(parameters[1]),~gamma==.(parameters[2]),~mu==.(parameters[3])))
res<-ggplot(out.df,aes(x=time))+
ggtitle(bquote(atop(bold(.(subtit)))))+
geom_line(aes(y=S,colour="Susceptible"))+
geom_line(aes(y=I,colour="Infected"))+
geom_line(aes(y=R,colour="Recovered"))+
ylab(label="Proportion")+
xlab(label="Time (days)")+
theme(legend.justification=c(1,0), legend.position=c(1,0.5))+
theme(legend.title=element_text(size=12,face="bold"),
legend.background = element_rect(fill='#FFFFFF',
size=1,linetype="solid"),
legend.text=element_text(size=10),
legend.key=element_rect(colour="#FFFFFF",
fill='#C2C2C2',
size=0.5,
linetype="solid"),
plot.title = element_text(color = "black", size = 12, face = "bold"),
plot.subtitle = element_text(size = 10, hjust = 0.5))+
scale_colour_manual("Compartments",
breaks=c("Susceptible","Infected","Recovered"),
values=c("blue","red","darkgreen"))
res
####################################################
# SEIR model with disease-induced mortality (S, E, I, R, D).
# The exposed class E transmits at a reduced rate (1-q) relative to I.
require(deSolve)
t=250              # simulation horizon (days)
deaths <- 5
N <- 1000
tinfec <- 14       # mean infectious period (days)
gamma <- 1/tinfec  # recovery rate
R0 <- 20 # average number of secondary infections caused by one infectious individual
beta <- (gamma*R0)/N # transmission rate, scaled by population size
q <- 0.8           # reduction of infectiousness of the exposed class
delta=deaths/N     # per-capita disease-induced death rate
k=1                # progression rate E -> I (1 / latent period)
#0<=q<=1
init <- c(S=N-1,E=0, I=1, R=0, D=0)
parameters <- c(bet=beta,k=k, gamm=gamma, delt=delta, q=q)
time <- seq(0,t,by=1)
# Right-hand side of the SEIR+D system for deSolve::ode.
# FIX: the original used (1-q)*E in dS but q*E in dE, so new infections left
# S at a different rate than they entered E and total population was not
# conserved (dS+dE+dI+dR+dD != 0). Both terms now use (1-q)*E.
eqn <- function(time,state,parameters){
with(as.list(c(state,parameters)),{
dS <- -bet*S*(I+(1-q)*E)
dE <- bet*S*(I+(1-q)*E)-k*E
dI <- k*E-gamm*I-delt*I
dR <- gamm*I
dD <- delt*I
return(list(c(dS, dE, dI, dR, dD)))})
}
out<-ode(y=init,times=time,eqn,parms=parameters)
out.df<-as.data.frame(out)
require(ggplot2)
mytheme4 <- theme_bw() +
theme(text=element_text(colour="black")) +
theme(panel.grid = element_line(colour = "white")) +
theme(panel.background = element_rect(fill = "#B2B2B2"))
theme_set(mytheme4)
title <- bquote("SEIR Model with mortality") # FIX: was mislabelled "SIR"; still unused below
# FIX: the subtitle previously printed parameters[2] (k) as gamma and
# parameters[3] (gamma) as mu; parameters is c(bet, k, gamm, delt, q), so
# index the correct positions and label the death rate delta.
subtit <- bquote(list(beta==.(parameters[1]),~gamma==.(parameters[3]),~delta==.(parameters[4])))
res<-ggplot(out.df,aes(x=time))+
ggtitle(bquote(atop(bold(.(subtit)))))+
geom_line(aes(y=S,colour="Susceptible"))+
geom_line(aes(y=E,colour="Exposto"))+
geom_line(aes(y=I,colour="Infected"))+
geom_line(aes(y=R,colour="Recovered"))+
geom_line(aes(y=D,colour="Morto"))+
ylab(label="Proportion")+
xlab(label="Time (days)")+
theme(legend.justification=c(1,0), legend.position=c(1,0.5))+
theme(legend.title=element_text(size=12,face="bold"),
legend.background = element_rect(fill='#FFFFFF',
size=1,linetype="solid"),
legend.text=element_text(size=10),
legend.key=element_rect(colour="#FFFFFF",
fill='#C2C2C2',
size=0.5,
linetype="solid"),
plot.title = element_text(color = "black", size = 12, face = "bold"),
plot.subtitle = element_text(size = 10, hjust = 0.5))+
scale_colour_manual("Compartments",
breaks=c("Susceptible","Exposto", "Infected", "Recovered", "Morto"),
values=c("blue","yellow","red","green", "black"))
res
|
# ===========================================================================
# generate_prob: for one genomic site, translate the observed genotypes of
# all cells into numeric codes (0 = normal, 2 = mutant, 1 = ambiguous), drop
# cells with missing data ('-') from the phylogeny, and build the matrix of
# candidate true genotype configurations (plus equal weights) used by the
# helper routines defined further down in this function.
#
# Args (as used in the visible portion of the body):
#   sequencing_error_model: matrix of genotype-call error probabilities
#   unit_theta, unit_gamma, unit_mu: per-unit-time mutation-model rates
#   number_br: number of branches (NOTE(review): not referenced in the
#              visible portion of the body -- verify it is still needed)
#   number_cell: number of cells/tips (columns of initial_obs)
#   normal_genotype, mutation_genotype: genotype labels for normal/mutant
#   initial_obs: 1-row table of observed genotypes, one column per cell
#   ts: phylogenetic tree; drop.tip() below presumably comes from the ape
#       package -- TODO confirm ape is loaded by the caller
# ===========================================================================
generate_prob <- function(sequencing_error_model,unit_theta,unit_gamma,unit_mu,number_br,
number_cell,normal_genotype,mutation_genotype,initial_obs,ts){
# for this gene or location, variable singletip_exclude are those sample equal '-', variable tip_exclude are all those equal '-'
single_tip_exclude = c()
tip_exclude = c()
# for this gene or location, obs_colnam are those sample names, excluding the '-'
obs_colnam = c()
obs_genotype = c()
# for this gene or location, change the ACTG to 0/1/2, assign the mutation status for sample j
for (j in c(1:(number_cell))){
# exclude those tips with missing gene status
# NOTE(review): initial_obs[j] here vs initial_obs[1,j] below -- single-bracket
# indexing of a data.frame selects a column; verify both forms address the
# same cell value.
if (initial_obs[j]==c("-")) { #single_tip_exclude=colnames(initial_obs)[1,j]
single_tip_exclude=colnames(initial_obs)[j]
tip_exclude=c(tip_exclude,single_tip_exclude)
}
# value is 0 if gene status is same as normal
else if (as.character(initial_obs[1,j])==as.character(normal_genotype)) {
obs_genotype=c(obs_genotype,0)
}
# value is 2 (both alleles) if gene status is same as mutant
else if (as.character(initial_obs[1,j])==as.character(mutation_genotype)) {
#obs_genotype=c(obs_genotype,1)
obs_genotype=c(obs_genotype,2)
}
# otherwise the status is ambiguous; coded as 1 (the "m" coding is retired)
else { #obs_genotype=c(obs_genotype,"m")
obs_genotype=c(obs_genotype,1)}
}
# for this gene or location, exclude the sample with missing gene status
subtree = drop.tip(ts, tip = tip_exclude)
# branch_time_list is the branch length of sub tree
branch_time_list = subtree$edge.length
#generate the obs matrix
# obs_colnam are those with observations
obs_colnam=setdiff(colnames(initial_obs),tip_exclude)
# consider the ambiguity status as missing
#obs_genotype_mat=matrix(,nrow=max(1,2^(count(obs_genotype=="m")[2,2])),ncol=length(obs_colnam))
#obs_genotype_mat=matrix(,nrow=1,ncol=length(obs_colnam))
# NOTE(review): since ambiguous sites are now coded as 1 (not "m"),
# grep("m", obs_genotype) never matches, so nrow is always 2^0 = 1 and
# ambiguity_index below is always empty -- the ambiguity expansion appears
# to be dead code; confirm this is intended.
obs_genotype_mat=matrix(,nrow=2^length(grep("m",obs_genotype)),ncol=length(obs_colnam))
colnames(obs_genotype_mat)=obs_colnam
# find the index of each gene status for each sample
# (numeric codes are coerced to character for the comparisons below)
ambiguity_index=which(obs_genotype=="m")
normal_index=which(obs_genotype=="0")
single_allele_index=which(obs_genotype=="1")
double_allele_index=which(obs_genotype=="2")
#create all possible situations for ambiguity
inupt_list <- rep(list(0:1), length(ambiguity_index))
input_ambiguity=expand.grid(inupt_list)
# put the possible status into the matrix for gene i, each row represent one possible situation
obs_genotype_mat[,as.numeric(ambiguity_index)]=as.matrix(input_ambiguity)
obs_genotype_mat[,normal_index]=rep(0,dim(obs_genotype_mat)[1])
obs_genotype_mat[,single_allele_index]=rep(1,dim(obs_genotype_mat)[1])
obs_genotype_mat[,double_allele_index]=rep(2,dim(obs_genotype_mat)[1])
# for each of the possible situation, assign weight to them, here, I use equal weights
# NOTE(review): `nrow=1` here is passed to rep(), not matrix() -- likely a
# misplaced argument; verify the intended shape of ambiguity_weight.
ambiguity_weight=matrix(rep(1/dim(obs_genotype_mat)[1],dim(obs_genotype_mat)[1],nrow=1))
####################################################################################################
####################################################################################################
#extract the tree, if mutation is on one branch, then the corresponding tips will have mutation
####################################################################################################
####################################################################################################
# Return the child node(s) of `parent` in an ape-style edge matrix
# (column 1 = parent node id, column 2 = child node id).
#
# Args:
#   edge:   two-column numeric matrix of (parent, child) rows.
#   parent: node id whose children are wanted.
# Returns: vector of child node ids in row order, or NULL when `parent` has
#          no children (preserving the original loop's empty result).
left_right <- function(edge, parent) {
  # Vectorized replacement for the original row-by-row scan that grew the
  # result with c() (quadratic in the worst case).
  kids <- unname(edge[edge[, 1] == parent, 2])
  if (length(kids) == 0) NULL else kids
}
# Extend a root-to-node path by one generation: `branch` is a vector of node
# ids ending at some internal node; the result is a 2-row matrix whose rows
# are the path followed by that node's first and second child respectively.
build_tree <- function(edge, branch) {
  last_node <- branch[length(branch)]
  kids <- left_right(edge, last_node)
  # Row 1: path + first child; row 2: path + second child.
  matrix(c(branch, kids[1], branch, kids[2]), nrow = 2, byrow = TRUE)
}
#####################################modify begin################################
# find node parent
# Return the parent node of `node` in the edge matrix, or 0 when `node`
# has no parent (i.e. it is the root).
#
# Args:
#   edge: two-column numeric matrix of (parent, child) rows.
#   node: node id to look up.
# Returns: parent node id, or 0 if none.
find_ancestor <- function(edge, node) {
  # Vectorized replacement for the original full-matrix loop. That loop kept
  # overwriting `parent`, so the LAST matching row won; replicate that by
  # taking the last hit (in a well-formed tree there is at most one).
  hits <- which(edge[, 2] == node)
  if (length(hits) == 0) 0 else edge[hits[length(hits)], 1]
}
# get all unique nodes in the tree
# Return all unique node ids in the edge matrix, in first-appearance order
# when the matrix is read row by row (parent1, child1, parent2, child2, ...).
#
# Replaces the original manual interleaving loop (which preallocated an
# integer vector only to coerce it to double on first assignment) with the
# equivalent vectorized form: t(edge) flattens row-wise in the same order.
get_all_nodes <- function(edge)
{
  unique(as.vector(t(edge)))
}
# find root node
# Return the root node of the tree: the unique node that never appears as a
# child (second column) of the edge matrix.
#
# BUG FIX: the original looped over all nodes calling find_ancestor(),
# assigned `root_node` and then fell off the end of the function, so the
# `for` loop's invisible NULL was returned and the computed root was lost.
# The root is now computed directly (without the O(n^2) ancestor scan) and
# actually returned.
find_root <- function(edge)
{
  nodes <- unique(as.vector(t(edge)))      # all node ids, first-appearance order
  roots <- nodes[!(nodes %in% edge[, 2])]  # nodes with no parent
  roots[1]                                 # first found, matching the old `break`
}
# find two child branches and nodes if they exist. Otherwise all zeros matrix output
# For `parent_node`, return a 2x2 matrix: row 1 holds its (up to two) child
# node ids, row 2 the corresponding edge-matrix row indices ("branches").
# An all-zeros matrix is returned when the node has no children (a tip).
find_child_branches_and_nodes <- function(edge, parent_node){
  out <- matrix(0, 2, 2)
  matching_rows <- which(edge[, 1] == parent_node)
  for (slot in seq_along(matching_rows)) {
    out[1, slot] <- edge[matching_rows[slot], 2]  # child node id
    out[2, slot] <- matching_rows[slot]           # branch (row) index
  }
  return(out)
}
# find all child branch for current branch
# Recursively collect the edge-row indices of every branch below
# `current_edge`, appending to `child_branches` in depth-first,
# left-before-right order. A branch ending in a tip contributes nothing.
find_child_branches <- function(edge, current_edge, child_branches)
{
  below <- find_child_branches_and_nodes(edge, edge[current_edge, 2])
  if (below[1, 1] == 0)
  {
    # Child node is a tip: nothing hangs below this branch.
    return(child_branches)
  }
  left_branch <- below[2, 1]
  right_branch <- below[2, 2]
  # Record both immediate child branches, then descend into each in turn,
  # matching the original append order exactly.
  child_branches <- c(child_branches, left_branch, right_branch)
  child_branches <- find_child_branches(edge, left_branch, child_branches)
  find_child_branches(edge, right_branch, child_branches)
}
# find all child branch for all branches
# For every branch (edge row) i, compute the depth-first list of all branch
# indices beneath it; returns a list of length nrow(edge).
#
# FIX: the original also called find_root(edge) and stored the result in a
# variable that was never used afterwards (and the original find_root
# returned NULL anyway); that dead computation is removed.
find_all_child_branches <- function(edge){
  all_child_branches <- vector("list", nrow(edge))
  for (i in seq_len(nrow(edge)))
  {
    # Depth-first indices of every branch below branch i.
    all_child_branches[[i]] <- find_child_branches(edge, i, integer(0))
  }
  return(all_child_branches)
}
# Return all tip (leaf) node ids: nodes that never appear in the parent
# column of the edge matrix. Order follows first appearance in a row-wise
# read of `edge`, matching the original implementation.
#
# Vectorized replacement for the original pair of element-growing loops
# (collect parents, then test membership node by node with is.element).
find_all_tip_nodes <- function(edge)
{
  nodes <- unique(as.vector(t(edge)))  # same order as get_all_nodes(edge)
  nodes[!(nodes %in% edge[, 1])]
}
# find tip nodes under one edge
# For each branch (edge row) i, list the tip nodes reachable from it: the
# child endpoints of every branch below i, plus branch i's own child node,
# keeping only those that are tips. Returns a list of length nrow(edge).
find_tip_nodes_of_edge <- function(edge)
{
  tips <- find_all_tip_nodes(edge)
  below <- find_all_child_branches(edge)
  all_branch_tips <- rep(list(list()), nrow(edge))
  for (i in seq_len(nrow(edge))) {
    # Child nodes of all descendant branches, then this branch's own child;
    # filtering afterwards preserves the original append order exactly.
    candidates <- unname(c(edge[below[[i]], 2], edge[i, 2]))
    all_branch_tips[[i]] <- candidates[candidates %in% tips]
  }
  return(all_branch_tips)
}
###################################################################################
###################################################################################
#find the joint prob of observation and branch_i in the subtree
###################################################################################
###################################################################################
# ---------------------------------------------------------------------------
# For a given branch `branch_i` of `subtree`, enumerate the candidate true
# tip-genotype configurations:
#   [[1]] exactly one mutation on branch_i (tips below it get genotype 1),
#   [[2]] two mutations on branch_i (tips below it get genotype 2),
#   [[3]] one mutation on branch_i plus a second on one of its descendant
#         branches (genotypes add up per tip).
# Each returned matrix has columns: First_branch, Second_branch, then one
# genotype column per tip (named by subtree$tip.label).
# NOTE(review): assumes the ape "phylo" convention that tips are numbered
# 1..num_cols and the root node id is num_cols+1 -- verify for other inputs.
# ---------------------------------------------------------------------------
find_all_possible_mutation_matrix <- function(subtree, branch_i)
{
num_rows = nrow(subtree$edge)
num_cols = length(subtree$tip.label)
# build the branch tree structure from each tip to the root:
# branch_trees[[tip]] is the vector (tip, parent, grandparent, ..., root)
branch_trees = rep( list(list()),num_cols )
num_parent = 0
for (tip_i in 1:num_cols) {
branch_trees[[tip_i]] = tip_i
parent = find_ancestor(subtree$edge,tip_i)
branch_trees[[tip_i]][num_parent+2] = parent
num_parent=num_parent+1
# walk upwards until the root (node num_cols+1) is reached
while (parent != num_cols+1) {
tip_node = parent
parent = find_ancestor(subtree$edge,tip_node)
branch_trees[[tip_i]][num_parent+2] = parent
num_parent=num_parent+1
}
num_parent = 0
}
# loop over all the branches, and find the possible final stage
# if the event occurs in that branch:
# row j has a 1 in every tip column whose root path passes through branch j's child.
possible_true_genotype_with_1 = matrix(rep(0,num_rows*num_cols),nrow=num_rows,ncol = num_cols)
for (j in 1:num_rows) {
branch_edge = subtree$edge[j,]
if (branch_edge[2] <= num_cols) {
# branch ends at a tip: only that tip inherits the mutation
possible_true_genotype_with_1[j,branch_edge[2]] = 1
}else {
for (i in 1:num_cols) {
list_branch = branch_trees[[i]]
if (is.na(match(branch_edge[2],list_branch)) == FALSE) {
possible_true_genotype_with_1[j,i] = 1
# (column names re-assigned repeatedly here; harmless but redundant)
colnames(possible_true_genotype_with_1)=subtree$tip.label
}
}
}
}
descendant_branches=find_all_child_branches(subtree$edge)
num_of_cases = num_rows-1
possible_true_genotype_with_only_1 = matrix(rep(0,num_cols+2),nrow=1,ncol = num_cols+2)
possible_true_genotype_with_only_2 = matrix(rep(0,num_cols+2),nrow=1,ncol = num_cols+2)
possible_true_genotype_with_2 = matrix(rep(0,num_of_cases*(num_cols+2)),nrow=num_of_cases,ncol = num_cols+2)
colnames(possible_true_genotype_with_only_1)= c("First_branch", "Second_branch", subtree$tip.label)
colnames(possible_true_genotype_with_only_2)= c("First_branch", "Second_branch", subtree$tip.label)
colnames(possible_true_genotype_with_2)= c("First_branch", "Second_branch", subtree$tip.label)
# a matrix is created, where first two columns are the places for the mutation occuring.
# if it's NA, it means that no mutation occurs to stand for situation like 0, 1, 2
# if only this branch has one mutation
# NOTE: `1:num_cols+2` parses as (1:num_cols)+2, i.e. columns 3..(num_cols+2);
# this is the intended tip-column slice, but the precedence is fragile.
possible_true_genotype_with_only_1[1,1] = branch_i
possible_true_genotype_with_only_1[1,1:num_cols+2] = possible_true_genotype_with_1[branch_i, ]
possible_true_genotype_with_only_2[1,1] = branch_i
possible_true_genotype_with_only_2[1,2] = branch_i
possible_true_genotype_with_only_2[1,1:num_cols+2] = 2*possible_true_genotype_with_1[branch_i, ]
id_row = 1
# if this branch has one mutation, and other branch has another
for (branch_j in 1:num_rows) {
if (branch_j == branch_i)
{
next
}
possible_true_genotype_with_2[id_row,1] = branch_i
possible_true_genotype_with_2[id_row,2] = branch_j
possible_true_genotype_with_2[id_row,1:num_cols+2] = possible_true_genotype_with_1[branch_i, ] + possible_true_genotype_with_1[branch_j, ]
id_row = id_row+1
}
# keep only the rows whose second mutation lies on a descendant of branch_i
# NOTE(review): when exactly one row matches, this subset drops to a plain
# vector (no drop=FALSE) -- verify that every caller handles both shapes.
possible_true_genotype_with_2_sub = possible_true_genotype_with_2[ possible_true_genotype_with_2[,2] %in% descendant_branches[[branch_i]], ]
return(list(possible_true_genotype_with_only_1, possible_true_genotype_with_only_2, possible_true_genotype_with_2_sub))
}
###################################################################################
###################################################################################
#Mutation model:find the prob of mutation on each branch, but not on other branches
###################################################################################
###################################################################################
# results from function find_mutation_prob_Bj: prob_0_1= prob_Bj_with_only_1, prob_0_2 = prob_Bj_with_only_2, prob_0_1_2 = prob_Bj_with_2
# ---------------------------------------------------------------------------
# Mutation model: probability of the mutation configurations on branch_i.
# Returns list(prob_0_1, prob_0_2, prob_0_1_2): the probability of exactly
# one mutation on branch_i, of two mutations on branch_i, and of one mutation
# on branch_i plus a second on each of its descendant branches. Each element
# is a matrix with columns First_branch, Second_branch, Mutation_Prob.
#
# NOTE(review): reads `subtree` from the enclosing environment rather than
# taking it as a parameter; branch_time_list must correspond to
# subtree$edge.length.
# NOTE: cumprod(x)[n] is used throughout as an equivalent of prod(x).
# ---------------------------------------------------------------------------
find_mutation_prob_Bj <- function(branch_time_list, unit_theta,unit_gamma,unit_mu,branch_i){
###############################################
#find the prob of only 1 mutation on branch_i
###############################################
# find all branches numbers in the subtree
all_branches=c(1:dim(subtree$edge)[1])
descendant_branches=find_all_child_branches(subtree$edge)
# 1-1:find all descendant branches of the input branch, all these branches carry the mutation and no further mutation occurs
Bj_with_only_1_descendant_branches=descendant_branches[[branch_i]]
# 1-1:find the prob that all descendant branches of the input branch carry the mutation and no further mutation occurs
prob_Bj_with_only_1_descendant_branches=exp(-unit_mu*branch_time_list[Bj_with_only_1_descendant_branches])
# 0-0:find the complement branches of input branch and its descendant, branches in this set has no mutation
Bj_with_only_1_no_mutation_branches=setdiff(all_branches,union(branch_i, Bj_with_only_1_descendant_branches))
# 0-0:find the prob the complement branches of input branch and its descendant in this set has no mutation
prob_Bj_with_only_1_no_mutation_branches=exp(-(unit_theta+unit_gamma)*(branch_time_list[Bj_with_only_1_no_mutation_branches]))
# get the prob matrix of the only 1 mutation: one per-branch factor each
prob_Bj_with_only_1_mat= matrix(, nrow = 1, ncol = dim(subtree$edge)[1])
prob_Bj_with_only_1_mat[,Bj_with_only_1_descendant_branches]=prob_Bj_with_only_1_descendant_branches
prob_Bj_with_only_1_mat[,Bj_with_only_1_no_mutation_branches]=prob_Bj_with_only_1_no_mutation_branches
# probability that exactly one 0->1 transition happens on branch_i itself
prob_Bj_with_only_1_mat[,branch_i]= unit_theta*(exp(-(unit_theta+unit_gamma)*branch_time_list[branch_i])-exp(-unit_mu*branch_time_list[branch_i]))/(unit_mu-unit_theta-unit_gamma)
prob_Bj_with_only_1=cbind(First_branch = branch_i,
Second_branch = 0,
Mutation_Prob=cumprod(prob_Bj_with_only_1_mat)[dim(subtree$edge)[1]])
###############################################
#find the prob of only 2 mutation on branch_i
###############################################
#2-2:find all descendant branches of the input branch, all these branches carry the mutation and no further mutation occurs
Bj_with_only_2_descendant_branches=descendant_branches[[branch_i]]
#2-2:find the prob that all descendant branches of the input branch carry the 2 mutations and no further mutation occurs
# (exp(0) == 1 for every descendant: a 2-mutation state cannot mutate further)
prob_Bj_with_only_2_descendant_branches=exp(-0*branch_time_list[Bj_with_only_2_descendant_branches])
#0-0:find the complement branches of input branch and its descendant, branches in this set has no mutation
Bj_with_only_2_no_mutation_branches=setdiff(all_branches,union(branch_i, Bj_with_only_2_descendant_branches))
#0-0:find the prob the complement branches of input branch and its descendant in this set has no mutation
prob_Bj_with_only_2_no_mutation_branches=exp(-(unit_theta+unit_gamma)*(branch_time_list[Bj_with_only_2_no_mutation_branches]))
# get the prob matrix of the only 2 mutation
prob_Bj_with_only_2_mat= matrix(, nrow = 1, ncol = dim(subtree$edge)[1])
prob_Bj_with_only_2_mat[,Bj_with_only_2_descendant_branches]=prob_Bj_with_only_2_descendant_branches
prob_Bj_with_only_2_mat[,Bj_with_only_2_no_mutation_branches]=prob_Bj_with_only_2_no_mutation_branches
# probability of both transitions (0->1 and 1->2) occurring within branch_i
prob_Bj_with_only_2_mat[,branch_i]= ((unit_gamma-unit_mu)*(exp(-(unit_theta+unit_gamma)*branch_time_list[branch_i]))+(unit_theta)*(exp(-unit_mu*branch_time_list[branch_i])))/(unit_mu-unit_theta-unit_gamma)+1
prob_Bj_with_only_2=cbind(First_branch = branch_i,
Second_branch = 0,
Mutation_Prob=cumprod(prob_Bj_with_only_2_mat)[dim(subtree$edge)[1]])
########################################################################
#find the prob of 0-1 mutation on branch_i and 1-2 on a descendant branch
########################################################################
Bj_with_2_descendant_branches=descendant_branches[[branch_i]]
if(length(Bj_with_2_descendant_branches) > 0){
# create the matrix that store the prob if there are separate mutations on two branches
# (columns 1-2: the two branch ids; remaining columns: per-branch factors)
prob_Bj_with_2_mat=matrix(, nrow = length(Bj_with_2_descendant_branches), ncol = 2+dim(subtree$edge)[1])
# iterate on each of the descendant branch
for (descendant_i in 1:length(Bj_with_2_descendant_branches)){
prob_Bj_with_2_mat[descendant_i,1]= branch_i
prob_Bj_with_2_mat[descendant_i,2]= Bj_with_2_descendant_branches[descendant_i]
DB_descendant_i=descendant_branches[[Bj_with_2_descendant_branches[descendant_i]]]
# find the branches without mutation
Bj_with_2_no_mutation_branches = setdiff(all_branches,union(branch_i, Bj_with_2_descendant_branches))
prob_Bj_with_2_no_mutation_branches = exp(-(unit_theta+unit_gamma)*(branch_time_list[Bj_with_2_no_mutation_branches]))
# the branch with the first mutation occurring
Bj_with_2_1st_mutation_branches = branch_i
prob_Bj_with_2_1st_mutation_branches = unit_theta*(exp(-(unit_theta+unit_gamma)*branch_time_list[branch_i])-exp(-unit_mu*branch_time_list[branch_i]))/(unit_mu-unit_theta-unit_gamma)
# the branch with the second mutation occurring
Bj_with_2_2nd_mutation_branches = Bj_with_2_descendant_branches[descendant_i]
prob_Bj_with_2_2nd_mutation_branches = 1-exp(-unit_mu*branch_time_list[Bj_with_2_2nd_mutation_branches])
#the branches carry the first and second mutation
Bj_with_2_carry_1st_2nd_mutation_branches = descendant_branches[[Bj_with_2_2nd_mutation_branches]]
prob_Bj_with_2_carry_1st_2nd_mutation_branches = exp(0*branch_time_list[Bj_with_2_carry_1st_2nd_mutation_branches])
#the branches carry the first mutation
Bj_with_2_carry_1st_mutation_branches = setdiff(Bj_with_2_descendant_branches,union(Bj_with_2_2nd_mutation_branches,Bj_with_2_carry_1st_2nd_mutation_branches))
prob_Bj_with_2_carry_1st_mutation_branches = exp(-unit_mu*branch_time_list[Bj_with_2_carry_1st_mutation_branches])
#put these probability into the matrix (offset by 2 for the id columns)
prob_Bj_with_2_mat[descendant_i,Bj_with_2_no_mutation_branches+2] = prob_Bj_with_2_no_mutation_branches
prob_Bj_with_2_mat[descendant_i,Bj_with_2_1st_mutation_branches+2] = prob_Bj_with_2_1st_mutation_branches
prob_Bj_with_2_mat[descendant_i,Bj_with_2_carry_1st_mutation_branches+2] = prob_Bj_with_2_carry_1st_mutation_branches
prob_Bj_with_2_mat[descendant_i,Bj_with_2_2nd_mutation_branches+2] = prob_Bj_with_2_2nd_mutation_branches
prob_Bj_with_2_mat[descendant_i,Bj_with_2_carry_1st_2nd_mutation_branches+2] = prob_Bj_with_2_carry_1st_2nd_mutation_branches
#find the probability of each (branch_i, descendant) pair; NOTE(review):
#prob_Bj_with_2 is recomputed from the full matrix on every iteration and
#only the final iteration's value is kept -- correct but wasteful.
prob_Bj_with_2=cbind(prob_Bj_with_2_mat[,c(1,2)],
Mutation_Prob=t(apply(prob_Bj_with_2_mat[,-c(1,2)],1,cumprod))[,dim(subtree$edge)[1]])
}
}else{prob_Bj_with_2=cbind(First_branch = branch_i,
Second_branch = 0,
Mutation_Prob = 0)}
return(list(prob_0_1= prob_Bj_with_only_1,
prob_0_2 = prob_Bj_with_only_2,
prob_0_1_2 = prob_Bj_with_2))
}
###################################################################################
###################################################################################
#Error model:find the prob of observation, conditioning on a branch_i
###################################################################################
###################################################################################
# branch k (mutation on branch k)
find_mutation_prob_of_obs_condition_Bj <- function(branch_time_list, sequencing_error_model,branch_i){
  # Conditional probability of the observed genotypes given a mutation on
  # branch_i, under the ternary (0/1/2) sequencing error model.
  #
  # Reads `subtree` and `obs_genotype_mat` from the enclosing scope and uses
  # the sibling helper find_all_possible_mutation_matrix().
  # `branch_time_list` is kept for interface compatibility (unused here).
  #
  # Returns, for the three mutation scenarios
  #   0_1   : single heterozygous (0->1) mutation on branch_i
  #   0_2   : single homozygous  (0->2) mutation on branch_i
  #   0_1_2 : 0->1 on branch_i plus a second mutation on a descendant branch
  # both the per-configuration error matrices and their collapsed products.
  #
  # The original called find_all_possible_mutation_matrix() three times with
  # identical arguments; it is deterministic, so compute it once.
  possible_genotypes = find_all_possible_mutation_matrix(subtree, branch_i)
  possible_true_genotype_with_only_1_mutation = possible_genotypes[[1]]
  possible_true_genotype_with_only_2_mutation = possible_genotypes[[2]]
  possible_true_genotype_with_2_mutation = possible_genotypes[[3]]
  # Error lookup: sequencing_error_model[true + 1, obs + 1] is
  # P(observe `obs` | truth `true`) with both genotypes coded 0/1/2.
  # This single indexed lookup encodes exactly the original nine-branch
  # if/else chain (which enumerated all 3x3 true/observed combinations).
  build_error_mat <- function(true_row) {
    obs_mat = as.matrix(obs_genotype_mat)
    err = matrix(0, nrow = nrow(obs_mat), ncol = ncol(obs_mat))
    colnames(err) = subtree$tip.label
    # j: ambiguity configuration (row of obs_genotype_mat); i: tip/sample
    for (j in seq_len(nrow(obs_mat))) {
      for (i in seq_along(true_row)) {
        err[j, i] = sequencing_error_model[true_row[i] + 1, obs_mat[j, i] + 1]
      }
    }
    err
  }
  ##################################################################
  ### conditional prob when only one 0->1 mutation is on branch_i
  ##################################################################
  # Strip the two bookkeeping columns (First_branch, Second_branch).
  # NOTE(review): matrix(x[, 3:n], nrow = nrow(x), byrow = TRUE) is a no-op
  # for single-row input but permutes entries for multi-row input; the
  # extraction is kept exactly as in the original.
  possible_true_genotype_with_only_1_mutation_values = matrix(possible_true_genotype_with_only_1_mutation[,c(3:dim(possible_true_genotype_with_only_1_mutation)[2])],
                                                              nrow=nrow(possible_true_genotype_with_only_1_mutation),
                                                              byrow=T)
  colnames(possible_true_genotype_with_only_1_mutation_values)=subtree$tip.label
  error_result_mat_with_only_1_mutation = lapply(
    seq_len(nrow(possible_true_genotype_with_only_1_mutation_values)),
    function(k) build_error_mat(possible_true_genotype_with_only_1_mutation_values[k, ]))
  ##################################################################
  ### conditional prob when only one 0->2 mutation is on branch_i
  ##################################################################
  possible_true_genotype_with_only_2_mutation_values = matrix(possible_true_genotype_with_only_2_mutation[,c(3:dim(possible_true_genotype_with_only_2_mutation)[2])],
                                                              nrow=nrow(possible_true_genotype_with_only_2_mutation),
                                                              byrow=T)
  colnames(possible_true_genotype_with_only_2_mutation_values)=subtree$tip.label
  error_result_mat_with_only_2_mutation = lapply(
    seq_len(nrow(possible_true_genotype_with_only_2_mutation_values)),
    function(k) build_error_mat(possible_true_genotype_with_only_2_mutation_values[k, ]))
  #########################################################################################
  ### conditional prob when 1 mutation is on branch_i and 1 on a descendant branch
  #########################################################################################
  possible_true_genotype_with_2_mutation_values = matrix(possible_true_genotype_with_2_mutation[,c(3:dim(possible_true_genotype_with_2_mutation)[2])],
                                                         nrow=nrow(possible_true_genotype_with_2_mutation),
                                                         byrow=T)
  if(dim(possible_true_genotype_with_2_mutation_values)[1] > 0)
  {
    colnames(possible_true_genotype_with_2_mutation_values)=subtree$tip.label
    error_result_mat_with_2_mutation = lapply(
      seq_len(nrow(possible_true_genotype_with_2_mutation_values)),
      function(k) build_error_mat(possible_true_genotype_with_2_mutation_values[k, ]))
    # Product over every entry (all ambiguity configurations x all tips),
    # matching the original tail(cumprod(...), n = 1).
    prob_error_result_0_1_2 = vapply(error_result_mat_with_2_mutation,
                                     function(m) tail(cumprod(m), n = 1),
                                     numeric(1))
  } else
  {
    # branch_i has no descendant branches: no two-mutation case exists
    error_result_mat_with_2_mutation = 0
    prob_error_result_0_1_2 = 0
  }
  # Collapse the single-mutation scenarios (each has exactly one genotype
  # configuration) to the product of all their error-matrix entries.
  prob_error_result_0_1 = tail(cumprod(error_result_mat_with_only_1_mutation[[1]]),n=1)
  prob_error_result_0_2 = tail(cumprod(error_result_mat_with_only_2_mutation[[1]]),n=1)
  ##################################################################
  #return the error matrices and probabilities for the three conditions
  ##################################################################
  return(list(error_result_0_1=error_result_mat_with_only_1_mutation,
              error_result_0_2=error_result_mat_with_only_2_mutation,
              error_result_0_1_2=error_result_mat_with_2_mutation,
              prob_error_result_0_1=prob_error_result_0_1,
              prob_error_result_0_2=prob_error_result_0_2,
              prob_error_result_0_1_2=prob_error_result_0_1_2))
}
##############################################################################
##############################################################################
find_joint_obs_Bj <- function(subtree, branch_i)
{
  # Joint probability of the observed genotypes and a mutation on branch_i:
  # mutation-model prior (find_mutation_prob_Bj) times error-model
  # likelihood (find_mutation_prob_of_obs_condition_Bj), per scenario.
  # branch_time_list, unit_theta/gamma/mu and sequencing_error_model are
  # taken from the enclosing scope.
  mutation_prior = find_mutation_prob_Bj(branch_time_list, unit_theta, unit_gamma, unit_mu, branch_i)
  obs_likelihood = find_mutation_prob_of_obs_condition_Bj(branch_time_list, sequencing_error_model, branch_i)
  # scenario-wise joint probabilities
  joint_0_1 = mutation_prior$prob_0_1[1, 3] * obs_likelihood$prob_error_result_0_1
  joint_0_2 = mutation_prior$prob_0_2[1, 3] * obs_likelihood$prob_error_result_0_2
  joint_0_1_2 = t(as.matrix(mutation_prior$prob_0_1_2[, 3])) %*% (as.matrix(obs_likelihood$prob_error_result_0_1_2))
  # total = sum over the three mutually exclusive scenarios
  return(list(prob_Bj_and_obs_genotype = joint_0_1 + joint_0_2 + joint_0_1_2,
              prob_Bj_and_obs_genotype_0_1 = joint_0_1,
              prob_Bj_and_obs_genotype_0_2 = joint_0_2,
              prob_Bj_and_obs_genotype_0_1_2 = joint_0_1_2))
}
#find the joint prob of observation and branch_i on all branches of the subtree
find_joint_obs_Bj_all_branches<-function(subtree){
  # Evaluate find_joint_obs_Bj on every branch of the subtree and stack the
  # four joint probabilities into a matrix with one row per branch.
  n_branches = dim(subtree$edge)[1]
  joint_mat = matrix(, nrow = n_branches, ncol = 4)
  colnames(joint_mat) = c("prob_Bj_and_obs_genotype_0_1","prob_Bj_and_obs_genotype_0_2",
                          "prob_Bj_and_obs_genotype_0_1_2","prob_Bj_and_obs_genotype")
  for (br_num in seq_len(n_branches)) {
    joint = find_joint_obs_Bj(subtree, br_num)
    joint_mat[br_num, ] = c(joint$prob_Bj_and_obs_genotype_0_1,
                            joint$prob_Bj_and_obs_genotype_0_2,
                            joint$prob_Bj_and_obs_genotype_0_1_2,
                            joint$prob_Bj_and_obs_genotype)
  }
  return(joint_mat)
}
find_mutation_prob_of_Bj_condition_obs<-function(subtree){
  # Posterior probability that the mutation lies on each branch, given the
  # observations: every joint probability is normalized by the total joint
  # probability summed over all branches (column 4).
  joint_mat = find_joint_obs_Bj_all_branches(subtree)
  # Hoist the normalizing constant out of the loop (the original recomputed
  # this sum 4 times per branch) and divide the whole matrix at once;
  # element-wise division gives exactly the same values as the former loop.
  total_prob = sum(joint_mat[, 4])
  posterior_mat = joint_mat / total_prob
  colnames(posterior_mat) = c("prob_of_Bj_condition_obs_0_1","prob_of_Bj_condition_obs_0_2",
                              "prob_of_Bj_condition_obs_0_1_2","prob_of_Bj_condition_obs_genotype")
  return(posterior_mat)
}
result = find_mutation_prob_of_Bj_condition_obs(subtree)
return(result = result)
}
library(ape)
# Simulation driver: for each (alpha, beta) sequencing-error setting, read the
# simulated observation matrices, run generate_prob() over three sampled rate
# draws per site, average, and write the four posterior matrices to CSV.
parameter_setting = expand.grid(alpha=c(0.05,0.1,0.2,0.4),
                                beta =c(0.05,0.1,0.2,0.4))
# Only parameter setting 5 (alpha = 0.05, beta = 0.1) is processed here.
for(paraInd in 5:5){
alpha = parameter_setting[paraInd,1]
beta = parameter_setting[paraInd,2]
# Split the overall rates across the ternary transitions.
alpha01= alpha
alpha02= alpha*beta/2
beta10= beta/2
beta12= beta/2
gamma20 =0
gamma21 =0
# Row r, column c: P(observe genotype c-1 | true genotype r-1).
# BUG FIX: the original wrote 1-gamma21-gamma21 for the (3,3) entry; each
# row must sum to 1, so it is 1-gamma20-gamma21.  (Numerically identical
# here because both gammas are 0.)
sequencing_error_model=matrix(c(1-alpha01-alpha02,alpha01,alpha02,
                                beta10,1-beta10-beta12,beta12,
                                gamma20,gamma21,1-gamma20-gamma21),nrow=3,byrow = TRUE)
print(sequencing_error_model)
# Mutation-model rates and tree dimensions used throughout.
unit_theta = 10^(-7)
unit_gamma = 10^(-9)
unit_mu = 10 ^(-2)
number_br = 98
number_cell = 50
# Encode rates for file names: 0.05 -> "05", 0.1 -> "1", 0.2 -> "2", 0.4 -> "4".
if (alpha < 0.1)
{
  alpha_str = sprintf('0%s', alpha*100)
} else
{
  alpha_str = sprintf('%s', alpha*10)
}
if (beta < 0.1)
{
  beta_str = sprintf('0%s', beta*100)
} else
{
  beta_str = sprintf('%s', beta*10)
}
ternary_folder_form_result = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s_result',alpha_str, beta_str)
dir.create(ternary_folder_form_result)
# One replicate per random tree.
for (indexn in 1:100){
  print(c(alpha,beta,indexn))
  form = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/RandomTree/RandomTree_%s.tre', indexn)
  sampletr=read.tree(form)
  obs_form_0_1_2 = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s/ternary_obs_0_1_tip_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
  mat_obs_form_0_1_2 = read.csv(obs_form_0_1_2)
  # Accumulators over all sites of this replicate.
  # NOTE(review): results are grown with rbind inside the loop; acceptable
  # at this scale, but preallocating would be faster.
  ternary_prob_matrix_all_0_1_all=c()
  ternary_prob_matrix_01_0_1_all=c()
  ternary_prob_matrix_02_0_1_all=c()
  ternary_prob_matrix_012_0_1_all=c()
  if (dim(mat_obs_form_0_1_2)[1]>0){
    ternary_prob_matrix_all_0_1=c()
    ternary_prob_matrix_01_0_1=c()
    ternary_prob_matrix_02_0_1=c()
    ternary_prob_matrix_012_0_1=c()
    # reference genotype 0 and fully mutated genotype 2 for every site
    normal_genotype_0_1_2 = rep(0,dim(mat_obs_form_0_1_2)[1])
    mutation_genotype_0_1_2 = rep(2,dim(mat_obs_form_0_1_2)[1])
    initial_obs_0_1 = data.frame(mat_obs_form_0_1_2[,-c(1,2,3)])
    for (i in 1:dim(initial_obs_0_1)[1]){
      print(i)
      # Sample 3 rate draws around the unit rates (gamma priors) and
      # average the per-branch posteriors over the draws.
      rd_unit_theta = rgamma(n = 3, shape = 100, scale = 0.01*unit_theta)
      rd_unit_gamma = rgamma(3, shape = 100, scale = 0.01*unit_gamma)
      rd_unit_mu = rgamma(3, shape = 100, scale = 0.01*unit_mu)
      generate_prob_br_0_1_dat=data.frame(matrix(NA, nrow = number_br, ncol = 3))
      generate_prob_br_0_2_dat=data.frame(matrix(NA, nrow = number_br, ncol = 3))
      generate_prob_br_0_1_2_dat=data.frame(matrix(NA, nrow = number_br, ncol = 3))
      generate_prob_br_all_dat=data.frame(matrix(NA, nrow = number_br, ncol = 3))
      for (j in 1:3){
        generate_prob_br <- generate_prob(sequencing_error_model,rd_unit_theta[j],rd_unit_gamma[j],rd_unit_mu[j],number_br,number_cell,
                                          normal_genotype_0_1_2[i],mutation_genotype_0_1_2[i],initial_obs_0_1[i,],sampletr)
        # Pad with zeros up to number_br (tips dropped for missing data
        # shrink the subtree, so fewer branches may be returned).
        generate_prob_br_0_1_single <- c(generate_prob_br[,1],rep(0,number_br-dim(generate_prob_br)[1]))
        generate_prob_br_0_2_single <- c(generate_prob_br[,2],rep(0,number_br-dim(generate_prob_br)[1]))
        generate_prob_br_0_1_2_single <- c(generate_prob_br[,3],rep(0,number_br-dim(generate_prob_br)[1]))
        generate_prob_br_all_single <- c(generate_prob_br[,4],rep(0,number_br-dim(generate_prob_br)[1]))
        generate_prob_br_0_1_dat[,j] = generate_prob_br_0_1_single
        generate_prob_br_0_2_dat[,j] = generate_prob_br_0_2_single
        generate_prob_br_0_1_2_dat[,j] = generate_prob_br_0_1_2_single
        generate_prob_br_all_dat[,j] = generate_prob_br_all_single
      }
      # Average over the 3 rate draws.
      generate_prob_br_0_1=rowMeans(generate_prob_br_0_1_dat, na.rm = FALSE, dims = 1)
      generate_prob_br_0_2=rowMeans(generate_prob_br_0_2_dat, na.rm = FALSE, dims = 1)
      generate_prob_br_0_1_2=rowMeans(generate_prob_br_0_1_2_dat, na.rm = FALSE, dims = 1)
      generate_prob_br_all=rowMeans(generate_prob_br_all_dat, na.rm = FALSE, dims = 1)
      ternary_prob_matrix_all_0_1 = rbind(ternary_prob_matrix_all_0_1,generate_prob_br_all)
      ternary_prob_matrix_01_0_1 = rbind(ternary_prob_matrix_01_0_1,generate_prob_br_0_1)
      ternary_prob_matrix_02_0_1 = rbind(ternary_prob_matrix_02_0_1,generate_prob_br_0_2)
      ternary_prob_matrix_012_0_1 = rbind(ternary_prob_matrix_012_0_1,generate_prob_br_0_1_2)
    }
    # Prepend the two identifier columns from the input file.
    ternary_prob_matrix_all_0_1_rownames=cbind(data.frame(mat_obs_form_0_1_2[,c(2,3)]),data.frame(ternary_prob_matrix_all_0_1))
    ternary_prob_matrix_01_0_1_rownames=cbind(data.frame(mat_obs_form_0_1_2[,c(2,3)]),data.frame(ternary_prob_matrix_01_0_1))
    ternary_prob_matrix_02_0_1_rownames=cbind(data.frame(mat_obs_form_0_1_2[,c(2,3)]),data.frame(ternary_prob_matrix_02_0_1))
    ternary_prob_matrix_012_0_1_rownames=cbind(data.frame(mat_obs_form_0_1_2[,c(2,3)]),data.frame(ternary_prob_matrix_012_0_1))
    ternary_prob_matrix_all_0_1_all=rbind(ternary_prob_matrix_all_0_1_all,ternary_prob_matrix_all_0_1_rownames)
    ternary_prob_matrix_01_0_1_all=rbind(ternary_prob_matrix_01_0_1_all,ternary_prob_matrix_01_0_1_rownames)
    ternary_prob_matrix_02_0_1_all=rbind(ternary_prob_matrix_02_0_1_all,ternary_prob_matrix_02_0_1_rownames)
    ternary_prob_matrix_012_0_1_all=rbind(ternary_prob_matrix_012_0_1_all,ternary_prob_matrix_012_0_1_rownames)
  }
  # Write the four result matrices for this replicate.
  ternary_prob_matrix_all_0_1_2_out = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s_result/ternary_prob_matrix_all_0_1_2_out_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
  ternary_prob_matrix_01_0_1_2_out = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s_result/ternary_prob_matrix_01_0_1_2_out_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
  ternary_prob_matrix_02_0_1_2_out= sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s_result/ternary_prob_matrix_02_0_1_2_out_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
  ternary_prob_matrix_012_0_1_2_out= sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s_result/ternary_prob_matrix_012_0_1_2_out_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
  write.csv(ternary_prob_matrix_all_0_1_all,file=ternary_prob_matrix_all_0_1_2_out)
  write.csv(ternary_prob_matrix_01_0_1_all,file=ternary_prob_matrix_01_0_1_2_out)
  write.csv(ternary_prob_matrix_02_0_1_all,file=ternary_prob_matrix_02_0_1_2_out)
  write.csv(ternary_prob_matrix_012_0_1_all,file=ternary_prob_matrix_012_0_1_2_out)
}
}
|
/simulation/FiguresS2_S3_Scenarios3_4/Scenario4_result/MO_Ternary_20missing/MO_Ternary_Simulation_Study_5.R
|
no_license
|
DavidSimone/MO
|
R
| false
| false
| 47,782
|
r
|
generate_prob <- function(sequencing_error_model,unit_theta,unit_gamma,unit_mu,number_br,
number_cell,normal_genotype,mutation_genotype,initial_obs,ts){
# for this gene or location, variable singletip_exclude are those sample equal '-', variable tip_exclude are all those equal '-'
single_tip_exclude = c()
tip_exclude = c()
# for this gene or location, obs_colnam are those sample names, excluding the '-'
obs_colnam = c()
obs_genotype = c()
# for this gene or location, change the ACTG to 0/1/2, assign the mutation status for sample j
for (j in c(1:(number_cell))){
# exclude those tips with missing gene status
if (initial_obs[j]==c("-")) { #single_tip_exclude=colnames(initial_obs)[1,j]
single_tip_exclude=colnames(initial_obs)[j]
tip_exclude=c(tip_exclude,single_tip_exclude)
}
# value is 0 if gene status is same as normal
else if (as.character(initial_obs[1,j])==as.character(normal_genotype)) {
obs_genotype=c(obs_genotype,0)
}
# value is 1 if gene status is same as mutant
else if (as.character(initial_obs[1,j])==as.character(mutation_genotype)) {
#obs_genotype=c(obs_genotype,1)
obs_genotype=c(obs_genotype,2)
}
# value is m if gene status is ambguity
else { #obs_genotype=c(obs_genotype,"m")
obs_genotype=c(obs_genotype,1)}
}
# for this gene or location, exclude the sample with missing gene status
subtree = drop.tip(ts, tip = tip_exclude)
# branch_time_list is the branch length of sub tree
branch_time_list = subtree$edge.length
#generate the obs matrix
# obs_colnam are those with observations
obs_colnam=setdiff(colnames(initial_obs),tip_exclude)
# consider the ambguity status as missing
#obs_genotype_mat=matrix(,nrow=max(1,2^(count(obs_genotype=="m")[2,2])),ncol=length(obs_colnam))
#obs_genotype_mat=matrix(,nrow=1,ncol=length(obs_colnam))
obs_genotype_mat=matrix(,nrow=2^length(grep("m",obs_genotype)),ncol=length(obs_colnam))
colnames(obs_genotype_mat)=obs_colnam
# find the index of each gene status for each sample
ambiguity_index=which(obs_genotype=="m")
normal_index=which(obs_genotype=="0")
single_allele_index=which(obs_genotype=="1")
double_allele_index=which(obs_genotype=="2")
#create all possible situations for ambguity
inupt_list <- rep(list(0:1), length(ambiguity_index))
input_ambiguity=expand.grid(inupt_list)
# put the possible status into the matrix for gene i, each row represent one possible situation
obs_genotype_mat[,as.numeric(ambiguity_index)]=as.matrix(input_ambiguity)
obs_genotype_mat[,normal_index]=rep(0,dim(obs_genotype_mat)[1])
obs_genotype_mat[,single_allele_index]=rep(1,dim(obs_genotype_mat)[1])
obs_genotype_mat[,double_allele_index]=rep(2,dim(obs_genotype_mat)[1])
# for each of the possible situation, assign weight to them, here, I use equal weights
ambiguity_weight=matrix(rep(1/dim(obs_genotype_mat)[1],dim(obs_genotype_mat)[1],nrow=1))
####################################################################################################
####################################################################################################
#extract the tree, if mutation is on one branch, then the corresponding tips will have mutation
####################################################################################################
####################################################################################################
left_right <- function(edge,parent){
  # Child node(s) of `parent`: column-2 entries of every edge (row) whose
  # column-1 entry equals `parent`.  Returns NULL when there are none,
  # matching the original accumulate-with-c() behavior.
  child_rows = which(edge[, 1] == parent)
  if (length(child_rows) == 0) {
    return(NULL)
  }
  edge[child_rows, 2]
}
build_tree <- function(edge,branch){
  # Extend the node path `branch` by one level: look up the two children of
  # the path's last node and return a 2-row matrix, one extended path per
  # child (row 1 via the first child, row 2 via the second).
  children = left_right(edge, branch[length(branch)])
  matrix(c(branch, children[1], branch, children[2]), nrow = 2, byrow = TRUE)
}
#####################################modify begin################################
# find node parent
find_ancestor <- function(edge,node){
  # Parent of `node` in the edge matrix (rows are parent -> child pairs).
  # Returns 0 when `node` never appears as a child, i.e. it is the root.
  # Vectorized replacement for the original full-matrix scan; keeping the
  # last match reproduces the original sequential-overwrite behavior.
  hits = which(edge[, 2] == node)
  if (length(hits) == 0) {
    return(0)
  }
  edge[hits[length(hits)], 1]
}
# get all unique nodes in the tree
get_all_nodes <- function(edge)
{
  # All distinct node ids in the edge matrix, in row-major first-appearance
  # order (row 1 parent, row 1 child, row 2 parent, ...) — the same order
  # the original element-by-element loop produced.  t() + as.vector walks
  # the matrix row by row.
  unique(as.vector(t(edge)))
}
# find root node
find_root <- function(edge)
{
  # Root of the tree: the unique node with no parent (find_ancestor == 0).
  # BUG FIX: the original's last expression was the for-loop, so the
  # function always returned NULL; the root is now returned explicitly.
  # (Backward-compatible: the only visible caller discards the result.)
  all_nodes = get_all_nodes(edge)
  for (i in seq_along(all_nodes))
  {
    if (find_ancestor(edge, all_nodes[i]) == 0)
    {
      return(all_nodes[i])
    }
  }
  # no parentless node found (malformed input): preserve NULL result
  NULL
}
# find two child branches and nodes if they exist. Otherwise all zeros matrix output
find_child_branches_and_nodes <- function(edge, parent_node){
  # For `parent_node`, return a 2x2 matrix: row 1 holds its (up to two)
  # child nodes, row 2 the corresponding edge-row indices in `edge`.
  # Columns stay zero when fewer than two children exist (e.g. tips).
  result = matrix(0, 2, 2)
  slot = 1
  for (row_i in seq_len(nrow(edge)))
  {
    if (edge[row_i, 1] == parent_node) {
      result[1, slot] = edge[row_i, 2]
      result[2, slot] = row_i
      slot = slot + 1
    }
  }
  result
}
# find all child branch for current branch
find_child_branches <- function(edge, current_edge, child_branches)
{
  # Depth-first accumulation of all descendant edge indices of
  # `current_edge` into `child_branches` (pairs appended parent-first,
  # left subtree explored before right — same order as the original).
  end_node = edge[current_edge, 2]
  children = find_child_branches_and_nodes(edge, end_node)
  if (children[1, 1] == 0)
  {
    # end_node is a tip: nothing hangs below this edge
    return(child_branches)
  }
  left_branch = children[2, 1]
  right_branch = children[2, 2]
  child_branches = c(child_branches, left_branch, right_branch)
  child_branches = find_child_branches(edge, left_branch, child_branches)
  find_child_branches(edge, right_branch, child_branches)
}
# find all child branch for all branches
find_all_child_branches <- function(edge){
  # For every edge (row) of `edge`, collect the indices of all edges lying
  # below it in the tree.  Returns a list with one element per edge.
  # find_root is invoked as in the original, although its result is unused.
  root_node = find_root(edge)
  all_child_branches = rep(list(list()), nrow(edge))
  for (edge_i in seq_len(nrow(edge)))
  {
    all_child_branches[[edge_i]] = find_child_branches(edge, edge_i, integer(0))
  }
  all_child_branches
}
find_all_tip_nodes <- function(edge)
{
  # Tip (leaf) nodes of the tree: nodes that never appear as a parent
  # (column 1 of the edge matrix).  Vectorized replacement for the
  # original grow-in-a-loop scan; result order (first appearance in
  # row-major traversal, as get_all_nodes yields) is preserved, and
  # as.numeric() reproduces the original's double-typed result.
  all_parent_nodes = unique(edge[, 1])
  all_nodes = unique(as.vector(t(edge)))
  as.numeric(all_nodes[!(all_nodes %in% all_parent_nodes)])
}
# find tip nodes under one edge
find_tip_nodes_of_edge <- function(edge)
{
  # For each edge, list every tip node located at or below that edge.
  # A tip under edge i is the end node (column 2) of any descendant branch,
  # or of edge i itself, that is a leaf.
  all_tip_nodes = find_all_tip_nodes(edge)
  all_child_branches = find_all_child_branches(edge)
  all_branch_tips = rep(list(list()), nrow(edge))
  for (edge_i in seq_len(nrow(edge))) {
    # end nodes of all descendant branches first, then of the edge itself —
    # same candidate order as the original's two-stage scan
    candidate_nodes = c(edge[all_child_branches[[edge_i]], 2], edge[edge_i, 2])
    all_branch_tips[[edge_i]] = as.numeric(candidate_nodes[candidate_nodes %in% all_tip_nodes])
  }
  all_branch_tips
}
###################################################################################
###################################################################################
#find the joint prob of observation and branch_i in the subtree
###################################################################################
###################################################################################
find_all_possible_mutation_matrix <- function(subtree, branch_i)
{
  # Enumerate the possible true tip-genotype configurations of `subtree`
  # given a mutation event on branch_i:
  #   [[1]] a single 0->1 mutation on branch_i only
  #   [[2]] a single 0->2 mutation on branch_i only
  #   [[3]] a 0->1 mutation on branch_i plus a second mutation on a
  #         descendant branch (one row per qualifying descendant)
  # Each row: First_branch, Second_branch, then one 0/1/2 value per tip.
  # Uses the sibling helpers find_ancestor() and find_all_child_branches().
  num_rows = nrow(subtree$edge)
  num_cols = length(subtree$tip.label)
  # Build, for each tip, the node path from the tip up to the root.
  # NOTE(review): assumes the ape convention that the root node id is
  # num_cols + 1 — confirm for non-standard trees.
  branch_trees = rep( list(list()),num_cols )
  num_parent = 0
  for (tip_i in 1:num_cols) {
    branch_trees[[tip_i]] = tip_i
    parent = find_ancestor(subtree$edge,tip_i)
    branch_trees[[tip_i]][num_parent+2] = parent
    num_parent=num_parent+1
    while (parent != num_cols+1) {
      tip_node = parent
      parent = find_ancestor(subtree$edge,tip_node)
      branch_trees[[tip_i]][num_parent+2] = parent
      num_parent=num_parent+1
    }
    num_parent = 0
  }
  # Indicator matrix: row j carries a 1 for every tip below branch j (the
  # tips that inherit a mutation placed on branch j).
  possible_true_genotype_with_1 = matrix(rep(0,num_rows*num_cols),nrow=num_rows,ncol = num_cols)
  for (j in 1:num_rows) {
    branch_edge = subtree$edge[j,]
    if (branch_edge[2] <= num_cols) {
      # terminal branch: only its own tip is affected
      possible_true_genotype_with_1[j,branch_edge[2]] = 1
    }else {
      # internal branch: every tip whose root path contains its end node
      for (i in 1:num_cols) {
        list_branch = branch_trees[[i]]
        if (is.na(match(branch_edge[2],list_branch)) == FALSE) {
          possible_true_genotype_with_1[j,i] = 1
          colnames(possible_true_genotype_with_1)=subtree$tip.label
        }
      }
    }
  }
  descendant_branches=find_all_child_branches(subtree$edge)
  num_of_cases = num_rows-1
  possible_true_genotype_with_only_1 = matrix(rep(0,num_cols+2),nrow=1,ncol = num_cols+2)
  possible_true_genotype_with_only_2 = matrix(rep(0,num_cols+2),nrow=1,ncol = num_cols+2)
  possible_true_genotype_with_2 = matrix(rep(0,num_of_cases*(num_cols+2)),nrow=num_of_cases,ncol = num_cols+2)
  colnames(possible_true_genotype_with_only_1)= c("First_branch", "Second_branch", subtree$tip.label)
  colnames(possible_true_genotype_with_only_2)= c("First_branch", "Second_branch", subtree$tip.label)
  colnames(possible_true_genotype_with_2)= c("First_branch", "Second_branch", subtree$tip.label)
  # Columns 1-2 record the mutated branch(es); tip columns start at 3
  # (the original's 1:num_cols+2 relied on `:` binding tighter than `+`;
  # (1:num_cols)+2 is the same indices, written explicitly).
  # Case 1: single 0->1 mutation on branch_i.
  possible_true_genotype_with_only_1[1,1] = branch_i
  possible_true_genotype_with_only_1[1,(1:num_cols)+2] = possible_true_genotype_with_1[branch_i, ]
  # Case 2: single 0->2 mutation on branch_i (both columns record branch_i).
  possible_true_genotype_with_only_2[1,1] = branch_i
  possible_true_genotype_with_only_2[1,2] = branch_i
  possible_true_genotype_with_only_2[1,(1:num_cols)+2] = 2*possible_true_genotype_with_1[branch_i, ]
  # Case 3: a mutation on branch_i plus one on another branch_j; the tip
  # genotypes are the sums of the two indicator rows (values 0/1/2).
  id_row = 1
  for (branch_j in 1:num_rows) {
    if (branch_j == branch_i)
    {
      next
    }
    possible_true_genotype_with_2[id_row,1] = branch_i
    possible_true_genotype_with_2[id_row,2] = branch_j
    possible_true_genotype_with_2[id_row,(1:num_cols)+2] = possible_true_genotype_with_1[branch_i, ] + possible_true_genotype_with_1[branch_j, ]
    id_row = id_row+1
  }
  # Keep only the cases whose second mutation sits on a descendant of
  # branch_i.  BUG FIX: drop = FALSE so a single matching row stays a
  # 1-row matrix (the original dropped to a vector, which breaks the
  # dim()-based checks in downstream callers).
  possible_true_genotype_with_2_sub = possible_true_genotype_with_2[ possible_true_genotype_with_2[,2] %in% descendant_branches[[branch_i]], , drop = FALSE]
  return(list(possible_true_genotype_with_only_1, possible_true_genotype_with_only_2, possible_true_genotype_with_2_sub))
}
###################################################################################
###################################################################################
#Mutation model:find the prob of mutation on each branch, but not on other branches
###################################################################################
###################################################################################
# results from function find_mutation_prob_Bj: prob_0_1= prob_Bj_with_only_1, prob_0_2 = prob_Bj_with_only_2, prob_0_1_2 = prob_Bj_with_2
# Mutation model: probability that the observed mutation pattern originated on
# branch_i, for three competing scenarios:
#   prob_0_1   -- a single 0->1 mutation on branch_i, no other events
#   prob_0_2   -- a single 0->2 (double) mutation on branch_i, no other events
#   prob_0_1_2 -- a 0->1 mutation on branch_i followed by a 1->2 mutation on
#                 one of its descendant branches (one row per descendant)
# Each result is a matrix with columns (First_branch, Second_branch,
# Mutation_Prob); Second_branch is 0 when only one branch mutates.
#
# Args:
#   branch_time_list: numeric vector of branch lengths, indexed by branch number.
#   unit_theta: per-unit-time rate of the 0 -> 1 transition.
#   unit_gamma: per-unit-time rate of the 0 -> 2 transition.
#   unit_mu:    per-unit-time rate of the 1 -> 2 transition.
#   branch_i:   index of the branch assumed to carry the (first) mutation.
#
# NOTE(review): depends on the globals `subtree` and `find_all_child_branches`
# defined earlier in this file.
find_mutation_prob_Bj <- function(branch_time_list, unit_theta, unit_gamma, unit_mu, branch_i){
  num_branches <- dim(subtree$edge)[1]
  all_branches <- c(1:num_branches)
  descendant_branches <- find_all_child_branches(subtree$edge)
  # Branch sets and per-branch factors shared by all three scenarios
  # (previously recomputed once per scenario and per loop iteration).
  desc_i <- descendant_branches[[branch_i]]
  no_mut_branches <- setdiff(all_branches, union(branch_i, desc_i))
  # Probability that branches outside branch_i's clade stay in state 0.
  prob_no_mut <- exp(-(unit_theta + unit_gamma) * branch_time_list[no_mut_branches])
  # Density of exactly one 0->1 event on branch_i with no subsequent 1->2
  # event on the same branch.
  prob_first_mut_on_i <- unit_theta *
    (exp(-(unit_theta + unit_gamma) * branch_time_list[branch_i]) -
       exp(-unit_mu * branch_time_list[branch_i])) / (unit_mu - unit_theta - unit_gamma)
  ###############################################
  # Scenario 1: only one 0->1 mutation, on branch_i
  ###############################################
  prob_only_1_mat <- matrix(, nrow = 1, ncol = num_branches)
  # Descendants inherit state 1 and must not mutate further (rate unit_mu).
  prob_only_1_mat[, desc_i] <- exp(-unit_mu * branch_time_list[desc_i])
  prob_only_1_mat[, no_mut_branches] <- prob_no_mut
  prob_only_1_mat[, branch_i] <- prob_first_mut_on_i
  prob_Bj_with_only_1 <- cbind(First_branch = branch_i,
                               Second_branch = 0,
                               Mutation_Prob = cumprod(prob_only_1_mat)[num_branches])
  ###############################################
  # Scenario 2: only one 0->2 (double) mutation, on branch_i
  ###############################################
  prob_only_2_mat <- matrix(, nrow = 1, ncol = num_branches)
  # Descendants inherit state 2, from which no further mutation is possible,
  # so their factor is exp(0) = 1.
  prob_only_2_mat[, desc_i] <- exp(-0 * branch_time_list[desc_i])
  prob_only_2_mat[, no_mut_branches] <- prob_no_mut
  # Probability of a direct 0->2 event on branch_i.
  prob_only_2_mat[, branch_i] <-
    ((unit_gamma - unit_mu) * exp(-(unit_theta + unit_gamma) * branch_time_list[branch_i]) +
       unit_theta * exp(-unit_mu * branch_time_list[branch_i])) /
    (unit_mu - unit_theta - unit_gamma) + 1
  prob_Bj_with_only_2 <- cbind(First_branch = branch_i,
                               Second_branch = 0,
                               Mutation_Prob = cumprod(prob_only_2_mat)[num_branches])
  ########################################################################
  # Scenario 3: 0->1 on branch_i and 1->2 on one of its descendant branches
  ########################################################################
  if (length(desc_i) > 0) {
    # One row per candidate second-mutation branch; first two columns record
    # (first branch, second branch), the rest hold per-branch probabilities.
    prob_with_2_mat <- matrix(, nrow = length(desc_i), ncol = 2 + num_branches)
    for (descendant_idx in seq_along(desc_i)) {
      second_branch <- desc_i[descendant_idx]
      prob_with_2_mat[descendant_idx, 1] <- branch_i
      prob_with_2_mat[descendant_idx, 2] <- second_branch
      prob_with_2_mat[descendant_idx, no_mut_branches + 2] <- prob_no_mut
      prob_with_2_mat[descendant_idx, branch_i + 2] <- prob_first_mut_on_i
      # Branches below the second mutation carry state 2 (factor exp(0) = 1).
      desc_of_second <- descendant_branches[[second_branch]]
      prob_with_2_mat[descendant_idx, desc_of_second + 2] <- exp(0 * branch_time_list[desc_of_second])
      # Remaining branches in branch_i's clade keep state 1 (no 1->2 event).
      carry_first_only <- setdiff(desc_i, union(second_branch, desc_of_second))
      prob_with_2_mat[descendant_idx, carry_first_only + 2] <- exp(-unit_mu * branch_time_list[carry_first_only])
      # The second-mutation branch: a 1->2 event occurs somewhere along it.
      prob_with_2_mat[descendant_idx, second_branch + 2] <- 1 - exp(-unit_mu * branch_time_list[second_branch])
    }
    # FIX(review): drop = FALSE keeps the one-descendant case a matrix (the
    # original dropped it to a vector, so apply()/cbind() failed); the
    # row-product cbind is also hoisted out of the loop instead of being
    # recomputed every iteration.
    prob_Bj_with_2 <- cbind(prob_with_2_mat[, c(1, 2), drop = FALSE],
                            Mutation_Prob = t(apply(prob_with_2_mat[, -c(1, 2), drop = FALSE],
                                                    1, cumprod))[, num_branches])
  } else {
    # branch_i is a terminal branch: the two-branch scenario is impossible.
    prob_Bj_with_2 <- cbind(First_branch = branch_i,
                            Second_branch = 0,
                            Mutation_Prob = 0)
  }
  return(list(prob_0_1 = prob_Bj_with_only_1,
              prob_0_2 = prob_Bj_with_only_2,
              prob_0_1_2 = prob_Bj_with_2))
}
###################################################################################
###################################################################################
#Error model:find the prob of observation, conditioning on a branch_i
###################################################################################
###################################################################################
# Probability of the observed genotypes, conditional on the mutation having occurred on a given branch (branch_i)
# Error model: probability of the observed genotype matrix conditional on the
# mutation configuration (mutation on branch_i), for the three scenarios
# produced by find_all_possible_mutation_matrix():
#   0_1   -- one 0->1 mutation on branch_i only
#   0_2   -- one 0->2 (double) mutation on branch_i only
#   0_1_2 -- 0->1 on branch_i plus 1->2 on one of its descendant branches
#
# Args:
#   branch_time_list: branch lengths (not used here; kept for a signature
#     parallel to find_mutation_prob_Bj).
#   sequencing_error_model: 3x3 matrix; entry [t+1, o+1] is
#     P(observe state o | true state t) for genotype states coded 0/1/2.
#   branch_i: index of the branch assumed to carry the (first) mutation.
#
# Returns a list with the per-cell error matrices for each scenario
# (error_result_*) and their overall products (prob_error_result_*).
#
# NOTE(review): depends on the globals `subtree`, `obs_genotype_mat` and
# `find_all_possible_mutation_matrix` defined earlier in this file.
find_mutation_prob_of_obs_condition_Bj <- function(branch_time_list, sequencing_error_model, branch_i){
  # FIX(review): the original invoked find_all_possible_mutation_matrix()
  # three times with identical arguments; one call yields all three matrices.
  mutation_matrices <- find_all_possible_mutation_matrix(subtree, branch_i)
  genotype_only_1 <- mutation_matrices[[1]]
  genotype_only_2 <- mutation_matrices[[2]]
  genotype_with_2 <- mutation_matrices[[3]]
  # Drop the two bookkeeping columns (First_branch, Second_branch), keeping the
  # per-tip true genotypes, one row per mutation configuration.
  # FIX(review): the original rebuilt these with matrix(..., byrow = TRUE);
  # because the subset is flattened column-major and refilled by row, that
  # scrambles the tip assignment whenever there is more than one row (the
  # 0_1_2 case) and fails when upstream row-subsetting dropped to a vector.
  # Subsetting with drop = FALSE preserves both shape and cell order.
  extract_genotype_values <- function(genotype_mat) {
    genotype_vals <- genotype_mat[, 3:ncol(genotype_mat), drop = FALSE]
    colnames(genotype_vals) <- subtree$tip.label
    genotype_vals
  }
  # For one set of candidate true genotypes, build a list with one matrix per
  # configuration k whose [j, i] entry is the error-model probability of
  # observing obs_genotype_mat[j, i] given true state true_genotypes[k, i].
  # (This triple loop appeared three times verbatim in the original.)
  build_error_matrices <- function(true_genotypes) {
    obs_mat <- as.matrix(obs_genotype_mat)
    error_list <- replicate(nrow(true_genotypes),
                            matrix(0, nrow = nrow(obs_mat), ncol = ncol(obs_mat)),
                            simplify = FALSE)
    for (k in seq_len(nrow(true_genotypes))) {
      colnames(error_list[[k]]) <- subtree$tip.label
      # j runs over the 2^(# ambiguous sites) resolutions of the observed
      # genotypes; i runs over the tips/samples.
      for (j in seq_len(nrow(obs_mat))) {
        for (i in seq_len(ncol(true_genotypes))) {
          true_state <- true_genotypes[k, i]
          obs_state <- obs_mat[j, i]
          # States are coded 0/1/2 and match the error-model row/column
          # order, so the original 9-way if/else chain reduces to direct
          # indexing; state pairs outside 0..2 keep probability 0, as before.
          if (true_state %in% c(0, 1, 2) & obs_state %in% c(0, 1, 2)) {
            error_list[[k]][j, i] <-
              sequencing_error_model[as.numeric(true_state) + 1, as.numeric(obs_state) + 1]
          }
        }
      }
    }
    error_list
  }
  ##################################################################
  # Scenarios 0_1 and 0_2: a single genotype configuration each
  ##################################################################
  error_result_mat_with_only_1_mutation <- build_error_matrices(extract_genotype_values(genotype_only_1))
  error_result_mat_with_only_2_mutation <- build_error_matrices(extract_genotype_values(genotype_only_2))
  #########################################################################
  # Scenario 0_1_2: one configuration per eligible descendant branch; may
  # be empty when branch_i has no descendants.
  #########################################################################
  genotype_with_2_values <- extract_genotype_values(genotype_with_2)
  if (nrow(genotype_with_2_values) > 0) {
    error_result_mat_with_2_mutation <- build_error_matrices(genotype_with_2_values)
    # Overall probability per configuration: product over every cell of the
    # error matrix (tail(cumprod(m), 1) is the running product's last entry).
    prob_error_result_0_1_2 <- vapply(error_result_mat_with_2_mutation,
                                      function(m) tail(cumprod(m), n = 1),
                                      numeric(1))
  } else {
    error_result_mat_with_2_mutation <- 0
    prob_error_result_0_1_2 <- 0
  }
  # Overall products for the single-configuration scenarios.
  prob_error_result_0_1 <- tail(cumprod(error_result_mat_with_only_1_mutation[[1]]), n = 1)
  prob_error_result_0_2 <- tail(cumprod(error_result_mat_with_only_2_mutation[[1]]), n = 1)
  ##################################################################
  # Return the error matrices and products for the three scenarios
  ##################################################################
  return(list(error_result_0_1 = error_result_mat_with_only_1_mutation,
              error_result_0_2 = error_result_mat_with_only_2_mutation,
              error_result_0_1_2 = error_result_mat_with_2_mutation,
              prob_error_result_0_1 = prob_error_result_0_1,
              prob_error_result_0_2 = prob_error_result_0_2,
              prob_error_result_0_1_2 = prob_error_result_0_1_2))
}
##############################################################################
##############################################################################
# Joint probability of the observed genotypes AND a mutation on branch_i,
# combining the mutation (prior) model with the sequencing error model,
# broken down by scenario (0_1, 0_2, 0_1_2) plus their total.
#
# Args:
#   subtree:  the phylogenetic subtree (phylo object) under analysis.
#   branch_i: index of the candidate mutation branch.
#
# NOTE(review): branch_time_list, unit_theta, unit_gamma, unit_mu and
# sequencing_error_model are globals defined earlier in this file.
find_joint_obs_Bj <- function(subtree, branch_i)
{
  # Prior probability of each mutation scenario on branch_i.
  prob_Bj_final = find_mutation_prob_Bj(branch_time_list, unit_theta, unit_gamma, unit_mu, branch_i)
  # Error-model probability of the observations under each scenario.
  cond_probs = find_mutation_prob_of_obs_condition_Bj(branch_time_list, sequencing_error_model, branch_i)
  # Per-scenario joint probabilities (prior x conditional).
  prob_Bj_and_obs_genotype_0_1 = prob_Bj_final$prob_0_1[1, 3] * cond_probs$prob_error_result_0_1
  prob_Bj_and_obs_genotype_0_2 = prob_Bj_final$prob_0_2[1, 3] * cond_probs$prob_error_result_0_2
  # Sum over the candidate second-mutation branches (inner product).
  prob_Bj_and_obs_genotype_0_1_2 = t(as.matrix(prob_Bj_final$prob_0_1_2[, 3])) %*%
    as.matrix(cond_probs$prob_error_result_0_1_2)
  # FIX(review): the total was previously recomputed from scratch with the
  # same three products; reusing the components gives the identical value.
  prob_Bj_and_obs_genotype = prob_Bj_and_obs_genotype_0_1 +
    prob_Bj_and_obs_genotype_0_2 +
    prob_Bj_and_obs_genotype_0_1_2
  return(list(prob_Bj_and_obs_genotype = prob_Bj_and_obs_genotype,
              prob_Bj_and_obs_genotype_0_1 = prob_Bj_and_obs_genotype_0_1,
              prob_Bj_and_obs_genotype_0_2 = prob_Bj_and_obs_genotype_0_2,
              prob_Bj_and_obs_genotype_0_1_2 = prob_Bj_and_obs_genotype_0_1_2))
}
#find the joint prob of observation and branch_i on all branches of the subtree
# Joint probability of the observations and the mutation branch, evaluated for
# every branch of the subtree. Returns a (#branches x 4) matrix with one
# column per scenario (0_1, 0_2, 0_1_2) plus their total.
find_joint_obs_Bj_all_branches <- function(subtree){
  n_branches <- dim(subtree$edge)[1]
  joint_mat <- matrix(NA, nrow = n_branches, ncol = 4)
  colnames(joint_mat) <- c("prob_Bj_and_obs_genotype_0_1",
                           "prob_Bj_and_obs_genotype_0_2",
                           "prob_Bj_and_obs_genotype_0_1_2",
                           "prob_Bj_and_obs_genotype")
  # Evaluate the joint probability with each branch as mutation candidate.
  for (branch_idx in seq_len(n_branches)) {
    joint_probs <- find_joint_obs_Bj(subtree, branch_idx)
    joint_mat[branch_idx, 1] <- joint_probs$prob_Bj_and_obs_genotype_0_1
    joint_mat[branch_idx, 2] <- joint_probs$prob_Bj_and_obs_genotype_0_2
    joint_mat[branch_idx, 3] <- joint_probs$prob_Bj_and_obs_genotype_0_1_2
    joint_mat[branch_idx, 4] <- joint_probs$prob_Bj_and_obs_genotype
  }
  joint_mat
}
# Posterior probability of the mutation branch given the observations:
# normalizes the joint probabilities by the total probability of the data
# (sum of the per-branch totals, column 4). Returns a (#branches x 4) matrix.
find_mutation_prob_of_Bj_condition_obs <- function(subtree){
  joint_mat <- find_joint_obs_Bj_all_branches(subtree)
  # Normalizing constant: probability of the observations, summed over all
  # candidate mutation branches.
  # FIX(review): the original recomputed this sum inside the loop for every
  # branch and every column; compute it once and divide the whole matrix
  # (identical values, O(n) instead of O(n^2)).
  total_prob <- sum(joint_mat[, 4])
  posterior_mat <- joint_mat / total_prob
  colnames(posterior_mat) <- c("prob_of_Bj_condition_obs_0_1",
                               "prob_of_Bj_condition_obs_0_2",
                               "prob_of_Bj_condition_obs_0_1_2",
                               "prob_of_Bj_condition_obs_genotype")
  posterior_mat
}
result = find_mutation_prob_of_Bj_condition_obs(subtree)
return(result = result)
}
# ape provides read.tree() for the Newick trees loaded in the loop below.
library(ape)
# All combinations of sequencing-error settings:
# alpha = false-positive rate, beta = false-negative rate.
parameter_setting <- expand.grid(
  alpha = c(0.05, 0.1, 0.2, 0.4),
  beta = c(0.05, 0.1, 0.2, 0.4)
)
for(paraInd in 5:5){
alpha = parameter_setting[paraInd,1]
beta = parameter_setting[paraInd,2]
alpha01= alpha
alpha02= alpha*beta/2
beta10= beta/2
beta12= beta/2
gamma20 =0
gamma21 =0
sequencing_error_model=matrix(c(1-alpha01-alpha02,alpha01,alpha02,
beta10,1-beta10-beta12,beta12,
gamma20,gamma21,1-gamma21-gamma21),nrow=3,byrow = TRUE)
print(sequencing_error_model)
unit_theta = 10^(-7)
unit_gamma = 10^(-9)
unit_mu = 10 ^(-2)
number_br = 98
number_cell = 50
if (alpha < 0.1)
{
alpha_str = sprintf('0%s', alpha*100)
} else
{
alpha_str = sprintf('%s', alpha*10)
}
if (beta < 0.1)
{
beta_str = sprintf('0%s', beta*100)
} else
{
beta_str = sprintf('%s', beta*10)
}
ternary_folder_form_result = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s_result',alpha_str, beta_str)
dir.create(ternary_folder_form_result)
# Replicate loop: for each of 100 random trees, read the simulated ternary
# observations, average per-branch probabilities over three random rate draws
# per site, and write four result matrices (01 / 02 / 012 / all) to CSV.
for (indexn in 1:100){
print(c(alpha,beta,indexn))
# Replicate-specific random tree (read.tree is provided by the ape package,
# loaded elsewhere in this script).
form = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/RandomTree/RandomTree_%s.tre', indexn)
sampletr=read.tree(form)
# Observed ternary (0/1/2) tip data for this replicate.
obs_form_0_1_2 = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s/ternary_obs_0_1_tip_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
mat_obs_form_0_1_2 = read.csv(obs_form_0_1_2)
# Accumulators across sites; rebuilt from scratch for every replicate.
ternary_prob_matrix_all_0_1_all=c()
ternary_prob_matrix_01_0_1_all=c()
ternary_prob_matrix_02_0_1_all=c()
ternary_prob_matrix_012_0_1_all=c()
# Skip replicates with no observed sites (empty CSV).
if (dim(mat_obs_form_0_1_2)[1]>0){
ternary_prob_matrix_all_0_1=c()
ternary_prob_matrix_01_0_1=c()
ternary_prob_matrix_02_0_1=c()
ternary_prob_matrix_012_0_1=c()
# Per-site genotype assumptions: normal state 0, mutated state 2.
normal_genotype_0_1_2 = rep(0,dim(mat_obs_form_0_1_2)[1])
mutation_genotype_0_1_2 = rep(2,dim(mat_obs_form_0_1_2)[1])
# Drop the three leading columns; the remainder are per-cell observations.
# NOTE(review): column roles inferred from this indexing -- confirm against
# the CSV layout.
initial_obs_0_1 = data.frame(mat_obs_form_0_1_2[,-c(1,2,3)])
# Site loop: three independent random rate draws per site, averaged below.
for (i in 1:dim(initial_obs_0_1)[1]){
print(i)
# Earlier beta-distributed rate draws, kept for reference:
#rd_unit_theta <- rbeta(10, (10^7)*unit_theta, (10^7)*(1-unit_theta))
#rd_unit_gamma <- rbeta(10, (10^14)*unit_gamma, (10^14)*(1-unit_gamma))
#rd_unit_mu <- rbeta(10, 100*unit_mu, 100*(1-unit_mu))
# Gamma draws with mean equal to each base rate (shape*scale = unit rate).
rd_unit_theta = rgamma(n = 3, shape = 100, scale = 0.01*unit_theta)
rd_unit_gamma = rgamma(3, shape = 100, scale = 0.01*unit_gamma)
rd_unit_mu = rgamma(3, shape = 100, scale = 0.01*unit_mu)
# One column per random draw; one row per branch.
generate_prob_br_0_1_dat=data.frame(matrix(NA, nrow = number_br, ncol = 3))
generate_prob_br_0_2_dat=data.frame(matrix(NA, nrow = number_br, ncol = 3))
generate_prob_br_0_1_2_dat=data.frame(matrix(NA, nrow = number_br, ncol = 3))
generate_prob_br_all_dat=data.frame(matrix(NA, nrow = number_br, ncol = 3))
for (j in 1:3){
# generate_prob() is defined elsewhere in this file/project.  Judging by the
# variable names, its four columns correspond to the 0_1, 0_2, 0_1_2 and
# "all" probabilities -- TODO confirm against its definition.  Results are
# zero-padded up to number_br rows.
generate_prob_br <- generate_prob(sequencing_error_model,rd_unit_theta[j],rd_unit_gamma[j],rd_unit_mu[j],number_br,number_cell,
normal_genotype_0_1_2[i],mutation_genotype_0_1_2[i],initial_obs_0_1[i,],sampletr)
generate_prob_br_0_1_single <- c(generate_prob_br[,1],rep(0,number_br-dim(generate_prob_br)[1]))
generate_prob_br_0_2_single <- c(generate_prob_br[,2],rep(0,number_br-dim(generate_prob_br)[1]))
generate_prob_br_0_1_2_single <- c(generate_prob_br[,3],rep(0,number_br-dim(generate_prob_br)[1]))
generate_prob_br_all_single <- c(generate_prob_br[,4],rep(0,number_br-dim(generate_prob_br)[1]))
generate_prob_br_0_1_dat[,j] = generate_prob_br_0_1_single
generate_prob_br_0_2_dat[,j] = generate_prob_br_0_2_single
generate_prob_br_0_1_2_dat[,j] = generate_prob_br_0_1_2_single
generate_prob_br_all_dat[,j] = generate_prob_br_all_single
}
# Average the three random draws branch-wise, then stack one row per site.
generate_prob_br_0_1=rowMeans(generate_prob_br_0_1_dat, na.rm = FALSE, dims = 1)
generate_prob_br_0_2=rowMeans(generate_prob_br_0_2_dat, na.rm = FALSE, dims = 1)
generate_prob_br_0_1_2=rowMeans(generate_prob_br_0_1_2_dat, na.rm = FALSE, dims = 1)
generate_prob_br_all=rowMeans(generate_prob_br_all_dat, na.rm = FALSE, dims = 1)
ternary_prob_matrix_all_0_1 = rbind(ternary_prob_matrix_all_0_1,generate_prob_br_all)
ternary_prob_matrix_01_0_1 = rbind(ternary_prob_matrix_01_0_1,generate_prob_br_0_1)
ternary_prob_matrix_02_0_1 = rbind(ternary_prob_matrix_02_0_1,generate_prob_br_0_2)
ternary_prob_matrix_012_0_1 = rbind(ternary_prob_matrix_012_0_1,generate_prob_br_0_1_2)
}
# Prefix the site identifier columns (2-3 of the raw data) onto each matrix.
ternary_prob_matrix_all_0_1_rownames=cbind(data.frame(mat_obs_form_0_1_2[,c(2,3)]),data.frame(ternary_prob_matrix_all_0_1))
ternary_prob_matrix_01_0_1_rownames=cbind(data.frame(mat_obs_form_0_1_2[,c(2,3)]),data.frame(ternary_prob_matrix_01_0_1))
ternary_prob_matrix_02_0_1_rownames=cbind(data.frame(mat_obs_form_0_1_2[,c(2,3)]),data.frame(ternary_prob_matrix_02_0_1))
ternary_prob_matrix_012_0_1_rownames=cbind(data.frame(mat_obs_form_0_1_2[,c(2,3)]),data.frame(ternary_prob_matrix_012_0_1))
ternary_prob_matrix_all_0_1_all=rbind(ternary_prob_matrix_all_0_1_all,ternary_prob_matrix_all_0_1_rownames)
ternary_prob_matrix_01_0_1_all=rbind(ternary_prob_matrix_01_0_1_all,ternary_prob_matrix_01_0_1_rownames)
ternary_prob_matrix_02_0_1_all=rbind(ternary_prob_matrix_02_0_1_all,ternary_prob_matrix_02_0_1_rownames)
ternary_prob_matrix_012_0_1_all=rbind(ternary_prob_matrix_012_0_1_all,ternary_prob_matrix_012_0_1_rownames)
}
# Per-replicate output paths and CSV dumps for the four probability matrices.
ternary_prob_matrix_all_0_1_2_out = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s_result/ternary_prob_matrix_all_0_1_2_out_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
ternary_prob_matrix_01_0_1_2_out = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s_result/ternary_prob_matrix_01_0_1_2_out_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
ternary_prob_matrix_02_0_1_2_out= sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s_result/ternary_prob_matrix_02_0_1_2_out_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
ternary_prob_matrix_012_0_1_2_out= sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Ternary_alpha0%s_beta0%s_result/ternary_prob_matrix_012_0_1_2_out_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
write.csv(ternary_prob_matrix_all_0_1_all,file=ternary_prob_matrix_all_0_1_2_out)
write.csv(ternary_prob_matrix_01_0_1_all,file=ternary_prob_matrix_01_0_1_2_out)
write.csv(ternary_prob_matrix_02_0_1_all,file=ternary_prob_matrix_02_0_1_2_out)
write.csv(ternary_prob_matrix_012_0_1_all,file=ternary_prob_matrix_012_0_1_2_out)
}
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{addLayers}
\alias{addLayers}
\title{addLayers}
\usage{
addLayers(OSMMap1, OSMMap2)
}
\arguments{
\item{OSMMap1}{a map object resulting from the call to OSMMap}
\item{OSMMap2}{another map object resulting from a call to OSMMap}
}
\description{
Take two maps and mesh them so that the markers are on different layers
}
\details{
OpenStreetMap Layer adding function
}
\examples{
\dontrun{
dualMap = (addLayers(OSMMap(df, size='size', color='color'),
linePlot))
}
}
\author{
Theo Van Rooy <theo@royaltyanalytics.com>
}
|
/man/addLayers.Rd
|
no_license
|
greentheo/OpenStreetMapR
|
R
| false
| false
| 576
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{addLayers}
\alias{addLayers}
\title{addLayers}
\usage{
addLayers(OSMMap1, OSMMap2)
}
\arguments{
\item{OSMMap1}{a map object resulting from the call to OSMMap}
\item{OSMMap2}{another map object resulting from a call to OSMMap}
}
\description{
Take two maps and mesh them so that the markers are on different layers
}
\details{
OpenStreetMap Layer adding function
}
\examples{
\dontrun{
dualMap = (addLayers(OSMMap(df, size='size', color='color'),
linePlot))
}
}
\author{
Theo Van Rooy <theo@royaltyanalytics.com>
}
|
# swirl course notes (transcript of interactive lessons).  Several lines refer
# to objects created earlier in the swirl session (old.dir, z, x) and will not
# run standalone.
# Lesson 2: Working Directory
getwd() # Current working directory
setwd() # Set working directory (takes a path argument; shown bare for reference)
list.files() # Objects in current working directory
dir() # Also the same result
ls() # Objects in current workspace
args(list.files) # Arguments of the function
dir.create("testdir") # Creates a new directory
file.create("mytest.R") # Creates a new file
file.exists("mytest.R") # Checks the existence of a file
file.info("mytest.R") # ls -ltrh equivalent
file.rename("mytest.R", "mytest2.R") # Rename file
file.copy("mytest2.R", "mytest3.R") # Copy file
file.path("mytest3.R") # Relative path of file
file.path("folder1", "folder2") # Equivalent to os.path.join
dir.create(file.path("testdir2", "testdir3"), recursive = TRUE) # Recursively create missing directories
setwd(old.dir) # Revert to original directory (old.dir was saved earlier in the lesson)
# Lesson 3: Sequences of Numbers
pi:10 # Step of 1, starting with the initial value, till the value does not become greater than the final value
15:1 # Last < First => Step = -1
seq(0, 10, by = 0.5) # Sequence is a generalization that allows step
my_seq <- seq(5, 10, length = 30) # Or even the number of values
seq(along.with = my_seq) # Another sequence of the same length as my_seq
seq_along(my_seq) # Same as above
rep(0, times = 40) # Repetitions of a number
rep(c(0, 1, 2), times = 10) # Repetitions of a vector
rep(c(0, 1, 2), each = 10) # Each value is repeated, then the next value
# Lesson 4: Vectors
my_char <- c("My", "name", "is")
paste(my_char, collapse = " ") # " ".join([...]) equivalent
my_name <- c(my_char, "xyz")
paste(1:3, c("X","Y","Z"), sep = "") # Paste can be used for multiple vectors
paste(LETTERS, 1:4, sep = "-") # LETTERS is a built-in character vector containing A:Z
# Lesson 5: Missing Values
y <- rnorm(1000) # 1000 values sampled from the random normal distribution
my_data <- sample(c(y, z), 100) # Sampling a fixed number of values from a vector (z comes from the session)
# Lesson 6: Subsetting Vectors
# Index vectors come in four different flavors -- logical vectors, vectors of positive integers, vectors of negative integers, and vectors of character strings
# NA > 0 evaluates to NA
x[c(-2, -10)] # All values except the 2nd and the 10th
x[-c(2, 10)] # A shorter way of the same
vect <- c(foo = 11, bar = 2, norf = NA)
vect2 <- c(11,2,NA)
names(vect2) <- c("foo", "bar", "norf")
identical(vect, vect2) # Checks for identical values
# Lesson 7: Matrices and DataFrames
my_vector <- 1:20
length(my_vector)
dim(my_vector) <- c(4,5)
|
/course-2/week_1/swirl.R
|
no_license
|
storyteller-aditya/datasciencecoursera
|
R
| false
| false
| 2,463
|
r
|
# swirl course notes (transcript of interactive lessons).  Several lines refer
# to objects created earlier in the swirl session (old.dir, z, x) and will not
# run standalone.
# Lesson 2: Working Directory
getwd() # Current working directory
setwd() # Set working directory (takes a path argument; shown bare for reference)
list.files() # Objects in current working directory
dir() # Also the same result
ls() # Objects in current workspace
args(list.files) # Arguments of the function
dir.create("testdir") # Creates a new directory
file.create("mytest.R") # Creates a new file
file.exists("mytest.R") # Checks the existence of a file
file.info("mytest.R") # ls -ltrh equivalent
file.rename("mytest.R", "mytest2.R") # Rename file
file.copy("mytest2.R", "mytest3.R") # Copy file
file.path("mytest3.R") # Relative path of file
file.path("folder1", "folder2") # Equivalent to os.path.join
dir.create(file.path("testdir2", "testdir3"), recursive = TRUE) # Recursively create missing directories
setwd(old.dir) # Revert to original directory (old.dir was saved earlier in the lesson)
# Lesson 3: Sequences of Numbers
pi:10 # Step of 1, starting with the initial value, till the value does not become greater than the final value
15:1 # Last < First => Step = -1
seq(0, 10, by = 0.5) # Sequence is a generalization that allows step
my_seq <- seq(5, 10, length = 30) # Or even the number of values
seq(along.with = my_seq) # Another sequence of the same length as my_seq
seq_along(my_seq) # Same as above
rep(0, times = 40) # Repetitions of a number
rep(c(0, 1, 2), times = 10) # Repetitions of a vector
rep(c(0, 1, 2), each = 10) # Each value is repeated, then the next value
# Lesson 4: Vectors
my_char <- c("My", "name", "is")
paste(my_char, collapse = " ") # " ".join([...]) equivalent
my_name <- c(my_char, "xyz")
paste(1:3, c("X","Y","Z"), sep = "") # Paste can be used for multiple vectors
paste(LETTERS, 1:4, sep = "-") # LETTERS is a built-in character vector containing A:Z
# Lesson 5: Missing Values
y <- rnorm(1000) # 1000 values sampled from the random normal distribution
my_data <- sample(c(y, z), 100) # Sampling a fixed number of values from a vector (z comes from the session)
# Lesson 6: Subsetting Vectors
# Index vectors come in four different flavors -- logical vectors, vectors of positive integers, vectors of negative integers, and vectors of character strings
# NA > 0 evaluates to NA
x[c(-2, -10)] # All values except the 2nd and the 10th
x[-c(2, 10)] # A shorter way of the same
vect <- c(foo = 11, bar = 2, norf = NA)
vect2 <- c(11,2,NA)
names(vect2) <- c("foo", "bar", "norf")
identical(vect, vect2) # Checks for identical values
# Lesson 7: Matrices and DataFrames
my_vector <- 1:20
length(my_vector)
dim(my_vector) <- c(4,5)
|
# Reproducibility: pin all package versions to the MRAN snapshot of 2016-01-13.
if(!require(checkpoint)){
install.packages("checkpoint")
library(checkpoint)
}
checkpoint("2016-01-13")
# Install-if-missing guard for the analysis packages; if any require() fails,
# everything is (re)installed and attached.
# NOTE(review): `&` is the elementwise AND; it works because each require()
# returns a length-1 logical, but `&&` is the conventional scalar form here.
if(!(require(readr) & require(ggplot2) & require(tidyr) & require(plyr) & require(dplyr) & require(rstan) & require(rstudioapi) & require(codetools) & require(readxl))){
install.packages(c("rstan", "ggplot2", "tidyr", "dplyr", "plyr", "readr", "rstudioapi", "codetools", "readxl"))
library(rstan)
library(readr)
library(ggplot2)
library(plyr)
library(tidyr)
library(dplyr)
library(rstudioapi)
library(codetools)
library(readxl)
}
# Cache compiled Stan models on disk and parallelise chains across all cores.
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
# Highest density interval of a sample.
#
# Finds the narrowest contiguous window of the sorted (non-NA) values that
# contains `Interval` of the observations.
#
# Args:
#   Values:   numeric vector of draws; NAs are dropped before computing.
#   Interval: target probability mass of the interval (default 0.95).
# Returns:
#   one-row data.frame with columns LowerHDI and HigherHDI.
HDI <- function(Values, Interval = 0.95){
  Values <- Values[!is.na(Values)]
  intSize <- as.integer(length(Values) * Interval)
  startMax <- as.integer(length(Values) - (length(Values) * Interval))
  ordered <- Values[sort.list(Values)]
  # Guard: when Interval covers (almost) the whole sample there is no room to
  # slide the window.  The original `for(i in 1:startMax)` iterated i = 1, 0
  # when startMax == 0 and indexed past the end of `ordered`; return the full
  # range instead, and use seq_len() below so an empty range means no loop.
  if (startMax < 1) {
    return(data.frame(LowerHDI = ordered[1], HigherHDI = ordered[length(ordered)]))
  }
  low <- 1
  width <- Inf  # renamed from `diff` to avoid shadowing base::diff()
  for (i in seq_len(startMax)) {
    if (ordered[i + intSize] - ordered[i] < width) {
      low <- i
      width <- ordered[i + intSize] - ordered[i]
    }
  }
  return(data.frame(LowerHDI = ordered[low], HigherHDI = ordered[low + intSize]))
}
# Load the ELISA plate data, dropping Plate 12, and build integer IDs for
# samples (uID) and plates (pID).  Rows with Samp == "std" are the standard curve.
unkn <- read_csv("Data.csv") %>%
filter(Plate != "Plate 12") %>%
mutate(uID = as.numeric(factor(Samp)),
pID = as.numeric(factor(Plate))) %>%
arrange(uID, Dilution) %>%
mutate(Std = Samp == "std")
# One dilution value per plate/sample/dilution combination (dID), ordered by dID.
ser_dilutions <- unkn %>%
mutate(dID = as.numeric(factor(paste(Plate, Samp, Dilution, sep = "_")))) %>%
distinct(dID, Dilution) %>%
arrange(dID) %>%
.$Dilution
unkn <- mutate(unkn, dID = as.numeric(factor(paste(Plate, Samp, Dilution, sep = "_"))))
# Plot of each plate's standard and corresponding unknowns
# NOTE(review): `fun.data` is placed inside aes() in the stat_summary calls
# below, so it is mapped as a constant aesthetic rather than passed to the
# stat, which then uses its default summary.  Probably intended
# stat_summary(fun.data = "mean", ...) outside aes() -- confirm.
mutate(unkn, Conc = 4500 * Dilution) %>%
ggplot(aes(x = Conc, y = OD, colour = Std, group = uID)) +
geom_point(alpha = 0.4) +
stat_summary(aes(fun.data = "mean"), geom = "line") +
scale_x_log10() +
theme_bw() +
facet_wrap(~Plate, ncol = 3)
# Standards only, one curve per plate, to compare plates against each other.
mutate(unkn, Conc = 4500 * Dilution) %>%
filter(Std == TRUE) %>%
ggplot(aes(x = Conc, y = OD, colour = Plate)) +
geom_point(alpha = 0.4) +
stat_summary(aes(fun.data = "mean"), geom = "line") +
scale_x_log10() +
theme_bw()
# Draw one set of random initial values for the Stan sampler.
# N_dil is accepted for call-site compatibility but is not used in the body.
# Returns a named list matching the model's parameter block: one entry per
# plate for each curve parameter, and one per unknown group (excluding the
# standard) for log_theta.
initial <- function(N_dil, N_plates, N_grp){
  list(std_raw    = rnorm(1, 0, 1),
       sigma_y    = abs(rnorm(1, 0, 1)),
       Bottom     = abs(rnorm(N_plates, 0.04, 0.02)),
       Span       = rnorm(N_plates, 3.5, 0.1),
       log_Inflec = rnorm(N_plates, 0, 1),
       Slope      = abs(rnorm(N_plates, 1, 0.5)),
       log_theta  = runif(N_grp - 1, -5, 6),
       sigma_x    = rexp(1, 1),
       sigma_OD   = abs(rnorm(1, 0, 0.2)))
}
# Run the model
# Fit the single-plate Stan model separately to each of the 18 plates.  Each
# fit rebuilds plate-local uID/dID indices, draws fresh inits for 4 chains,
# and samples with a high adapt_delta to suppress divergent transitions.
mod <- stan_model("logistic_OD_4p_UnknOnly.stan")
sep <- lapply(1:18, function(i){
print(i)
df <- filter(unkn, pID == i) %>%
mutate(uID = as.numeric(factor(Samp)),
pID = as.numeric(factor(Plate)),
dID = as.numeric(factor(paste(Samp, format(Dilution, scientific = F), sep = "_")))) %>%
arrange(dID, uID, Dilution) %>%
mutate(Std = Samp == "std")
inits <- lapply(1:4, function(x) initial(max(df$dID), 1, max(df$uID)))
# Dilution value per plate-local dilution group, in dID order.
ser_dilutions <- df %>%
mutate(dID = as.numeric(factor(paste(Samp, format(Dilution, scientific = F), sep = "_")))) %>%
distinct(dID, Dilution) %>%
arrange(dID) %>%
.$Dilution
# One row per (dilution group, sample) pair for the uID lookup vector.
s_df <- df %>%
distinct(dID, uID)
res <- sampling(mod,
data = list(N = nrow(df),
N_grp = max(s_df$uID),
N_grp_dil = max(df$dID),
dil_ID = df$dID,
uID = s_df$uID,
meas_OD = df$OD,
dilution = ser_dilutions,
mu_Std = 4500,
sigma_std = 200),
init = inits, chains = 4,
iter = 2000, warmup = 500, refresh = 50, control = list(adapt_delta = 0.99))
return(res)
})
# Count post-warmup divergent transitions in a fitted Stan object.
# Relies on nuts_params() (bayesplot/rstan) and filter() (dplyr) being attached.
check_divergent <- function(stan_res){
  post_warmup <- filter(nuts_params(stan_res),
                        Parameter == "divergent__",
                        Iteration > 500)
  sum(post_warmup$Value)
}
# Count divergences for every fitted plate.  `sep` holds 18 fits (lapply(1:18,
# ...) above) and Divergences is later indexed as Divergences[pID] and in a
# 1:18 loop, so summarising only fits 1:6 left NA entries for plates 7-18.
# vapply over seq_along(sep) covers all fits and guarantees a numeric result.
Divergences <- vapply(seq_along(sep), function(x) check_divergent(sep[[x]]), numeric(1))
# Standards coloured by whether their plate had any divergent transitions.
# NOTE(review): Divergences[pID] needs one entry per plate (18); if Divergences
# was computed over fewer fits, entries beyond its length are NA -- verify.
mutate(unkn, Conc = 4500 * Dilution,
Div = Divergences[pID] != 0) %>%
filter(Std == TRUE) %>%
ggplot(aes(x = Conc, y = OD, colour = Div, group = Plate)) +
geom_point(alpha = 0.4) +
stat_summary(aes(fun.data = "mean"), geom = "line") +
scale_x_log10() +
theme_bw()
output <- unkn
# Per-plate inferred concentrations (log_x) with HDIs, recombined over plates.
# The rep(..., each = 2) assumes exactly two replicate wells per dilution
# group -- TODO confirm against the plate layout.
out_sep <- bind_rows(lapply(1:18, function(i){
cat(i)
df <- filter(unkn, pID == i) %>%
mutate(uID = as.numeric(factor(Samp)),
pID = as.numeric(factor(Plate)),
dID = as.numeric(factor(paste(Samp, format(Dilution, scientific = F), sep = "_")))) %>%
arrange(dID, uID, Dilution) %>%
mutate(Std = Samp == "std")
cOD <- exp(rstan::extract(sep[[i]], "log_x")$log_x)
df$Median <- rep(apply(cOD, 2, median), each = 2)
errors <- bind_rows(apply(cOD, 2, HDI))
df$TopHDI <- rep(errors$HigherHDI, each = 2)
df$LowHDI <- rep(errors$LowerHDI, each = 2)
df$Conc <- df$Median
return(df)
}))
out_sep <- mutate(out_sep, uID = as.numeric(factor(Samp)))
# Fitted concentration vs OD per plate, labelled by sample, with HDI bars.
ggplot(out_sep, aes(Conc, OD)) +
scale_x_log10(breaks = 10^seq(floor(log10(min(out_sep$Conc))), ceiling(log10(max(out_sep$Conc))), by = 1)) +
coord_cartesian(xlim = c(1e-4, 35), ylim = c(0, 4)) +
#geom_point(aes(colour = factor(uID))) +
geom_text(aes(label = Samp, colour = factor(uID))) +
geom_errorbarh(aes(xmin = LowHDI, xmax = TopHDI, colour = factor(uID))) +
scale_colour_discrete(guide = "none") +
facet_wrap(~Plate, ncol = 3)
# Per-plate group-level estimates (log_theta) with HDIs; sample names are
# split as "<Group>-<Unit>_<Week>".
out_sep <- bind_rows(lapply(1:18, function(i){
print(i)
out <- unkn %>%
filter(Samp != "std", pID == i) %>%
mutate(uID = as.numeric(factor(Samp)),
pID = as.numeric(factor(Plate))) %>%
arrange(uID) %>%
distinct(uID, .keep_all = T) %>%
separate(Samp, c("Group", "Samp"), sep = "-") %>%
separate(Samp, c("Unit", "Week"), sep = "_") %>%
mutate(Week = as.numeric(Week))
theta <- exp(rstan::extract(sep[[i]], "log_theta")$log_theta)
out$Conc <- apply(theta, 2, median)
errors <- bind_rows(apply(theta, 2, HDI))
out$TopHDI <- errors$HigherHDI
out$LowHDI <- errors$LowerHDI
return(out)
}))
out_sep <- mutate(out_sep, pID = as.numeric(factor(Plate)))
ggplot(out_sep, aes(Week, Conc, colour = Group, fill = Unit)) +
geom_pointrange(aes(ymin = LowHDI, ymax = TopHDI, shape = Unit)) +
geom_line() +
scale_y_log10(breaks = 10^seq(-12, 4)) +
annotation_logticks(sides = "l") +
coord_cartesian(ylim = c(0.1, 2e4)) +
xlim(0, NA) +
#facet_wrap(~Group) +
theme_bw()
# Print the estimates from plates that had divergent transitions.
# NOTE(review): a numeric is used directly as the if() condition (non-zero is
# truthy); an NA entry in Divergences would make this error.
for(i in 1:18){
if(Divergences[i]){
filter(out_sep, pID == i) %>%
print
}
}
# NOTE(review): this first ser_dilutions assignment (the raw Dilution column)
# is immediately superseded by the per-dID version below -- dead code.
ser_dilutions <- unkn %>%
.$Dilution
inits <- lapply(1:4, function(x) initial(nrow(unkn), max(unkn$pID), max(unkn$uID)))
# Rebuild the global (all-plate) indices: dID now identifies a
# plate/sample/dilution combination across the whole data set.
unkn <- unkn %>%
mutate(uID = as.numeric(factor(Samp)),
pID = as.numeric(factor(Plate)),
dID = as.numeric(factor(paste(Plate, Samp, format(Dilution, scientific = F), sep = "_")))) %>%
arrange(dID, Dilution) %>%
mutate(Std = Samp == "std")
ser_dilutions <- unkn %>%
mutate(dID = as.numeric(factor(paste(Plate, Samp, format(Dilution, scientific = F), sep = "_")))) %>%
distinct(dID, Dilution) %>%
arrange(dID) %>%
.$Dilution
# Lookup table: one row per dilution group with its sample and plate IDs.
dil_unkn <- unkn %>%
distinct(uID, pID, dID, Dilution)
# Joint hierarchical model: all plates fitted together, with plate-level (J)
# curve parameters.
mod <- stan_model("logistic_X_4p_Jplate.stan")
res2 <- sampling(mod,
data = list(N = nrow(unkn),
N_grp = max(unkn$uID),
N_grp_dil = max(unkn$dID),
uID = dil_unkn$uID,
dil_ID = unkn$dID,
meas_OD = unkn$OD,
dilution = ser_dilutions,
mu_Std = 4500,
sigma_std = 200,
J = max(unkn$pID),
pID = dil_unkn$pID),
init = inits, chains = 4,diagnostic_file = "dia",
iter = 2000, warmup = 500, refresh = 50, control = list(adapt_delta = 0.95))
# Convergence diagnostics: effective sample size, autocorrelation, Rhat.
stan_ess(res2)
stan_ac(res2)
stan_rhat(res2)
# Look at the curves and the unknowns, the standards are shown in red all other colors are unknowns
output <- unkn
cOD <- exp(rstan::extract(res2, "log_x")$log_x)
# Map per-dID posterior summaries back onto the per-well rows via dID.
output$Median <- apply(cOD, 2, median)[output$dID]
errors <- bind_rows(apply(cOD, 2, HDI))
output$TopHDI <- errors$HigherHDI[output$dID]
output$LowHDI <- errors$LowerHDI[output$dID]
output$Conc <- output$Median
ggplot(output, aes(Conc, OD)) +
scale_x_log10(breaks = 10^seq(floor(log10(min(output$Conc))), ceiling(log10(max(output$Conc))), by = 1)) +
coord_cartesian(xlim = c(1e-4, 35), ylim = c(0, 4)) +
#geom_point(aes(colour = factor(uID))) +
geom_text(aes(label = Samp, colour = factor(uID))) +
geom_errorbarh(aes(xmin = LowHDI, xmax = TopHDI, colour = factor(uID))) +
scale_colour_discrete(guide = "none") +
facet_wrap(~Plate, ncol = 3)
# Plotting the output data (theta) as it is meant to
# One row per sample (highest-OD well), names split as "<Group>-<Unit>_<Day>".
out <- unkn %>%
filter(Samp != "std") %>%
arrange(uID) %>%
group_by(uID) %>%
top_n(1, OD) %>%
ungroup %>%
separate(Samp, c("Group", "Samp"), sep = "-") %>%
separate(Samp, c("Unit", "Day"), sep = "_") %>%
mutate(Day = as.numeric(Day))
theta <- exp(rstan::extract(res2, "log_theta")$log_theta)
out$Conc <- apply(theta, 2, median)[out$uID]
errors <- bind_rows(apply(theta, 2, HDI))
out$TopHDI <- errors$HigherHDI[out$uID]
out$LowHDI <- errors$LowerHDI[out$uID]
ggplot(out, aes(Day, Conc, colour = Group, fill = Unit)) +
geom_pointrange(aes(ymin = LowHDI, ymax = TopHDI, shape = Unit)) +
geom_line() +
scale_y_log10(breaks = 10^seq(-12, 4)) +
annotation_logticks(sides = "l") +
coord_cartesian(ylim = c(0.0001, 5e3)) +
xlim(0, NA) +
theme_bw()
|
/RunModel_MultiPlate_4p.R
|
no_license
|
jonaudet/Logistic_ELISA_std
|
R
| false
| false
| 9,810
|
r
|
# Reproducibility: pin all package versions to the MRAN snapshot of 2016-01-13.
if(!require(checkpoint)){
install.packages("checkpoint")
library(checkpoint)
}
checkpoint("2016-01-13")
# Install-if-missing guard for the analysis packages; if any require() fails,
# everything is (re)installed and attached.
# NOTE(review): `&` is the elementwise AND; it works because each require()
# returns a length-1 logical, but `&&` is the conventional scalar form here.
if(!(require(readr) & require(ggplot2) & require(tidyr) & require(plyr) & require(dplyr) & require(rstan) & require(rstudioapi) & require(codetools) & require(readxl))){
install.packages(c("rstan", "ggplot2", "tidyr", "dplyr", "plyr", "readr", "rstudioapi", "codetools", "readxl"))
library(rstan)
library(readr)
library(ggplot2)
library(plyr)
library(tidyr)
library(dplyr)
library(rstudioapi)
library(codetools)
library(readxl)
}
# Cache compiled Stan models on disk and parallelise chains across all cores.
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
# Highest density interval of a sample.
#
# Finds the narrowest contiguous window of the sorted (non-NA) values that
# contains `Interval` of the observations.
#
# Args:
#   Values:   numeric vector of draws; NAs are dropped before computing.
#   Interval: target probability mass of the interval (default 0.95).
# Returns:
#   one-row data.frame with columns LowerHDI and HigherHDI.
HDI <- function(Values, Interval = 0.95){
  Values <- Values[!is.na(Values)]
  intSize <- as.integer(length(Values) * Interval)
  startMax <- as.integer(length(Values) - (length(Values) * Interval))
  ordered <- Values[sort.list(Values)]
  # Guard: when Interval covers (almost) the whole sample there is no room to
  # slide the window.  The original `for(i in 1:startMax)` iterated i = 1, 0
  # when startMax == 0 and indexed past the end of `ordered`; return the full
  # range instead, and use seq_len() below so an empty range means no loop.
  if (startMax < 1) {
    return(data.frame(LowerHDI = ordered[1], HigherHDI = ordered[length(ordered)]))
  }
  low <- 1
  width <- Inf  # renamed from `diff` to avoid shadowing base::diff()
  for (i in seq_len(startMax)) {
    if (ordered[i + intSize] - ordered[i] < width) {
      low <- i
      width <- ordered[i + intSize] - ordered[i]
    }
  }
  return(data.frame(LowerHDI = ordered[low], HigherHDI = ordered[low + intSize]))
}
# Load the ELISA plate data, dropping Plate 12, and build integer IDs for
# samples (uID) and plates (pID).  Rows with Samp == "std" are the standard curve.
unkn <- read_csv("Data.csv") %>%
filter(Plate != "Plate 12") %>%
mutate(uID = as.numeric(factor(Samp)),
pID = as.numeric(factor(Plate))) %>%
arrange(uID, Dilution) %>%
mutate(Std = Samp == "std")
# One dilution value per plate/sample/dilution combination (dID), ordered by dID.
ser_dilutions <- unkn %>%
mutate(dID = as.numeric(factor(paste(Plate, Samp, Dilution, sep = "_")))) %>%
distinct(dID, Dilution) %>%
arrange(dID) %>%
.$Dilution
unkn <- mutate(unkn, dID = as.numeric(factor(paste(Plate, Samp, Dilution, sep = "_"))))
# Plot of each plate's standard and corresponding unknowns
# NOTE(review): `fun.data` is placed inside aes() in the stat_summary calls
# below, so it is mapped as a constant aesthetic rather than passed to the
# stat, which then uses its default summary.  Probably intended
# stat_summary(fun.data = "mean", ...) outside aes() -- confirm.
mutate(unkn, Conc = 4500 * Dilution) %>%
ggplot(aes(x = Conc, y = OD, colour = Std, group = uID)) +
geom_point(alpha = 0.4) +
stat_summary(aes(fun.data = "mean"), geom = "line") +
scale_x_log10() +
theme_bw() +
facet_wrap(~Plate, ncol = 3)
# Standards only, one curve per plate, to compare plates against each other.
mutate(unkn, Conc = 4500 * Dilution) %>%
filter(Std == TRUE) %>%
ggplot(aes(x = Conc, y = OD, colour = Plate)) +
geom_point(alpha = 0.4) +
stat_summary(aes(fun.data = "mean"), geom = "line") +
scale_x_log10() +
theme_bw()
# Draw one set of random initial values for the Stan sampler.
# N_dil is accepted for call-site compatibility but is not used in the body.
# Returns a named list matching the model's parameter block: one entry per
# plate for each curve parameter, and one per unknown group (excluding the
# standard) for log_theta.
initial <- function(N_dil, N_plates, N_grp){
  list(std_raw    = rnorm(1, 0, 1),
       sigma_y    = abs(rnorm(1, 0, 1)),
       Bottom     = abs(rnorm(N_plates, 0.04, 0.02)),
       Span       = rnorm(N_plates, 3.5, 0.1),
       log_Inflec = rnorm(N_plates, 0, 1),
       Slope      = abs(rnorm(N_plates, 1, 0.5)),
       log_theta  = runif(N_grp - 1, -5, 6),
       sigma_x    = rexp(1, 1),
       sigma_OD   = abs(rnorm(1, 0, 0.2)))
}
# Run the model
# Fit the single-plate Stan model separately to each of the 18 plates.  Each
# fit rebuilds plate-local uID/dID indices, draws fresh inits for 4 chains,
# and samples with a high adapt_delta to suppress divergent transitions.
mod <- stan_model("logistic_OD_4p_UnknOnly.stan")
sep <- lapply(1:18, function(i){
print(i)
df <- filter(unkn, pID == i) %>%
mutate(uID = as.numeric(factor(Samp)),
pID = as.numeric(factor(Plate)),
dID = as.numeric(factor(paste(Samp, format(Dilution, scientific = F), sep = "_")))) %>%
arrange(dID, uID, Dilution) %>%
mutate(Std = Samp == "std")
inits <- lapply(1:4, function(x) initial(max(df$dID), 1, max(df$uID)))
# Dilution value per plate-local dilution group, in dID order.
ser_dilutions <- df %>%
mutate(dID = as.numeric(factor(paste(Samp, format(Dilution, scientific = F), sep = "_")))) %>%
distinct(dID, Dilution) %>%
arrange(dID) %>%
.$Dilution
# One row per (dilution group, sample) pair for the uID lookup vector.
s_df <- df %>%
distinct(dID, uID)
res <- sampling(mod,
data = list(N = nrow(df),
N_grp = max(s_df$uID),
N_grp_dil = max(df$dID),
dil_ID = df$dID,
uID = s_df$uID,
meas_OD = df$OD,
dilution = ser_dilutions,
mu_Std = 4500,
sigma_std = 200),
init = inits, chains = 4,
iter = 2000, warmup = 500, refresh = 50, control = list(adapt_delta = 0.99))
return(res)
})
# Count post-warmup divergent transitions in a fitted Stan object.
# Relies on nuts_params() (bayesplot/rstan) and filter() (dplyr) being attached.
check_divergent <- function(stan_res){
  post_warmup <- filter(nuts_params(stan_res),
                        Parameter == "divergent__",
                        Iteration > 500)
  sum(post_warmup$Value)
}
# Count divergences for every fitted plate.  `sep` holds 18 fits (lapply(1:18,
# ...) above) and Divergences is later indexed as Divergences[pID] and in a
# 1:18 loop, so summarising only fits 1:6 left NA entries for plates 7-18.
# vapply over seq_along(sep) covers all fits and guarantees a numeric result.
Divergences <- vapply(seq_along(sep), function(x) check_divergent(sep[[x]]), numeric(1))
# Standards coloured by whether their plate had any divergent transitions.
# NOTE(review): Divergences[pID] needs one entry per plate (18); if Divergences
# was computed over fewer fits, entries beyond its length are NA -- verify.
mutate(unkn, Conc = 4500 * Dilution,
Div = Divergences[pID] != 0) %>%
filter(Std == TRUE) %>%
ggplot(aes(x = Conc, y = OD, colour = Div, group = Plate)) +
geom_point(alpha = 0.4) +
stat_summary(aes(fun.data = "mean"), geom = "line") +
scale_x_log10() +
theme_bw()
output <- unkn
# Per-plate inferred concentrations (log_x) with HDIs, recombined over plates.
# The rep(..., each = 2) assumes exactly two replicate wells per dilution
# group -- TODO confirm against the plate layout.
out_sep <- bind_rows(lapply(1:18, function(i){
cat(i)
df <- filter(unkn, pID == i) %>%
mutate(uID = as.numeric(factor(Samp)),
pID = as.numeric(factor(Plate)),
dID = as.numeric(factor(paste(Samp, format(Dilution, scientific = F), sep = "_")))) %>%
arrange(dID, uID, Dilution) %>%
mutate(Std = Samp == "std")
cOD <- exp(rstan::extract(sep[[i]], "log_x")$log_x)
df$Median <- rep(apply(cOD, 2, median), each = 2)
errors <- bind_rows(apply(cOD, 2, HDI))
df$TopHDI <- rep(errors$HigherHDI, each = 2)
df$LowHDI <- rep(errors$LowerHDI, each = 2)
df$Conc <- df$Median
return(df)
}))
out_sep <- mutate(out_sep, uID = as.numeric(factor(Samp)))
# Fitted concentration vs OD per plate, labelled by sample, with HDI bars.
ggplot(out_sep, aes(Conc, OD)) +
scale_x_log10(breaks = 10^seq(floor(log10(min(out_sep$Conc))), ceiling(log10(max(out_sep$Conc))), by = 1)) +
coord_cartesian(xlim = c(1e-4, 35), ylim = c(0, 4)) +
#geom_point(aes(colour = factor(uID))) +
geom_text(aes(label = Samp, colour = factor(uID))) +
geom_errorbarh(aes(xmin = LowHDI, xmax = TopHDI, colour = factor(uID))) +
scale_colour_discrete(guide = "none") +
facet_wrap(~Plate, ncol = 3)
# Per-plate group-level estimates (log_theta) with HDIs; sample names are
# split as "<Group>-<Unit>_<Week>".
out_sep <- bind_rows(lapply(1:18, function(i){
print(i)
out <- unkn %>%
filter(Samp != "std", pID == i) %>%
mutate(uID = as.numeric(factor(Samp)),
pID = as.numeric(factor(Plate))) %>%
arrange(uID) %>%
distinct(uID, .keep_all = T) %>%
separate(Samp, c("Group", "Samp"), sep = "-") %>%
separate(Samp, c("Unit", "Week"), sep = "_") %>%
mutate(Week = as.numeric(Week))
theta <- exp(rstan::extract(sep[[i]], "log_theta")$log_theta)
out$Conc <- apply(theta, 2, median)
errors <- bind_rows(apply(theta, 2, HDI))
out$TopHDI <- errors$HigherHDI
out$LowHDI <- errors$LowerHDI
return(out)
}))
out_sep <- mutate(out_sep, pID = as.numeric(factor(Plate)))
ggplot(out_sep, aes(Week, Conc, colour = Group, fill = Unit)) +
geom_pointrange(aes(ymin = LowHDI, ymax = TopHDI, shape = Unit)) +
geom_line() +
scale_y_log10(breaks = 10^seq(-12, 4)) +
annotation_logticks(sides = "l") +
coord_cartesian(ylim = c(0.1, 2e4)) +
xlim(0, NA) +
#facet_wrap(~Group) +
theme_bw()
# Print the estimates from plates that had divergent transitions.
# NOTE(review): a numeric is used directly as the if() condition (non-zero is
# truthy); an NA entry in Divergences would make this error.
for(i in 1:18){
if(Divergences[i]){
filter(out_sep, pID == i) %>%
print
}
}
# NOTE(review): this first ser_dilutions assignment (the raw Dilution column)
# is immediately superseded by the per-dID version below -- dead code.
ser_dilutions <- unkn %>%
.$Dilution
inits <- lapply(1:4, function(x) initial(nrow(unkn), max(unkn$pID), max(unkn$uID)))
# Rebuild the global (all-plate) indices: dID now identifies a
# plate/sample/dilution combination across the whole data set.
unkn <- unkn %>%
mutate(uID = as.numeric(factor(Samp)),
pID = as.numeric(factor(Plate)),
dID = as.numeric(factor(paste(Plate, Samp, format(Dilution, scientific = F), sep = "_")))) %>%
arrange(dID, Dilution) %>%
mutate(Std = Samp == "std")
ser_dilutions <- unkn %>%
mutate(dID = as.numeric(factor(paste(Plate, Samp, format(Dilution, scientific = F), sep = "_")))) %>%
distinct(dID, Dilution) %>%
arrange(dID) %>%
.$Dilution
# Lookup table: one row per dilution group with its sample and plate IDs.
dil_unkn <- unkn %>%
distinct(uID, pID, dID, Dilution)
# Joint hierarchical model: all plates fitted together, with plate-level (J)
# curve parameters.
mod <- stan_model("logistic_X_4p_Jplate.stan")
res2 <- sampling(mod,
data = list(N = nrow(unkn),
N_grp = max(unkn$uID),
N_grp_dil = max(unkn$dID),
uID = dil_unkn$uID,
dil_ID = unkn$dID,
meas_OD = unkn$OD,
dilution = ser_dilutions,
mu_Std = 4500,
sigma_std = 200,
J = max(unkn$pID),
pID = dil_unkn$pID),
init = inits, chains = 4,diagnostic_file = "dia",
iter = 2000, warmup = 500, refresh = 50, control = list(adapt_delta = 0.95))
# Convergence diagnostics: effective sample size, autocorrelation, Rhat.
stan_ess(res2)
stan_ac(res2)
stan_rhat(res2)
# Look at the curves and the unknowns, the standards are shown in red all other colors are unknowns
output <- unkn
cOD <- exp(rstan::extract(res2, "log_x")$log_x)
# Map per-dID posterior summaries back onto the per-well rows via dID.
output$Median <- apply(cOD, 2, median)[output$dID]
errors <- bind_rows(apply(cOD, 2, HDI))
output$TopHDI <- errors$HigherHDI[output$dID]
output$LowHDI <- errors$LowerHDI[output$dID]
output$Conc <- output$Median
ggplot(output, aes(Conc, OD)) +
scale_x_log10(breaks = 10^seq(floor(log10(min(output$Conc))), ceiling(log10(max(output$Conc))), by = 1)) +
coord_cartesian(xlim = c(1e-4, 35), ylim = c(0, 4)) +
#geom_point(aes(colour = factor(uID))) +
geom_text(aes(label = Samp, colour = factor(uID))) +
geom_errorbarh(aes(xmin = LowHDI, xmax = TopHDI, colour = factor(uID))) +
scale_colour_discrete(guide = "none") +
facet_wrap(~Plate, ncol = 3)
# Plotting the output data (theta) as it is meant to
# One row per sample (highest-OD well), names split as "<Group>-<Unit>_<Day>".
out <- unkn %>%
filter(Samp != "std") %>%
arrange(uID) %>%
group_by(uID) %>%
top_n(1, OD) %>%
ungroup %>%
separate(Samp, c("Group", "Samp"), sep = "-") %>%
separate(Samp, c("Unit", "Day"), sep = "_") %>%
mutate(Day = as.numeric(Day))
theta <- exp(rstan::extract(res2, "log_theta")$log_theta)
out$Conc <- apply(theta, 2, median)[out$uID]
errors <- bind_rows(apply(theta, 2, HDI))
out$TopHDI <- errors$HigherHDI[out$uID]
out$LowHDI <- errors$LowerHDI[out$uID]
ggplot(out, aes(Day, Conc, colour = Group, fill = Unit)) +
geom_pointrange(aes(ymin = LowHDI, ymax = TopHDI, shape = Unit)) +
geom_line() +
scale_y_log10(breaks = 10^seq(-12, 4)) +
annotation_logticks(sides = "l") +
coord_cartesian(ylim = c(0.0001, 5e3)) +
xlim(0, NA) +
theme_bw()
|
# Longitude of perihelion of Neptune's orbit for a given Julian Day (JD).
# Thin FFI wrapper: forwards JD unchanged to the compiled routine of the
# same name (requires the package's native code to be loaded).
CAAElementsPlanetaryOrbit_NeptuneLongitudePerihelion <- function(JD) {
  .Call("CAAElementsPlanetaryOrbit_NeptuneLongitudePerihelion", JD)
}
|
/R/CAAElementsPlanetaryOrbit_NeptuneLongitudePerihelion.R
|
no_license
|
helixcn/skycalc
|
R
| false
| false
| 142
|
r
|
# Longitude of perihelion of Neptune's orbit for Julian Day JD.
# Delegates to the native routine of the same name; the package's compiled
# code must be loaded for .Call to resolve the symbol.
CAAElementsPlanetaryOrbit_NeptuneLongitudePerihelion <-
function(JD){
.Call("CAAElementsPlanetaryOrbit_NeptuneLongitudePerihelion", JD)
}
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.6, gaussian MSE) on the
# lymphoid training set and append the fitted path summary to a log file.
# Response is column 1; predictors start at column 4.
library(glmnet)
mydata <- read.table("./TrainingSet/RF/lymphoid.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
# Renamed from `glm` to avoid shadowing stats::glm; spelled out `header = TRUE`
# (was the partially-matched `head = T`).
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.6,
                    family = "gaussian", standardize = FALSE)
sink('./Model/EN/Classifier/lymphoid/lymphoid_066.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
/Model/EN/Classifier/lymphoid/lymphoid_066.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 356
|
r
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.6, gaussian MSE) on the
# lymphoid training set and append the fitted path summary to a log file.
# Response is column 1; predictors start at column 4.
library(glmnet)
mydata <- read.table("./TrainingSet/RF/lymphoid.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
# Renamed from `glm` to avoid shadowing stats::glm; spelled out `header = TRUE`
# (was the partially-matched `head = T`).
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.6,
                    family = "gaussian", standardize = FALSE)
sink('./Model/EN/Classifier/lymphoid/lymphoid_066.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
# Append `item` to the global plot list with amortized O(1) growth: when the
# list is full, double its capacity instead of growing one slot at a time.
# Relies on three globals: `plotlist` (the list), `counter` (slots in use)
# and `size` (current capacity).
AppendPlotList <- function(item)
{
    if( .GlobalEnv$counter == .GlobalEnv$size )
    {
        # Bug fix: the capacity variable is `size`; the original read the
        # undefined `.GlobalEnv$Size`, so the doubling branch failed.
        length(.GlobalEnv$plotlist) <- .GlobalEnv$size <- .GlobalEnv$size * 2
    }
    .GlobalEnv$counter <- .GlobalEnv$counter + 1
    .GlobalEnv$plotlist[[.GlobalEnv$counter]] <- item
}
|
/Week 1 - Clustering/AddItemToList.R
|
no_license
|
Merlijn-van-Breugel/Machine-Learning
|
R
| false
| false
| 287
|
r
|
# Append `item` to the global plot list with amortized O(1) growth: when the
# list is full, double its capacity instead of growing one slot at a time.
# Relies on three globals: `plotlist` (the list), `counter` (slots in use)
# and `size` (current capacity).
AppendPlotList <- function(item)
{
    if( .GlobalEnv$counter == .GlobalEnv$size )
    {
        # Bug fix: the capacity variable is `size`; the original read the
        # undefined `.GlobalEnv$Size`, so the doubling branch failed.
        length(.GlobalEnv$plotlist) <- .GlobalEnv$size <- .GlobalEnv$size * 2
    }
    .GlobalEnv$counter <- .GlobalEnv$counter + 1
    .GlobalEnv$plotlist[[.GlobalEnv$counter]] <- item
}
|
#' @name .TVPBVAR_linear_wrapper
#' @noRd
#' @importFrom abind adrop
#' @importFrom utils capture.output
# Wrapper around the equation-by-equation TVP-BVAR samplers. Dispatches to the
# non-centered sampler ("TVP"/"TVP-NG") or the centered sampler ("TTVP"),
# assembles per-equation draws into full-system stores, optionally trims
# unstable draws by the companion-matrix eigenvalues, and returns data,
# stores, and posterior medians.
.TVPBVAR_linear_wrapper <- function(Yraw, prior, plag, draws, burnin, cons, trend, SV, thin, default_hyperpara, Ex, applyfun, cores, eigen, trim){
class(Yraw) <- "numeric"
prior_in <- prior
if(default_hyperpara[["a_log"]]) default_hyperpara["a_start"] <- 1/log(ncol(Yraw))
if(prior=="TVP" || prior=="TVP-NG"){
prior_in <- ifelse(prior=="TVP",1,2)
post_draws <- applyfun(1:ncol(Yraw), function(nr){
.TVPBVAR_noncentered_R(nr=nr,Y_in=Yraw,p_in=plag,draws_in=draws,burnin_in=burnin,cons_in=cons,trend_in=trend,sv_in=SV,thin_in=thin,prior_in=prior_in,hyperparam_in=default_hyperpara,Ex_in=Ex)
})
tvpbvar <- .var_posterior(post_draws, prior, draws/thin, applyfun, cores)
}else if(prior=="TTVP"){
prior_in <- 3
post_draws <- applyfun(1:ncol(Yraw), function(nr){
.TVPBVAR_centered_R(nr=nr,Y_in=Yraw,p_in=plag,draws_in=draws,burnin_in=burnin,cons_in=cons,trend_in=trend,sv_in=SV,thin_in=thin,prior_in=prior_in,hyperparam_in=default_hyperpara,Ex_in=Ex)
})
tvpbvar <- .var_posterior(post_draws, prior, draws/thin, applyfun, cores)
}
#------------------------------------------------ get data ----------------------------------------#
Y <- tvpbvar$Y; colnames(Y) <- colnames(Yraw); X <- tvpbvar$X
M <- ncol(Y); bigT <- nrow(Y); K <- ncol(X)
if(!is.null(Ex)) Mex <- ncol(Ex)
names <- colnames(Yraw)
if(is.null(names)) names <- rep("Y",M)
xnames <-NULL
for(ii in 1:plag) xnames <- c(xnames,paste0(names,".lag",ii))
# (fix) `enames` was previously built as c(enames, ...) before being defined,
# which errors when Ex is supplied; build it directly instead.
if(!is.null(Ex)) enames <- rep("Tex",Mex) else enames <- NULL
if(cons) cnames <- "cons" else cnames <- NULL
if(trend) tnames <- "trend" else tnames <- NULL
colnames(X) <- c(xnames,enames,cnames,tnames)
#-----------------------------------------get containers ------------------------------------------#
A_store <- tvpbvar$A_store; dimnames(A_store)[[2]] <- paste("t",seq(1,bigT),sep="."); dimnames(A_store)[[3]] <- colnames(X); dimnames(A_store)[[4]] <- colnames(Y)
# splitting up stores
dims <- dimnames(A_store)[[3]]
a0_store <- a1_store <- Ex_store <- Phi_store <- NULL
if(cons) a0_store <- A_store[,,which(dims%in%cnames),]
if(trend) a1_store <- A_store[,,which(dims%in%tnames),]
# (fix) A_store is 4-dimensional (draw, time, coefficient, equation); the
# coefficient dimension is the third one, so subset there (the original
# supplied only three indexes, which errors on a 4-D array).
if(!is.null(Ex)) Ex_store <- A_store[,,which(dims%in%enames),,drop=FALSE]
for(jj in 1:plag) {
xnames.jj <- xnames[grepl(paste0("lag",jj),xnames)]
Phi_store[[jj]] <- A_store[,,which(dims%in%xnames.jj),]
dimnames(Phi_store[[jj]]) <- list(NULL,paste("t",seq(1,bigT),sep="."),xnames.jj,names)
}
L_store <- tvpbvar$L_store
S_store <- tvpbvar$S_store
Smed_store <- tvpbvar$Smed_store
vola_store <- tvpbvar$Sv_store; dimnames(vola_store) <- list(NULL,NULL,colnames(Y))
if(SV){
pars_store <- tvpbvar$pars_store; dimnames(pars_store) <- list(NULL,c("mu","phi","sigma","latent0"),colnames(Y))
}else pars_store <- NULL
res_store <- tvpbvar$res_store; dimnames(res_store) <- list(NULL,NULL,colnames(Y))
# NG
if(prior=="TVP"){
thetasqrt_store<- tvpbvar$thetasqrt_store
Lthetasqrt_store<-tvpbvar$Lthetasqrt_store
tau2_store<-xi2_store<-lambda2_store<-kappa2_store<-a_tau_store<-a_xi_store<-Ltau2_store<-Lxi2_store <- NULL
D_store<-Omega_store<-thrsh_store<-kappa_store<-V0_store<-LD_store<-LOmega_store<-Lthrsh_store<-LV0_store <- NULL
}else if(prior=="TVP-NG"){
D_store<-Omega_store<-thrsh_store<-kappa_store<-V0_store<-LD_store<-LOmega_store<-Lthrsh_store<-LV0_store<-NULL
thetasqrt_store <- tvpbvar$thetasqrt_store
tau2_store <- tvpbvar$tau2_store
xi2_store <- tvpbvar$xi2_store
lambda2_store <- tvpbvar$lambda2_store
kappa2_store <- tvpbvar$kappa2_store
a_tau_store <- tvpbvar$a_tau_store
a_xi_store <- tvpbvar$a_xi_store
Lthetasqrt_store<-tvpbvar$Lthetasqrt_store
Ltau2_store <- tvpbvar$Ltau2_store
Lxi2_store <- tvpbvar$Lxi2_store
}else if(prior=="TTVP"){
thetasqrt_store<-Lthetasqrt_store<-tau2_store<-xi2_store<-lambda2_store<-kappa2_store<-a_tau_store<-a_xi_store<-Ltau2_store<-Lxi2_store<-NULL
D_store <- tvpbvar$D_store
Omega_store <- tvpbvar$Omega_store
thrsh_store <- tvpbvar$thrsh_store
kappa_store <- tvpbvar$kappa_store
V0_store <- tvpbvar$V0_store
LD_store <- tvpbvar$LD_store
LOmega_store <- tvpbvar$LOmega_store
Lthrsh_store <- tvpbvar$Lthrsh_store
LV0_store <- tvpbvar$LV0_store
}
if(eigen){
# check medians: could be done more carefully
# NOTE: the logical argument `eigen` does not shadow the call below — R's
# function-call lookup skips non-function bindings and finds base::eigen.
A.eigen <- unlist(applyfun(1:(draws/thin),function(irep){
Cm <- .gen_compMat(apply(A_store[irep,,,],c(2,3),median),ncol(Yraw),plag)$Cm
return(max(abs(Re(eigen(Cm)$values))))
}))
trim_eigen <- which(A.eigen<trim)
if(length(trim_eigen)==0) stop("No stable draws found. Either increase number of draws or trimming factor.")
A_store<-A_store[trim_eigen,,,,drop=FALSE]
if(cons) a0_store <- a0_store[trim_eigen,,,drop=FALSE]
if(trend) a1_store <- a1_store[trim_eigen,,,drop=FALSE]
if(!is.null(Ex)) Ex_store <- Ex_store[trim_eigen,,,,drop=FALSE]
Phi_store<-lapply(Phi_store,function(l)l[trim_eigen,,,,drop=FALSE])
L_store<-L_store[trim_eigen,,,,drop=FALSE]
S_store<-S_store[trim_eigen,,,,drop=FALSE]
Smed_store<-Smed_store[trim_eigen,,,drop=FALSE]
vola_store<-vola_store[trim_eigen,,,drop=FALSE]
if(SV) pars_store<-pars_store[trim_eigen,,,drop=FALSE]
res_store<-res_store[trim_eigen,,,drop=FALSE]
if(prior=="TVP"){
thetasqrt_store<-thetasqrt_store[trim_eigen,,,drop=FALSE]
Lthetasqrt_store<-lapply(Lthetasqrt_store,function(l)l[trim_eigen,,drop=FALSE])
}
if(prior=="TVP-NG"){
thetasqrt_store<-thetasqrt_store[trim_eigen,,,drop=FALSE]
tau2_store<-tau2_store[trim_eigen,,,drop=FALSE]
xi2_store<-xi2_store[trim_eigen,,,drop=FALSE]
lambda2_store<-lambda2_store[trim_eigen,,,drop=FALSE]
kappa2_store<-kappa2_store[trim_eigen,,,drop=FALSE]
a_tau_store<-a_tau_store[trim_eigen,,,drop=FALSE]
a_xi_store<-a_xi_store[trim_eigen,,,drop=FALSE]
Lthetasqrt_store<-lapply(Lthetasqrt_store,function(l)l[trim_eigen,,drop=FALSE])
Ltau2_store<-lapply(Ltau2_store,function(l)l[trim_eigen,,drop=FALSE])
Lxi2_store<-lapply(Lxi2_store,function(l)l[trim_eigen,,drop=FALSE])
}else if(prior=="TTVP"){
D_store<-D_store[trim_eigen,,,,drop=FALSE]
Omega_store<-Omega_store[trim_eigen,,,,drop=FALSE]
thrsh_store<-thrsh_store[trim_eigen,,,drop=FALSE]
kappa_store<-kappa_store[trim_eigen,,,drop=FALSE]
V0_store<-V0_store[trim_eigen,,,drop=FALSE]
LD_store<-lapply(LD_store,function(l)l[trim_eigen,,,drop=FALSE])
LOmega_store<-lapply(LOmega_store,function(l)l[trim_eigen,,,drop=FALSE])
Lthrsh_store<-lapply(Lthrsh_store,function(l)l[trim_eigen,,drop=FALSE])
LV0_store<-lapply(LV0_store,function(l)l[trim_eigen,,drop=FALSE])
}
}else{A.eigen<-NULL}
store <- list(A_store=A_store,a0_store=a0_store,a1_store=a1_store,Phi_store=Phi_store,Ex_store=Ex_store,S_store=S_store,Smed_store=Smed_store,L_store=L_store,Lthetasqrt_store=Lthetasqrt_store,
vola_store=vola_store,pars_store=pars_store,res_store=res_store,thetasqrt_store=thetasqrt_store,tau2_store=tau2_store,xi2_store=xi2_store,lambda2_store=lambda2_store,
kappa2_store=kappa2_store,a_tau_store=a_tau_store,a_xi_store=a_xi_store,Ltau2_store=Ltau2_store,Lxi2_store=Lxi2_store,D_store=D_store,
Omega_store=Omega_store,thrsh_store=thrsh_store,kappa_store=kappa_store,V0_store=V0_store,LD_store=LD_store,LOmega_store=LOmega_store,
Lthrsh_store=Lthrsh_store,LV0_store=LV0_store,A.eigen=A.eigen)
#------------------------------------ compute posteriors -------------------------------------------#
A_post <- apply(A_store,c(2,3,4),median)
L_post <- apply(L_store,c(2,3,4),median)
S_post <- apply(S_store,c(2,3,4),median)
Smed_post <- apply(Smed_store,c(2,3),median)
Sig <- apply(S_post,c(2,3),mean)/(bigT-K)
res_post <- apply(res_store,c(2,3),median)
# splitting up posteriors
a0_post <- a1_post <- Ex_post <- NULL
if(cons) a0_post <- A_post[,which(dims=="cons"),,drop=FALSE]
if(trend) a1_post <- A_post[,which(dims=="trend"),,drop=FALSE]
if(!is.null(Ex)) Ex_post <- A_post[,which(dims=="Tex"),,drop=FALSE]
Phi_post<- NULL
for(jj in 1:plag){
# (fix) lag coefficients are named "<var>.lag<jj>" (see xnames above); the
# previous exact match on "Ylag<jj>" never matched and produced empty
# slices. Match by lag suffix, mirroring the Phi_store split.
Phi_post[[jj]] <- A_post[,which(grepl(paste0("lag",jj),dims)),,drop=FALSE]
}
vola_post <- apply(vola_store,c(2,3),median); dimnames(vola_post) <- list(NULL,colnames(Y))
if(SV){
pars_post <- apply(pars_store,c(2,3),median); dimnames(pars_post) <- list(c("mu","phi","sigma","latent0"),colnames(Y))
}else pars_post <- NULL
if(prior=="TVP"){
thetasqrt_post<-apply(thetasqrt_store,c(2,3),median)
Lthetasqrt_post<-lapply(Lthetasqrt_store,function(l)apply(l,2,median))
tau2_post<-xi2_post<-lambda2_post<-kappa2_post<-a_tau_post<-a_xi_post<-Ltau2_post<-Lxi2_post<-NULL
D_post<-Omega_post<-thrsh_post<-kappa_post<-V0_post<-LD_post<-LOmega_post<-Lthrsh_post<-LV0_post<-NULL
}else if(prior=="TVP-NG"){
D_post<-Omega_post<-thrsh_post<-kappa_post<-V0_post<-LD_post<-LOmega_post<-Lthrsh_post<-LV0_post<-NULL
thetasqrt_post<-apply(thetasqrt_store,c(2,3),median)
tau2_post <- apply(tau2_store,c(2,3),median)
xi2_post <- apply(xi2_store,c(2,3),median)
lambda2_post <- apply(lambda2_store,c(2,3),median)
kappa2_post <- apply(kappa2_store,c(2,3),median)
a_tau_post <- apply(a_tau_store,c(2,3),median)
a_xi_post <- apply(a_xi_store,c(2,3),median)
Lthetasqrt_post<-lapply(Lthetasqrt_store,function(l)apply(l,2,median))
Ltau2_post <- lapply(Ltau2_store,function(l)apply(l,c(2),median))
Lxi2_post <- lapply(Lxi2_store,function(l)apply(l,c(2),median))
}else if(prior=="TTVP"){
thetasqrt_post<-Lthetasqrt_post<-tau2_post<-xi2_post<-lambda2_post<-kappa2_post<-a_tau_post<-a_xi_post<-Ltau2_post<-Lxi2_post<-NULL
# (fix) margins must be c(2,3,4); the original `c(2,3.4)` truncates to
# dimensions 2 and 3, collapsing D_post to the wrong rank.
D_post <- apply(D_store,c(2,3,4),median)
Omega_post <- apply(Omega_store,c(2,3,4),median)
thrsh_post <- apply(thrsh_store,c(2,3),median)
kappa_post <- apply(kappa_store,c(2,3),median)
V0_post <- apply(V0_store,c(2,3),median)
LD_post <- lapply(LD_store,function(l)apply(l,c(2,3),median))
LOmega_post <- lapply(LOmega_store,function(l)apply(l,c(2,3),median))
Lthrsh_post <- lapply(Lthrsh_store,function(l)apply(l,2,median))
LV0_post <- lapply(LV0_store,function(l)apply(l,2,median))
}
post <- list(A_post=A_post,a0_post=a0_post,a1_post=a1_post,Phi_post=Phi_post,Ex_post=Ex_post,S_post=S_post,Smed_post=Smed_post,L_post=L_post,Lthetasqrt_post=Lthetasqrt_post,
vola_post=vola_post,pars_post=pars_post,res_post=res_post,tau2_post=tau2_post,thetasqrt_post=thetasqrt_post,xi2_post=xi2_post,lambda2_post=lambda2_post,
kappa2_post=kappa2_post,a_tau_post=a_tau_post,a_xi_post=a_xi_post,Ltau2_post=Ltau2_post,Lxi2_post=Lxi2_post,D_post=D_post,
Omega_post=Omega_post,thrsh_post=thrsh_post,kappa_post=kappa_post,V0_post=V0_post,LD_post=LD_post,LOmega_post=LOmega_post,
Lthrsh_post=Lthrsh_post,LV0_post=LV0_post)
return(list(Y=Y,X=X,store=store,post=post))
}
#' @name .TVPBVAR_noncentered_R.m
#' @importFrom stochvol svsample_fast_cpp specify_priors default_fast_sv sv_normal sv_beta sv_gamma
#' @importFrom MASS ginv mvrnorm
#' @importFrom matrixcalc hadamard.prod
#' @importFrom methods is
#' @importFrom stats rnorm rgamma runif dnorm
#' @noRd
# Single-equation Gibbs sampler for the non-centered TVP regression of
# equation `nr` of the system (priors: 1 = TVP, 2 = TVP-NG), with optional
# stochastic volatility. Returns thinned draw stores for the state paths,
# volatilities and shrinkage parameters.
# NOTE(review): `quiet_in` is accepted but never used; the wrapper calls this
# function without it, which is harmless due to lazy evaluation.
.TVPBVAR_noncentered_R <- function(nr,Y_in,p_in,draws_in,burnin_in,cons_in,trend_in,sv_in,thin_in,quiet_in,prior_in,hyperparam_in,Ex_in){
#----------------------------------------INPUTS----------------------------------------------------#
Yraw <- Y_in
p <- p_in
Traw <- nrow(Yraw)
M <- ncol(Yraw)
K <- M*p
Ylag <- .mlag(Yraw,p)
names <- colnames(Yraw)
if(is.null(names)) names <- rep("Y",M)
colnames(Yraw) <- names
nameslags <- NULL
for(ii in 1:p) nameslags <- c(nameslags,paste0(names,".lag",ii))
colnames(Ylag) <- nameslags
texo <- FALSE; Mex <- 0; Exraw <- NULL; enames <- NULL
if(!is.null(Ex_in)){
Exraw <- Ex_in; Mex <- ncol(Exraw); texo <- TRUE
enames <- colnames(Exraw)
if(is.null(enames)) enames <- rep("Tex",Mex)
colnames(Exraw) <- enames
}
# Triangular system: equation nr conditions on the contemporaneous values of
# the preceding equations (Cholesky-style recursion).
if(nr==1) slct <- NULL else slct <- 1:(nr-1)
Xraw <- cbind(Yraw[,slct],Ylag,Exraw)
colnames(Xraw) <- c(colnames(Yraw)[slct],nameslags,enames)
X <- Xraw[(p+1):nrow(Xraw),,drop=FALSE]
y <- Yraw[(p+1):Traw,nr,drop=FALSE]
bigT <- nrow(X)
M_ <- M-length(slct)
cons <- cons_in
if(cons){
X <- cbind(X,1)
colnames(X)[ncol(X)] <- "cons"
}
trend <- trend_in
if(trend){
X <- cbind(X,seq(1,bigT))
colnames(X)[ncol(X)] <- "trend"
}
d <- ncol(X)
n <- d*M
v <- (M*(M-1))/2
#---------------------------------------------------------------------------------------------------------
# HYPERPARAMETERS
#---------------------------------------------------------------------------------------------------------
hyperpara <- hyperparam_in
prior <- prior_in
sv <- sv_in
prmean <- hyperpara$prmean
# non-SV
c0 <- hyperpara$c0
g0 <- hyperpara$g0
# SV
bmu <- hyperpara$bmu
Bmu <- hyperpara$Bmu
a0 <- hyperpara$a0
b0 <- hyperpara$b0
Bsigma <- hyperpara$Bsigma
# TVP-NG
d1 <- hyperpara$d1
d2 <- hyperpara$d2
e1 <- hyperpara$e1
e2 <- hyperpara$e2
b_xi <- hyperpara$b_xi
b_tau <- hyperpara$b_tau
nu_xi <- hyperpara$nu_xi
nu_tau <- hyperpara$nu_tau
a_start <- hyperpara$a_start
sample_A <- hyperpara$sample_A
#---------------------------------------------------------------------------------------------------------
# OLS Quantitites
#---------------------------------------------------------------------------------------------------------
XtXinv <- try(solve(crossprod(X)),silent=TRUE)
if(is(XtXinv,"try-error")) XtXinv <- ginv(crossprod(X))
A_OLS <- XtXinv%*%(t(X)%*%y)
E_OLS <- y - X%*%A_OLS
S_OLS <- crossprod(E_OLS)/(bigT-d)
#---------------------------------------------------------------------------------------------------------
# Initial Values
#---------------------------------------------------------------------------------------------------------
A_draw <- matrix(A_OLS, bigT+1, d, byrow=TRUE, dimnames=list(NULL,colnames(X)))
S_draw <- matrix(S_OLS, bigT, 1)
# time-varying stuff
Am_draw <- A_OLS
At_draw <- matrix(0, bigT+1, d)
theta_draw <- rep(1,d)
theta_sqrt <- sqrt(theta_draw)
#---------------------------------------------------------------------------------------------------------
# PRIORS
#---------------------------------------------------------------------------------------------------------
# Priors on VAR coefs
#-----------------------------
# prior mean
A_prior <- matrix(0,2*d, 1)
# NOTE(review): prior mean is placed at row 2*nr-1 — verify this is the
# intended position (own first lag) given the column layout of X.
A_prior[2*nr-1,1] <- prmean
# prior variance
tau2_draw <- rep(10,d)
xi2_draw <- rep(10,d)
# NG stuff
lambda2 <- 10
a_tau <- a_start
scale_tau <- .43
acc_tau <- 0
kappa2 <- 10
a_xi <- a_start
scale_xi <- .43
acc_xi <- 0
#------------------------------------
# SV quantities
#------------------------------------
svdraw <- list(para=c(mu=-10,phi=.9,sigma=.2,latent0=-3),latent=rep(-3,bigT))
Sv_draw <- svdraw$latent
pars_var <- matrix(c(-3,.9,.2,-3),4,1,dimnames=list(c("mu","phi","sigma","latent0"),NULL))
Sv_priors <- specify_priors(mu=sv_normal(mean=bmu, sd=Bmu), phi=sv_beta(a0,b0), sigma2=sv_gamma(shape=0.5,rate=1/(2*Bsigma)))
#-----------------------------------
# non-SV quantities
#-----------------------------------
sig_eta <- exp(-3)
G0 <- g0/S_OLS*(c0-1)
#---------------------------------------------------------------------------------------------------------
# SAMPLER MISCELLANEOUS
#---------------------------------------------------------------------------------------------------------
nsave <- draws_in
nburn <- burnin_in
ntot <- nsave+nburn
# thinning
thin <- thin_in
count <- 0
thindraws <- nsave/thin
thin.draws <- seq(nburn+1,ntot,by=thin)
#---------------------------------------------------------------------------------------------------------
# STORAGES
#---------------------------------------------------------------------------------------------------------
A_store <- array(NA,c(thindraws,bigT+1,d))
Am_store <- array(NA,c(thindraws,d,1))
At_store <- array(NA,c(thindraws,bigT+1,d))
res_store <- array(NA,c(thindraws,bigT,1))
Sv_store <- array(NA,c(thindraws,bigT,1))
pars_store <- array(NA,c(thindraws,4,1))
# state variances
thetasqrt_store <- array(NA,c(thindraws,d,1))
# TVP-NG
tau2_store <- array(NA,c(thindraws,d,1))
xi2_store <- array(NA,c(thindraws,d,1))
# NOTE(review): lambda2/a_tau are scalars but stored with a `p`-sized middle
# dimension (values are recycled) — confirm this layout is what
# .var_posterior expects.
lambda2_store<- array(NA,c(thindraws,p,1))
kappa2_store <- array(NA,c(thindraws,1,1))
a_xi_store <- array(NA,c(thindraws,1,1))
a_tau_store <- array(NA,c(thindraws,p,1))
#---------------------------------------------------------------------------------------------------------
# MCMC LOOP
#---------------------------------------------------------------------------------------------------------
for (irep in 1:ntot){
#----------------------------------------------------------------------------
# Step 0: Normalize data
Xt <- apply(X,2,function(x)x*exp(-0.5*Sv_draw))
yt <- y*exp(-0.5*Sv_draw)
#----------------------------------------------------------------------------
# Step 1: Sample coefficients
Zt <- cbind(Xt,hadamard.prod(Xt,At_draw[2:(bigT+1),]))
Vpriorinv <- diag(1/c(tau2_draw,xi2_draw))
# V_post <- try(chol2inv(chol(crossprod(Zt)+Vpriorinv)),silent=TRUE)
# if (is(V_post,"try-error")) V_post <- ginv(crossprod(Zt)+Vpriorinv)
# alternative a la supplementary bitto/sfs s.3
Vpriorsqrt <- diag(c(sqrt(tau2_draw),sqrt(xi2_draw)))
V_poststar <- solve(Vpriorsqrt%*%crossprod(Zt)%*%Vpriorsqrt + diag(2*d))
V_post <- Vpriorsqrt%*%V_poststar%*%Vpriorsqrt
A_post <- V_post%*%(crossprod(Zt,yt)+Vpriorinv%*%A_prior)
alph_draw <- try(A_post+t(chol(V_post))%*%rnorm(ncol(Zt)),silent=TRUE)
if (is(alph_draw,"try-error")) alph_draw <- matrix(mvrnorm(1,A_post,V_post),ncol(Zt),1)
Am_draw <- alph_draw[1:d,,drop=FALSE]
theta_sqrt <- alph_draw[(d+1):(2*d),,drop=TRUE]
theta_draw <- theta_sqrt^2
#----------------------------------------------------------------------------
# Step 2: Sample TVP-coef
ystar <- yt - Xt%*%Am_draw
Fstar <- Xt%*%diag(theta_sqrt)
At_draw <- sample_McCausland(ystar, Fstar)
#----------------------------------------------------------------------------
# Step 3: Interweaving
theta_sign <- sign(theta_sqrt)
A_draw <- matrix(Am_draw,bigT+1,d,byrow=TRUE) + At_draw%*%diag(theta_sqrt)
A_diff <- diff(At_draw%*%diag(theta_sqrt))
#A_diff <- diff(A_draw) # same as line above
for(dd in 1:d){
# theta.new
res <- do_rgig1(lambda=-bigT/2,
chi=sum(A_diff[,dd]^2)+(A_draw[1,dd]-Am_draw[dd,1])^2,
psi=1/xi2_draw[dd])
theta_draw[dd] <- res
theta_sqrt[dd] <- sqrt(res)*theta_sign[dd]
# betam.new
sigma2_A_mean <- 1/((1/tau2_draw[dd]) + (1/theta_draw[dd]))
mu_A_mean <- A_draw[1,dd]*tau2_draw[dd]/(tau2_draw[dd] + theta_draw[dd])
Am_draw[dd,1] <- rnorm(1, mu_A_mean, sqrt(sigma2_A_mean))
}
At_draw <- sapply(1:d,function(dd)A_draw[,dd]-Am_draw[dd,1])%*%diag(1/theta_sqrt)
#----------------------------------------------------------------------------
# Step 4: Prior choice
if(prior==1){ # TVP
### no hierarchical priors
}else if(prior==2){ # TVP-NG
kappa2 <- rgamma(1, d1+a_xi*d, d2+0.5*a_xi*mean(xi2_draw)*d)
lambda2 <- rgamma(1, e1+a_tau*d, e2+0.5*a_tau*mean(tau2_draw)*d)
for(dd in 1:d){
xi2_draw[dd] <- do_rgig1(lambda=a_xi-0.5, chi=theta_draw[dd], psi=a_xi*kappa2)
tau2_draw[dd] <- do_rgig1(lambda=a_tau-0.5, chi=(Am_draw[dd,1]-A_prior[dd,1])^2, psi=a_tau*lambda2)
}
xi2_draw[xi2_draw<1e-7] <- 1e-7
tau2_draw[tau2_draw<1e-7] <- 1e-7
if(sample_A){
before <- a_xi
a_xi <- MH_step(a_xi, scale_xi, d, kappa2, theta_sqrt, b_xi, nu_xi, d1, d2)
if(before!=a_xi){
acc_xi <- acc_xi + 1
}
before <- a_tau
# (fix) the a_tau MH step must use its own proposal scale `scale_tau`
# (the original reused `scale_xi`, leaving `scale_tau` dead).
a_tau <- MH_step(a_tau, scale_tau, d, lambda2, Am_draw, b_tau, nu_tau, e1, e2)
if(before!=a_tau){
acc_tau <- acc_tau + 1
}
# scale MH proposal during the first 50% of the burn-in stage
if(irep<(0.5*nburn)){
if((acc_xi/irep)>0.30){scale_xi <- 1.01*scale_xi}
if((acc_xi/irep)<0.15){scale_xi <- 0.99*scale_xi}
# (fix) adapt scale_tau from acc_tau; the original adjusted scale_xi
# here as well, so a_tau's proposal was never tuned.
if((acc_tau/irep)>0.30){scale_tau <- 1.01*scale_tau}
if((acc_tau/irep)<0.15){scale_tau <- 0.99*scale_tau}
}
}
} # END PRIOR QUERY
#----------------------------------------------------------------------------
# Step 5: Sample variances
eps <- y - cbind(Xt,hadamard.prod(Xt,At_draw[2:(bigT+1),]))%*%alph_draw
if(sv){
para <- as.list(pars_var); names(para) <- c("mu","phi","sigma","latent0")
para$nu = Inf; para$rho=0; para$beta<-0
svdraw <- svsample_fast_cpp(y=eps, draws=1, burnin=0, designmatrix=matrix(NA_real_),
priorspec=Sv_priors, thinpara=1, thinlatent=1, keeptime="all",
startpara=para, startlatent=Sv_draw,
keeptau=FALSE, print_settings=list(quiet=TRUE, n_chains=1, chain=1),
correct_model_misspecification=FALSE, interweave=TRUE, myoffset=0,
fast_sv=default_fast_sv)
h_ <- exp(svdraw$latent[1,])
para$mu <- svdraw$para[1,"mu"]
para$phi <- svdraw$para[1,"phi"]
para$sigma <- svdraw$para[1,"sigma"]
para$latent0 <- svdraw$latent0[1,"h_0"]
pars_var <- unlist(para[c("mu","phi","sigma","latent0")])
Sv_draw <- log(h_)
}else{
C0 <- rgamma(1, g0+c0, G0+sig_eta)
S_1 <- c0+bigT/2
S_2 <- C0+crossprod(eps)/2
sig_eta <- 1/rgamma(1,S_1,S_2)
Sv_draw <- matrix(log(sig_eta),bigT,1)
}
#-------------------------------------------------------------------------#
# STEP 6: RANDOM SIGN SWITCH
for(dd in 1:d){
if(runif(1,0,1)>0.5){
theta_sqrt[dd] <- -theta_sqrt[dd]
}
}
#----------------------------------------------------------------------------
# Step 7: store draws
if(irep %in% thin.draws){
count <- count+1
A_store[count,,]<- A_draw
res_store[count,,]<- eps
# SV
Sv_store[count,,] <- Sv_draw
pars_store[count,,] <- pars_var
# NG
thetasqrt_store[count,,] <- theta_sqrt
tau2_store[count,,]<- tau2_draw
xi2_store[count,,] <- xi2_draw
lambda2_store[count,,] <- lambda2
kappa2_store[count,,] <- kappa2
a_xi_store[count,,] <- a_xi
a_tau_store[count,,]<- a_tau
}
}
#---------------------------------------------------------------------------------------------------------
# END ESTIMATION
#---------------------------------------------------------------------------------------------------------
dimnames(A_store)=list(NULL,paste("t",seq(0,bigT),sep="."),colnames(X))
ret <- list(Y=y,X=X,A_store=A_store,Sv_store=Sv_store,pars_store=pars_store,res_store=res_store,
thetasqrt_store=thetasqrt_store,tau2_store=tau2_store,xi2_store=xi2_store,lambda2_store=lambda2_store,kappa2_store=kappa2_store,a_xi_store=a_xi_store,a_tau_store=a_tau_store)
return(ret)
}
#' @name .TVPBVAR_centered_R.m
#' @importFrom stochvol svsample_fast_cpp specify_priors default_fast_sv sv_normal sv_beta sv_gamma
#' @importFrom dlm dlmModReg dlmMLE dlmSmooth
#' @importFrom MASS ginv mvrnorm
#' @importFrom matrixcalc hadamard.prod
#' @importFrom methods is
#' @importFrom stats rnorm rgamma runif dnorm
#' @noRd
.TVPBVAR_centered_R <- function(nr,Y_in,p_in,draws_in,burnin_in,cons_in,trend_in,sv_in,thin_in,quiet_in,prior_in,hyperparam_in,Ex_in){
#----------------------------------------INPUTS----------------------------------------------------#
Yraw <- Y_in
p <- p_in
Traw <- nrow(Yraw)
M <- ncol(Yraw)
K <- M*p
Ylag <- .mlag(Yraw,p)
names <- colnames(Yraw)
if(is.null(names)) names <- rep("Y",M)
colnames(Yraw) <- names
nameslags <- NULL
for(ii in 1:p) nameslags <- c(nameslags,paste0(names,".lag",ii))
colnames(Ylag) <- nameslags
texo <- FALSE; Mex <- 0; Exraw <- NULL; enames <- NULL
if(!is.null(Ex_in)){
Exraw <- Ex_in; Mex <- ncol(Exraw); texo <- TRUE
enames <- colnames(Exraw)
if(is.null(enames)) enames <- rep("Tex",Mex)
colnames(Exraw) <- enames
}
if(nr==1) slct <- NULL else slct <- 1:(nr-1)
Xraw <- cbind(Yraw[,slct],Ylag,Exraw)
colnames(Xraw) <- c(colnames(Yraw)[slct],nameslags,enames)
X <- Xraw[(p+1):nrow(Xraw),,drop=FALSE]
y <- Yraw[(p+1):Traw,nr,drop=FALSE]
bigT <- nrow(X)
M_ <- M-length(slct)
cons <- cons_in
if(cons){
X <- cbind(X,1)
colnames(X)[ncol(X)] <- "cons"
}
trend <- trend_in
if(trend){
X <- cbind(X,seq(1,bigT))
colnames(X)[ncol(X)] <- "trend"
}
d <- ncol(X)
n <- d*M
v <- (M*(M-1))/2
#---------------------------------------------------------------------------------------------------------
# HYPERPARAMETERS
#---------------------------------------------------------------------------------------------------------
hyperpara <- hyperparam_in
prior <- prior_in
sv <- sv_in
prmean <- hyperpara$prmean
# non-SV
c0 <- hyperpara$c0
g0 <- hyperpara$g0
# SV
bmu <- hyperpara$bmu
Bmu <- hyperpara$Bmu
a0 <- hyperpara$a0
b0 <- hyperpara$b0
Bsigma <- hyperpara$Bsigma
# TTVP
B_1 <- hyperpara$B_1
B_2 <- hyperpara$B_2
kappa0 <- hyperpara$kappa0
a_tau <- hyperpara$a_tau
c_tau <- hyperpara$c_tau
d_tau <- hyperpara$d_tau
h0prior <- hyperpara$h0prior
grid.length <- hyperpara$grid.length
thrsh.pct <- hyperpara$thrsh.pct
thrsh.pct.high <- hyperpara$thres.pct.high
TVS <- hyperpara$TVS
a.approx <- hyperpara$a.approx
sim.kappa <- hyperpara$sim.kappa
kappa.grid <- hyperpara$kappa.grid
MaxTrys <- hyperpara$MaxTrys
#---------------------------------------------------------------------------------------------------------
# OLS Quantitites
#---------------------------------------------------------------------------------------------------------
XtXinv <- try(solve(crossprod(X)),silent=TRUE)
if(is(XtXinv,"try-error")) XtXinv <- ginv(crossprod(X))
A_OLS <- XtXinv%*%(t(X)%*%y)
E_OLS <- y - X%*%A_OLS
S_OLS <- crossprod(E_OLS)/(bigT-d)
V_OLS <- as.numeric(S_OLS)*XtXinv
sd_OLS <- sqrt(diag(V_OLS))
#---------------------------------------------------------------------------------------------------------
# Initial Values
#---------------------------------------------------------------------------------------------------------
A_draw <- matrix(A_OLS, bigT+1, d, byrow=TRUE, dimnames=list(NULL,colnames(X)))
S_draw <- matrix(S_OLS, bigT,1)
# state variances
Omega_t <- matrix(1,bigT,d)
# state indicator
D_t <- matrix(1,bigT,d)
#---------------------------------------------------------------------------------------------------------
# PRIORS
#---------------------------------------------------------------------------------------------------------
# Priors on VAR coefs
#-----------------------------
# prior mean
A_prior <- matrix(0,2*d, 1)
A_prior[2*nr-1,1] <- prmean
# prior variance
sqrttheta1 <- diag(d)*0.1
sqrttheta2 <-diag(d)*0.01
Omega_t <- D_t%*%sqrttheta1+(1-D_t)%*%sqrttheta2
kappa00 <- kappa0
if(kappa0<0) kappa00 <- -kappa0 * sd.OLS else kappa00 <- matrix(kappa0,d,1)
if (a.approx){
buildCapm <- function(u){
dlm::dlmModReg(X, dV = exp(u[1]), dW = exp(u[2:(d+1)]),addInt = FALSE)
}
outMLE <- dlm::dlmMLE(y, parm = rep(0,d+1), buildCapm)
mod <- buildCapm(outMLE$par)
outS <- dlm::dlmSmooth(y, mod)
states.OLS <- t(matrix(outS$s,bigT+1,d))
Achg.OLS <- t(diff(t(states.OLS)))#t(as.numeric(ALPHA0)+ALPHA2)
}
# threshold
thrsh <- matrix(0,d,1)
# priors on initial state
B0prior <- matrix(0,d,1)
V0prior <- rep(4,d)
#------------------------------------
# SV quantities
#------------------------------------
svdraw <- list(para=c(mu=-10,phi=.9,sigma=.2,latent0=-3),latent=rep(-3,bigT))
Sv_draw <- svdraw$latent
pars_var <- matrix(c(-3,.9,.2,-3),4,1,dimnames=list(c("mu","phi","sigma","latent0"),NULL))
Sv_priors <- specify_priors(mu=sv_normal(mean=bmu, sd=Bmu), phi=sv_beta(a0,b0), sigma2=sv_gamma(shape=0.5,rate=1/(2*Bsigma)))
#-----------------------------------
# non-SV quantities
#-----------------------------------
sig_eta <- exp(-3)
G0 <- g0/S_OLS*(c0-1)
#---------------------------------------------------------------------------------------------------------
# SAMPLER MISCELLANEOUS
#---------------------------------------------------------------------------------------------------------
nsave <- draws_in
nburn <- burnin_in
ntot <- nsave+nburn
# thinning
thin <- thin_in
count <- 0
thindraws <- nsave/thin
thin.draws <- seq(nburn+1,ntot,by=thin)
#---------------------------------------------------------------------------------------------------------
# STORAGES
#---------------------------------------------------------------------------------------------------------
A_store <- array(NA,c(thindraws,bigT,d))
res_store <- array(NA,c(thindraws,bigT,1))
Sv_store <- array(NA,c(thindraws,bigT,1))
pars_store <- array(NA,c(thindraws,4,1))
# TTVP
D_store <- array(NA,c(thindraws,bigT,d,1))
Omega_store <- array(NA,c(thindraws,bigT,d,1))
thrsh_store <- array(NA,c(thindraws,d,1))
kappa_store <- array(NA,c(thindraws,1))
V0_store <- array(NA,c(thindraws,d,1))
#---------------------------------------------------------------------------------------------------------
# MCMC LOOP
#---------------------------------------------------------------------------------------------------------
for (irep in 1:ntot){
#----------------------------------------------------------------------------
# Step 1: Draw A
invisible(capture.output(
A_draw1 <- try(KF_fast(t(as.matrix(y)), X,as.matrix(exp(Sv_draw)),Omega_t,d, 1, bigT, B0prior, diag(V0prior)),silent=TRUE),
type="message"))
if (is(A_draw1,"try-error")){
invisible(capture.output(
A_draw1 <- KF(t(as.matrix(y)), X,as.matrix(exp(Sv_draw)),Omega_t,d, 1, bigT, B0prior, diag(V0prior)),
type="message"))
try0 <- 0
while (any(abs(A_draw1$bdraw)>1e+10) && try0<MaxTrys){ #This block resamples if the draw from the state vector is not well behaved
invisible(capture.output(
A_draw1 <- try(KF(t(as.matrix(y)), X,as.matrix(exp(Sv_draw)),Omega_t,d, 1, bigT, B0prior, diag(V0prior)),silent=TRUE),
type="message"))
try0 <- try0+1
}
}
A_draw <- t(A_draw1$bdraw)
VCOV <- A_draw1$Vcov
#----------------------------------------------------------------------------
# Step 2: Prior choice
if(prior==3){
#------------------------------------------
# Step 2a: Sample variances
A_diff <- diff(A_draw)
for(dd in 1:d){
sig_q <- sqrttheta1[dd,dd]
if (!a.approx){
si <- (abs(A_diff[,dd])>thrsh[dd,1])*1
}else{
si <- (abs(Achg.OLS[,dd])>thrsh[dd,1])*1
}
si <- D_t[2:bigT,dd]
s_1 <- B_1 + sum(si)/2 + 0.5
s_2 <- B_2 + 0.5*crossprod(A_diff[si==1,dd,drop=FALSE])
sig_q <- 1/rgamma(1,s_1,s_2)
sqrttheta1[dd,dd] <- sig_q
sqrttheta2[dd,dd] <- kappa00[dd,1]^2
}
#------------------------------------------
# sample indicator
if(TVS){
#Check whether coefficient is time-varying or constant at each point in time
Achg <- t(A_diff)
Achg <- cbind(matrix(0,d,1),Achg) #we simply assume that the parameters stayed constant between t=0 and t=1
if(a.approx) Achg.approx <- Achg.OLS else Achg.approx <- Achg
grid.mat <- matrix(unlist(lapply(1:d,function(x) .get_grid(Achg[x,],sqrt(sqrttheta1[x,x]),grid.length=grid.length,thrsh.pct=thrsh.pct,thrsh.pct.high=thrsh.pct.high))),ncol = d)
probs <- get_threshold(Achg, sqrttheta1, sqrttheta2, grid.mat, Achg.approx)
for(dd in 1:d){
post1 <- probs[,dd]
probs1 <- exp(post1-max(post1))/sum(exp(post1-max(post1)))
thrsh[dd,] <- sample(grid.mat[,dd],1,prob=probs1)
if (!a.approx){
D_t[,dd] <- (abs(Achg[dd,])>thrsh[dd,])*1 #change 2:T usw. here
}else{
D_t[,dd] <- (abs(Achg.OLS[dd,])>thrsh[dd,])*1 #change 2:T usw. here
}
}
if (sim.kappa){
grid.kappa <- kappa.grid
Lik.kappa <- matrix(0,length(grid.kappa),1)
count <- 0
for (grid.i in grid.kappa){
count <- count+1
sqrttheta.prop <- (grid.i*sd_OLS)^2
cov.prop <- sqrt(D_t*diag(sqrttheta1)+(1-D_t)*sqrttheta.prop)
Lik.kappa[count,1] <-sum(dnorm(t(Achg),matrix(0,bigT,2),cov.prop,log=TRUE))
}
Lik.kappa.norm <- exp(Lik.kappa-max(Lik.kappa))
probs.kappa <- Lik.kappa.norm/sum(Lik.kappa.norm)
kappa0 <- sample(grid.kappa,size=1, prob=probs.kappa)
kappa00 <- kappa0*sd_OLS
}
}
Omega_t <- D_t%*%sqrttheta1+(1-D_t)%*%sqrttheta2
#------------------------------------------
# Step 2b: Draw variance of initial state
lambda2_tau <- rgamma(1,c_tau+a_tau*d,d_tau+a_tau/2*sum(V0prior)) # global component
# local component
for(dd in 1:d){
res <- try(do_rgig1(lambda=a_tau-0.5,
chi=A_draw[1,dd]^2,
psi=a_tau*lambda2_tau), silent=TRUE)
V0prior[dd] <- ifelse(is(res,"try-error"),next,res)
}
} # END PRIOR QUERY
#----------------------------------------------------------------------------
# Step 3: Sample variances
eps <- y - rowSums(hadamard.prod(X,A_draw))
if(sv){
para <- as.list(pars_var); names(para) <- c("mu","phi","sigma","latent0")
para$nu = Inf; para$rho=0; para$beta<-0
svdraw <- svsample_fast_cpp(y=eps, draws=1, burnin=0, designmatrix=matrix(NA_real_),
priorspec=Sv_priors, thinpara=1, thinlatent=1, keeptime="all",
startpara=para, startlatent=Sv_draw,
keeptau=FALSE, print_settings=list(quiet=TRUE, n_chains=1, chain=1),
correct_model_misspecification=FALSE, interweave=TRUE, myoffset=0,
fast_sv=default_fast_sv)
h_ <- exp(svdraw$latent[1,])
para$mu <- svdraw$para[1,"mu"]
para$phi <- svdraw$para[1,"phi"]
para$sigma <- svdraw$para[1,"sigma"]
para$latent0 <- svdraw$latent0[1,"h_0"]
pars_var <- unlist(para[c("mu","phi","sigma","latent0")])
Sv_draw <- log(h_)
}else{
C0 <- rgamma(1, g0+c0, G0+sig_eta)
S_1 <- c0+bigT/2
S_2 <- C0+crossprod(eps)/2
sig_eta <- 1/rgamma(1,S_1,S_2)
Sv_draw <- matrix(log(sig_eta),bigT,1)
}
#----------------------------------------------------------------------------
# Step 4: store draws
if(irep %in% thin.draws){
count <- count+1
A_store[count,,]<- A_draw
res_store[count,,]<- eps
# SV
Sv_store[count,,] <- Sv_draw
pars_store[count,,] <- pars_var
# TTVP
D_store[count,,,] <- D_t
Omega_store[count,,,] <- Omega_t
thrsh_store[count,,] <- thrsh
kappa_store[count,] <- kappa0
V0_store[count,,] <- V0prior
}
}
#---------------------------------------------------------------------------------------------------------
# END ESTIMATION
#---------------------------------------------------------------------------------------------------------
dimnames(A_store)=list(NULL,paste("t",seq(1,bigT),sep="."),colnames(X))
ret <- list(Y=y,X=X,A_store=A_store,Sv_store=Sv_store,pars_store=pars_store,res_store=res_store,
D_store=D_store,Omega_store=Omega_store,thrsh_store=thrsh_store,kappa_store=kappa_store,V0_store=V0_store)
return(ret)
}
#' @name .get_grid
#' @noRd
#' Equally spaced grid of candidate thresholds between `thrsh.pct` and
#' `thrsh.pct.high` times the state-innovation standard deviation.
#' `Achg` is accepted for interface compatibility but is not used here.
.get_grid <- function(Achg,sd.state,grid.length=150,thrsh.pct=0.1,thrsh.pct.high=0.9){
  seq(from       = thrsh.pct * sd.state,
      to         = thrsh.pct.high * sd.state,
      length.out = grid.length)
}
#' @name .gck
#' @noRd
#' Draws a discrete indicator path following Gerlach, Carter & Kohn (2000),
#' "Efficient Bayesian Inference for Dynamic Mixture Models", JASA 95(451),
#' pp. 819-828: a backward recursion (their Step 1, p. 821) builds summary
#' quantities (mu, omega) of the information in future observations, then a
#' forward pass (Step 2, pp. 821-822) samples each indicator from its
#' discrete full conditional with the continuous states integrated out.
#'
#' yg: observations (length t); gg, hh, capg: observation-equation terms
#' (stacked over time, p rows per period); f, capf: state-equation
#' intercepts/transitions (stacked, kstate rows per period); sigv: state
#' innovation loading scaled by the indicator; kold: current indicator path;
#' ex0, vx0: initial-state mean/variance; nvalk, kprior, kvals: support size,
#' prior probabilities and values of the indicator; p, kstate: observation
#' and state dimensions. Returns the new indicator path (length t).
#'
#' NOTE(review): `sigv%*%kold[i+1]` and `kvals[j,1]%*%sigv` only conform when
#' the relevant factor is effectively 1x1 (scalar state / scalar sigv) --
#' confirm intended dimensions against the callers.
.gck <- function(yg,gg,hh,capg,f,capf,sigv,kold,t,ex0,vx0,nvalk,kprior,kvals,p,kstate){
  # GCK's Step 1 on page 821: backward recursion for mu_t and omega_t
  lpy2n=0;
  mu = matrix(0,t*kstate,1);
  omega = matrix(0,t*kstate,kstate);
  for (i in seq(t-1,1,by=-1)){
    # extract period-(i+1) system matrices from the stacked inputs
    gatplus1 = sigv%*%kold[i+1]
    ftplus1 = capf[(kstate*i+1):(kstate*(i+1)),]
    cgtplus1 = capg[(i*p+1):((i+1)*p),]
    htplus1 = t(hh[(i*p+1):((i+1)*p),])
    htt1 <- crossprod(htplus1,gatplus1)
    # prediction-error variance R_{t+1} and gain B_{t+1}
    rtplus1 = tcrossprod(htt1)+tcrossprod(cgtplus1,cgtplus1)
    rtinv = solve(rtplus1)
    btplus1 = tcrossprod(gatplus1)%*%htplus1%*%rtinv
    atplus1 = (diag(kstate)-tcrossprod(btplus1,htplus1))%*%ftplus1
    if (kold[i+1] == 0){
      # no break at i+1: state-innovation variance is zero
      ctplus1 = matrix(0,kstate,kstate)
    }else{
      cct = gatplus1%*%(diag(kstate)-crossprod(gatplus1,htplus1)%*%tcrossprod(rtinv,htplus1)%*%gatplus1)%*%t(gatplus1)
      ctplus1 = t(chol(cct))
    }
    otplus1 = omega[(kstate*i+1):(kstate*(i+1)),]
    dtplus1 = crossprod(ctplus1,otplus1)%*%ctplus1+diag(kstate)
    # recursion for omega_t (GCK p. 821)
    omega[(kstate*(i-1)+1):(kstate*i),] = crossprod(atplus1,(otplus1 - otplus1%*%ctplus1%*%solve(dtplus1)%*%t(ctplus1)%*%otplus1))%*%atplus1+t(ftplus1)%*%htplus1%*%rtinv%*%t(htplus1)%*%ftplus1
    satplus1 = (diag(kstate)-tcrossprod(btplus1,htplus1))%*%(f[,i+1]-btplus1%*%gg[,i+1]) # NOTE(review): original carried a "CHCK" marker here -- verify this term against GCK
    mutplus1 = mu[(kstate*i+1):(kstate*(i+1)),]
    # recursion for mu_t (GCK p. 821)
    mu[(kstate*(i-1)+1):(kstate*i),] = crossprod(atplus1,(diag(kstate)-otplus1%*%ctplus1%*%solve(dtplus1)%*%t(ctplus1)))%*%(mutplus1-otplus1%*%(satplus1+btplus1%*%yg[i+1]))+t(ftplus1)%*%htplus1%*%rtinv%*%(yg[i+1]-gg[,i+1]-t(htplus1)%*%f[,i+1])
  }
  # GCKs Step 2 on pages 821-822: forward pass, sampling each indicator
  kdraw = kold;
  ht = t(hh[1:p,])
  ft = capf[1:kstate,]
  gat = matrix(0,kstate,kstate)
  # Note: this specification implies no shift in first period -- sensible
  rt = t(ht)%*%ft%*%vx0%*%t(ft)%*%ht + crossprod(ht,gat)%*%crossprod(gat,ht)+ tcrossprod(capg[1:p,])
  rtinv = solve(rt)
  jt = (ft%*%vx0%*%t(ft)%*%ht + tcrossprod(gat)%*%ht)%*%rtinv
  mtm1 = (diag(kstate) - tcrossprod(jt,ht))%*%(f[,1] + ft%*%ex0) + jt%*%(yg[1] - gg[,1])
  vtm1 <- ft%*%tcrossprod(vx0,ft)+tcrossprod(gat)-jt%*%tcrossprod(rt,jt)
  lprob <- matrix(0,nvalk,1)
  for (i in 2:t){
    ht <- t(hh[((i-1)*p+1):(i*p),])
    ft <- capf[(kstate*(i-1)+1):(kstate*i),]
    # evaluate log p(k_i = kvals[j] | .) on each support point
    for (j in 1:nvalk){
      gat <- kvals[j,1]%*%sigv
      rt <- crossprod(ht,ft)%*%tcrossprod(vtm1,ft)%*%ht+crossprod(ht,gat)%*%crossprod(gat,ht)+tcrossprod(capg[((i-1)*p+1):(i*p),])
      rtinv <- solve(rt)
      jt <- (ft%*%tcrossprod(vtm1,ft)%*%ht+tcrossprod(gat)%*%ht)%*%rtinv
      mt <- (diag(kstate)-tcrossprod(jt,ht))%*%(f[,i]+ft%*%mtm1)+jt%*%(yg[i]-gg[,i])
      vt <- ft%*%tcrossprod(vtm1,ft)+tcrossprod(gat)-jt%*%tcrossprod(rt,jt)
      # log-likelihood contribution of observation i
      lpyt = -.5*log(det(rt)) - .5*t(yg[i] - gg[,i] - t(ht)%*%t(f[,i] + ft%*%mtm1))%*%rtinv%*%(yg[i] - gg[,i] - t(ht)%*%(f[,i] + ft%*%mtm1))
      if (det(vt)<=0){
        # guard: skip the Cholesky when vt is numerically non-PD
        tt <- matrix(0,kstate,kstate)
      }else{
        tt <- t(chol(vt))
      }
      ot = omega[(kstate*(i-1)+1):(kstate*i),]
      mut = mu[(kstate*(i-1)+1):(kstate*i),]
      tempv = diag(kstate) + crossprod(tt,ot)%*%tt
      # contribution of future observations via the backward quantities
      lpyt1n = -.5*log(det(tempv)) -.5*(crossprod(mt,ot)%*%mt-2*crossprod(mut,mt)-t(mut-ot%*%mt)%*%tt%*%solve(tempv)%*%t(tt)%*%(mut-ot%*%mt))
      lprob[j,1] <- log(kprior[j,1])+lpyt1n+lpyt
      if (i==2){
        lpy2n <- lpyt1n+lpyt
      }
    }
    # normalise in log space, then inverse-cdf draw of k_i
    pprob = exp(lprob-max(lprob))/sum(exp(lprob-max(lprob)))
    tempv = runif(1)
    tempu = 0
    for (j in 1:nvalk){
      tempu <- tempu+pprob[j,1]
      if (tempu> tempv){
        kdraw[i] <- kvals[j,1]
        break
      }
    }
    # one-step Kalman update conditional on the sampled indicator
    gat = kdraw[i]%*%sigv
    rt = crossprod(ht,ft)%*%tcrossprod(vtm1,ft)%*%ht+t(ht)%*%tcrossprod(gat)%*%ht+tcrossprod(capg[((i-1)*p+1):(i*p)])
    rtinv = solve(rt)
    jt = (ft%*%tcrossprod(vtm1,ft)%*%ht+tcrossprod(gat)%*%ht)%*%rtinv
    mtm1 <- (diag(kstate)-tcrossprod(jt,ht))%*%(f[,i]+ft%*%mtm1)+jt%*%(yg[i]-gg[,i])
    vtm1 = ft%*%tcrossprod(vtm1,ft)+tcrossprod(gat)-jt%*%tcrossprod(rt,jt)
  }
  return(kdraw)
}
#' @name .var_posterior
#' @importFrom MASS ginv
#' @importFrom abind adrop abind
#' @noRd
#' Re-assembles M equation-wise posterior draws (one list element per
#' equation, estimated via a triangular system) into system-wide VAR
#' objects: time-varying reduced-form coefficients (A_store), the
#' lower-triangular contemporaneous matrix (L_store) and time-varying
#' covariance matrices (S_store). Prior-specific shrinkage stores are
#' bound depending on `prior` ("TVP", "TVP-NG" or "TTVP").
.var_posterior <- function(post_draws, prior, draws, applyfun, cores){
  M <- length(post_draws)
  bigT <- nrow(post_draws[[1]]$Y)
  bigK <- ncol(post_draws[[1]]$X)                        # regressors in the first equation
  K <- unlist(lapply(post_draws,function(l)ncol(l$X)))   # regressors per equation
  # bind data
  Y <- do.call("cbind",lapply(1:M,function(mm)post_draws[[mm]]$Y))
  X <- post_draws[[1]]$X
  # general stuff
  res_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$res_store),along=3)
  Sv_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$Sv_store),along=3)
  pars_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$pars_store),along=3)
  # container
  A_store <- array(NA,c(draws,bigT,bigK,M))
  L_store <- array(NA,c(draws,bigT,M,M))
  S_store <- array(NA,c(draws,bigT,M,M))
  timepoints <- paste("t",seq(1,bigT),sep=".")
  # invert the triangular system draw-by-draw (parallelised via applyfun)
  store.obj <- applyfun(1:draws,function(irep){
    At_store <- array(NA,c(bigT,bigK,M))
    Lt_store <- array(NA,c(bigT,M,M))
    St_store <- array(NA,c(bigT,M,M))
    for(tt in 1:bigT){
      # Build unit-lower-triangular A0 from each equation's first mm-1
      # coefficients -- presumably the contemporaneous terms of the
      # triangular regression; confirm against the equation samplers.
      A0 <- diag(M)
      for(mm in 2:M){
        A0[mm,1:(mm-1)] <- -post_draws[[mm]]$A_store[irep,timepoints[tt],1:(mm-1)]
      }
      A0inv <- try(solve(A0),silent=TRUE)
      if(is(A0inv,"try-error")) A0inv <- ginv(A0)  # pseudo-inverse fallback
      Lt_store[tt,,] <- A0inv
      St_store[tt,,] <- A0inv%*%diag(exp(Sv_store[irep,tt,]))%*%t(A0inv)
      # remaining coefficients (columns mm:K[mm]) form the structural
      # lag coefficients; premultiply by A0inv for the reduced form
      Atilde <- NULL
      for(mm in 1:M) Atilde <- cbind(Atilde,post_draws[[mm]]$A_store[irep,timepoints[tt],mm:K[mm]])
      At_store[tt,,] <- t(A0inv%*%t(Atilde))
    }
    return(list(At_store=At_store,Lt_store=Lt_store,St_store=St_store))
  })
  # copy the per-draw results into the preallocated containers
  for(irep in 1:draws){
    A_store[irep,,,] <- store.obj[[irep]]$At_store
    L_store[irep,,,] <- store.obj[[irep]]$Lt_store
    S_store[irep,,,] <- store.obj[[irep]]$St_store
  }
  dimnames(A_store) <- list(NULL,timepoints,colnames(X),colnames(Y))
  Smed_store <- apply(S_store,c(1,3,4),median)  # per-draw median over time
  if(prior=="TVP"){
    # TVP: only sqrt-theta stores; everything prior-specific else is NULL
    thetasqrt_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$thetasqrt_store[,mm:K[mm],]),along=3)
    Lthetasqrt_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$thetasqrt_store[,1:(mm-1),,drop=FALSE],drop=3))
    tau2_store<-xi2_store<-Ltau2_store<-Lxi2_store<-lambda2_store<-kappa2_store<-a_xi_store<-a_tau_store<-D_store<-Omega_store<-thrsh_store<-kappa_store<-V0_store<-LD_store<-LOmega_store<-Lthrsh_store<-LV0_store<-NULL
  }else if(prior=="TVP-NG"){
    # TVP with Normal-Gamma shrinkage: bind all NG hyperparameter stores
    D_store<-Omega_store<-thrsh_store<-kappa_store<-V0_store<-LD_store<-LOmega_store<-Lthrsh_store<-LV0_store<-NULL
    # general stuff
    thetasqrt_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$thetasqrt_store[,mm:K[mm],]),along=3)
    lambda2_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$lambda2_store),along=3)
    kappa2_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$kappa2_store),along=3)
    a_xi_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$a_xi_store),along=3)
    a_tau_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$a_tau_store),along=3)
    tau2_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$tau2_store[,mm:K[mm],]),along=3)
    xi2_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$xi2_store[,mm:K[mm],]),along=3)
    ## ATTENTION: variances of L just as list !!
    Lthetasqrt_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$thetasqrt_store[,1:(mm-1),,drop=FALSE],drop=3))
    Ltau2_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$tau2_store[,1:(mm-1),,drop=FALSE],drop=3))
    Lxi2_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$xi2_store[,1:(mm-1),,drop=FALSE],drop=3))
  }else if(prior=="TTVP"){
    # Threshold-TVP: bind indicator/threshold stores instead
    thetasqrt_store<-Lthetasqrt_store<-tau2_store<-xi2_store<-Ltau2_store<-Lxi2_store<-lambda2_store<-kappa2_store<-a_xi_store<-a_tau_store<-NULL
    # general stuff
    kappa_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$kappa_store),along=3)
    D_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$D_store[,,mm:K[mm],]),along=4)
    Omega_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$Omega_store[,,mm:K[mm],]),along=4)
    thrsh_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$thrsh_store[,mm:K[mm],]),along=3)
    V0_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$V0_store[,mm:K[mm],]),along=3)
    ## ATTENTION: variances of L just as list !!
    LD_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$D_store[,,1:(mm-1),,drop=FALSE],drop=4))
    LOmega_store<- lapply(2:M,function(mm)adrop(post_draws[[mm]]$Omega_store[,,1:(mm-1),,drop=FALSE],drop=4))
    Lthrsh_store<- lapply(2:M,function(mm)adrop(post_draws[[mm]]$thrsh_store[,1:(mm-1),,drop=FALSE],drop=3))
    LV0_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$V0_store[,1:(mm-1),,drop=FALSE],drop=3))
  }
  ret <- list(Y=Y,X=X,A_store=A_store,L_store=L_store,Sv_store=Sv_store,S_store=S_store,Smed_store=Smed_store,pars_store=pars_store,res_store=res_store,thetasqrt_store=thetasqrt_store,Lthetasqrt_store=Lthetasqrt_store,
              tau2_store=tau2_store,xi2_store=xi2_store,Ltau2_store=Ltau2_store,Lxi2_store=Lxi2_store,lambda2_store=lambda2_store,kappa2_store=kappa2_store,a_xi_store=a_xi_store,a_tau_store=a_tau_store,
              D_store=D_store,Omega_store=Omega_store,thrsh_store=thrsh_store,kappa_store=kappa_store,V0_store=V0_store,LD_store=LD_store,LOmega_store=LOmega_store,Lthrsh_store=Lthrsh_store,LV0_store=LV0_store)
  return(ret)
}
#' @name .TVPBVAR_linear_R
#' @importFrom stochvol svsample_fast_cpp specify_priors default_fast_sv sv_normal sv_beta sv_gamma
#' @importFrom MASS ginv mvrnorm
#' @importFrom matrixcalc hadamard.prod
#' @importFrom methods is
#' @importFrom stats rnorm rgamma runif dnorm
#' @noRd
#' MCMC sampler for a time-varying-parameter BVAR in the non-centered
#' parameterisation (Fruehwirth-Schnatter/Wagner style) with Normal-Gamma
#' shrinkage, ancillarity-sufficiency interweaving, and optional stochastic
#' volatility. Returns thinned posterior draws of coefficients, covariance
#' terms, volatilities and shrinkage hyperparameters.
#'
#' Bugfixes relative to the previous revision:
#'  * `burnin` (undefined) replaced by `nburn` in the MH proposal scaling
#'    blocks -- previously errored whenever `sample_A` was TRUE.
#'  * tau MH step now uses `scale_tau[pp]` (declared but unused before)
#'    instead of `scale_xi[pp]`, which is scalar and gave NA for pp > 1.
#'  * covariance shrinkage draw indexes `L_draw`/`l_prior` with the loop
#'    indices (ii,jj) instead of the stale index `mm`.
#'  * `Ystar` uses equation mm's own coefficients `Am_draw[,mm]` instead of
#'    the full matrix `Am_draw` (which produced a bigT x M object).
.TVPBVAR_linear_R <- function(Y_in,p_in,draws_in,burnin_in,cons_in,trend_in,sv_in,thin_in,quiet_in,prior_in,hyperparam_in,Ex_in){
  #----------------------------------------INPUTS----------------------------------------------------#
  Yraw <- Y_in
  p <- p_in
  Traw <- nrow(Yraw)
  M <- ncol(Yraw)
  K <- M*p
  Ylag <- .mlag(Yraw,p)    # lagged endogenous regressors
  nameslags <- NULL
  for (ii in 1:p) nameslags <- c(nameslags,rep(paste("Ylag",ii,sep=""),M))
  colnames(Ylag) <- nameslags
  # optional truly exogenous regressors
  texo <- FALSE; Mex <- 0; Exraw <- NULL
  if(!is.null(Ex_in)){
    Exraw <- Ex_in; Mex <- ncol(Exraw)
    texo <- TRUE
    colnames(Exraw) <- rep("Tex",Mex)
  }
  X <- cbind(Ylag,Exraw)
  X <- X[(p+1):nrow(X),,drop=FALSE]   # drop initial observations lost to lags
  Y <- Yraw[(p+1):Traw,,drop=FALSE]
  bigT <- nrow(X)
  cons <- cons_in
  if(cons){
    X <- cbind(X,1)
    colnames(X)[ncol(X)] <- "cons"
  }
  trend <- trend_in
  if(trend){
    X <- cbind(X,seq(1,bigT))
    colnames(X)[ncol(X)] <- "trend"
  }
  k <- ncol(X)       # regressors per equation
  n <- k*M           # total number of coefficients
  v <- (M*(M-1))/2   # free elements in the lower triangle of L
  #---------------------------------------------------------------------------------------------------------
  # HYPERPARAMETERS
  #---------------------------------------------------------------------------------------------------------
  hyperpara <- hyperparam_in
  prior <- prior_in
  sv <- sv_in
  prmean <- hyperpara$prmean
  a_1 <- hyperpara$a_1
  b_1 <- hyperpara$b_1
  # SV
  Bsigma <- hyperpara$Bsigma
  a0 <- hyperpara$a0
  b0 <- hyperpara$b0
  bmu <- hyperpara$bmu
  Bmu <- hyperpara$Bmu
  # other stuff
  d1 <- hyperpara$d1
  d2 <- hyperpara$d2
  e1 <- hyperpara$e1
  e2 <- hyperpara$e2
  b_xi <- hyperpara$b_xi
  b_tau <- hyperpara$b_tau
  nu_xi <- hyperpara$nu_xi
  nu_tau <- hyperpara$nu_tau
  a_start <- hyperpara$a_start
  sample_A <- hyperpara$sample_A
  #---------------------------------------------------------------------------------------------------------
  # OLS Quantitites
  #---------------------------------------------------------------------------------------------------------
  XtXinv <- try(solve(crossprod(X)),silent=TRUE)
  if(is(XtXinv,"try-error")) XtXinv <- ginv(crossprod(X))  # pseudo-inverse fallback
  A_OLS <- XtXinv%*%(t(X)%*%Y)
  E_OLS <- Y - X%*%A_OLS
  S_OLS <- crossprod(E_OLS)/(bigT-k)
  #---------------------------------------------------------------------------------------------------------
  # Initial Values
  #---------------------------------------------------------------------------------------------------------
  A_draw <- array(A_OLS, c(bigT+1,k,M))
  S_draw <- array(S_OLS, c(M,M,bigT))
  Em_draw <- Em_str <- E_OLS
  L_draw <- diag(M)
  # time-varying stuff: constant part Am, standardized states At, scales theta
  Am_draw <- A_OLS
  At_draw <- array(0, c(bigT+1, k, M))
  theta_draw <- matrix(1, k, M)
  theta_sqrt <- sqrt(theta_draw)
  #---------------------------------------------------------------------------------------------------------
  # PRIORS
  #---------------------------------------------------------------------------------------------------------
  # Priors on VAR coefs
  #-----------------------------
  # prior mean (stacked: k means for Am, k for the theta_sqrt part)
  A_prior <- matrix(0,2*k,M)
  diag(A_prior) <- prmean
  # prior variance
  tau2.draw <- matrix(10,k,M)
  xi2.draw <- matrix(10,k,M)
  # NG stuff (lag-wise global shrinkage for the constant part)
  lambda2 <- matrix(10,p,1)
  a_tau <- matrix(a_start,p,1)
  scale_tau <- rep(.43,p)   # MH proposal scales for a_tau (one per lag)
  acc_tau <- rep(0,p)
  kappa2 <- 10
  a_xi <- a_start
  scale_xi <- .43           # MH proposal scale for a_xi
  acc_xi <- 0
  #------------------------------------
  # Priors on coefs in H matrix of VCV
  #------------------------------------
  # prior mean
  l_prior <- matrix(0,M,M)
  # prior variance
  L_prior <- matrix(10,M,M)
  L_prior[upper.tri(L_prior)] <- 0; diag(L_prior) <- 0
  # NG
  lambda2_L <- 10
  a_L_tau <- a_start
  scale_L_tau <- .43
  acc_L_tau <- 0
  #------------------------------------
  # SV quantities
  #------------------------------------
  Sv_draw <- matrix(-3,bigT,M)
  svdraw <- list(para=c(mu=-10,phi=.9,sigma=.2),latent=rep(-3,bigT))
  svl <- list()
  for (jj in 1:M) svl[[jj]] <- svdraw
  pars_var <- matrix(c(-3,.9,.2,-3),4,M,dimnames=list(c("mu","phi","sigma","latent0"),NULL))
  hv <- svdraw$latent
  para <- list(mu=-3,phi=.9,sigma=.2)
  Sv_priors <- specify_priors(mu=sv_normal(mean=bmu, sd=Bmu), phi=sv_beta(a0,b0), sigma2=sv_gamma(shape=0.5,rate=1/(2*Bsigma)))
  eta <- list()
  #---------------------------------------------------------------------------------------------------------
  # SAMPLER MISCELLANEOUS
  #---------------------------------------------------------------------------------------------------------
  nsave <- draws_in
  nburn <- burnin_in
  ntot <- nsave+nburn
  # thinning
  thin <- thin_in
  count <- 0
  thindraws <- nsave/thin
  thin.draws <- seq(nburn+1,ntot,by=thin)
  #---------------------------------------------------------------------------------------------------------
  # STORAGES
  #---------------------------------------------------------------------------------------------------------
  A_store <- array(NA,c(thindraws,bigT+1,k,M))
  Am_store <- array(NA,c(thindraws,k,M))
  At_store <- array(NA,c(thindraws,bigT+1,k,M))
  L_store <- array(NA,c(thindraws,M,M))
  res_store <- array(NA,c(thindraws,bigT,M))
  Sv_store <- array(NA,c(thindraws,bigT,M))
  pars_store <- array(NA,c(thindraws,4,M))
  # NG
  tau2_store <- array(NA,c(thindraws,k,M))
  xi2_store <- array(NA,c(thindraws,k,M))
  lambda2_store<- array(NA,c(thindraws,p,1))
  kappa2_store <- array(NA,c(thindraws,1,1))
  a_xi_store <- array(NA,c(thindraws,1,1))
  a_tau_store <- array(NA,c(thindraws,p,1))
  #---------------------------------------------------------------------------------------------------------
  # MCMC LOOP
  #---------------------------------------------------------------------------------------------------------
  for (irep in 1:ntot){
    #----------------------------------------------------------------------------
    # Step 1: Sample coefficients equation-by-equation (triangular system)
    for (mm in 1:M){
      if(mm==1){
        # bugfix: residual of equation mm only (was X%*%Am_draw, a bigT x M matrix)
        Ystar <- (Y[,mm]-X%*%Am_draw[,mm])*exp(-0.5*Sv_draw[,mm])
        Fstar <- (X%*%diag(theta_sqrt[,mm]))*exp(-0.5*Sv_draw[,mm])
        At_draw[,,mm] <- sample_McCausland(Ystar, Fstar)
        # joint draw of constant part Am and scales theta_sqrt
        Y.i <- Y[,mm]*exp(-0.5*Sv_draw[,mm])
        Z.i <- cbind(X,hadamard.prod(X,At_draw[2:(bigT+1),,mm]))*exp(-0.5*Sv_draw[,mm])
        Vpriorinv <- diag(1/c(tau2.draw[,mm],xi2.draw[,mm]))
        V_post <- try(chol2inv(chol(crossprod(Z.i)+Vpriorinv)),silent=TRUE)
        if (is(V_post,"try-error")) V_post <- ginv(crossprod(Z.i)+Vpriorinv)
        A_post <- V_post%*%(crossprod(Z.i,Y.i)+Vpriorinv%*%A_prior[,mm])
        A.draw.i <- try(A_post+t(chol(V_post))%*%rnorm(ncol(Z.i)),silent=TRUE)
        if (is(A.draw.i,"try-error")) A.draw.i <- matrix(mvrnorm(1,A_post,V_post),ncol(Z.i),1)
        Am_draw[,mm] <- A.draw.i[1:k,,drop=FALSE]
        theta_sqrt[,mm] <- A.draw.i[(k+1):(2*k),,drop=FALSE]
        # compute errors
        Em_draw[,mm] <- Em_str[,mm] <- Y[,mm] - X%*%Am_draw[,mm] - apply(hadamard.prod(X,At_draw[2:(bigT+1),,mm]%*%diag(theta_sqrt[,mm])),1,sum)
      }else{
        # bugfix: residual of equation mm only (was X%*%Am_draw)
        Ystar <- (Y[,mm]-X%*%Am_draw[,mm])*exp(-0.5*Sv_draw[,mm])
        Fstar <- (X%*%diag(theta_sqrt[,mm]))*exp(-0.5*Sv_draw[,mm])
        At_draw[,,mm] <- sample_McCausland(Ystar, Fstar)
        # equations mm>1 additionally load on preceding residuals (gives L)
        Y.i <- Y[,mm]*exp(-0.5*Sv_draw[,mm])
        Z.i <- cbind(X,hadamard.prod(X,At_draw[2:(bigT+1),,mm]),Em_draw[,1:(mm-1)])*exp(-0.5*Sv_draw[,mm])
        Vpriorinv <- diag(1/c(tau2.draw[,mm],xi2.draw[,mm],L_prior[mm,1:(mm-1)]))
        V_post <- try(chol2inv(chol((crossprod(Z.i)+Vpriorinv))),silent=TRUE)
        if (is(V_post,"try-error")) V_post <- ginv((crossprod(Z.i)+Vpriorinv))
        A_post <- V_post%*%(crossprod(Z.i,Y.i)+Vpriorinv%*%c(A_prior[,mm],l_prior[mm,1:(mm-1)]))
        A.draw.i <- try(A_post+t(chol(V_post))%*%rnorm(ncol(Z.i)),silent=TRUE)
        if (is(A.draw.i,"try-error")) A.draw.i <- matrix(mvrnorm(1,A_post,V_post),ncol(Z.i),1)
        Am_draw[,mm] <- A.draw.i[1:k,,drop=FALSE]
        theta_sqrt[,mm] <- A.draw.i[(k+1):(2*k),,drop=FALSE]
        L_draw[mm,1:(mm-1)] <- A.draw.i[(2*k+1):ncol(Z.i),,drop=FALSE]
        # compute errors (Em_str are the structural/orthogonalised residuals)
        Em_draw[,mm] <- Y[,mm]-X%*%Am_draw[,mm]-apply(hadamard.prod(X,At_draw[2:(bigT+1),,mm]%*%diag(theta_sqrt[,mm])),1,sum)
        Em_str[,mm] <- Y[,mm]-X%*%Am_draw[,mm]-apply(hadamard.prod(X,At_draw[2:(bigT+1),,mm]%*%diag(theta_sqrt[,mm])),1,sum)-Em_draw[,1:(mm-1),drop=FALSE]%*%t(L_draw[mm,1:(mm-1),drop=FALSE])
      }
    }
    rownames(Am_draw) <- colnames(X)
    theta_draw <- theta_sqrt^2
    #----------------------------------------------------------------------------
    # Step 3: Interweaving (ASIS) -- redraw theta and Am in the centered
    # parameterisation to improve mixing
    theta_sign <- sign(theta_sqrt)
    for(mm in 1:M){
      A_draw[,,mm] <- matrix(Am_draw[,mm],bigT+1,k,byrow=TRUE) + At_draw[,,mm]%*%diag(theta_sqrt[,mm])
      A_diff <- diff(At_draw[,,mm]%*%diag(theta_sqrt[,mm]))
      for(kk in 1:k){
        # theta.new: GIG draw of the state variance
        res <- do_rgig1(lambda=-bigT/2,
                        chi=sum(A_diff[,kk]^2)+(A_draw[1,kk,mm]-Am_draw[kk,mm])^2,
                        psi=1/xi2.draw[kk,mm])
        theta_draw[kk,mm] <- res
        theta_sqrt[kk,mm] <- sqrt(res)*theta_sign[kk,mm]
        # Am_new: conjugate normal draw of the constant part
        sigma2_A_mean <- 1/((1/tau2.draw[kk,mm]) + (1/theta_draw[kk,mm]))
        mu_A_mean <- A_draw[1,kk,mm]*tau2.draw[kk,mm]/(tau2.draw[kk,mm] + theta_draw[kk,mm])
        Am_draw[kk,mm] <- rnorm(1, mu_A_mean, sqrt(sigma2_A_mean))
      }
      # map back to the non-centered states
      At_draw[,,mm] <- sapply(1:k,function(kk)A_draw[,kk,mm]-Am_draw[kk,mm])%*%diag(1/theta_sqrt[,mm])
    }
    #----------------------------------------------------------------------------
    # Step 4a: Shrinkage priors on state variances (NG: global kappa2, local xi2)
    kappa2 <- rgamma(1, d1+a_xi*k, d2+0.5*k*a_xi*mean(xi2.draw))
    for(ii in 1:k){
      for(jj in 1:M){
        xi2.draw[ii,jj] <- do_rgig1(lambda=a_xi-0.5, chi=theta_draw[ii,jj], psi=a_xi*kappa2)
      }
    }
    xi2.draw[xi2.draw<1e-7] <- 1e-7   # numerical floor
    if(sample_A){
      before <- a_xi
      a_xi <- MH_step(a_xi, scale_xi, k, kappa2, as.vector(theta_sqrt), b_xi, nu_xi, d1, d2)
      if(before!=a_xi){
        acc_xi <- acc_xi + 1
      }
      # scale MH proposal during the first 50% of the burn-in stage
      # bugfix: was `burnin` (undefined); local burn-in length is `nburn`
      if(irep<(0.5*nburn)){
        if((acc_xi/irep)>0.30){scale_xi <- 1.01*scale_xi}
        if((acc_xi/irep)<0.15){scale_xi <- 0.99*scale_xi}
      }
    }
    # Step 4b: Shrinkage prior on mean (multiplicative Gamma prior, lag-wise)
    for(pp in 1:p){
      slct.i <- which(rownames(Am_draw)==paste("Ylag",pp,sep=""))
      if(pp==1 && cons) slct.i <- c(slct.i,which(rownames(Am_draw)=="cons"))
      if(pp==1 && trend) slct.i <- c(slct.i,which(rownames(Am_draw)=="trend"))
      Am_lag.i <- Am_draw[slct.i,,drop=FALSE]
      A_prior.i <- A_prior[slct.i,,drop=FALSE]
      tau2.i <- tau2.draw[slct.i,,drop=FALSE]
      if(pp==1){
        lambda2[pp,1] <- rgamma(1, e1+a_tau[pp,1]*M^2, e2+0.5*a_tau[pp,1]*mean(tau2.i))
      }else{
        # deeper lags are shrunk multiplicatively by the preceding lambdas
        lambda2[pp,1] <- rgamma(1, e1+a_tau[pp,1]*M^2, e2+0.5*a_tau[pp,1]*prod(lambda2[1:(pp-1)])*mean(tau2.i))
      }
      Mend <- M + ifelse(pp==1&&cons,1,0) + ifelse(pp==1&&trend,1,0)
      for(ii in 1:Mend){
        for(jj in 1:M){
          tau2.i[ii,jj] <- do_rgig1(lambda=a_tau[pp,1]-0.5, chi=(Am_lag.i[ii,jj]-A_prior.i[ii,jj])^2, psi=a_tau[pp,1]*prod(lambda2[1:pp,1]))
        }
      }
      tau2.i[tau2.i<1e-7] <- 1e-7   # numerical floor
      if(sample_A){
        before <- a_tau[pp,1]
        # bugfix: use the lag-specific tau proposal scale (was scale_xi[pp],
        # which is scalar and NA for pp > 1); scale_tau was declared but unused
        a_tau[pp,1] <- MH_step(a_tau[pp,1], scale_tau[pp], M^2, lambda2[pp,1], as.vector(Am_lag.i), b_tau, nu_tau, e1, e2)
        if(before!=a_tau[pp,1]){
          acc_tau[pp] <- acc_tau[pp] + 1
        }
        # scale MH proposal during the first 50% of the burn-in stage
        if(irep<(0.5*nburn)){
          if((acc_tau[pp]/irep)>0.30){scale_tau[pp] <- 1.01*scale_tau[pp]}
          if((acc_tau[pp]/irep)<0.15){scale_tau[pp] <- 0.99*scale_tau[pp]}
        }
      }
      tau2.draw[slct.i,] <- tau2.i
    }
    # Step 4c: Shrinkage prior on covariances (free elements of L)
    lambda2_L <- rgamma(1, e1+a_L_tau*v, e2+0.5*v*a_L_tau*mean(L_prior[lower.tri(L_prior)]))
    for(ii in 2:M){
      for(jj in 1:(ii-1)){
        # bugfix: index with the loop variables (was L_draw[mm,ii], a stale index)
        res <- do_rgig1(lambda=a_L_tau-0.5, chi=(L_draw[ii,jj]-l_prior[ii,jj])^2, psi=a_L_tau*lambda2_L)
        L_prior[ii,jj] <- ifelse(res<1e-7,1e-7,res)
      }
    }
    if(sample_A){
      before <- a_L_tau
      a_L_tau <- MH_step(a_L_tau, scale_L_tau, v, lambda2_L, L_draw[lower.tri(L_draw)], b_tau, nu_tau, e1, e2)
      if(before!=a_L_tau){
        acc_L_tau <- acc_L_tau + 1
      }
      # scale MH proposal during the first 50% of the burn-in stage
      if(irep<(0.5*nburn)){
        if((acc_L_tau/irep)>0.30){scale_L_tau <- 1.01*scale_L_tau}
        if((acc_L_tau/irep)<0.15){scale_L_tau <- 0.99*scale_L_tau}
      }
    }
    #----------------------------------------------------------------------------
    # Step 5: Sample variances (stochastic volatility or homoskedastic)
    if (sv){
      for (jj in 1:M){
        para <- as.list(pars_var[,jj])
        para$nu = Inf; para$rho=0; para$beta<-0
        svdraw <- svsample_fast_cpp(y=Em_str[,jj], draws=1, burnin=0, designmatrix=matrix(NA_real_),
                                    priorspec=Sv_priors, thinpara=1, thinlatent=1, keeptime="all",
                                    startpara=para, startlatent=Sv_draw[,jj],
                                    keeptau=FALSE, print_settings=list(quiet=TRUE, n_chains=1, chain=1),
                                    correct_model_misspecification=FALSE, interweave=TRUE, myoffset=0,
                                    fast_sv=default_fast_sv)
        svl[[jj]] <- svdraw
        h_ <- exp(svdraw$latent[1,])
        para$mu <- svdraw$para[1,"mu"]
        para$phi <- svdraw$para[1,"phi"]
        para$sigma <- svdraw$para[1,"sigma"]
        para$latent0 <- svdraw$latent0[1,"h_0"]
        pars_var[,jj] <- unlist(para[c("mu","phi","sigma","latent0")])
        Sv_draw[,jj] <- log(h_)
      }
    }else{
      # homoskedastic case: conjugate inverse-Gamma draw per equation
      for (jj in 1:M){
        S_1 <- a_1+bigT/2
        S_2 <- b_1+crossprod(Em_str[,jj])/2
        sig_eta <- 1/rgamma(1,S_1,S_2)
        Sv_draw[,jj] <- log(sig_eta)
      }
    }
    #-------------------------------------------------------------------------#
    # STEP 6: RANDOM SIGN SWITCH (theta_sqrt is identified only up to sign)
    for(mm in 1:M){
      for(kk in 1:k){
        if(runif(1,0,1)>0.5){
          theta_sqrt[kk,mm] <- -theta_sqrt[kk,mm]
        }
      }
    }
    #----------------------------------------------------------------------------
    # Step 7: store draws (post burn-in, thinned)
    if(irep %in% thin.draws){
      count <- count+1
      A_store[count,,,] <- A_draw
      L_store[count,,] <- L_draw
      res_store[count,,] <- Em_draw
      # SV
      Sv_store[count,,] <- Sv_draw
      pars_store[count,,] <- pars_var
      # NG
      tau2_store[count,,] <- tau2.draw
      xi2_store[count,,] <- xi2.draw
      lambda2_store[count,,] <- lambda2
      kappa2_store[count,,] <- kappa2
      a_xi_store[count,,] <- a_xi
      a_tau_store[count,,] <- a_tau
    }
  }
  #---------------------------------------------------------------------------------------------------------
  # END ESTIMATION
  #---------------------------------------------------------------------------------------------------------
  dimnames(A_store)=list(NULL,paste("t",seq(0,bigT),sep="."),colnames(X),colnames(A_OLS))
  ret <- list(Y=Y,X=X,A_store=A_store,L_store=L_store,Sv_store=Sv_store,pars_store=pars_store,res_store=res_store,
              tau2_store=tau2_store,xi2_store=xi2_store,lambda2_store=lambda2_store,kappa2_store=kappa2_store,a_xi_store=a_xi_store,a_tau_store=a_tau_store)
  return(ret)
}
|
/R/utils_tvpbvar.R
|
no_license
|
MacroFinanceHub/BTSM
|
R
| false
| false
| 62,780
|
r
|
#' @name .TVPBVAR_linear_wrapper
#' @noRd
#' @importFrom abind adrop
#' @importFrom utils capture.output
.TVPBVAR_linear_wrapper <- function(Yraw, prior, plag, draws, burnin, cons, trend, SV, thin, default_hyperpara, Ex, applyfun, cores, eigen, trim){
class(Yraw) <- "numeric"
prior_in <- prior
if(default_hyperpara[["a_log"]]) default_hyperpara["a_start"] <- 1/log(ncol(Yraw))
if(prior=="TVP" || prior=="TVP-NG"){
prior_in <- ifelse(prior=="TVP",1,2)
post_draws <- applyfun(1:ncol(Yraw), function(nr){
.TVPBVAR_noncentered_R(nr=nr,Y_in=Yraw,p_in=plag,draws_in=draws,burnin_in=burnin,cons_in=cons,trend_in=trend,sv_in=SV,thin_in=thin,prior_in=prior_in,hyperparam_in=default_hyperpara,Ex_in=Ex)
})
tvpbvar <- .var_posterior(post_draws, prior, draws/thin, applyfun, cores)
}else if(prior=="TTVP"){
prior_in <- 3
post_draws <- applyfun(1:ncol(Yraw), function(nr){
.TVPBVAR_centered_R(nr=nr,Y_in=Yraw,p_in=plag,draws_in=draws,burnin_in=burnin,cons_in=cons,trend_in=trend,sv_in=SV,thin_in=thin,prior_in=prior_in,hyperparam_in=default_hyperpara,Ex_in=Ex)
})
tvpbvar <- .var_posterior(post_draws, prior, draws/thin, applyfun, cores)
}
#------------------------------------------------ get data ----------------------------------------#
Y <- tvpbvar$Y; colnames(Y) <- colnames(Yraw); X <- tvpbvar$X
M <- ncol(Y); bigT <- nrow(Y); K <- ncol(X)
if(!is.null(Ex)) Mex <- ncol(Ex)
names <- colnames(Yraw)
if(is.null(names)) names <- rep("Y",M)
xnames <-NULL
for(ii in 1:plag) xnames <- c(xnames,paste0(names,".lag",ii))
if(!is.null(Ex)) enames <- c(enames,paste(rep("Tex",Mex))) else enames <- NULL
if(cons) cnames <- "cons" else cnames <- NULL
if(trend) tnames <- "trend" else tnames <- NULL
colnames(X) <- c(xnames,enames,cnames,tnames)
#-----------------------------------------get containers ------------------------------------------#
A_store <- tvpbvar$A_store; dimnames(A_store)[[2]] <- paste("t",seq(1,bigT),sep="."); dimnames(A_store)[[3]] <- colnames(X); dimnames(A_store)[[4]] <- colnames(Y)
# splitting up stores
dims <- dimnames(A_store)[[3]]
a0_store <- a1_store <- Ex_store <- Phi_store <- NULL
if(cons) a0_store <- A_store[,,which(dims%in%cnames),]
if(trend) a1_store <- A_store[,,which(dims%in%tnames),]
if(!is.null(Ex)) Ex_store <- A_store[,which(dims%in%enames),,drop=FALSE]
for(jj in 1:plag) {
xnames.jj <- xnames[grepl(paste0("lag",jj),xnames)]
Phi_store[[jj]] <- A_store[,,which(dims%in%xnames.jj),]
dimnames(Phi_store[[jj]]) <- list(NULL,paste("t",seq(1,bigT),sep="."),xnames.jj,names)
}
L_store <- tvpbvar$L_store
S_store <- tvpbvar$S_store
Smed_store <- tvpbvar$Smed_store
vola_store <- tvpbvar$Sv_store; dimnames(vola_store) <- list(NULL,NULL,colnames(Y))
if(SV){
pars_store <- tvpbvar$pars_store; dimnames(pars_store) <- list(NULL,c("mu","phi","sigma","latent0"),colnames(Y))
}else pars_store <- NULL
res_store <- tvpbvar$res_store; dimnames(res_store) <- list(NULL,NULL,colnames(Y))
# NG
if(prior=="TVP"){
thetasqrt_store<- tvpbvar$thetasqrt_store
Lthetasqrt_store<-tvpbvar$Lthetasqrt_store
tau2_store<-xi2_store<-lambda2_store<-kappa2_store<-a_tau_store<-a_xi_store<-Ltau2_store<-Lxi2_store <- NULL
D_store<-Omega_store<-thrsh_store<-kappa_store<-V0_store<-LD_store<-LOmega_store<-Lthrsh_store<-LV0_store <- NULL
}else if(prior=="TVP-NG"){
D_store<-Omega_store<-thrsh_store<-kappa_store<-V0_store<-LD_store<-LOmega_store<-Lthrsh_store<-LV0_store<-NULL
thetasqrt_store <- tvpbvar$thetasqrt_store
tau2_store <- tvpbvar$tau2_store
xi2_store <- tvpbvar$xi2_store
lambda2_store <- tvpbvar$lambda2_store
kappa2_store <- tvpbvar$kappa2_store
a_tau_store <- tvpbvar$a_tau_store
a_xi_store <- tvpbvar$a_xi_store
Lthetasqrt_store<-tvpbvar$Lthetasqrt_store
Ltau2_store <- tvpbvar$Ltau2_store
Lxi2_store <- tvpbvar$Lxi2_store
}else if(prior=="TTVP"){
thetasqrt_store<-Lthetasqrt_store<-tau2_store<-xi2_store<-lambda2_store<-kappa2_store<-a_tau_store<-a_xi_store<-Ltau2_store<-Lxi2_store<-NULL
D_store <- tvpbvar$D_store
Omega_store <- tvpbvar$Omega_store
thrsh_store <- tvpbvar$thrsh_store
kappa_store <- tvpbvar$kappa_store
V0_store <- tvpbvar$V0_store
LD_store <- tvpbvar$LD_store
LOmega_store <- tvpbvar$LOmega_store
Lthrsh_store <- tvpbvar$Lthrsh_store
LV0_store <- tvpbvar$LV0_store
}
if(eigen){
# check medians: could be done more carefully
A.eigen <- unlist(applyfun(1:(draws/thin),function(irep){
Cm <- .gen_compMat(apply(A_store[irep,,,],c(2,3),median),ncol(Yraw),plag)$Cm
return(max(abs(Re(eigen(Cm)$values))))
}))
trim_eigen <- which(A.eigen<trim)
if(length(trim_eigen)==0) stop("No stable draws found. Either increase number of draws or trimming factor.")
A_store<-A_store[trim_eigen,,,,drop=FALSE]
if(cons) a0_store <- a0_store[trim_eigen,,,drop=FALSE]
if(trend) a1_store <- a1_store[trim_eigen,,,drop=FALSE]
if(!is.null(Ex)) Ex_store <- Ex_store[trim_eigen,,,,drop=FALSE]
Phi_store<-lapply(Phi_store,function(l)l[trim_eigen,,,,drop=FALSE])
L_store<-L_store[trim_eigen,,,,drop=FALSE]
S_store<-S_store[trim_eigen,,,,drop=FALSE]
Smed_store<-Smed_store[trim_eigen,,,drop=FALSE]
vola_store<-vola_store[trim_eigen,,,drop=FALSE]
if(SV) pars_store<-pars_store[trim_eigen,,,drop=FALSE]
res_store<-res_store[trim_eigen,,,drop=FALSE]
if(prior=="TVP"){
thetasqrt_store<-thetasqrt_store[trim_eigen,,,drop=FALSE]
Lthetasqrt_store<-lapply(Lthetasqrt_store,function(l)l[trim_eigen,,drop=FALSE])
}
if(prior=="TVP-NG"){
thetasqrt_store<-thetasqrt_store[trim_eigen,,,drop=FALSE]
tau2_store<-tau2_store[trim_eigen,,,drop=FALSE]
xi2_store<-xi2_store[trim_eigen,,,drop=FALSE]
lambda2_store<-lambda2_store[trim_eigen,,,drop=FALSE]
kappa2_store<-kappa2_store[trim_eigen,,,drop=FALSE]
a_tau_store<-a_tau_store[trim_eigen,,,drop=FALSE]
a_xi_store<-a_xi_store[trim_eigen,,,drop=FALSE]
Lthetasqrt_store<-lapply(Lthetasqrt_store,function(l)l[trim_eigen,,drop=FALSE])
Ltau2_store<-lapply(Ltau2_store,function(l)l[trim_eigen,,drop=FALSE])
Lxi2_store<-lapply(Lxi2_store,function(l)l[trim_eigen,,drop=FALSE])
}else if(prior=="TTVP"){
D_store<-D_store[trim_eigen,,,,drop=FALSE]
Omega_store<-Omega_store[trim_eigen,,,,drop=FALSE]
thrsh_store<-thrsh_store[trim_eigen,,,drop=FALSE]
kappa_store<-kappa_store[trim_eigen,,,drop=FALSE]
V0_store<-V0_store[trim_eigen,,,drop=FALSE]
LD_store<-lapply(LD_store,function(l)l[trim_eigen,,,drop=FALSE])
LOmega_store<-lapply(LOmega_store,function(l)l[trim_eigen,,,drop=FALSE])
Lthrsh_store<-lapply(Lthrsh_store,function(l)l[trim_eigen,,drop=FALSE])
LV0_store<-lapply(LV0_store,function(l)l[trim_eigen,,drop=FALSE])
}
}else{A.eigen<-NULL}
store <- list(A_store=A_store,a0_store=a0_store,a1_store=a1_store,Phi_store=Phi_store,Ex_store=Ex_store,S_store=S_store,Smed_store=Smed_store,L_store=L_store,Lthetasqrt_store=Lthetasqrt_store,
vola_store=vola_store,pars_store=pars_store,res_store=res_store,thetasqrt_store=thetasqrt_store,tau2_store=tau2_store,xi2_store=xi2_store,lambda2_store=lambda2_store,
kappa2_store=kappa2_store,a_tau_store=a_tau_store,a_xi_store=a_xi_store,Ltau2_store=Ltau2_store,Lxi2_store=Lxi2_store,D_store=D_store,
Omega_store=Omega_store,thrsh_store=thrsh_store,kappa_store=kappa_store,V0_store=V0_store,LD_store=LD_store,LOmega_store=LOmega_store,
Lthrsh_store=Lthrsh_store,LV0_store=LV0_store,A.eigen=A.eigen)
#------------------------------------ compute posteriors -------------------------------------------#
A_post <- apply(A_store,c(2,3,4),median)
L_post <- apply(L_store,c(2,3,4),median)
S_post <- apply(S_store,c(2,3,4),median)
Smed_post <- apply(Smed_store,c(2,3),median)
Sig <- apply(S_post,c(2,3),mean)/(bigT-K)
res_post <- apply(res_store,c(2,3),median)
# splitting up posteriors
a0_post <- a1_post <- Ex_post <- NULL
if(cons) a0_post <- A_post[,which(dims=="cons"),,drop=FALSE]
if(trend) a1_post <- A_post[,which(dims=="trend"),,drop=FALSE]
if(!is.null(Ex)) Ex_post <- A_post[,which(dims=="Tex"),,drop=FALSE]
Phi_post<- NULL
for(jj in 1:plag){
Phi_post[[jj]] <- A_post[,which(dims==paste("Ylag",jj,sep="")),,drop=FALSE]
}
vola_post <- apply(vola_store,c(2,3),median); dimnames(vola_post) <- list(NULL,colnames(Y))
if(SV){
pars_post <- apply(pars_store,c(2,3),median); dimnames(pars_post) <- list(c("mu","phi","sigma","latent0"),colnames(Y))
}else pars_post <- NULL
if(prior=="TVP"){
thetasqrt_post<-apply(thetasqrt_store,c(2,3),median)
Lthetasqrt_post<-lapply(Lthetasqrt_store,function(l)apply(l,2,median))
tau2_post<-xi2_post<-lambda2_post<-kappa2_post<-a_tau_post<-a_xi_post<-Ltau2_post<-Lxi2_post<-NULL
D_post<-Omega_post<-thrsh_post<-kappa_post<-V0_post<-LD_post<-LOmega_post<-Lthrsh_post<-LV0_post<-NULL
}else if(prior=="TVP-NG"){
D_post<-Omega_post<-thrsh_post<-kappa_post<-V0_post<-LD_post<-LOmega_post<-Lthrsh_post<-LV0_post<-NULL
thetasqrt_post<-apply(thetasqrt_store,c(2,3),median)
tau2_post <- apply(tau2_store,c(2,3),median)
xi2_post <- apply(xi2_store,c(2,3),median)
lambda2_post <- apply(lambda2_store,c(2,3),median)
kappa2_post <- apply(kappa2_store,c(2,3),median)
a_tau_post <- apply(a_tau_store,c(2,3),median)
a_xi_post <- apply(a_xi_store,c(2,3),median)
Lthetasqrt_post<-lapply(Lthetasqrt_store,function(l)apply(l,2,median))
Ltau2_post <- lapply(Ltau2_store,function(l)apply(l,c(2),median))
Lxi2_post <- lapply(Lxi2_store,function(l)apply(l,c(2),median))
}else if(prior=="TTVP"){
thetasqrt_post<-Lthetasqrt_post<-tau2_post<-xi2_post<-lambda2_post<-kappa2_post<-a_tau_post<-a_xi_post<-Ltau2_post<-Lxi2_post<-NULL
D_post <- apply(D_store,c(2,3.4),median)
Omega_post <- apply(Omega_store,c(2,3,4),median)
thrsh_post <- apply(thrsh_store,c(2,3),median)
kappa_post <- apply(kappa_store,c(2,3),median)
V0_post <- apply(V0_store,c(2,3),median)
LD_post <- lapply(LD_store,function(l)apply(l,c(2,3),median))
LOmega_post <- lapply(LOmega_store,function(l)apply(l,c(2,3),median))
Lthrsh_post <- lapply(Lthrsh_store,function(l)apply(l,2,median))
LV0_post <- lapply(LV0_store,function(l)apply(l,2,median))
}
post <- list(A_post=A_post,a0_post=a0_post,a1_post=a1_post,Phi_post=Phi_post,Ex_post=Ex_post,S_post=S_post,Smed_post=Smed_post,L_post=L_post,Lthetasqrt_post=Lthetasqrt_post,
vola_post=vola_post,pars_post=pars_post,res_post=res_post,tau2_post=tau2_post,thetasqrt_post=thetasqrt_post,xi2_post=xi2_post,lambda2_post=lambda2_post,
kappa2_post=kappa2_post,a_tau_post=a_tau_post,a_xi_post=a_xi_post,Ltau2_post=Ltau2_post,Lxi2_post=Lxi2_post,D_post=D_post,
Omega_post=Omega_post,thrsh_post=thrsh_post,kappa_post=kappa_post,V0_post=V0_post,LD_post=LD_post,LOmega_post=LOmega_post,
Lthrsh_post=Lthrsh_post,LV0_post=LV0_post)
return(list(Y=Y,X=X,store=store,post=post))
}
#' @name .TVPBVAR_noncentered_R.m
#' @importFrom stochvol svsample_fast_cpp specify_priors default_fast_sv sv_normal sv_beta sv_gamma
#' @importFrom MASS ginv mvrnorm
#' @importFrom matrixcalc hadamard.prod
#' @importFrom methods is
#' @importFrom stats rnorm rgamma runif dnorm
#' @noRd
.TVPBVAR_noncentered_R <- function(nr,Y_in,p_in,draws_in,burnin_in,cons_in,trend_in,sv_in,thin_in,quiet_in,prior_in,hyperparam_in,Ex_in){
  # Gibbs sampler for one equation of a TVP-BVAR in the non-centered
  # parameterization (Fruehwirth-Schnatter/Wagner):
  #   y_t = X_t %*% (Am + diag(theta_sqrt) %*% At_t) + eps_t,
  # where the standardized states At follow random walks with unit innovation
  # variance and Am is the constant part. Optionally uses stochastic
  # volatility (stochvol) and, for prior_in == 2 (TVP-NG), a hierarchical
  # normal-gamma shrinkage prior on Am (via tau2) and on the state scalings
  # theta_sqrt (via xi2).
  #
  # Arguments:
  #   nr            equation index; contemporaneous values of equations
  #                 1:(nr-1) enter the regressor set (triangular system)
  #   Y_in          (Traw x M) endogenous data matrix
  #   p_in          lag length
  #   draws_in      number of retained draws
  #   burnin_in     number of burn-in draws
  #   cons_in       include an intercept?
  #   trend_in      include a linear trend?
  #   sv_in         TRUE: stochastic volatility; FALSE: inverse-gamma variance
  #   thin_in       thinning factor
  #   quiet_in      unused here; kept for a common sampler interface
  #   prior_in      1 = plain TVP, 2 = TVP-NG hierarchical shrinkage
  #   hyperparam_in list of hyperparameters (unpacked below)
  #   Ex_in         optional exogenous regressors or NULL
  #
  # Returns: list(Y, X, A_store, Sv_store, pars_store, res_store,
  #   thetasqrt_store, tau2_store, xi2_store, lambda2_store, kappa2_store,
  #   a_xi_store, a_tau_store) holding the thinned posterior draws.
  #----------------------------------------INPUTS----------------------------------------------------#
  Yraw <- Y_in
  p <- p_in
  Traw <- nrow(Yraw)
  M <- ncol(Yraw)
  K <- M*p
  Ylag <- .mlag(Yraw,p)
  names <- colnames(Yraw)
  if(is.null(names)) names <- rep("Y",M)
  colnames(Yraw) <- names
  nameslags <- NULL
  for(ii in 1:p) nameslags <- c(nameslags,paste0(names,".lag",ii))
  colnames(Ylag) <- nameslags
  texo <- FALSE; Mex <- 0; Exraw <- NULL; enames <- NULL
  if(!is.null(Ex_in)){
    Exraw <- Ex_in; Mex <- ncol(Exraw); texo <- TRUE
    enames <- colnames(Exraw)
    if(is.null(enames)) enames <- rep("Tex",Mex)
    colnames(Exraw) <- enames
  }
  # Triangularization: equation nr conditions on contemporaneous values of
  # the preceding equations 1:(nr-1).
  if(nr==1) slct <- NULL else slct <- 1:(nr-1)
  Xraw <- cbind(Yraw[,slct],Ylag,Exraw)
  colnames(Xraw) <- c(colnames(Yraw)[slct],nameslags,enames)
  X <- Xraw[(p+1):nrow(Xraw),,drop=FALSE]
  y <- Yraw[(p+1):Traw,nr,drop=FALSE]
  bigT <- nrow(X)
  M_ <- M-length(slct)
  cons <- cons_in
  if(cons){
    X <- cbind(X,1)
    colnames(X)[ncol(X)] <- "cons"
  }
  trend <- trend_in
  if(trend){
    X <- cbind(X,seq(1,bigT))
    colnames(X)[ncol(X)] <- "trend"
  }
  d <- ncol(X)   # number of regressors in this equation
  n <- d*M
  v <- (M*(M-1))/2
  #---------------------------------------------------------------------------------------------------------
  # HYPERPARAMETERS
  #---------------------------------------------------------------------------------------------------------
  hyperpara <- hyperparam_in
  prior <- prior_in
  sv <- sv_in
  prmean <- hyperpara$prmean
  # non-SV
  c0 <- hyperpara$c0
  g0 <- hyperpara$g0
  # SV
  bmu <- hyperpara$bmu
  Bmu <- hyperpara$Bmu
  a0 <- hyperpara$a0
  b0 <- hyperpara$b0
  Bsigma <- hyperpara$Bsigma
  # TVP-NG
  d1 <- hyperpara$d1
  d2 <- hyperpara$d2
  e1 <- hyperpara$e1
  e2 <- hyperpara$e2
  b_xi <- hyperpara$b_xi
  b_tau <- hyperpara$b_tau
  nu_xi <- hyperpara$nu_xi
  nu_tau <- hyperpara$nu_tau
  a_start <- hyperpara$a_start
  sample_A <- hyperpara$sample_A
  #---------------------------------------------------------------------------------------------------------
  # OLS Quantitites
  #---------------------------------------------------------------------------------------------------------
  XtXinv <- try(solve(crossprod(X)),silent=TRUE)
  if(is(XtXinv,"try-error")) XtXinv <- ginv(crossprod(X))
  A_OLS <- XtXinv%*%(t(X)%*%y)
  E_OLS <- y - X%*%A_OLS
  S_OLS <- crossprod(E_OLS)/(bigT-d)
  #---------------------------------------------------------------------------------------------------------
  # Initial Values
  #---------------------------------------------------------------------------------------------------------
  A_draw <- matrix(A_OLS, bigT+1, d, byrow=TRUE, dimnames=list(NULL,colnames(X)))
  S_draw <- matrix(S_OLS, bigT, 1)
  # time-varying stuff
  Am_draw <- A_OLS
  At_draw <- matrix(0, bigT+1, d)
  theta_draw <- rep(1,d)
  theta_sqrt <- sqrt(theta_draw)
  #---------------------------------------------------------------------------------------------------------
  # PRIORS
  #---------------------------------------------------------------------------------------------------------
  # Priors on VAR coefs
  #-----------------------------
  # prior mean: entry 2*nr-1 = (nr-1) contemporaneous terms + own first lag,
  # i.e. prmean sits on the coefficient of the equation's own first lag
  A_prior <- matrix(0,2*d, 1)
  A_prior[2*nr-1,1] <- prmean
  # prior variance
  tau2_draw <- rep(10,d)
  xi2_draw <- rep(10,d)
  # NG stuff: separate MH proposal scales / acceptance counters for a_xi and a_tau
  lambda2 <- 10
  a_tau <- a_start
  scale_tau <- .43
  acc_tau <- 0
  kappa2 <- 10
  a_xi <- a_start
  scale_xi <- .43
  acc_xi <- 0
  #------------------------------------
  # SV quantities
  #------------------------------------
  svdraw <- list(para=c(mu=-10,phi=.9,sigma=.2,latent0=-3),latent=rep(-3,bigT))
  Sv_draw <- svdraw$latent
  pars_var <- matrix(c(-3,.9,.2,-3),4,1,dimnames=list(c("mu","phi","sigma","latent0"),NULL))
  Sv_priors <- specify_priors(mu=sv_normal(mean=bmu, sd=Bmu), phi=sv_beta(a0,b0), sigma2=sv_gamma(shape=0.5,rate=1/(2*Bsigma)))
  #-----------------------------------
  # non-SV quantities
  #-----------------------------------
  sig_eta <- exp(-3)
  G0 <- g0/S_OLS*(c0-1)
  #---------------------------------------------------------------------------------------------------------
  # SAMPLER MISCELLANEOUS
  #---------------------------------------------------------------------------------------------------------
  nsave <- draws_in
  nburn <- burnin_in
  ntot <- nsave+nburn
  # thinning
  thin <- thin_in
  count <- 0
  thindraws <- nsave/thin
  thin.draws <- seq(nburn+1,ntot,by=thin)
  #---------------------------------------------------------------------------------------------------------
  # STORAGES
  #---------------------------------------------------------------------------------------------------------
  A_store <- array(NA,c(thindraws,bigT+1,d))
  Am_store <- array(NA,c(thindraws,d,1))
  At_store <- array(NA,c(thindraws,bigT+1,d))
  res_store <- array(NA,c(thindraws,bigT,1))
  Sv_store <- array(NA,c(thindraws,bigT,1))
  pars_store <- array(NA,c(thindraws,4,1))
  # state variances
  thetasqrt_store <- array(NA,c(thindraws,d,1))
  # TVP-NG
  # NOTE(review): lambda2/a_tau are scalars per sweep but stored with a
  # p-sized middle dimension (the scalar is recycled on assignment); this
  # layout is what the downstream aggregation expects -- confirm intended.
  tau2_store <- array(NA,c(thindraws,d,1))
  xi2_store <- array(NA,c(thindraws,d,1))
  lambda2_store<- array(NA,c(thindraws,p,1))
  kappa2_store <- array(NA,c(thindraws,1,1))
  a_xi_store <- array(NA,c(thindraws,1,1))
  a_tau_store <- array(NA,c(thindraws,p,1))
  #---------------------------------------------------------------------------------------------------------
  # MCMC LOOP
  #---------------------------------------------------------------------------------------------------------
  for (irep in 1:ntot){
    #----------------------------------------------------------------------------
    # Step 0: Normalize data by the current volatility path
    Xt <- apply(X,2,function(x)x*exp(-0.5*Sv_draw))
    yt <- y*exp(-0.5*Sv_draw)
    #----------------------------------------------------------------------------
    # Step 1: Sample constant part Am and scalings theta_sqrt jointly
    Zt <- cbind(Xt,hadamard.prod(Xt,At_draw[2:(bigT+1),]))
    Vpriorinv <- diag(1/c(tau2_draw,xi2_draw))
    # V_post <- try(chol2inv(chol(crossprod(Zt)+Vpriorinv)),silent=TRUE)
    # if (is(V_post,"try-error")) V_post <- ginv(crossprod(Zt)+Vpriorinv)
    # alternative a la supplementary bitto/sfs s.3 (numerically more stable)
    Vpriorsqrt <- diag(c(sqrt(tau2_draw),sqrt(xi2_draw)))
    V_poststar <- solve(Vpriorsqrt%*%crossprod(Zt)%*%Vpriorsqrt + diag(2*d))
    V_post <- Vpriorsqrt%*%V_poststar%*%Vpriorsqrt
    A_post <- V_post%*%(crossprod(Zt,yt)+Vpriorinv%*%A_prior)
    alph_draw <- try(A_post+t(chol(V_post))%*%rnorm(ncol(Zt)),silent=TRUE)
    if (is(alph_draw,"try-error")) alph_draw <- matrix(mvrnorm(1,A_post,V_post),ncol(Zt),1)
    Am_draw <- alph_draw[1:d,,drop=FALSE]
    theta_sqrt <- alph_draw[(d+1):(2*d),,drop=TRUE]
    theta_draw <- theta_sqrt^2
    #----------------------------------------------------------------------------
    # Step 2: Sample standardized TVP states via FFBS (McCausland et al.)
    ystar <- yt - Xt%*%Am_draw
    Fstar <- Xt%*%diag(theta_sqrt)
    At_draw <- sample_McCausland(ystar, Fstar)
    #----------------------------------------------------------------------------
    # Step 3: Interweaving (ASIS): redraw theta and Am in the centered
    # parameterization to improve mixing, then map back
    theta_sign <- sign(theta_sqrt)
    A_draw <- matrix(Am_draw,bigT+1,d,byrow=TRUE) + At_draw%*%diag(theta_sqrt)
    A_diff <- diff(At_draw%*%diag(theta_sqrt))
    #A_diff <- diff(A_draw) # same as line above
    for(dd in 1:d){
      # theta.new
      res <- do_rgig1(lambda=-bigT/2,
                      chi=sum(A_diff[,dd]^2)+(A_draw[1,dd]-Am_draw[dd,1])^2,
                      psi=1/xi2_draw[dd])
      theta_draw[dd] <- res
      theta_sqrt[dd] <- sqrt(res)*theta_sign[dd]
      # betam.new
      sigma2_A_mean <- 1/((1/tau2_draw[dd]) + (1/theta_draw[dd]))
      mu_A_mean <- A_draw[1,dd]*tau2_draw[dd]/(tau2_draw[dd] + theta_draw[dd])
      Am_draw[dd,1] <- rnorm(1, mu_A_mean, sqrt(sigma2_A_mean))
    }
    At_draw <- sapply(1:d,function(dd)A_draw[,dd]-Am_draw[dd,1])%*%diag(1/theta_sqrt)
    #----------------------------------------------------------------------------
    # Step 4: Prior choice
    if(prior==1){ # TVP
      ### no hierarchical priors
    }else if(prior==2){ # TVP-NG
      # global shrinkage components
      kappa2 <- rgamma(1, d1+a_xi*d, d2+0.5*a_xi*mean(xi2_draw)*d)
      lambda2 <- rgamma(1, e1+a_tau*d, e2+0.5*a_tau*mean(tau2_draw)*d)
      # local shrinkage components (GIG draws)
      for(dd in 1:d){
        xi2_draw[dd] <- do_rgig1(lambda=a_xi-0.5, chi=theta_draw[dd], psi=a_xi*kappa2)
        tau2_draw[dd] <- do_rgig1(lambda=a_tau-0.5, chi=(Am_draw[dd,1]-A_prior[dd,1])^2, psi=a_tau*lambda2)
      }
      # numerical floor to avoid degenerate variances
      xi2_draw[xi2_draw<1e-7] <- 1e-7
      tau2_draw[tau2_draw<1e-7] <- 1e-7
      if(sample_A){
        before <- a_xi
        a_xi <- MH_step(a_xi, scale_xi, d, kappa2, theta_sqrt, b_xi, nu_xi, d1, d2)
        if(before!=a_xi){
          acc_xi <- acc_xi + 1
        }
        before <- a_tau
        # bugfix: use the tau-specific proposal scale here; the original code
        # re-used scale_xi (copy-paste), leaving scale_tau unused
        a_tau <- MH_step(a_tau, scale_tau, d, lambda2, Am_draw, b_tau, nu_tau, e1, e2)
        if(before!=a_tau){
          acc_tau <- acc_tau + 1
        }
        # scale MH proposal during the first 50% of the burn-in stage,
        # targeting an acceptance rate between 15% and 30%
        if(irep<(0.5*nburn)){
          if((acc_xi/irep)>0.30){scale_xi <- 1.01*scale_xi}
          if((acc_xi/irep)<0.15){scale_xi <- 0.99*scale_xi}
          # bugfix: tune scale_tau from acc_tau (originally adjusted scale_xi)
          if((acc_tau/irep)>0.30){scale_tau <- 1.01*scale_tau}
          if((acc_tau/irep)<0.15){scale_tau <- 0.99*scale_tau}
        }
      }
    } # END PRIOR QUERY
    #----------------------------------------------------------------------------
    # Step 5: Sample variances
    # NOTE(review): eps mixes the raw y with the volatility-normalized
    # regressors Xt -- confirm this is the intended residual for the SV step
    # (the centered sampler uses the raw regressors).
    eps <- y - cbind(Xt,hadamard.prod(Xt,At_draw[2:(bigT+1),]))%*%alph_draw
    if(sv){
      para <- as.list(pars_var); names(para) <- c("mu","phi","sigma","latent0")
      para$nu = Inf; para$rho=0; para$beta<-0
      svdraw <- svsample_fast_cpp(y=eps, draws=1, burnin=0, designmatrix=matrix(NA_real_),
                                  priorspec=Sv_priors, thinpara=1, thinlatent=1, keeptime="all",
                                  startpara=para, startlatent=Sv_draw,
                                  keeptau=FALSE, print_settings=list(quiet=TRUE, n_chains=1, chain=1),
                                  correct_model_misspecification=FALSE, interweave=TRUE, myoffset=0,
                                  fast_sv=default_fast_sv)
      h_ <- exp(svdraw$latent[1,])
      para$mu <- svdraw$para[1,"mu"]
      para$phi <- svdraw$para[1,"phi"]
      para$sigma <- svdraw$para[1,"sigma"]
      para$latent0 <- svdraw$latent0[1,"h_0"]
      pars_var <- unlist(para[c("mu","phi","sigma","latent0")])
      Sv_draw <- log(h_)
    }else{
      # homoskedastic case: hierarchical inverse-gamma for the error variance
      C0 <- rgamma(1, g0+c0, G0+sig_eta)
      S_1 <- c0+bigT/2
      S_2 <- C0+crossprod(eps)/2
      sig_eta <- 1/rgamma(1,S_1,S_2)
      Sv_draw <- matrix(log(sig_eta),bigT,1)
    }
    #-------------------------------------------------------------------------#
    # STEP 6: RANDOM SIGN SWITCH (theta_sqrt is only identified up to sign)
    for(dd in 1:d){
      if(runif(1,0,1)>0.5){
        theta_sqrt[dd] <- -theta_sqrt[dd]
      }
    }
    #----------------------------------------------------------------------------
    # Step 7: store draws (after burn-in, thinned)
    if(irep %in% thin.draws){
      count <- count+1
      A_store[count,,]<- A_draw
      res_store[count,,]<- eps
      # SV
      Sv_store[count,,] <- Sv_draw
      pars_store[count,,] <- pars_var
      # NG
      thetasqrt_store[count,,] <- theta_sqrt
      tau2_store[count,,]<- tau2_draw
      xi2_store[count,,] <- xi2_draw
      lambda2_store[count,,] <- lambda2
      kappa2_store[count,,] <- kappa2
      a_xi_store[count,,] <- a_xi
      a_tau_store[count,,]<- a_tau
    }
  }
  #---------------------------------------------------------------------------------------------------------
  # END ESTIMATION
  #---------------------------------------------------------------------------------------------------------
  dimnames(A_store)=list(NULL,paste("t",seq(0,bigT),sep="."),colnames(X))
  ret <- list(Y=y,X=X,A_store=A_store,Sv_store=Sv_store,pars_store=pars_store,res_store=res_store,
              thetasqrt_store=thetasqrt_store,tau2_store=tau2_store,xi2_store=xi2_store,lambda2_store=lambda2_store,kappa2_store=kappa2_store,a_xi_store=a_xi_store,a_tau_store=a_tau_store)
  return(ret)
}
#' @name .TVPBVAR_centered_R.m
#' @importFrom stochvol svsample_fast_cpp specify_priors default_fast_sv sv_normal sv_beta sv_gamma
#' @importFrom dlm dlmModReg dlmMLE dlmSmooth
#' @importFrom MASS ginv mvrnorm
#' @importFrom matrixcalc hadamard.prod
#' @importFrom methods is
#' @importFrom stats rnorm rgamma runif dnorm
#' @noRd
.TVPBVAR_centered_R <- function(nr,Y_in,p_in,draws_in,burnin_in,cons_in,trend_in,sv_in,thin_in,quiet_in,prior_in,hyperparam_in,Ex_in){
  # Gibbs sampler for one equation of a TVP-BVAR in the centered
  # parameterization with a threshold-TVP (TTVP) prior (prior_in == 3):
  # coefficients follow random walks whose innovation variance switches
  # between a "large" regime (sqrttheta1, indicator D_t = 1) and a "small"
  # regime (sqrttheta2, D_t = 0) depending on whether the period-to-period
  # coefficient change exceeds an estimated threshold. Optional stochastic
  # volatility via stochvol.
  #
  # Arguments: identical interface to .TVPBVAR_noncentered_R; hyperparam_in
  # carries the TTVP-specific settings (B_1, B_2, kappa0, a_tau, c_tau,
  # d_tau, grid.length, thrsh.pct, TVS, a.approx, sim.kappa, kappa.grid,
  # MaxTrys, ...).
  #
  # Returns: list(Y, X, A_store, Sv_store, pars_store, res_store, D_store,
  #   Omega_store, thrsh_store, kappa_store, V0_store) with thinned draws.
  #----------------------------------------INPUTS----------------------------------------------------#
  Yraw <- Y_in
  p <- p_in
  Traw <- nrow(Yraw)
  M <- ncol(Yraw)
  K <- M*p
  Ylag <- .mlag(Yraw,p)
  names <- colnames(Yraw)
  if(is.null(names)) names <- rep("Y",M)
  colnames(Yraw) <- names
  nameslags <- NULL
  for(ii in 1:p) nameslags <- c(nameslags,paste0(names,".lag",ii))
  colnames(Ylag) <- nameslags
  texo <- FALSE; Mex <- 0; Exraw <- NULL; enames <- NULL
  if(!is.null(Ex_in)){
    Exraw <- Ex_in; Mex <- ncol(Exraw); texo <- TRUE
    enames <- colnames(Exraw)
    if(is.null(enames)) enames <- rep("Tex",Mex)
    colnames(Exraw) <- enames
  }
  # Triangularization: equation nr conditions on contemporaneous values of
  # the preceding equations 1:(nr-1).
  if(nr==1) slct <- NULL else slct <- 1:(nr-1)
  Xraw <- cbind(Yraw[,slct],Ylag,Exraw)
  colnames(Xraw) <- c(colnames(Yraw)[slct],nameslags,enames)
  X <- Xraw[(p+1):nrow(Xraw),,drop=FALSE]
  y <- Yraw[(p+1):Traw,nr,drop=FALSE]
  bigT <- nrow(X)
  M_ <- M-length(slct)
  cons <- cons_in
  if(cons){
    X <- cbind(X,1)
    colnames(X)[ncol(X)] <- "cons"
  }
  trend <- trend_in
  if(trend){
    X <- cbind(X,seq(1,bigT))
    colnames(X)[ncol(X)] <- "trend"
  }
  d <- ncol(X)   # number of regressors in this equation
  n <- d*M
  v <- (M*(M-1))/2
  #---------------------------------------------------------------------------------------------------------
  # HYPERPARAMETERS
  #---------------------------------------------------------------------------------------------------------
  hyperpara <- hyperparam_in
  prior <- prior_in
  sv <- sv_in
  prmean <- hyperpara$prmean
  # non-SV
  c0 <- hyperpara$c0
  g0 <- hyperpara$g0
  # SV
  bmu <- hyperpara$bmu
  Bmu <- hyperpara$Bmu
  a0 <- hyperpara$a0
  b0 <- hyperpara$b0
  Bsigma <- hyperpara$Bsigma
  # TTVP
  B_1 <- hyperpara$B_1
  B_2 <- hyperpara$B_2
  kappa0 <- hyperpara$kappa0
  a_tau <- hyperpara$a_tau
  c_tau <- hyperpara$c_tau
  d_tau <- hyperpara$d_tau
  h0prior <- hyperpara$h0prior
  grid.length <- hyperpara$grid.length
  thrsh.pct <- hyperpara$thrsh.pct
  # NOTE(review): list key 'thres.pct.high' is spelled differently from
  # 'thrsh.pct' -- confirm the key name in the hyperparameter list, otherwise
  # this silently yields NULL
  thrsh.pct.high <- hyperpara$thres.pct.high
  TVS <- hyperpara$TVS
  a.approx <- hyperpara$a.approx
  sim.kappa <- hyperpara$sim.kappa
  kappa.grid <- hyperpara$kappa.grid
  MaxTrys <- hyperpara$MaxTrys
  #---------------------------------------------------------------------------------------------------------
  # OLS Quantitites
  #---------------------------------------------------------------------------------------------------------
  XtXinv <- try(solve(crossprod(X)),silent=TRUE)
  if(is(XtXinv,"try-error")) XtXinv <- ginv(crossprod(X))
  A_OLS <- XtXinv%*%(t(X)%*%y)
  E_OLS <- y - X%*%A_OLS
  S_OLS <- crossprod(E_OLS)/(bigT-d)
  V_OLS <- as.numeric(S_OLS)*XtXinv
  sd_OLS <- sqrt(diag(V_OLS))
  #---------------------------------------------------------------------------------------------------------
  # Initial Values
  #---------------------------------------------------------------------------------------------------------
  A_draw <- matrix(A_OLS, bigT+1, d, byrow=TRUE, dimnames=list(NULL,colnames(X)))
  S_draw <- matrix(S_OLS, bigT,1)
  # state variances
  Omega_t <- matrix(1,bigT,d)
  # state indicator (1 = time-varying regime, 0 = near-constant regime)
  D_t <- matrix(1,bigT,d)
  #---------------------------------------------------------------------------------------------------------
  # PRIORS
  #---------------------------------------------------------------------------------------------------------
  # Priors on VAR coefs
  #-----------------------------
  # prior mean
  A_prior <- matrix(0,2*d, 1)
  A_prior[2*nr-1,1] <- prmean
  # prior variance: regime-specific state innovation scales
  sqrttheta1 <- diag(d)*0.1
  sqrttheta2 <-diag(d)*0.01
  Omega_t <- D_t%*%sqrttheta1+(1-D_t)%*%sqrttheta2
  # small-regime scale kappa00 as a (d x 1) matrix (indexed kappa00[dd,1] below);
  # bugfix: original referenced nonexistent 'sd.OLS' and produced a bare vector
  if(kappa0<0) kappa00 <- matrix(-kappa0*sd_OLS,d,1) else kappa00 <- matrix(kappa0,d,1)
  if (a.approx){
    # approximate coefficient changes by MLE smoothing of a dlm regression
    buildCapm <- function(u){
      dlm::dlmModReg(X, dV = exp(u[1]), dW = exp(u[2:(d+1)]),addInt = FALSE)
    }
    outMLE <- dlm::dlmMLE(y, parm = rep(0,d+1), buildCapm)
    mod <- buildCapm(outMLE$par)
    outS <- dlm::dlmSmooth(y, mod)
    states.OLS <- t(matrix(outS$s,bigT+1,d))
    Achg.OLS <- t(diff(t(states.OLS)))#t(as.numeric(ALPHA0)+ALPHA2)
  }
  # threshold
  thrsh <- matrix(0,d,1)
  # priors on initial state
  B0prior <- matrix(0,d,1)
  V0prior <- rep(4,d)
  #------------------------------------
  # SV quantities
  #------------------------------------
  svdraw <- list(para=c(mu=-10,phi=.9,sigma=.2,latent0=-3),latent=rep(-3,bigT))
  Sv_draw <- svdraw$latent
  pars_var <- matrix(c(-3,.9,.2,-3),4,1,dimnames=list(c("mu","phi","sigma","latent0"),NULL))
  Sv_priors <- specify_priors(mu=sv_normal(mean=bmu, sd=Bmu), phi=sv_beta(a0,b0), sigma2=sv_gamma(shape=0.5,rate=1/(2*Bsigma)))
  #-----------------------------------
  # non-SV quantities
  #-----------------------------------
  sig_eta <- exp(-3)
  G0 <- g0/S_OLS*(c0-1)
  #---------------------------------------------------------------------------------------------------------
  # SAMPLER MISCELLANEOUS
  #---------------------------------------------------------------------------------------------------------
  nsave <- draws_in
  nburn <- burnin_in
  ntot <- nsave+nburn
  # thinning
  thin <- thin_in
  count <- 0
  thindraws <- nsave/thin
  thin.draws <- seq(nburn+1,ntot,by=thin)
  #---------------------------------------------------------------------------------------------------------
  # STORAGES
  #---------------------------------------------------------------------------------------------------------
  A_store <- array(NA,c(thindraws,bigT,d))
  res_store <- array(NA,c(thindraws,bigT,1))
  Sv_store <- array(NA,c(thindraws,bigT,1))
  pars_store <- array(NA,c(thindraws,4,1))
  # TTVP
  D_store <- array(NA,c(thindraws,bigT,d,1))
  Omega_store <- array(NA,c(thindraws,bigT,d,1))
  thrsh_store <- array(NA,c(thindraws,d,1))
  kappa_store <- array(NA,c(thindraws,1))
  V0_store <- array(NA,c(thindraws,d,1))
  #---------------------------------------------------------------------------------------------------------
  # MCMC LOOP
  #---------------------------------------------------------------------------------------------------------
  for (irep in 1:ntot){
    #----------------------------------------------------------------------------
    # Step 1: Draw the time-varying coefficient paths A via Kalman FFBS;
    # fall back to the slower KF (and resample on numerically exploded draws)
    invisible(capture.output(
      A_draw1 <- try(KF_fast(t(as.matrix(y)), X,as.matrix(exp(Sv_draw)),Omega_t,d, 1, bigT, B0prior, diag(V0prior)),silent=TRUE),
      type="message"))
    if (is(A_draw1,"try-error")){
      invisible(capture.output(
        A_draw1 <- KF(t(as.matrix(y)), X,as.matrix(exp(Sv_draw)),Omega_t,d, 1, bigT, B0prior, diag(V0prior)),
        type="message"))
      try0 <- 0
      while (any(abs(A_draw1$bdraw)>1e+10) && try0<MaxTrys){ #This block resamples if the draw from the state vector is not well behaved
        invisible(capture.output(
          A_draw1 <- try(KF(t(as.matrix(y)), X,as.matrix(exp(Sv_draw)),Omega_t,d, 1, bigT, B0prior, diag(V0prior)),silent=TRUE),
          type="message"))
        try0 <- try0+1
      }
    }
    A_draw <- t(A_draw1$bdraw)
    VCOV <- A_draw1$Vcov
    #----------------------------------------------------------------------------
    # Step 2: Prior choice
    if(prior==3){
      #------------------------------------------
      # Step 2a: Sample regime-specific state innovation variances
      A_diff <- diff(A_draw)
      for(dd in 1:d){
        sig_q <- sqrttheta1[dd,dd]
        # indicator of the "large" regime for periods 2..bigT
        # (the original also recomputed si from thrsh here, but immediately
        # overwrote it with D_t -- the dead branch has been removed)
        si <- D_t[2:bigT,dd]
        s_1 <- B_1 + sum(si)/2 + 0.5
        s_2 <- B_2 + 0.5*crossprod(A_diff[si==1,dd,drop=FALSE])
        sig_q <- 1/rgamma(1,s_1,s_2)
        sqrttheta1[dd,dd] <- sig_q
        sqrttheta2[dd,dd] <- kappa00[dd,1]^2
      }
      #------------------------------------------
      # sample indicator
      if(TVS){
        #Check whether coefficient is time-varying or constant at each point in time
        Achg <- t(A_diff)
        Achg <- cbind(matrix(0,d,1),Achg) #we simply assume that the parameters stayed constant between t=0 and t=1
        if(a.approx) Achg.approx <- Achg.OLS else Achg.approx <- Achg
        grid.mat <- matrix(unlist(lapply(1:d,function(x) .get_grid(Achg[x,],sqrt(sqrttheta1[x,x]),grid.length=grid.length,thrsh.pct=thrsh.pct,thrsh.pct.high=thrsh.pct.high))),ncol = d)
        probs <- get_threshold(Achg, sqrttheta1, sqrttheta2, grid.mat, Achg.approx)
        for(dd in 1:d){
          post1 <- probs[,dd]
          probs1 <- exp(post1-max(post1))/sum(exp(post1-max(post1)))
          thrsh[dd,] <- sample(grid.mat[,dd],1,prob=probs1)
          if (!a.approx){
            D_t[,dd] <- (abs(Achg[dd,])>thrsh[dd,])*1 #change 2:T usw. here
          }else{
            D_t[,dd] <- (abs(Achg.OLS[dd,])>thrsh[dd,])*1 #change 2:T usw. here
          }
        }
        if (sim.kappa){
          # grid search over the small-regime scale kappa0;
          # bugfix: use a dedicated counter (kcount) -- the original reset
          # 'count' here, clobbering the draw-storage index used in Step 4
          grid.kappa <- kappa.grid
          Lik.kappa <- matrix(0,length(grid.kappa),1)
          kcount <- 0
          for (grid.i in grid.kappa){
            kcount <- kcount+1
            sqrttheta.prop <- (grid.i*sd_OLS)^2
            # NOTE(review): D_t*diag(sqrttheta1) relies on column-major
            # recycling of a length-d vector against a (bigT x d) matrix --
            # verify the variance-to-column mapping is as intended
            cov.prop <- sqrt(D_t*diag(sqrttheta1)+(1-D_t)*sqrttheta.prop)
            # bugfix: mean matrix dimension was hard-coded to 2 columns
            Lik.kappa[kcount,1] <-sum(dnorm(t(Achg),matrix(0,bigT,d),cov.prop,log=TRUE))
          }
          Lik.kappa.norm <- exp(Lik.kappa-max(Lik.kappa))
          probs.kappa <- Lik.kappa.norm/sum(Lik.kappa.norm)
          kappa0 <- sample(grid.kappa,size=1, prob=probs.kappa)
          # keep kappa00 a (d x 1) matrix for the kappa00[dd,1] access above
          kappa00 <- matrix(kappa0*sd_OLS,d,1)
        }
      }
      Omega_t <- D_t%*%sqrttheta1+(1-D_t)%*%sqrttheta2
      #------------------------------------------
      # Step 2b: Draw variance of initial state (normal-gamma hierarchy)
      lambda2_tau <- rgamma(1,c_tau+a_tau*d,d_tau+a_tau/2*sum(V0prior)) # global component
      # local component; keep the previous value if the GIG draw fails
      for(dd in 1:d){
        res <- try(do_rgig1(lambda=a_tau-0.5,
                            chi=A_draw[1,dd]^2,
                            psi=a_tau*lambda2_tau), silent=TRUE)
        if(!is(res,"try-error")) V0prior[dd] <- res
      }
    } # END PRIOR QUERY
    #----------------------------------------------------------------------------
    # Step 3: Sample variances
    eps <- y - rowSums(hadamard.prod(X,A_draw))
    if(sv){
      para <- as.list(pars_var); names(para) <- c("mu","phi","sigma","latent0")
      para$nu = Inf; para$rho=0; para$beta<-0
      svdraw <- svsample_fast_cpp(y=eps, draws=1, burnin=0, designmatrix=matrix(NA_real_),
                                  priorspec=Sv_priors, thinpara=1, thinlatent=1, keeptime="all",
                                  startpara=para, startlatent=Sv_draw,
                                  keeptau=FALSE, print_settings=list(quiet=TRUE, n_chains=1, chain=1),
                                  correct_model_misspecification=FALSE, interweave=TRUE, myoffset=0,
                                  fast_sv=default_fast_sv)
      h_ <- exp(svdraw$latent[1,])
      para$mu <- svdraw$para[1,"mu"]
      para$phi <- svdraw$para[1,"phi"]
      para$sigma <- svdraw$para[1,"sigma"]
      para$latent0 <- svdraw$latent0[1,"h_0"]
      pars_var <- unlist(para[c("mu","phi","sigma","latent0")])
      Sv_draw <- log(h_)
    }else{
      # homoskedastic case: hierarchical inverse-gamma for the error variance
      C0 <- rgamma(1, g0+c0, G0+sig_eta)
      S_1 <- c0+bigT/2
      S_2 <- C0+crossprod(eps)/2
      sig_eta <- 1/rgamma(1,S_1,S_2)
      Sv_draw <- matrix(log(sig_eta),bigT,1)
    }
    #----------------------------------------------------------------------------
    # Step 4: store draws (after burn-in, thinned)
    if(irep %in% thin.draws){
      count <- count+1
      A_store[count,,]<- A_draw
      res_store[count,,]<- eps
      # SV
      Sv_store[count,,] <- Sv_draw
      pars_store[count,,] <- pars_var
      # TTVP
      D_store[count,,,] <- D_t
      Omega_store[count,,,] <- Omega_t
      thrsh_store[count,,] <- thrsh
      kappa_store[count,] <- kappa0
      V0_store[count,,] <- V0prior
    }
  }
  #---------------------------------------------------------------------------------------------------------
  # END ESTIMATION
  #---------------------------------------------------------------------------------------------------------
  dimnames(A_store)=list(NULL,paste("t",seq(1,bigT),sep="."),colnames(X))
  ret <- list(Y=y,X=X,A_store=A_store,Sv_store=Sv_store,pars_store=pars_store,res_store=res_store,
              D_store=D_store,Omega_store=Omega_store,thrsh_store=thrsh_store,kappa_store=kappa_store,V0_store=V0_store)
  return(ret)
}
#' @name .get_grid
#' @noRd
.get_grid <- function(Achg,sd.state,grid.length=150,thrsh.pct=0.1,thrsh.pct.high=0.9){
  # Equally spaced threshold candidates between thrsh.pct and thrsh.pct.high
  # multiples of the state standard deviation. `Achg` is accepted for
  # interface compatibility but not used in the grid construction.
  lower <- thrsh.pct * sd.state
  upper <- thrsh.pct.high * sd.state
  seq(lower, upper, length.out = grid.length)
}
#' @name .gck
#' @noRd
.gck <- function(yg,gg,hh,capg,f,capf,sigv,kold,t,ex0,vx0,nvalk,kprior,kvals,p,kstate){
  # Single-move sampler for the discrete mixture/break indicators K_t of a
  # conditionally Gaussian state-space model, following Gerlach, Carter &
  # Kohn (2000, JASA) "Efficient Bayesian inference for dynamic mixture
  # models". The state-space system is stacked: hh/capg hold p rows per
  # period, capf holds kstate rows per period, and mu/omega accumulate the
  # backward-recursion quantities in kstate-row blocks.
  #
  # Arguments:
  #   yg, gg     observations and observation-equation offsets
  #   hh, capg   observation loadings and observation error scales (stacked)
  #   f, capf    state-equation offsets and transition matrices (stacked)
  #   sigv       scale of the state innovation attached to the indicator
  #   kold       previous indicator draw (length t)
  #   t          number of time periods
  #   ex0, vx0   prior mean and variance of the initial state
  #   nvalk      number of support points of the indicator
  #   kprior     prior probabilities of the support points (nvalk x 1)
  #   kvals      support points of the indicator (nvalk x 1)
  #   p, kstate  observation and state dimensions per period
  # Returns: kdraw, the updated indicator path (length t).
  #
  # GCK's Step 1 on page 821: backward recursion computing omega_t and mu_t,
  # which summarize the information in y_{t+1:T} about the state at time t.
  lpy2n=0;
  mu = matrix(0,t*kstate,1);
  omega = matrix(0,t*kstate,kstate);
  for (i in seq(t-1,1,by=-1)){
    # NOTE(review): sigv%*%kold[i+1] multiplies by a scalar via %*% -- this
    # only conforms if sigv is a 1-column/1-row matrix; confirm dimensions.
    gatplus1 = sigv%*%kold[i+1]
    # period-(i+1) blocks of the stacked system matrices
    ftplus1 = capf[(kstate*i+1):(kstate*(i+1)),]
    cgtplus1 = capg[(i*p+1):((i+1)*p),]
    htplus1 = t(hh[(i*p+1):((i+1)*p),])
    htt1 <- crossprod(htplus1,gatplus1)
    # one-step-ahead prediction-error variance and gain
    rtplus1 = tcrossprod(htt1)+tcrossprod(cgtplus1,cgtplus1)
    rtinv = solve(rtplus1)
    btplus1 = tcrossprod(gatplus1)%*%htplus1%*%rtinv
    atplus1 = (diag(kstate)-tcrossprod(btplus1,htplus1))%*%ftplus1
    if (kold[i+1] == 0){
      # no break at i+1: the state innovation contributes nothing
      ctplus1 = matrix(0,kstate,kstate)
    }else{
      # Cholesky factor of the conditional innovation covariance
      cct = gatplus1%*%(diag(kstate)-crossprod(gatplus1,htplus1)%*%tcrossprod(rtinv,htplus1)%*%gatplus1)%*%t(gatplus1)
      ctplus1 = t(chol(cct))
    }
    # update omega and mu blocks for period i (GCK eqs. on p. 821)
    otplus1 = omega[(kstate*i+1):(kstate*(i+1)),]
    dtplus1 = crossprod(ctplus1,otplus1)%*%ctplus1+diag(kstate)
    omega[(kstate*(i-1)+1):(kstate*i),] = crossprod(atplus1,(otplus1 - otplus1%*%ctplus1%*%solve(dtplus1)%*%t(ctplus1)%*%otplus1))%*%atplus1+t(ftplus1)%*%htplus1%*%rtinv%*%t(htplus1)%*%ftplus1
    satplus1 = (diag(kstate)-tcrossprod(btplus1,htplus1))%*%(f[,i+1]-btplus1%*%gg[,i+1]) #CHCKCHCKCHKC
    mutplus1 = mu[(kstate*i+1):(kstate*(i+1)),]
    mu[(kstate*(i-1)+1):(kstate*i),] = crossprod(atplus1,(diag(kstate)-otplus1%*%ctplus1%*%solve(dtplus1)%*%t(ctplus1)))%*%(mutplus1-otplus1%*%(satplus1+btplus1%*%yg[i+1]))+t(ftplus1)%*%htplus1%*%rtinv%*%(yg[i+1]-gg[,i+1]-t(htplus1)%*%f[,i+1])
  }
  # GCKs Step 2 on pages 821-822: forward pass drawing each K_i from its
  # full conditional p(K_i | y_{1:T}, K_{-i}) over the nvalk support points.
  kdraw = kold;
  ht = t(hh[1:p,])
  ft = capf[1:kstate,]
  gat = matrix(0,kstate,kstate)
  # Note: this specification implies no shift in first period -- sensible
  rt = t(ht)%*%ft%*%vx0%*%t(ft)%*%ht + crossprod(ht,gat)%*%crossprod(gat,ht)+ tcrossprod(capg[1:p,])
  rtinv = solve(rt)
  jt = (ft%*%vx0%*%t(ft)%*%ht + tcrossprod(gat)%*%ht)%*%rtinv
  # filtered mean/variance of the state after observing period 1
  mtm1 = (diag(kstate) - tcrossprod(jt,ht))%*%(f[,1] + ft%*%ex0) + jt%*%(yg[1] - gg[,1])
  vtm1 <- ft%*%tcrossprod(vx0,ft)+tcrossprod(gat)-jt%*%tcrossprod(rt,jt)
  lprob <- matrix(0,nvalk,1)
  for (i in 2:t){
    ht <- t(hh[((i-1)*p+1):(i*p),])
    ft <- capf[(kstate*(i-1)+1):(kstate*i),]
    # evaluate the log posterior of each candidate indicator value
    for (j in 1:nvalk){
      # NOTE(review): kvals[j,1]%*%sigv is scalar-times-matrix via %*%;
      # confirm sigv's dimensions make this conformable (otherwise `*`).
      gat <- kvals[j,1]%*%sigv
      rt <- crossprod(ht,ft)%*%tcrossprod(vtm1,ft)%*%ht+crossprod(ht,gat)%*%crossprod(gat,ht)+tcrossprod(capg[((i-1)*p+1):(i*p),])
      rtinv <- solve(rt)
      jt <- (ft%*%tcrossprod(vtm1,ft)%*%ht+tcrossprod(gat)%*%ht)%*%rtinv
      mt <- (diag(kstate)-tcrossprod(jt,ht))%*%(f[,i]+ft%*%mtm1)+jt%*%(yg[i]-gg[,i])
      vt <- ft%*%tcrossprod(vtm1,ft)+tcrossprod(gat)-jt%*%tcrossprod(rt,jt)
      # log-likelihood of y_i given the candidate
      lpyt = -.5*log(det(rt)) - .5*t(yg[i] - gg[,i] - t(ht)%*%t(f[,i] + ft%*%mtm1))%*%rtinv%*%(yg[i] - gg[,i] - t(ht)%*%(f[,i] + ft%*%mtm1))
      if (det(vt)<=0){
        # numerically non-PD filtered variance: skip the Cholesky
        tt <- matrix(0,kstate,kstate)
      }else{
        tt <- t(chol(vt))
      }
      ot = omega[(kstate*(i-1)+1):(kstate*i),]
      mut = mu[(kstate*(i-1)+1):(kstate*i),]
      tempv = diag(kstate) + crossprod(tt,ot)%*%tt
      # log-contribution of the future data y_{i+1:T} via (mu, omega)
      lpyt1n = -.5*log(det(tempv)) -.5*(crossprod(mt,ot)%*%mt-2*crossprod(mut,mt)-t(mut-ot%*%mt)%*%tt%*%solve(tempv)%*%t(tt)%*%(mut-ot%*%mt))
      lprob[j,1] <- log(kprior[j,1])+lpyt1n+lpyt
      if (i==2){
        lpy2n <- lpyt1n+lpyt
      }
    }
    # normalize in log space and draw the indicator by inverse CDF
    pprob = exp(lprob-max(lprob))/sum(exp(lprob-max(lprob)))
    tempv = runif(1)
    tempu = 0
    for (j in 1:nvalk){
      tempu <- tempu+pprob[j,1]
      if (tempu> tempv){
        kdraw[i] <- kvals[j,1]
        break
      }
    }
    # filter update under the drawn indicator before moving to period i+1
    gat = kdraw[i]%*%sigv
    rt = crossprod(ht,ft)%*%tcrossprod(vtm1,ft)%*%ht+t(ht)%*%tcrossprod(gat)%*%ht+tcrossprod(capg[((i-1)*p+1):(i*p)])
    rtinv = solve(rt)
    jt = (ft%*%tcrossprod(vtm1,ft)%*%ht+tcrossprod(gat)%*%ht)%*%rtinv
    mtm1 <- (diag(kstate)-tcrossprod(jt,ht))%*%(f[,i]+ft%*%mtm1)+jt%*%(yg[i]-gg[,i])
    vtm1 = ft%*%tcrossprod(vtm1,ft)+tcrossprod(gat)-jt%*%tcrossprod(rt,jt)
  }
  return(kdraw)
}
#' @name .var_posterior
#' @importFrom MASS ginv
#' @importFrom abind adrop abind
#' @noRd
# Combine equation-wise posterior draws of a system estimated equation by
# equation into full-system arrays. For every retained draw and every time
# point the contemporaneous relations are unwound: A0 (unit lower triangular)
# collects the negated covariance coefficients of each equation, its inverse
# gives the impact matrix L, and the reduced-form quantities are
#   S_t = A0^{-1} diag(exp(h_t)) t(A0^{-1})   (time-varying covariance)
#   A_t = t(A0^{-1} t(Atilde))                (reduced-form coefficients)
# Depending on `prior` ("TVP", "TVP-NG" or "TTVP") the matching shrinkage
# auxiliary draws are collected; slots not used by the chosen prior are
# returned as NULL so the return list always has the same shape.
.var_posterior <- function(post_draws, prior, draws, applyfun, cores){
  M <- length(post_draws)          # number of equations / endogenous variables
  bigT <- nrow(post_draws[[1]]$Y)  # number of time periods
  bigK <- ncol(post_draws[[1]]$X)  # number of regressors in the first equation
  K <- unlist(lapply(post_draws,function(l)ncol(l$X)))  # regressors per equation
  # bind data: one response column per equation; X taken from equation 1
  Y <- do.call("cbind",lapply(1:M,function(mm)post_draws[[mm]]$Y))
  X <- post_draws[[1]]$X
  # general stuff: stack residuals / log-volatilities / SV parameters over equations
  res_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$res_store),along=3)
  Sv_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$Sv_store),along=3)
  pars_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$pars_store),along=3)
  # container
  A_store <- array(NA,c(draws,bigT,bigK,M))
  L_store <- array(NA,c(draws,bigT,M,M))
  S_store <- array(NA,c(draws,bigT,M,M))
  timepoints <- paste("t",seq(1,bigT),sep=".")
  # reconstruct the reduced form draw by draw (parallelized via applyfun)
  store.obj <- applyfun(1:draws,function(irep){
    At_store <- array(NA,c(bigT,bigK,M))
    Lt_store <- array(NA,c(bigT,M,M))
    St_store <- array(NA,c(bigT,M,M))
    for(tt in 1:bigT){
      A0 <- diag(M)
      for(mm in 2:M){
        # covariance coefficients of equation mm enter A0 with flipped sign
        A0[mm,1:(mm-1)] <- -post_draws[[mm]]$A_store[irep,timepoints[tt],1:(mm-1)]
      }
      A0inv <- try(solve(A0),silent=TRUE)
      if(is(A0inv,"try-error")) A0inv <- ginv(A0)  # pseudo-inverse fallback if A0 is singular
      Lt_store[tt,,] <- A0inv
      St_store[tt,,] <- A0inv%*%diag(exp(Sv_store[irep,tt,]))%*%t(A0inv)
      # Atilde: structural VAR coefficients of each equation
      # (indices mm:K[mm] presumably skip the covariance terms -- TODO confirm)
      Atilde <- NULL
      for(mm in 1:M) Atilde <- cbind(Atilde,post_draws[[mm]]$A_store[irep,timepoints[tt],mm:K[mm]])
      At_store[tt,,] <- t(A0inv%*%t(Atilde))
    }
    return(list(At_store=At_store,Lt_store=Lt_store,St_store=St_store))
  })
  # copy the per-draw results into the pre-allocated containers
  for(irep in 1:draws){
    A_store[irep,,,] <- store.obj[[irep]]$At_store
    L_store[irep,,,] <- store.obj[[irep]]$Lt_store
    S_store[irep,,,] <- store.obj[[irep]]$St_store
  }
  dimnames(A_store) <- list(NULL,timepoints,colnames(X),colnames(Y))
  # per-draw covariance, median over the time dimension (draws x M x M)
  Smed_store <- apply(S_store,c(1,3,4),median)
  if(prior=="TVP"){
    # plain TVP: only state standard deviations; all shrinkage slots NULL
    thetasqrt_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$thetasqrt_store[,mm:K[mm],]),along=3)
    Lthetasqrt_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$thetasqrt_store[,1:(mm-1),,drop=FALSE],drop=3))
    tau2_store<-xi2_store<-Ltau2_store<-Lxi2_store<-lambda2_store<-kappa2_store<-a_xi_store<-a_tau_store<-D_store<-Omega_store<-thrsh_store<-kappa_store<-V0_store<-LD_store<-LOmega_store<-Lthrsh_store<-LV0_store<-NULL
  }else if(prior=="TVP-NG"){
    # Normal-Gamma shrinkage: collect local/global scales and hyperparameters
    D_store<-Omega_store<-thrsh_store<-kappa_store<-V0_store<-LD_store<-LOmega_store<-Lthrsh_store<-LV0_store<-NULL
    # general stuff
    thetasqrt_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$thetasqrt_store[,mm:K[mm],]),along=3)
    lambda2_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$lambda2_store),along=3)
    kappa2_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$kappa2_store),along=3)
    a_xi_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$a_xi_store),along=3)
    a_tau_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$a_tau_store),along=3)
    tau2_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$tau2_store[,mm:K[mm],]),along=3)
    xi2_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$xi2_store[,mm:K[mm],]),along=3)
    ## ATTENTION: variances of L just as list !!
    ## (equations have different numbers of covariance terms, so no rectangular array)
    Lthetasqrt_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$thetasqrt_store[,1:(mm-1),,drop=FALSE],drop=3))
    Ltau2_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$tau2_store[,1:(mm-1),,drop=FALSE],drop=3))
    Lxi2_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$xi2_store[,1:(mm-1),,drop=FALSE],drop=3))
  }else if(prior=="TTVP"){
    # thresholded TVP: collect indicator/threshold draws instead of NG scales
    thetasqrt_store<-Lthetasqrt_store<-tau2_store<-xi2_store<-Ltau2_store<-Lxi2_store<-lambda2_store<-kappa2_store<-a_xi_store<-a_tau_store<-NULL
    # general stuff
    kappa_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$kappa_store),along=3)
    D_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$D_store[,,mm:K[mm],]),along=4)
    Omega_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$Omega_store[,,mm:K[mm],]),along=4)
    thrsh_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$thrsh_store[,mm:K[mm],]),along=3)
    V0_store <- abind(lapply(1:M,function(mm)post_draws[[mm]]$V0_store[,mm:K[mm],]),along=3)
    ## ATTENTION: variances of L just as list !!
    LD_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$D_store[,,1:(mm-1),,drop=FALSE],drop=4))
    LOmega_store<- lapply(2:M,function(mm)adrop(post_draws[[mm]]$Omega_store[,,1:(mm-1),,drop=FALSE],drop=4))
    Lthrsh_store<- lapply(2:M,function(mm)adrop(post_draws[[mm]]$thrsh_store[,1:(mm-1),,drop=FALSE],drop=3))
    LV0_store <- lapply(2:M,function(mm)adrop(post_draws[[mm]]$V0_store[,1:(mm-1),,drop=FALSE],drop=3))
  }
  # fixed-shape return list: unused slots are NULL depending on `prior`
  ret <- list(Y=Y,X=X,A_store=A_store,L_store=L_store,Sv_store=Sv_store,S_store=S_store,Smed_store=Smed_store,pars_store=pars_store,res_store=res_store,thetasqrt_store=thetasqrt_store,Lthetasqrt_store=Lthetasqrt_store,
              tau2_store=tau2_store,xi2_store=xi2_store,Ltau2_store=Ltau2_store,Lxi2_store=Lxi2_store,lambda2_store=lambda2_store,kappa2_store=kappa2_store,a_xi_store=a_xi_store,a_tau_store=a_tau_store,
              D_store=D_store,Omega_store=Omega_store,thrsh_store=thrsh_store,kappa_store=kappa_store,V0_store=V0_store,LD_store=LD_store,LOmega_store=LOmega_store,Lthrsh_store=Lthrsh_store,LV0_store=LV0_store)
  return(ret)
}
#' @name .TVPBVAR_linear_R
#' @importFrom stochvol svsample_fast_cpp specify_priors default_fast_sv sv_normal sv_beta sv_gamma
#' @importFrom MASS ginv mvrnorm
#' @importFrom matrixcalc hadamard.prod
#' @importFrom methods is
#' @importFrom stats rnorm rgamma runif dnorm
#' @noRd
# MCMC sampler for a TVP-BVAR with Normal-Gamma shrinkage, estimated
# equation by equation, with optional stochastic volatility.
# NOTE(review) -- fixes relative to the previous version:
#  * `burnin` was referenced but never defined (the argument is stored in
#    `nburn`), so the MH proposal scaling errored whenever sample_A=TRUE;
#  * the a_tau MH step used/updated the scalar `scale_xi` instead of the
#    per-lag `scale_tau` vector (defined but never used before);
#  * the covariance shrinkage loop indexed L_draw/l_prior with the stale
#    loop variable `mm` instead of the loop's own (ii,jj);
#  * the state-sampling step subtracted X %*% Am_draw (a T x M matrix)
#    from Y[,mm]; now only equation mm's coefficients are used;
#  * removed the unused storage arrays Am_store / At_store.
.TVPBVAR_linear_R <- function(Y_in,p_in,draws_in,burnin_in,cons_in,trend_in,sv_in,thin_in,quiet_in,prior_in,hyperparam_in,Ex_in){
  #----------------------------------------INPUTS----------------------------------------------------#
  Yraw <- Y_in
  p <- p_in
  Traw <- nrow(Yraw)
  M <- ncol(Yraw)          # number of endogenous variables
  K <- M*p                 # number of lagged regressors
  Ylag <- .mlag(Yraw,p)    # lag matrix (first p rows are NA/zero padded)
  nameslags <- NULL
  for (ii in 1:p) nameslags <- c(nameslags,rep(paste("Ylag",ii,sep=""),M))
  colnames(Ylag) <- nameslags
  # optional exogenous regressors
  texo <- FALSE; Mex <- 0; Exraw <- NULL
  if(!is.null(Ex_in)){
    Exraw <- Ex_in; Mex <- ncol(Exraw)
    texo <- TRUE
    colnames(Exraw) <- rep("Tex",Mex)
  }
  X <- cbind(Ylag,Exraw)
  X <- X[(p+1):nrow(X),,drop=FALSE]
  Y <- Yraw[(p+1):Traw,,drop=FALSE]
  bigT <- nrow(X)          # effective sample size after lagging
  cons <- cons_in
  if(cons){
    X <- cbind(X,1)
    colnames(X)[ncol(X)] <- "cons"
  }
  trend <- trend_in
  if(trend){
    X <- cbind(X,seq(1,bigT))
    colnames(X)[ncol(X)] <- "trend"
  }
  k <- ncol(X)             # regressors per equation
  n <- k*M                 # total number of VAR coefficients
  v <- (M*(M-1))/2         # free covariance (lower-triangular) elements
  #---------------------------------------------------------------------------------------------------------
  # HYPERPARAMETERS
  #---------------------------------------------------------------------------------------------------------
  hyperpara <- hyperparam_in
  prior <- prior_in
  sv <- sv_in
  prmean <- hyperpara$prmean
  a_1 <- hyperpara$a_1
  b_1 <- hyperpara$b_1
  # SV prior hyperparameters
  Bsigma <- hyperpara$Bsigma
  a0 <- hyperpara$a0
  b0 <- hyperpara$b0
  bmu <- hyperpara$bmu
  Bmu <- hyperpara$Bmu
  # Normal-Gamma shrinkage hyperparameters
  d1 <- hyperpara$d1
  d2 <- hyperpara$d2
  e1 <- hyperpara$e1
  e2 <- hyperpara$e2
  b_xi <- hyperpara$b_xi
  b_tau <- hyperpara$b_tau
  nu_xi <- hyperpara$nu_xi
  nu_tau <- hyperpara$nu_tau
  a_start <- hyperpara$a_start
  sample_A <- hyperpara$sample_A
  #---------------------------------------------------------------------------------------------------------
  # OLS Quantitites
  #---------------------------------------------------------------------------------------------------------
  XtXinv <- try(solve(crossprod(X)),silent=TRUE)
  if(is(XtXinv,"try-error")) XtXinv <- ginv(crossprod(X))  # pseudo-inverse if X'X singular
  A_OLS <- XtXinv%*%(t(X)%*%Y)
  E_OLS <- Y - X%*%A_OLS
  S_OLS <- crossprod(E_OLS)/(bigT-k)
  #---------------------------------------------------------------------------------------------------------
  # Initial Values
  #---------------------------------------------------------------------------------------------------------
  A_draw <- array(A_OLS, c(bigT+1,k,M))   # time-varying coefficients (t = 0..T)
  S_draw <- array(S_OLS, c(M,M,bigT))
  Em_draw <- Em_str <- E_OLS              # reduced-form / structural residuals
  L_draw <- diag(M)                       # unit lower-triangular impact matrix
  # time-varying stuff
  Am_draw <- A_OLS                        # constant part of the coefficients
  At_draw <- array(0, c(bigT+1, k, M))    # standardized states
  theta_draw <- matrix(1, k, M)           # state innovation variances
  theta_sqrt <- sqrt(theta_draw)
  #---------------------------------------------------------------------------------------------------------
  # PRIORS
  #---------------------------------------------------------------------------------------------------------
  # Priors on VAR coefs
  #-----------------------------
  # prior mean (stacked: k means + k state sds per equation)
  A_prior <- matrix(0,2*k,M)
  diag(A_prior) <- prmean
  # prior variance
  tau2.draw <- matrix(10,k,M)   # shrinkage on constant coefficients
  xi2.draw <- matrix(10,k,M)    # shrinkage on state variances
  # NG stuff (per-lag global scales and NG exponents)
  lambda2 <- matrix(10,p,1)
  a_tau <- matrix(a_start,p,1)
  scale_tau <- rep(.43,p)       # MH proposal scales for a_tau (one per lag)
  acc_tau <- rep(0,p)
  kappa2 <- 10
  a_xi <- a_start
  scale_xi <- .43               # MH proposal scale for a_xi (scalar)
  acc_xi <- 0
  #------------------------------------
  # Priors on coefs in H matrix of VCV
  #------------------------------------
  # prior mean
  l_prior <- matrix(0,M,M)
  # prior variance (only strictly-lower-triangular elements are free)
  L_prior <- matrix(10,M,M)
  L_prior[upper.tri(L_prior)] <- 0; diag(L_prior) <- 0
  # NG
  lambda2_L <- 10
  a_L_tau <- a_start
  scale_L_tau <- .43
  acc_L_tau <- 0
  #------------------------------------
  # SV quantities
  #------------------------------------
  Sv_draw <- matrix(-3,bigT,M)
  svdraw <- list(para=c(mu=-10,phi=.9,sigma=.2),latent=rep(-3,bigT))
  svl <- list()
  for (jj in 1:M) svl[[jj]] <- svdraw
  pars_var <- matrix(c(-3,.9,.2,-3),4,M,dimnames=list(c("mu","phi","sigma","latent0"),NULL))
  hv <- svdraw$latent
  para <- list(mu=-3,phi=.9,sigma=.2)
  Sv_priors <- specify_priors(mu=sv_normal(mean=bmu, sd=Bmu), phi=sv_beta(a0,b0), sigma2=sv_gamma(shape=0.5,rate=1/(2*Bsigma)))
  eta <- list()
  #---------------------------------------------------------------------------------------------------------
  # SAMPLER MISCELLANEOUS
  #---------------------------------------------------------------------------------------------------------
  nsave <- draws_in
  nburn <- burnin_in
  ntot <- nsave+nburn
  # thinning
  thin <- thin_in
  count <- 0
  thindraws <- nsave/thin
  thin.draws <- seq(nburn+1,ntot,by=thin)
  #---------------------------------------------------------------------------------------------------------
  # STORAGES
  #---------------------------------------------------------------------------------------------------------
  # (Am_store / At_store removed: they were allocated but never filled or returned)
  A_store <- array(NA,c(thindraws,bigT+1,k,M))
  L_store <- array(NA,c(thindraws,M,M))
  res_store <- array(NA,c(thindraws,bigT,M))
  Sv_store <- array(NA,c(thindraws,bigT,M))
  pars_store <- array(NA,c(thindraws,4,M))
  # NG
  tau2_store <- array(NA,c(thindraws,k,M))
  xi2_store <- array(NA,c(thindraws,k,M))
  lambda2_store<- array(NA,c(thindraws,p,1))
  kappa2_store <- array(NA,c(thindraws,1,1))
  a_xi_store <- array(NA,c(thindraws,1,1))
  a_tau_store <- array(NA,c(thindraws,p,1))
  #---------------------------------------------------------------------------------------------------------
  # MCMC LOOP
  #---------------------------------------------------------------------------------------------------------
  for (irep in 1:ntot){
    #----------------------------------------------------------------------------
    # Step 1: Sample coefficients equation by equation
    for (mm in 1:M){
      if(mm==1){
        # FIX: use equation mm's coefficients only (was X%*%Am_draw, a T x M matrix)
        Ystar <- (Y[,mm]-X%*%Am_draw[,mm])*exp(-0.5*Sv_draw[,mm])
        Fstar <- (X%*%diag(theta_sqrt[,mm]))*exp(-0.5*Sv_draw[,mm])
        At_draw[,,mm] <- sample_McCausland(Ystar, Fstar)
        # conditional on the states, draw constant part + state sds jointly
        Y.i <- Y[,mm]*exp(-0.5*Sv_draw[,mm])
        Z.i <- cbind(X,hadamard.prod(X,At_draw[2:(bigT+1),,mm]))*exp(-0.5*Sv_draw[,mm])
        Vpriorinv <- diag(1/c(tau2.draw[,mm],xi2.draw[,mm]))
        V_post <- try(chol2inv(chol(crossprod(Z.i)+Vpriorinv)),silent=TRUE)
        if (is(V_post,"try-error")) V_post <- ginv(crossprod(Z.i)+Vpriorinv)
        A_post <- V_post%*%(crossprod(Z.i,Y.i)+Vpriorinv%*%A_prior[,mm])
        A.draw.i <- try(A_post+t(chol(V_post))%*%rnorm(ncol(Z.i)),silent=TRUE)
        if (is(A.draw.i,"try-error")) A.draw.i <- matrix(mvrnorm(1,A_post,V_post),ncol(Z.i),1)
        Am_draw[,mm] <- A.draw.i[1:k,,drop=FALSE]
        theta_sqrt[,mm] <- A.draw.i[(k+1):(2*k),,drop=FALSE]
        # compute errors (first equation: reduced-form == structural)
        Em_draw[,mm] <- Em_str[,mm] <- Y[,mm] - X%*%Am_draw[,mm] - apply(hadamard.prod(X,At_draw[2:(bigT+1),,mm]%*%diag(theta_sqrt[,mm])),1,sum)
      }else{
        # FIX: equation-mm coefficients only (see above)
        Ystar <- (Y[,mm]-X%*%Am_draw[,mm])*exp(-0.5*Sv_draw[,mm])
        Fstar <- (X%*%diag(theta_sqrt[,mm]))*exp(-0.5*Sv_draw[,mm])
        At_draw[,,mm] <- sample_McCausland(Ystar, Fstar)
        # regressors now include residuals of preceding equations
        # (their coefficients are the free elements of L)
        Y.i <- Y[,mm]*exp(-0.5*Sv_draw[,mm])
        Z.i <- cbind(X,hadamard.prod(X,At_draw[2:(bigT+1),,mm]),Em_draw[,1:(mm-1)])*exp(-0.5*Sv_draw[,mm])
        Vpriorinv <- diag(1/c(tau2.draw[,mm],xi2.draw[,mm],L_prior[mm,1:(mm-1)]))
        V_post <- try(chol2inv(chol((crossprod(Z.i)+Vpriorinv))),silent=TRUE)
        if (is(V_post,"try-error")) V_post <- ginv((crossprod(Z.i)+Vpriorinv))
        A_post <- V_post%*%(crossprod(Z.i,Y.i)+Vpriorinv%*%c(A_prior[,mm],l_prior[mm,1:(mm-1)]))
        A.draw.i <- try(A_post+t(chol(V_post))%*%rnorm(ncol(Z.i)),silent=TRUE)
        if (is(A.draw.i,"try-error")) A.draw.i <- matrix(mvrnorm(1,A_post,V_post),ncol(Z.i),1)
        Am_draw[,mm] <- A.draw.i[1:k,,drop=FALSE]
        theta_sqrt[,mm] <- A.draw.i[(k+1):(2*k),,drop=FALSE]
        L_draw[mm,1:(mm-1)] <- A.draw.i[(2*k+1):ncol(Z.i),,drop=FALSE]
        # compute errors (shared residual computed once, previously duplicated)
        resid.i <- Y[,mm]-X%*%Am_draw[,mm]-apply(hadamard.prod(X,At_draw[2:(bigT+1),,mm]%*%diag(theta_sqrt[,mm])),1,sum)
        Em_draw[,mm] <- resid.i
        Em_str[,mm] <- resid.i-Em_draw[,1:(mm-1),drop=FALSE]%*%t(L_draw[mm,1:(mm-1),drop=FALSE])
      }
    }
    rownames(Am_draw) <- colnames(X)
    theta_draw <- theta_sqrt^2
    #----------------------------------------------------------------------------
    # Step 3: Interweaving (ASIS) -- resample theta and Am in the other
    # parameterization, then map the states back
    theta_sign <- sign(theta_sqrt)
    for(mm in 1:M){
      A_draw[,,mm] <- matrix(Am_draw[,mm],bigT+1,k,byrow=TRUE) + At_draw[,,mm]%*%diag(theta_sqrt[,mm])
      A_diff <- diff(At_draw[,,mm]%*%diag(theta_sqrt[,mm]))
      for(kk in 1:k){
        #theta.new
        res <- do_rgig1(lambda=-bigT/2,
                        chi=sum(A_diff[,kk]^2)+(A_draw[1,kk,mm]-Am_draw[kk,mm])^2,
                        psi=1/xi2.draw[kk,mm])
        theta_draw[kk,mm] <- res
        theta_sqrt[kk,mm] <- sqrt(res)*theta_sign[kk,mm]
        # Am_new
        sigma2_A_mean <- 1/((1/tau2.draw[kk,mm]) + (1/theta_draw[kk,mm]))
        mu_A_mean <- A_draw[1,kk,mm]*tau2.draw[kk,mm]/(tau2.draw[kk,mm] + theta_draw[kk,mm])
        Am_draw[kk,mm] <- rnorm(1, mu_A_mean, sqrt(sigma2_A_mean))
      }
      At_draw[,,mm] <- sapply(1:k,function(kk)A_draw[,kk,mm]-Am_draw[kk,mm])%*%diag(1/theta_sqrt[,mm])
    }
    #----------------------------------------------------------------------------
    # Step 4a: Shrinkage priors on state variances
    kappa2 <- rgamma(1, d1+a_xi*k, d2+0.5*k*a_xi*mean(xi2.draw))
    for(ii in 1:k){
      for(jj in 1:M){
        xi2.draw[ii,jj] <- do_rgig1(lambda=a_xi-0.5, chi=theta_draw[ii,jj], psi=a_xi*kappa2)
      }
    }
    xi2.draw[xi2.draw<1e-7] <- 1e-7   # numerical floor
    if(sample_A){
      before <- a_xi
      a_xi <- MH_step(a_xi, scale_xi, k, kappa2, as.vector(theta_sqrt), b_xi, nu_xi, d1, d2)
      if(before!=a_xi){
        acc_xi <- acc_xi + 1
      }
      # scale MH proposal during the first 50% of the burn-in stage
      # FIX: was `burnin`, which is undefined in this function
      if(irep<(0.5*nburn)){
        if((acc_xi/irep)>0.30){scale_xi <- 1.01*scale_xi}
        if((acc_xi/irep)<0.15){scale_xi <- 0.99*scale_xi}
      }
    }
    # Step 4b: Shrinkage prior on mean (multiplicative Gamma prior)
    for(pp in 1:p){
      slct.i <- which(rownames(Am_draw)==paste("Ylag",pp,sep=""))
      if(pp==1 & cons) slct.i <- c(slct.i,which(rownames(Am_draw)=="cons"))
      if(pp==1 & trend) slct.i <- c(slct.i,which(rownames(Am_draw)=="trend"))
      Am_lag.i <- Am_draw[slct.i,,drop=FALSE]
      A_prior.i <- A_prior[slct.i,,drop=FALSE]
      tau2.i <- tau2.draw[slct.i,,drop=FALSE]
      if(pp==1){
        lambda2[pp,1] <- rgamma(1, e1+a_tau[pp,1]*M^2, e2+0.5*a_tau[pp,1]*mean(tau2.i))
      }else{
        lambda2[pp,1] <- rgamma(1, e1+a_tau[pp,1]*M^2, e2+0.5*a_tau[pp,1]*prod(lambda2[1:(pp-1)])*mean(tau2.i))
      }
      Mend <- M + ifelse(pp==1&cons,1,0) + ifelse(pp==1&trend,1,0)
      for(ii in 1:Mend){
        for(jj in 1:M){
          tau2.i[ii,jj] <- do_rgig1(lambda=a_tau[pp,1]-0.5, chi=(Am_lag.i[ii,jj]-A_prior.i[ii,jj])^2, psi=a_tau[pp,1]*prod(lambda2[1:pp,1]))
        }
      }
      tau2.i[tau2.i<1e-7] <- 1e-7
      if(sample_A){
        before <- a_tau[pp,1]
        # FIX: use the per-lag proposal scale scale_tau[pp] (scale_xi is the
        # scalar proposal scale of a_xi and was wrongly used/updated here)
        a_tau[pp,1] <- MH_step(a_tau[pp,1], scale_tau[pp], M^2, lambda2[pp,1], as.vector(Am_lag.i), b_tau, nu_tau, e1, e2)
        if(before!=a_tau[pp,1]){
          acc_tau[pp] <- acc_tau[pp] + 1
        }
        # scale MH proposal during the first 50% of the burn-in stage
        if(irep<(0.5*nburn)){
          if((acc_tau[pp]/irep)>0.30){scale_tau[pp] <- 1.01*scale_tau[pp]}
          if((acc_tau[pp]/irep)<0.15){scale_tau[pp] <- 0.99*scale_tau[pp]}
        }
      }
      tau2.draw[slct.i,] <- tau2.i
    }
    # Step 4c: Shrinkage prior on covariances
    lambda2_L <- rgamma(1, e1+a_L_tau*v, e2+0.5*v*a_L_tau*mean(L_prior[lower.tri(L_prior)]))
    for(ii in 2:M){
      for(jj in 1:(ii-1)){
        # FIX: index with (ii,jj); previously the stale loop variable `mm`
        # from Step 1 was used (L_draw[mm,ii] / l_prior[mm,ii])
        res <- do_rgig1(lambda=a_L_tau-0.5, chi=(L_draw[ii,jj]-l_prior[ii,jj])^2, psi=a_L_tau*lambda2_L)
        L_prior[ii,jj] <- ifelse(res<1e-7,1e-7,res)
      }
    }
    if(sample_A){
      before <- a_L_tau
      a_L_tau <- MH_step(a_L_tau, scale_L_tau, v, lambda2_L, L_draw[lower.tri(L_draw)], b_tau, nu_tau, e1, e2)
      if(before!=a_L_tau){
        acc_L_tau <- acc_L_tau + 1
      }
      # scale MH proposal during the first 50% of the burn-in stage
      if(irep<(0.5*nburn)){
        if((acc_L_tau/irep)>0.30){scale_L_tau <- 1.01*scale_L_tau}
        if((acc_L_tau/irep)<0.15){scale_L_tau <- 0.99*scale_L_tau}
      }
    }
    #----------------------------------------------------------------------------
    # Step 5: Sample variances (SV or homoskedastic inverse-Gamma)
    if (sv){
      for (jj in 1:M){
        para <- as.list(pars_var[,jj])
        para$nu = Inf; para$rho=0; para$beta<-0
        svdraw <- svsample_fast_cpp(y=Em_str[,jj], draws=1, burnin=0, designmatrix=matrix(NA_real_),
                                    priorspec=Sv_priors, thinpara=1, thinlatent=1, keeptime="all",
                                    startpara=para, startlatent=Sv_draw[,jj],
                                    keeptau=FALSE, print_settings=list(quiet=TRUE, n_chains=1, chain=1),
                                    correct_model_misspecification=FALSE, interweave=TRUE, myoffset=0,
                                    fast_sv=default_fast_sv)
        svl[[jj]] <- svdraw
        h_ <- exp(svdraw$latent[1,])
        para$mu <- svdraw$para[1,"mu"]
        para$phi <- svdraw$para[1,"phi"]
        para$sigma <- svdraw$para[1,"sigma"]
        para$latent0 <- svdraw$latent0[1,"h_0"]
        pars_var[,jj] <- unlist(para[c("mu","phi","sigma","latent0")])
        Sv_draw[,jj] <- log(h_)
      }
    }else{
      for (jj in 1:M){
        S_1 <- a_1+bigT/2
        S_2 <- b_1+crossprod(Em_str[,jj])/2
        sig_eta <- 1/rgamma(1,S_1,S_2)
        Sv_draw[,jj] <- log(sig_eta)
      }
    }
    #-------------------------------------------------------------------------#
    # STEP 6: RANDOM SIGN SWITCH (theta_sqrt is identified up to sign only)
    for(mm in 1:M){
      for(kk in 1:k){
        if(runif(1,0,1)>0.5){
          theta_sqrt[kk,mm] <- -theta_sqrt[kk,mm]
        }
      }
    }
    #----------------------------------------------------------------------------
    # Step 7: store draws (post burn-in, thinned)
    if(irep %in% thin.draws){
      count <- count+1
      A_store[count,,,] <- A_draw
      L_store[count,,] <- L_draw
      res_store[count,,] <- Em_draw
      # SV
      Sv_store[count,,] <- Sv_draw
      pars_store[count,,] <- pars_var
      # NG
      tau2_store[count,,] <- tau2.draw
      xi2_store[count,,] <- xi2.draw
      lambda2_store[count,,] <- lambda2
      kappa2_store[count,,] <- kappa2
      a_xi_store[count,,] <- a_xi
      a_tau_store[count,,] <- a_tau
    }
  }
  #---------------------------------------------------------------------------------------------------------
  # END ESTIMATION
  #---------------------------------------------------------------------------------------------------------
  dimnames(A_store)=list(NULL,paste("t",seq(0,bigT),sep="."),colnames(X),colnames(A_OLS))
  ret <- list(Y=Y,X=X,A_store=A_store,L_store=L_store,Sv_store=Sv_store,pars_store=pars_store,res_store=res_store,
              tau2_store=tau2_store,xi2_store=xi2_store,lambda2_store=lambda2_store,kappa2_store=kappa2_store,a_xi_store=a_xi_store,a_tau_store=a_tau_store)
  return(ret)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colPerc.R
\name{colPerc}
\alias{colPerc}
\title{Column Percents}
\usage{
colPerc(tab)
}
\arguments{
\item{tab}{A two way table, e.g.,
the result of \code{xtabs(~var1+var2,data=DataFrame)}.}
}
\value{
An object of class \code{table}, giving column percentages
for the input table.
}
\description{
Computes column percentages for a given two-way table.
}
\examples{
MyTable <- xtabs(~weather+crowd.behavior,data=ledgejump)
colPerc(MyTable)
}
\author{
Homer White \email{hwhite0@georgetowncollege.edu}
}
|
/man/colPerc.Rd
|
no_license
|
umeshach/tigerstats
|
R
| false
| true
| 579
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colPerc.R
\name{colPerc}
\alias{colPerc}
\title{Column Percents}
\usage{
colPerc(tab)
}
\arguments{
\item{tab}{A two way table, e.g.,
the result of \code{xtabs(~var1+var2,data=DataFrame)}.}
}
\value{
An object of class \code{table}, giving column percentages
for the input table.
}
\description{
Computes column percentages for a given two-way table.
}
\examples{
MyTable <- xtabs(~weather+crowd.behavior,data=ledgejump)
colPerc(MyTable)
}
\author{
Homer White \email{hwhite0@georgetowncollege.edu}
}
|
\name{crmsens}
\alias{crmsens}
\alias{print.dxcrm}
\title{Model Sensitivity in the CRM}
\description{
Evaluate the model sensitivity in the CRM by indifference intervals.}
\usage{
crmsens(prior, target, model = "empiric", intcpt = 3, eps = 1e-06,
maxit = 100, detail = FALSE)
}
\arguments{
\item{prior}{A vector of initial guesses of toxicity probabilities
associated with the doses.}
\item{target}{The target DLT rate.}
\item{model}{A character string to specify the working model used in
the method. The default model is ``empiric''. A one-parameter
logistic model is specified by ``logistic''.}
\item{intcpt}{The intercept of the working logistic model. The
default is 3. If \code{model}=``empiric'', this argument will be
ignored.}
\item{eps}{Error tolerance in the computation of indifference
intervals.}
\item{maxit}{Maximum number of iterations in the computation of
indifference intervals.}
\item{detail}{If TRUE, the details of the ``H sets'' will be
displayed. Default is FALSE.}
}
\value{
The function \code{crmsens} returns the model sensitivity for the
model specifications given by the user.
\item{Hset}{The ``H sets'' of the model parameter.}
\item{iint}{The indifference intervals of the dose-toxicity model
associated with the test doses.}
}
\seealso{
\code{\link{crm}}, \code{\link{getprior}}
}
\references{
Cheung, Y. K. and Chappell, R. (2002). A simple technique to evaluate
model sensitivity in the continual reassessment method. Biometrics
58:671-674.
Cheung, Y. K. (2011). Dose Finding by the Continual Reassessment Method. New York: Chapman & Hall/CRC Press.
}
\examples{
prior <- c(0.05, 0.10, 0.20, 0.35, 0.50, 0.70)
target <- 0.2
foo <- crmsens(prior, target, model="logistic", intcpt=2, detail=TRUE)
}
\keyword{datasets}
|
/man/crmsens.Rd
|
no_license
|
cran/dfcrm
|
R
| false
| false
| 1,858
|
rd
|
\name{crmsens}
\alias{crmsens}
\alias{print.dxcrm}
\title{Model Sensitivity in the CRM}
\description{
Evaluate the model sensitivity in the CRM by indifference intervals.}
\usage{
crmsens(prior, target, model = "empiric", intcpt = 3, eps = 1e-06,
maxit = 100, detail = FALSE)
}
\arguments{
\item{prior}{A vector of initial guesses of toxicity probabilities
associated with the doses.}
\item{target}{The target DLT rate.}
\item{model}{A character string to specify the working model used in
the method. The default model is ``empiric''. A one-parameter
logistic model is specified by ``logistic''.}
\item{intcpt}{The intercept of the working logistic model. The
default is 3. If \code{model}=``empiric'', this argument will be
ignored.}
\item{eps}{Error tolerance in the computation of indifference
intervals.}
\item{maxit}{Maximum number of iterations in the computation of
indifference intervals.}
\item{detail}{If TRUE, the details of the ``H sets'' will be
displayed. Default is FALSE.}
}
\value{
The function \code{crmsens} returns the model sensitivity for the
model specifications given by the user.
\item{Hset}{The ``H sets'' of the model parameter.}
\item{iint}{The indifference intervals of the dose-toxicity model
associated with the test doses.}
}
\seealso{
\code{\link{crm}}, \code{\link{getprior}}
}
\references{
Cheung, Y. K. and Chappell, R. (2002). A simple technique to evaluate
model sensitivity in the continual reassessment method. Biometrics
58:671-674.
Cheung, Y. K. (2011). Dose Finding by the Continual Reassessment Method. New York: Chapman & Hall/CRC Press.
}
\examples{
prior <- c(0.05, 0.10, 0.20, 0.35, 0.50, 0.70)
target <- 0.2
foo <- crmsens(prior, target, model="logistic", intcpt=2, detail=TRUE)
}
\keyword{datasets}
|
# --- Configuration ---------------------------------------------------------
# Usage: Rscript run.models.r <cores> <current.core> <method>
# Defaults when an argument is missing: 1 core, core index 1, "lwd".
source('modelsetup.r')
source('SummarizeImputation.r')
cli.args <- commandArgs(TRUE)
cores <- as.integer(cli.args[1])
current.core <- as.integer(cli.args[2])
mtd <- tolower(cli.args[3])
if (is.na(cores)) cores <- 1
if (is.na(current.core)) current.core <- 1
if (is.na(mtd)) mtd <- "lwd"
# --- Output directories ----------------------------------------------------
base.dir <- getwd()
#base.dir <- 'c:/missingness'
results.dir <- paste0(base.dir, "/results")
method.dir <- paste(results.dir, mtd, sep="/")
dir.create(method.dir)
# Run one imputation method on one missing-data file and write the five
# standard summary tables (coefficient bias, coefficient t change, R-squared
# bias, R-squared t, runtime) as csv files into the method's results folder.
# `i` indexes the global `missing.files` / `missing.variables`; `method` is
# one of "lwd", "hot", "rf", "mi" ("em" is a placeholder).
execute <- function(i, method)
{
  missing.file <- missing.files[[i]]
  outp <- paste( method.dir, missing.file, "/", sep="/")
  dir.create(outp)
  # Helper: write the five summary components of `output` to
  # "<method.dir>/<missing.file> <label>.csv". This replaces five identical
  # write.csv blocks that were duplicated in every method branch.
  write.summaries <- function(output)
  {
    # names(labels) = component of `output`; value = file-name label
    labels <- c(bias.coefficients    = "bias.coefficients",
                t.coefficient.change = "t.coefficient",
                bias.r.squared       = "bias.r.squared",
                t.r.squared          = "t.r.squared",
                runtime              = "runtime")
    for (comp in names(labels)) {
      write.csv(output[[comp]],
                file=paste(method.dir, "/", missing.file, " ", labels[[comp]], ".csv", sep=""))
    }
  }
  if(method == "lwd")
  {
    # Listwise deletion: "impute" by dropping incomplete rows.
    output <- Summarize.Imputation(
      original.frame = not.missing,
      variable.to.remove = missing.variables[[i]],
      input.set = read.csv(paste(missing.file, ".csv", sep="")),
      technique = function(x, model.formula){ na.omit(x) },
      impute.file.prefix = outp,
      graph.file.prefix = outp,
      Categoricals.As = "bool",
      Logicals.As = "bool",
      model.formula = "LnVarietyScore2~usrUrban+usrSuburban+age1+p25+y1v19",
      summary.provided.by.technique = FALSE
    )
    write.summaries(output)
  }
  if(method == "hot")
  {
    # Hot-deck imputation via rrp.impute (k = 5 nearest neighbours).
    output <- Summarize.Imputation(not.missing, missing.variables[[i]], read.csv(paste(missing.file, ".csv", sep="")), function(x){a <- rrp.impute( x, k=5); a$new.data }, paste("HOT", missing.file) )
    write.summaries(output)
  }
  if(method == "rf")
  {
    # Random-forest imputation via rfImpute (500 trees).
    output <- Summarize.Imputation(not.missing, missing.variables[[i]], read.csv(paste(missing.file, ".csv", sep="")), function(x){ rfImpute(LnVarietyScore2~usroriginalnonemissing+age1+p25+y1v19, x, ntree=500) }, paste("RF", missing.file) )
    write.summaries(output)
  }
  if(method == "mi")
  {
    # Multiple imputation via the mi package (5 imputations, 30 iterations).
    output <- Summarize.Imputation(
      original.frame = not.missing,
      variable.to.remove = missing.variables[[i]],
      input.set = read.csv(paste(missing.file, ".csv", sep="")),
      technique = function(x, model.formula){ mi.data.frame( mi(x, mi.info(x, .999), n.imp=5, n.iter = 30, add.noise=FALSE ) ) },
      graph.file.prefix = paste("MI", missing.file),
      summary.provided.by.technique = FALSE,
      Categoricals.As = "bool",
      Logicals.As = "bool",
      model.formula = "LnVarietyScore2~usrUrban+usrSuburban+age1+p25+y1v19"
    )
    write.summaries(output)
  }
  if(method == "em")
  {
    # EM-based imputation: not implemented yet.
  }
  #if(method == "")
  #{
  #}
}
### Work scheduling: partition the 27 missing files across the requested
### number of cores. The slices below reproduce the original hand-chosen
### splits exactly; an unsupported cores value, or a current.core outside
### the slice list, does nothing -- just like the original if-chains.
core.slices <- list(
  "2"  = list(1:13, 14:27),
  "4"  = list(1:7, 8:14, 15:21, 22:27),
  "8"  = list(1:3, 4:7, 8:10, 11:13, 14:17, 18:20, 21:23, 24:27),
  "16" = list(1:1, 2:3, 4:5, 6:7, 8:8, 9:10, 11:12, 13:13,
              14:15, 16:17, 18:19, 20:20, 21:22, 23:24, 25:25, 26:27)
)
if (cores == 1) {
  # Single core: walk every missing file in order.
  for (i in seq_along(missing.files)) { execute(i, mtd) }
} else {
  key <- as.character(cores)
  if (key %in% names(core.slices) &&
      current.core >= 1 && current.core <= length(core.slices[[key]])) {
    for (i in core.slices[[key]][[current.core]]) { execute(i, mtd) }
  }
}
#Check.Missingness <- function(x) {
#  input.set <- read.csv(paste(x, ".csv", sep=""))
#  summaries <- sapply(input.set, summary)
#  lengths <- sapply(input.set, length)
#  (summaries["NA's",]/lengths)*100
#}
#Get.Missingness.Level <- function(x) {
#  fred <- strsplit(x, "\\.")
#  as.numeric(fred[[1]][length(fred[[1]])])
#}
#nominal <- sapply(missing.files, Get.Missingness.Level)
#actual <- sapply(missing.files, Check.Missingness)
#nominal - actual
|
/deploy/run.models.r
|
no_license
|
grstearns/Missingness
|
R
| false
| false
| 7,525
|
r
|
source('modelsetup.r')
source('SummarizeImputation.r')
# Parallelisation parameters from the command line:
#   arg 1 - total number of worker processes
#   arg 2 - this worker's 1-based index
#   arg 3 - imputation method name (lower-cased)
cores <- as.integer(commandArgs(TRUE)[1])
current.core <- as.integer(commandArgs(TRUE)[2])
mtd <- tolower(commandArgs(TRUE)[3])
# Defaults when arguments are absent or unparseable: single core, listwise deletion.
if(is.na(cores)) {
  cores <- 1
}
if(is.na(current.core)) {
  current.core <- 1
}
if(is.na(mtd)) {
  mtd <- "lwd"
}
base.dir <- getwd()
#base.dir <- 'c:/missingness'
results.dir <- paste(base.dir, "/results", sep="")
method.dir <- paste(results.dir, mtd, sep="/")
# recursive=TRUE also creates results.dir when it does not yet exist (plain
# dir.create would fail with only a warning); showWarnings=FALSE silences
# the warning when the directory is already present.
dir.create(method.dir, recursive = TRUE, showWarnings = FALSE)
execute <- function(i, method)
{
  # Runs one imputation experiment: applies `method` ("lwd", "hot", "rf",
  # "mi"; "em" is a placeholder) to the i-th missing data set (an index
  # into the globally defined missing.files) and writes the five summary
  # tables produced by Summarize.Imputation() into method.dir.
  missing.file <- missing.files[[i]]
  outp <- paste( method.dir, missing.file, "/", sep="/")
  dir.create(outp)

  # Write the five standard output components to
  # "<method.dir>/<missing.file> <stem>.csv".  Extracted to remove the
  # five write.csv lines previously duplicated in every method branch.
  write.outputs <- function(output) {
    stems <- c(bias.coefficients    = "bias.coefficients",
               t.coefficient.change = "t.coefficient",
               bias.r.squared       = "bias.r.squared",
               t.r.squared          = "t.r.squared",
               runtime              = "runtime")
    for (field in names(stems)) {
      write.csv(output[[field]],
                file = paste(method.dir, "/", missing.file, " ",
                             stems[[field]], ".csv", sep = ""))
    }
  }

  if(method == "lwd")
  {
    # Listwise deletion: "imputation" simply drops incomplete rows.
    output <- Summarize.Imputation(
      original.frame = not.missing,
      variable.to.remove = missing.variables[[i]],
      input.set = read.csv(paste(missing.file, ".csv", sep="")),
      technique = function(x, model.formula){ na.omit(x) },
      impute.file.prefix = outp,
      graph.file.prefix = outp,
      Categoricals.As = "bool",
      Logicals.As = "bool",
      model.formula = "LnVarietyScore2~usrUrban+usrSuburban+age1+p25+y1v19",
      summary.provided.by.technique = FALSE
    )
    write.outputs(output)
  }
  if(method == "hot")
  {
    # Hot-deck style imputation via rrp.impute (k = 5 neighbours).
    output <- Summarize.Imputation(not.missing, missing.variables[[i]], read.csv(paste(missing.file, ".csv", sep="")), function(x){a <- rrp.impute( x, k=5); a$new.data }, paste("HOT", missing.file) )
    write.outputs(output)
  }
  if(method == "rf")
  {
    # Random-forest imputation (rfImpute, 500 trees).
    output <- Summarize.Imputation(not.missing, missing.variables[[i]], read.csv(paste(missing.file, ".csv", sep="")), function(x){ rfImpute(LnVarietyScore2~usroriginalnonemissing+age1+p25+y1v19, x, ntree=500) }, paste("RF", missing.file) )
    write.outputs(output)
  }
  if(method == "mi")
  {
    # Multiple imputation via the mi package (5 imputations, 30 iterations).
    output <- Summarize.Imputation(
      original.frame = not.missing,
      variable.to.remove = missing.variables[[i]],
      input.set = read.csv(paste(missing.file, ".csv", sep="")),
      technique = function(x, model.formula){ mi.data.frame( mi(x, mi.info(x, .999), n.imp=5, n.iter = 30, add.noise=FALSE ) ) },
      graph.file.prefix = paste("MI", missing.file),
      summary.provided.by.technique = FALSE,
      Categoricals.As = "bool",
      Logicals.As = "bool",
      model.formula = "LnVarietyScore2~usrUrban+usrSuburban+age1+p25+y1v19"
    )
    write.outputs(output)
  }
  if(method == "em")
  {
    # EM-based imputation: not implemented yet.
  }
}
### Dispatch: split the 27 missing-file indices across `cores` worker
### processes; this process executes only the slice for `current.core`.
### The hard-coded ranges reproduce the original hand-balanced splits.
if (cores == 1) {
  # Single core runs every missing data set in sequence.  seq_along()
  # (rather than 1:length()) is safe when missing.files is empty.
  for (i in seq_along(missing.files)) { execute(i, mtd) }
} else {
  core.ranges <- switch(as.character(cores),
    "2"  = list(1:13, 14:27),
    "4"  = list(1:7, 8:14, 15:21, 22:27),
    "8"  = list(1:3, 4:7, 8:10, 11:13, 14:17, 18:20, 21:23, 24:27),
    "16" = list(1:1, 2:3, 4:5, 6:7, 8:8, 9:10, 11:12, 13:13,
                14:15, 16:17, 18:19, 20:20, 21:22, 23:24, 25:25, 26:27),
    NULL)  # unsupported core counts run nothing, as before
  if (!is.null(core.ranges) && current.core %in% seq_along(core.ranges)) {
    for (i in core.ranges[[current.core]]) { execute(i, mtd) }
  }
}
#Check.Missingness <- function(x) {
# input.set <- read.csv(paste(x, ".csv", sep=""))
# summaries <- sapply(input.set, summary)
# lengths <- sapply(input.set, length)
# (summaries["NA's",]/lengths)*100
#}
#Get.Missingness.Level <- function(x) {
# fred <- strsplit(x, "\\.")
# as.numeric(fred[[1]][length(fred[[1]])])
#}
#nominal <- sapply(missing.files, Get.Missingness.Level)
#actual <- sapply(missing.files, Check.Missingness)
#nominal - actual
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seasFolder.R
\name{seasFolder}
\alias{seasFolder}
\title{Get path to folder with seasonality analysis + data}
\usage{
seasFolder(..., warnonly = FALSE, path = NA)
}
\arguments{
\item{\dots}{Optional filename(s) that will be appended.}
\item{warnonly}{Logical: only warn instead of stopping if files are not found.
NA to not check file existence. DEFAULT: FALSE}
\item{path}{Character: path to search in. DEFAULT: NA}
}
\value{
Path(s)
}
\description{
Get path to (files in) the folder with seasonality analysis + data
}
\examples{
seasFolder()
seasFolder("dummy.txt", warnonly=TRUE)
stopifnot(length(seasFolder("dummy.txt", "dude.xyz", warnonly=TRUE))==2)
}
\author{
Berry Boessenkool, \email{berry-b@gmx.de}, Jun 2017
}
\keyword{file}
|
/man/seasFolder.Rd
|
no_license
|
kbSSR/rfs
|
R
| false
| true
| 818
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seasFolder.R
\name{seasFolder}
\alias{seasFolder}
\title{Get path to folder with seasonality analysis + data}
\usage{
seasFolder(..., warnonly = FALSE, path = NA)
}
\arguments{
\item{\dots}{Optional filename(s) that will be appended.}
\item{warnonly}{Logical: only warn instead of stopping if files are not found.
NA to not check file existence. DEFAULT: FALSE}
\item{path}{Character: path to search in. DEFAULT: NA}
}
\value{
Path(s)
}
\description{
Get path to (files in) the folder with seasonality analysis + data
}
\examples{
seasFolder()
seasFolder("dummy.txt", warnonly=TRUE)
stopifnot(length(seasFolder("dummy.txt", "dude.xyz", warnonly=TRUE))==2)
}
\author{
Berry Boessenkool, \email{berry-b@gmx.de}, Jun 2017
}
\keyword{file}
|
library(RPostgreSQL)
# NOTE(review): database credentials are hard-coded in source; they should be
# moved to environment variables or a config file kept out of version control.
con <- dbConnect(PostgreSQL(), user="manjarid", password="fath1Heim",
                 dbname="manjarid", host="sculptor.stat.cmu.edu")
# Canonical neighbourhood names used below to normalise incoming data.
result <- dbSendQuery(con, "select hood from neighborhoodpolice;")
data <- dbFetch(result)
dbClearResult(result)
hoods <- unlist(data)
# Weekly blotter CSV arrives on stdin; the first 4 lines are preamble.
datafile <- read.csv("stdin", header = TRUE, skip = 4)
# Report rows containing missing values, one report line per affected column.
write.table(NULL, file = "erroroutfile.txt", row.names = FALSE, col.names = FALSE)
for (j in seq_len(ncol(datafile))) {
  missing.rows <- which(is.na(datafile[, j]))
  if (length(missing.rows) > 0) {
    cat("Rows with missing value in column ", file = "erroroutfile.txt", append = TRUE)
    cat(j, file = "erroroutfile.txt", append = TRUE)
    cat(" : ", file = "erroroutfile.txt", append = TRUE)
    cat(missing.rows, file = "erroroutfile.txt", append = TRUE)
    cat('\n', file = "erroroutfile.txt", append = TRUE)
  }
}
# Clear current blotter contents.  dbExecute() runs the statement and frees
# the result in one step (the original leaked result objects).
dbExecute(con, "Delete from blotter;")
colnames(datafile) <- c("_id", "report_name", "section", "description", "arrest_time", "address", "neighborhood", "zone")
# Rebuild the staging table from scratch.
if (dbExistsTable(con, "blottertemp")) {
  dbRemoveTable(con, "blottertemp")
}
dbWriteTable(con, name = "blottertemp", value = datafile, row.names = FALSE)
# Normalise neighbourhood names, matching in either direction with ILIKE.
# FIX: the pattern was FORMAT('%%s%%', ...), which Postgres renders as the
# literal '%s%' (matching any value containing "s"); '%%%s%%' renders as
# '%<hood>%' as intended.  Quotes in hood names are doubled so the
# string-built SQL stays valid (values originate from our own DB).
for (hood in hoods) {
  hood.sql <- gsub("'", "''", hood)
  dbExecute(con, paste("UPDATE blottertemp SET NEIGHBORHOOD = '", hood.sql, "' WHERE NEIGHBORHOOD ILIKE FORMAT('%%%s%%', '", hood.sql, "') or '", hood.sql, "' ilike format ('%%%s%%', neighborhood);", sep = ''))
}
# Coerce staging columns to match the weeklycrime schema.
dbExecute(con, "ALTER table blottertemp alter column report_name type report_type USING report_name::report_type;")
dbExecute(con, "ALTER table blottertemp alter column arrest_time type timestamp USING arrest_time::timestamp;")
dbExecute(con, "ALTER table blottertemp alter column _id type integer USING _id::integer;")
dbExecute(con, "ALTER table blottertemp alter column zone type integer USING zone::integer;")
# Append only rows whose _id is not already present in weeklycrime.
dbExecute(con, "INSERT INTO weeklycrime (SELECT blottertemp.* from blottertemp LEFT OUTER JOIN weeklycrime on weeklycrime._id = blottertemp._id where weeklycrime.* IS NULL);")
dbDisconnect(con)
|
/crime_data/ingest_data.R
|
no_license
|
mqnjqrid/assignments
|
R
| false
| false
| 2,757
|
r
|
library(RPostgreSQL)
# NOTE(review): database credentials are hard-coded in source; they should be
# moved to environment variables or a config file kept out of version control.
con <- dbConnect(PostgreSQL(), user="manjarid", password="fath1Heim",
                 dbname="manjarid", host="sculptor.stat.cmu.edu")
# Canonical neighbourhood names used below to normalise incoming data.
result <- dbSendQuery(con, "select hood from neighborhoodpolice;")
data <- dbFetch(result)
dbClearResult(result)
hoods <- unlist(data)
# Weekly blotter CSV arrives on stdin; the first 4 lines are preamble.
datafile <- read.csv("stdin", header = TRUE, skip = 4)
# Report rows containing missing values, one report line per affected column.
write.table(NULL, file = "erroroutfile.txt", row.names = FALSE, col.names = FALSE)
for (j in seq_len(ncol(datafile))) {
  missing.rows <- which(is.na(datafile[, j]))
  if (length(missing.rows) > 0) {
    cat("Rows with missing value in column ", file = "erroroutfile.txt", append = TRUE)
    cat(j, file = "erroroutfile.txt", append = TRUE)
    cat(" : ", file = "erroroutfile.txt", append = TRUE)
    cat(missing.rows, file = "erroroutfile.txt", append = TRUE)
    cat('\n', file = "erroroutfile.txt", append = TRUE)
  }
}
# Clear current blotter contents.  dbExecute() runs the statement and frees
# the result in one step (the original leaked result objects).
dbExecute(con, "Delete from blotter;")
colnames(datafile) <- c("_id", "report_name", "section", "description", "arrest_time", "address", "neighborhood", "zone")
# Rebuild the staging table from scratch.
if (dbExistsTable(con, "blottertemp")) {
  dbRemoveTable(con, "blottertemp")
}
dbWriteTable(con, name = "blottertemp", value = datafile, row.names = FALSE)
# Normalise neighbourhood names, matching in either direction with ILIKE.
# FIX: the pattern was FORMAT('%%s%%', ...), which Postgres renders as the
# literal '%s%' (matching any value containing "s"); '%%%s%%' renders as
# '%<hood>%' as intended.  Quotes in hood names are doubled so the
# string-built SQL stays valid (values originate from our own DB).
for (hood in hoods) {
  hood.sql <- gsub("'", "''", hood)
  dbExecute(con, paste("UPDATE blottertemp SET NEIGHBORHOOD = '", hood.sql, "' WHERE NEIGHBORHOOD ILIKE FORMAT('%%%s%%', '", hood.sql, "') or '", hood.sql, "' ilike format ('%%%s%%', neighborhood);", sep = ''))
}
# Coerce staging columns to match the weeklycrime schema.
dbExecute(con, "ALTER table blottertemp alter column report_name type report_type USING report_name::report_type;")
dbExecute(con, "ALTER table blottertemp alter column arrest_time type timestamp USING arrest_time::timestamp;")
dbExecute(con, "ALTER table blottertemp alter column _id type integer USING _id::integer;")
dbExecute(con, "ALTER table blottertemp alter column zone type integer USING zone::integer;")
# Append only rows whose _id is not already present in weeklycrime.
dbExecute(con, "INSERT INTO weeklycrime (SELECT blottertemp.* from blottertemp LEFT OUTER JOIN weeklycrime on weeklycrime._id = blottertemp._id where weeklycrime.* IS NULL);")
dbDisconnect(con)
|
##################################################################################################################
######### WHO 2007 #####
######### Department of Nutrition for Health and Development #####
######### World Health Organization #####
######### Last modified on 08/10/2013 - Developed using R version 3.0.1 (2013-05-16) #####
######### This code concerns the calculation of prevalences using all valid z-scores (non-missing) #####
######### for three indicators: weight-for-age (5 to 10 years), height-for-age (5 to 19 years) and BMI-for- #####
######### age (5 to 19 years) based on the WHO 2007 references. #####
######### Exact age must be given in months (no rounding necessary), height in centimeters and weight in #####
######### kilograms. #####
##################################################################################################################
##################################################################################################################
##################################################################################################################
######### Functions for calculating the z-scores and prevalences for a nutritional survey #####
##################################################################################################################
########################################################################
#### Auxiliar functions
########################################################################
#############################################################################
##### Prevalence calculation for the upper bound and corresponding 95% C.I.
#############################################################################
prevph.L <- function(a,x,w) {
  # Weighted prevalence of z-scores ABOVE cut-off `a`, without oedema
  # handling (used for height-for-age).  Returns c(weighted N,
  # prevalence %, lower 95% CI %, upper 95% CI %), rounded via rounde().
  n.eff <- sum((!is.na(x)) * w, na.rm = TRUE)      # weighted non-missing N
  prop <- sum((x > a) * w, na.rm = TRUE) / n.eff   # weighted proportion above a
  # 95% CI half-width with a continuity correction of 1/(2N).
  half <- 1.96 * sqrt(prop * (1 - prop) / n.eff) + 1 / (2 * n.eff)
  c(rounde(n.eff, digits = 0),
    rounde(c(prop, max(0, prop - half), min(1, prop + half)) * 100, digits = 1))
}
#### With oedema (only for weight-for-age and bmi-for-age)
prevph <- function(a,x,w,f) {
  # Weighted prevalence of z-scores ABOVE cut-off `a` with oedema handling
  # (weight-for-age and BMI-for-age): oedema cases (f == "y") count in the
  # denominator even when their z-score is missing, but never in the
  # numerator.  Returns c(weighted N, prev %, lower CI %, upper CI %).
  f <- as.character(f)
  in.denom <- (!is.na(x)) | (f == "y")
  n.eff <- sum(in.denom * w, na.rm = TRUE)
  prop <- sum((x > a) * w, na.rm = TRUE) / n.eff
  # 95% CI half-width with a continuity correction of 1/(2N).
  half <- 1.96 * sqrt(prop * (1 - prop) / n.eff) + 1 / (2 * n.eff)
  c(rounde(n.eff, digits = 0),
    rounde(c(prop, max(0, prop - half), min(1, prop + half)) * 100, digits = 1))
}
#############################################################################
##### Prevalence calculation for the lower bound and corresponding 95% C.I.
#############################################################################
#### Without oedema (for height-for-age)
prevnh.L <- function(a,x,w) {
  # Weighted prevalence of z-scores BELOW cut-off `a`, without oedema
  # handling (used for height-for-age).  Returns c(weighted N,
  # prevalence %, lower 95% CI %, upper 95% CI %), rounded via rounde().
  n.eff <- sum((!is.na(x)) * w, na.rm = TRUE)      # weighted non-missing N
  prop <- sum((x < a) * w, na.rm = TRUE) / n.eff   # weighted proportion below a
  # 95% CI half-width with a continuity correction of 1/(2N).
  half <- 1.96 * sqrt(prop * (1 - prop) / n.eff) + 1 / (2 * n.eff)
  c(rounde(n.eff, digits = 0),
    rounde(c(prop, max(0, prop - half), min(1, prop + half)) * 100, digits = 1))
}
#### With oedema (for all weight-related indicators)
prevnh <- function(a,x,w,f) {
  # Weighted prevalence of z-scores BELOW cut-off `a` with oedema handling
  # (all weight-related indicators): oedema cases (f == "y") count as below
  # the cut-off regardless of their z-score, and also enter the denominator.
  # Returns c(weighted N, prev %, lower CI %, upper CI %).
  f <- as.character(f)
  in.denom <- (!is.na(x)) | (f == "y")
  n.eff <- sum(in.denom * w, na.rm = TRUE)
  prop <- sum((x < a | f == "y") * w, na.rm = TRUE) / n.eff
  # 95% CI half-width with a continuity correction of 1/(2N).
  half <- 1.96 * sqrt(prop * (1 - prop) / n.eff) + 1 / (2 * n.eff)
  c(rounde(n.eff, digits = 0),
    rounde(c(prop, max(0, prop - half), min(1, prop + half)) * 100, digits = 1))
}
###########################################
#### Weighted mean and standard deviation
###########################################
# Weighted mean of the non-missing values of x, rounded to 2 d.p.
# FIX: the denominator was sum(w[!is.na(x)]) without na.rm, which returns NA
# whenever a weight paired with a non-missing x is NA; it now matches wsd()'s
# NA-safe form sum((!is.na(x))*w, na.rm=TRUE) (identical when no weight is NA).
wmean <- function(x,w) { return(rounde(sum(x*w,na.rm=T)/sum((!is.na(x))*w,na.rm=T),digits=2) ) }
wsd <- function(x,w) {
  # Weighted standard deviation of the non-missing values of x, rounded to
  # 2 d.p.; NA when there are no non-missing observations.
  n.eff <- sum((!is.na(x)) * w, na.rm = TRUE)
  w.mean <- sum(x * w, na.rm = TRUE) / n.eff
  if (length(x[!is.na(x)]) > 0) {
    rounde(sqrt(sum(((x - w.mean)^2) * w, na.rm = TRUE) / (n.eff - 1)), digits = 2)
  } else {
    NA
  }
}
###########################################################################################
#### Rounding function - SPlus default rounding function uses the nearest even number rule
###########################################################################################
rounde <- function(x,digits=0) {
  # Round half AWAY from zero (SPlus/SAS convention), unlike base round()'s
  # round-half-to-even.  Vectorised over x; `digits` selects the decimal place.
  scale <- 10^digits
  shifted <- abs(x * scale)
  whole <- floor(shifted)
  # A fractional part of 0.5 or more rounds the magnitude up.
  magnitude <- ifelse(shifted - whole >= 0.5, whole + 1, whole)
  sign(x * scale) * magnitude / scale
}
######################################################################################
### Function for calculating individual height-for-age z-scores
######################################################################################
calc.zhfa<-function(mat,hfawho2007){
  # Computes the WHO 2007 height-for-age z-score (zhfa) for every row of
  # `mat`, using the sex- and age-specific LMS reference values in
  # `hfawho2007`.  Valid age range: 61 to <229 completed months; rows with
  # missing or out-of-range age get NA.
  lms <- function(param, age.mo, sex) {
    hfawho2007[[param]][hfawho2007$age == age.mo & hfawho2007$sex == sex]
  }
  for (row in seq_along(mat$age.mo)) {
    age <- mat$age.mo[row]
    if (!is.na(age) && age >= 61 && age < 229) {
      lower <- trunc(age)
      upper <- trunc(age + 1)
      frac <- age - lower
      if (frac > 0) {
        # Linear interpolation of L, M, S between bracketing month rows.
        l.val <- lms("l", lower, mat$sex[row]) + frac * (lms("l", upper, mat$sex[row]) - lms("l", lower, mat$sex[row]))
        m.val <- lms("m", lower, mat$sex[row]) + frac * (lms("m", upper, mat$sex[row]) - lms("m", lower, mat$sex[row]))
        s.val <- lms("s", lower, mat$sex[row]) + frac * (lms("s", upper, mat$sex[row]) - lms("s", lower, mat$sex[row]))
      } else {
        l.val <- lms("l", lower, mat$sex[row])
        m.val <- lms("m", lower, mat$sex[row])
        s.val <- lms("s", lower, mat$sex[row])
      }
      # LMS transformation: z = ((height/M)^L - 1) / (S * L)
      mat$zhfa[row] <- (((mat$height[row] / m.val)^l.val) - 1) / (s.val * l.val)
    } else {
      mat$zhfa[row] <- NA
    }
  }
  return(mat)
}
######################################################################################
### Function for calculating individual weight-for-age z-scores
######################################################################################
### Computes the WHO 2007 weight-for-age z-score (zwfa) for each row of `mat`
### from the sex/age-specific LMS reference values in `wfawho2007`.  Valid
### only for ages 61 to <121 completed months; rows outside that range, with
### missing age, or flagged with oedema ("y") receive NA.
calc.zwei<-function(mat,wfawho2007){
for(i in 1:length(mat$age.mo)) {
if(!is.na(mat$age.mo[i]) & mat$age.mo[i]>=61 & mat$age.mo[i]<121 & mat$oedema[i]!="y") {
### Interpolated l,m,s values
### Linear interpolation of L, M, S between the two bracketing whole-month
### reference rows (no interpolation needed when age is a whole month).
low.age<-trunc(mat$age.mo[i])
upp.age<-trunc(mat$age.mo[i]+1)
diff.age<-(mat$age.mo[i]-low.age)
if(diff.age>0) {
l.val<-wfawho2007$l[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]+diff.age*( wfawho2007$l[wfawho2007$age==upp.age & wfawho2007$sex==mat$sex[i]]-wfawho2007$l[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]] )
m.val<-wfawho2007$m[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]+diff.age*( wfawho2007$m[wfawho2007$age==upp.age & wfawho2007$sex==mat$sex[i]]-wfawho2007$m[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]] )
s.val<-wfawho2007$s[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]+diff.age*( wfawho2007$s[wfawho2007$age==upp.age & wfawho2007$sex==mat$sex[i]]-wfawho2007$s[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]] )
} else {
l.val<-wfawho2007$l[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]
m.val<-wfawho2007$m[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]
s.val<-wfawho2007$s[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]
}
### LMS transformation: z = ((weight/M)^L - 1) / (S * L)
mat$zwfa[i]<-(((mat$weight[i]/m.val)^l.val)-1)/(s.val*l.val)
### Beyond +3 SD the z-score is recomputed on a linear scale anchored at the
### +3 SD weight, using the +2 to +3 SD distance as the unit step.
if(!is.na(mat$zwfa[i]) & mat$zwfa[i]>3) {
sd3pos<- m.val*((1+l.val*s.val*3)^(1/l.val))
sd23pos<- sd3pos- m.val*((1+l.val*s.val*2)^(1/l.val))
mat$zwfa[i]<- 3+((mat$weight[i]-sd3pos)/sd23pos)
}
### Mirrored linear rescaling below -3 SD.
if(!is.na(mat$zwfa[i]) & mat$zwfa[i]< (-3)) {
sd3neg<- m.val*((1+l.val*s.val*(-3))**(1/l.val))
sd23neg<- m.val*((1+l.val*s.val*(-2))**(1/l.val))-sd3neg
mat$zwfa[i]<- (-3)+((mat$weight[i]-sd3neg)/sd23neg)
}
} else mat$zwfa[i]<-NA
}
return(mat)
}
######################################################################################
### Function for calculating individual BMI-for-age z-scores
######################################################################################
### Computes the WHO 2007 BMI-for-age z-score (zbfa) for each row of `mat`
### (BMI pre-computed in mat$cbmi) from the sex/age-specific LMS reference
### values in `bfawho2007`.  Valid only for ages 61 to <229 completed months;
### rows outside that range, with missing age, or flagged with oedema ("y")
### receive NA.
calc.zbmi<-function(mat,bfawho2007){
for(i in 1:length(mat$age.mo)) {
if(!is.na(mat$age.mo[i]) & mat$age.mo[i]>=61 & mat$age.mo[i]<229 & mat$oedema[i]!="y") {
### Interpolated l,m,s values
### Linear interpolation of L, M, S between the two bracketing whole-month
### reference rows (no interpolation needed when age is a whole month).
low.age<-trunc(mat$age.mo[i])
upp.age<-trunc(mat$age.mo[i]+1)
diff.age<-(mat$age.mo[i]-low.age)
if(diff.age>0) {
l.val<-bfawho2007$l[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]+diff.age*( bfawho2007$l[bfawho2007$age==upp.age & bfawho2007$sex==mat$sex[i]]-bfawho2007$l[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]] )
m.val<-bfawho2007$m[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]+diff.age*( bfawho2007$m[bfawho2007$age==upp.age & bfawho2007$sex==mat$sex[i]]-bfawho2007$m[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]] )
s.val<-bfawho2007$s[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]+diff.age*( bfawho2007$s[bfawho2007$age==upp.age & bfawho2007$sex==mat$sex[i]]-bfawho2007$s[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]] )
} else {
l.val<-bfawho2007$l[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]
m.val<-bfawho2007$m[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]
s.val<-bfawho2007$s[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]
}
### LMS transformation: z = ((BMI/M)^L - 1) / (S * L)
mat$zbfa[i]<-(((mat$cbmi[i]/m.val)^l.val)-1)/(s.val*l.val)
### Beyond +3 SD the z-score is recomputed on a linear scale anchored at the
### +3 SD BMI, using the +2 to +3 SD distance as the unit step.
if(!is.na(mat$zbfa[i]) & mat$zbfa[i]>3) {
sd3pos<- m.val*((1+l.val*s.val*3)^(1/l.val))
sd23pos<- sd3pos- m.val*((1+l.val*s.val*2)^(1/l.val))
mat$zbfa[i]<- 3+((mat$cbmi[i]-sd3pos)/sd23pos)
}
### Mirrored linear rescaling below -3 SD.
if(!is.na(mat$zbfa[i]) & mat$zbfa[i]< (-3)) {
sd3neg<- m.val*((1+l.val*s.val*(-3))**(1/l.val))
sd23neg<- m.val*((1+l.val*s.val*(-2))**(1/l.val))-sd3neg
mat$zbfa[i]<- (-3)+((mat$cbmi[i]-sd3neg)/sd23neg)
}
} else mat$zbfa[i]<-NA
}
return(mat)
}
###################################################################################
#### Main function starts here: who2007
###################################################################################
###############################################################################################################################################
#### This function can be used to:
#### 1. Calculate the z-scores for the indicators: height-for-age, weight-for-age and body mass index-for-age
#### The output file with z-scores values is exported the file to an Excel spreadsheet (see readme file);
#### 2. Calculate the prevalence rates of stunting, underweight, wasting and overweight, and z-scores means and standard deviations. Results
#### are exported to an Excel spreadsheet, displayed by age group.
###############################################################################################################################################
#############################################################################
##### Function for calculating the z-scores for all indicators
#############################################################################
who2007 <- function(FileLab="Temp",FilePath=getwd(),mydf,sex,age,weight,height,oedema=rep("n",dim(mydf)[1]),sw=rep(1,dim(mydf)[1])) {
#############################################################################
########### Calculating the z-scores for all indicators
#############################################################################
old <- options(warn=(-1))
# sex.x<-as.character(get(deparse(substitute(mydf)))[,deparse(substitute(sex))])
# age.x<-as.double(get(deparse(substitute(mydf)))[,deparse(substitute(age))])
# weight.x<-as.double(get(deparse(substitute(mydf)))[,deparse(substitute(weight))])
# height.x<-as.double(get(deparse(substitute(mydf)))[,deparse(substitute(height))])
# if(!missing(oedema)) oedema.vec<-as.character(get(deparse(substitute(mydf)))[,deparse(substitute(oedema))]) else oedema.vec<-oedema
# if(!missing(sw)) sw<-as.double(get(deparse(substitute(mydf)))[,deparse(substitute(sw))]) else sw<-as.double(sw)
sex.x<-as.character(mydf[[sex]])
age.x<-as.double(mydf[[age]])
weight.x<-as.double(mydf[[weight]])
height.x<-as.double(mydf[[height]])
if(!missing(oedema)) oedema.vec<-as.character(mydf[[oedema]]) else oedema.vec<-oedema
if(!missing(sw)) sw<-as.double(mydf[[sw]]) else sw<-as.double(sw)
sw<-ifelse(is.na(sw),0,sw)
sex.vec<-NULL
sex.vec<-ifelse(sex.x!="NA" & (sex.x=="m" | sex.x=="M" | sex.x=="1"),1,ifelse(sex.x!="NA" & (sex.x=="f" | sex.x=="F" | sex.x=="2"),2,NA))
age.vec<-age.x
height.vec<-height.x
oedema.vec<-ifelse(oedema.vec=="n" | oedema.vec=="N","n",ifelse(oedema.vec=="y" | oedema.vec=="Y","y","n"))
mat<-cbind.data.frame(age.x,as.double(sex.vec),weight.x,height.x,oedema.vec,sw,stringsAsFactors=F)
names(mat)<-c("age.mo","sex","weight","height","oedema","sw")
mat$cbmi<-mat$weight/((height.vec/100)^2)
mat$zhfa<-NULL
mat$fhfa<-NULL
mat$zwfa<-NULL
mat$fwfa<-NULL
mat$zbfa<-NULL
mat$fbfa<-NULL
#############################################################################
########### Calculating the z-scores for all indicators
#############################################################################
# cat("Please wait while calculating z-scores...\n")
### Height-for-age z-score
mat<-calc.zhfa(mat,hfawho2007)
### Weight-for-age z-score
mat<-calc.zwei(mat,wfawho2007)
### BMI-for-age z-score
mat<-calc.zbmi(mat,bfawho2007)
#### Rounding the z-scores to two decimals
mat$zhfa<-rounde(mat$zhfa,digits=2)
mat$zwfa<-rounde(mat$zwfa,digits=2)
mat$zbfa<-rounde(mat$zbfa,digits=2)
#### Flagging z-score values for individual indicators
mat$fhfa<-ifelse(abs(mat$zhfa) > 6,1,0)
mat$fwfa<-ifelse(mat$zwfa > 5 | mat$zwfa < (-6),1,0)
mat$fbfa<-ifelse(abs(mat$zbfa) > 5,1,0)
if(is.na(mat$age.mo) & mat$oedema=="y") {
mat$fhfa<-NA
mat$zwfa<-NA
mat$zbfa<-NA
}
mat<-cbind.data.frame(mydf,mat[,-c(2:6)])
###################################################################################################
######### Export data frame with z-scores and flag variables
###################################################################################################
return(mat)
# assign("matz",mat,envir = .GlobalEnv)
# write.table(matz, file=paste(FilePath,"\\",FileLab,"_z.csv",sep=""),na="",row.names = FALSE,sep=",",quote = TRUE)
# cat(paste("Z-scores calculated and exported to ",FilePath,"\\",FileLab,"_z.csv\n\n",sep=""))
#######################################################################################################
#### Calculating prevalences and summary statistics.
#######################################################################################################
if(any(sw <0)) stop("Negative weights are not allowed and program will stop. Prevalence tables will not be produced.")
mat.out<-mat
mat.out$sw.vec<-sw
mat.out$sex.vec<-as.double(sex.vec)
mat.out$oedema.vec<-as.character(oedema.vec)
mat.out$oedema.vec1<-as.character(oedema.vec)
mat.out<-mat.out[!is.na(mat.out$age.mo) & mat.out$age.mo>=61 & mat.out$age.mo<229,]
####################################################
#### Creating age group variable in completed years
####################################################
mat.out$agegr <- floor(mat.out$age.mo/12)
##############################################
#### Make z-score as missing if it is flagged
##############################################
mat.out$zhfa<-ifelse(!is.na(mat.out$fhfa) & mat.out$fhfa!=0,NA,mat.out$zhfa)
mat.out$zwfa<-ifelse(!is.na(mat.out$fwfa) & mat.out$fwfa!=0,NA,mat.out$zwfa)
mat.out$zbfa<-ifelse(!is.na(mat.out$fbfa) & mat.out$fbfa!=0,NA,mat.out$zbfa)
if(dim(mat.out)[1]==0) stop("\n\nNo non-missing z-score values are available for calculating prevalences. Program will stop!\n\n.")
##############################################
#### Include all levels of age group variable
##############################################
mat.aux<-as.data.frame(cbind(array(rep(NA,((dim(mat.out)[2]-1)*15)),dim=c(15,(dim(mat.out)[2]-1)) ),seq(5,19,1)))
names(mat.aux)<-names(mat.out)
mat.out<-rbind(mat.out,mat.aux)
cat(" starting ")
########################################################################################################
#### Make Oedema variable to be "n" if age smaller than 61 mo or greater than 120 mo (for weight-for-age)
#### or smaller than 61 mo or greater than 228 mo (for bmi-for-age).
#### This is because children with oedema counts in the prevalence even if z-score is missing
#### for weight related indicators.
########################################################################################################
mat.out$oedema.vec<-ifelse((!is.na(mat.out$age.mo) & (mat.out$age.mo<61 | mat.out$age.mo>=229)) | mat.out$oedema.vec=="NA","n",mat.out$oedema.vec)
mat.out$oedema.vec1<-ifelse((!is.na(mat.out$age.mo) & (mat.out$age.mo<61 | mat.out$age.mo>=121)) | mat.out$oedema.vec=="NA","n",mat.out$oedema.vec)
#####################################################################################################################################################
#### Creating matrix with estimated prevalences, confidence intervals, and means and standard deviations of z-scores and exporting it to Excel file.
#####################################################################################################################################################
cat("\nPlease wait while calculating prevalences and z-score summary statistics...\n")
#### Sexes combined
#### % < -3 SD for all the indicators
mat<- t(cbind.data.frame(#
prevnh(-3,mat.out$zwfa,mat.out$sw.vec,mat.out$oedema.vec1),lapply(split(mat.out, mat.out$agegr),function(z) prevnh(-3, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevnh.L(-3,mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr), function(z) prevnh.L(-3, z$zhfa, z$sw.vec)),#
prevnh(-3,mat.out$zbfa,mat.out$sw.vec,mat.out$oedema.vec),lapply(split(mat.out, mat.out$agegr),function(z) prevnh(-3, z$zbfa, z$sw.vec, z$oedema.vec))))
#### % < -2 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevnh(-2,mat.out$zwfa,mat.out$sw.vec,mat.out$oedema.vec1),lapply(split(mat.out, mat.out$agegr),function(z) prevnh(-2, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevnh.L(-2,mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr), function(z) prevnh.L(-2, z$zhfa, z$sw.vec)),#
prevnh(-2,mat.out$zbfa,mat.out$sw.vec,mat.out$oedema.vec),lapply(split(mat.out, mat.out$agegr),function(z) prevnh(-2, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +1 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(1,mat.out$zwfa,mat.out$sw.vec,mat.out$oedema.vec1),lapply(split(mat.out, mat.out$agegr),function(z) prevph(1, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(1,mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr), function(z) prevph.L(1, z$zhfa, z$sw.vec)),#
prevph(1,mat.out$zbfa,mat.out$sw.vec,mat.out$oedema.vec),lapply(split(mat.out, mat.out$agegr),function(z) prevph(1, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +2 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(2,mat.out$zwfa,mat.out$sw.vec,mat.out$oedema.vec1),lapply(split(mat.out, mat.out$agegr),function(z) prevph(2, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(2,mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr), function(z) prevph.L(2, z$zhfa, z$sw.vec)),#
prevph(2,mat.out$zbfa,mat.out$sw.vec,mat.out$oedema.vec),lapply(split(mat.out, mat.out$agegr),function(z) prevph(2, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +3 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(3,mat.out$zwfa,mat.out$sw.vec,mat.out$oedema.vec1),lapply(split(mat.out, mat.out$agegr),function(z) prevph(3, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(3,mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr), function(z) prevph.L(3, z$zhfa, z$sw.vec)),#
prevph(3,mat.out$zbfa,mat.out$sw.vec,mat.out$oedema.vec),lapply(split(mat.out, mat.out$agegr),function(z) prevph(3, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### Means of z-scores for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
wmean(mat.out$zwfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wmean(z$zwfa,z$sw.vec)),#
wmean(mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wmean(z$zhfa,z$sw.vec)),#
wmean(mat.out$zbfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wmean(z$zbfa,z$sw.vec)))))
#### Standard deviations of z-scores for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
wsd(mat.out$zwfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wsd(z$zwfa,z$sw.vec)),#
wsd(mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wsd(z$zhfa,z$sw.vec)),#
wsd(mat.out$zbfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wsd(z$zbfa,z$sw.vec)))))
####################################################################################################################
##### Exporting matrix to Excel file
rm(mat1)
mat1<-rbind(c("Set 1:","Sexes","combined",rep("",15)),
c("Weight","-for-","age",rep("",15)),
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[1:16,],#
c("Height","-for-","age",rep("",15)),#
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[17:32,],#
c("BMI","-for-","age",rep("",15)),
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[33:48,])
for(j in 1:dim(mat1)[2]) mat1[,j]<-ifelse(mat1[,j]=="NA" | mat1[,j]=="NaN","",mat1[,j])
mat1<-cbind(c("",rep(c("","Age","5-19",as.character(5:19)),3)),mat1)
####################################################################################################################
##### For boys and girls
for(i in 1:2) {
mat.out.sex<-mat.out[!is.na(mat.out$sex.vec) & mat.out$sex.vec==i,]
mat.aux<-as.data.frame(cbind(array(rep(NA,((dim(mat.out.sex)[2]-1)*15)),dim=c(15,(dim(mat.out.sex)[2]-1)) ),seq(5,19,1)))
names(mat.aux)<-names(mat.out.sex)
mat.out.sex<-rbind.data.frame(mat.out.sex,mat.aux)
mat.out.sex$oedema.vec<-ifelse((!is.na(mat.out.sex$age.mo) & (mat.out.sex$age.mo<61 | mat.out.sex$age.mo>=229)) | mat.out.sex$oedema.vec=="NA","n",mat.out.sex$oedema.vec)
#### % < -3 SD for all the indicators
mat<- t(cbind.data.frame(#
prevnh(-3,mat.out.sex$zwfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec1),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevnh(-3, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevnh.L(-3,mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr), function(z) prevnh.L(-3, z$zhfa, z$sw.vec)),#
prevnh(-3,mat.out.sex$zbfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevnh(-3, z$zbfa, z$sw.vec, z$oedema.vec))))
#### % < -2 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevnh(-2,mat.out.sex$zwfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec1),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevnh(-2, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevnh.L(-2,mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr), function(z) prevnh.L(-2, z$zhfa, z$sw.vec)),#
prevnh(-2,mat.out.sex$zbfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevnh(-2, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +1 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(1,mat.out.sex$zwfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec1),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(1, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(1,mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr), function(z) prevph.L(1, z$zhfa, z$sw.vec)),#
prevph(1,mat.out.sex$zbfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(1, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +2 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(2,mat.out.sex$zwfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec1),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(2, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(2,mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr), function(z) prevph.L(2, z$zhfa, z$sw.vec)),#
prevph(2,mat.out.sex$zbfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(2, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +3 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(3,mat.out.sex$zwfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec1),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(3, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(3,mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr), function(z) prevph.L(3, z$zhfa, z$sw.vec)),#
prevph(3,mat.out.sex$zbfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(3, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### Means of z-scores for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
wmean(mat.out.sex$zwfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wmean(z$zwfa,z$sw.vec)),#
wmean(mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wmean(z$zhfa,z$sw.vec)),#
wmean(mat.out.sex$zbfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wmean(z$zbfa,z$sw.vec)))))
#### Standard deviations of z-scores for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
wsd(mat.out.sex$zwfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wsd(z$zwfa,z$sw.vec)),#
wsd(mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wsd(z$zhfa,z$sw.vec)),#
wsd(mat.out.sex$zbfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wsd(z$zbfa,z$sw.vec)))))
####################################################################################################################
##### Exporting matrix to Excel file
mat2<-rbind.data.frame(c(paste("Set ",i+1,":",sep=""),c("Males","Females")[i],rep("",16)),
c("Weight","-for-","age",rep("",15)),
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[1:16,],#
c("Height","-for-","age",rep("",15)),#
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[17:32,],#
c("BMI","-for-","age",rep("",15)),
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[33:48,])
for(j in 1:dim(mat2)[2]) mat2[,j]<-ifelse(mat2[,j]=="NA" | mat2[,j]=="NaN","",mat2[,j])
mat2<-cbind(c("",rep(c("","Age","5-19",as.character(5:19)),3)),mat2)
names(mat2)<-names(mat1)
mat1<-rbind(mat1,mat2)
} #### End of loop for sex
mat1<-mat1[-c(11:19,66:74,121:129),]
###################################################################################################################
######### Export table with prevalence values and their confidence intervals, and mean and SD of the z-scores
###################################################################################################################
assign("matprev",mat1,envir = .GlobalEnv)
write.table(matprev, file=paste(FilePath,"\\",FileLab,"_prev.csv",sep=""),na=" ",row.names = FALSE,col.names=F,sep=",",quote = TRUE)
cat(paste("Prevalences and z-score summary statistics calculated and exported to ",FilePath,"\\",FileLab,"_prev.csv\n",sep=""))
on.exit(options(old))
invisible()
} #### End of main function who2007
# wfawho2007<-read.table("D:\\References 5-20y\\Macro R\\who2007_R\\wfawho2007.txt",header=T,sep="",skip=0)
# hfawho2007<-read.table("D:\\References 5-20y\\Macro R\\who2007_R\\hfawho2007.txt",header=T,sep="",skip=0)
# bfawho2007<-read.table("D:\\References 5-20y\\Macro R\\who2007_R\\bfawho2007.txt",header=T,sep="",skip=0)
# survey.who2007<-read.csv("D:\\References 5-20y\\Macro R\\who2007_R\\survey_who2007.csv",header=T,sep=",",skip=0,na.strings="")
# source("D:\\References 5-20y\\Macro R\\who2007_R\\who2007.r")
# who2007(FileLab = "survey_who2007", FilePath = "D:\\References 5-20y\\Macro R\\who2007_R", mydf = survey.who2007,sex = sex, age = agemons, weight = weight, height = height, sw=sw, oedema=oedema)
|
/who2007_r/who2007.r
|
no_license
|
calzzone/Dr-szbo-pedi-2
|
R
| false
| false
| 30,395
|
r
|
##################################################################################################################
######### WHO 2007 #####
######### Department of Nutrition for Health and Development #####
######### World Health Organization #####
######### Last modified on 08/10/2013 - Developed using R version 3.0.1 (2013-05-16) #####
#########   This code concerns the calculation of prevalences using all valid z-scores (non-missing)         #####
######### for three indicators: weight-for-age (5 to 10 years), height-for-age (5 to 19 years) and BMI-for- #####
######### age (5 to 19 years) based on the WHO 2007 references. #####
######### Exact age must be given in months (no rounding necessary), height in centimeters and weight in #####
######### kilograms. #####
##################################################################################################################
##################################################################################################################
##################################################################################################################
######### Functions for calculating the z-scores and prevalences for a nutritional survey #####
##################################################################################################################
########################################################################
#### Auxiliary functions
########################################################################
#############################################################################
##### Prevalence calculation for the upper bound and corresponding 95% C.I.
#############################################################################
prevph.L <- function(a, x, w) {
  ## Weighted prevalence of z-scores ABOVE cut-off `a`, with an approximate
  ## 95% confidence interval.  No oedema adjustment (used for height-for-age).
  ##
  ## a: z-score cut-off (e.g. 1, 2, 3)
  ## x: vector of z-scores (NA = missing)
  ## w: sampling weights, same length as x
  ##
  ## Returns c(N, prevalence %, lower 95% CI %, upper 95% CI %), N rounded to
  ## 0 decimals and the percentages to 1 decimal via rounde().
  ##
  ## Fixes vs. original: TRUE instead of the reassignable T; the weighted
  ## number of non-missing observations is computed once instead of four
  ## times; the redundant explicit return() is dropped.
  nwt <- sum((!is.na(x)) * w, na.rm = TRUE)  # weighted N (denominator)
  ph <- sum((x > a) * w, na.rm = TRUE) / nwt
  ## Normal-approximation half-width plus continuity correction 1/(2N).
  aux <- 1.96 * sqrt(ph * (1 - ph) / nwt) + (1 / (2 * nwt))
  ## CI bounds are clamped to the valid [0, 1] proportion range.
  c(rounde(nwt, digits = 0),
    rounde(c(ph, max(0, ph - aux), min(1, ph + aux)) * 100, digits = 1))
}
#### With oedema (only for weight-for-age and bmi-for-age)
prevph <- function(a, x, w, f) {
  ## Weighted prevalence of z-scores ABOVE cut-off `a`, with approximate 95%
  ## CI, adjusted for oedema (used for weight-for-age and BMI-for-age).
  ## Children with oedema (f == "y") have missing z-scores but are still
  ## counted in the denominator.
  ##
  ## a: z-score cut-off;  x: z-scores (NA = missing);  w: sampling weights;
  ## f: oedema flag, "y"/"n" (coerced to character).
  ##
  ## Returns c(N, prevalence %, lower 95% CI %, upper 95% CI %).
  ##
  ## Fixes vs. original: TRUE instead of T; the weighted denominator is
  ## computed once instead of four times; redundant return() dropped.
  f <- as.character(f)
  ## Weighted N: non-missing z-scores plus oedema cases.
  nwt <- sum(((!is.na(x)) | (f == "y")) * w, na.rm = TRUE)
  ph <- sum((x > a) * w, na.rm = TRUE) / nwt
  ## Normal-approximation half-width plus continuity correction 1/(2N).
  aux <- 1.96 * sqrt(ph * (1 - ph) / nwt) + (1 / (2 * nwt))
  c(rounde(nwt, digits = 0),
    rounde(c(ph, max(0, ph - aux), min(1, ph + aux)) * 100, digits = 1))
}
#############################################################################
##### Prevalence calculation for the lower bound and corresponding 95% C.I.
#############################################################################
#### Without oedema (for height-for-age)
prevnh.L <- function(a, x, w) {
  ## Weighted prevalence of z-scores BELOW cut-off `a`, with an approximate
  ## 95% confidence interval.  No oedema adjustment (used for height-for-age).
  ##
  ## a: z-score cut-off (e.g. -2, -3);  x: z-scores (NA = missing);
  ## w: sampling weights, same length as x.
  ##
  ## Returns c(N, prevalence %, lower 95% CI %, upper 95% CI %), N rounded to
  ## 0 decimals and the percentages to 1 decimal via rounde().
  ##
  ## Fixes vs. original: TRUE instead of T; the weighted N is computed once
  ## instead of four times; redundant explicit return() dropped.
  nwt <- sum((!is.na(x)) * w, na.rm = TRUE)  # weighted N (denominator)
  ph <- sum((x < a) * w, na.rm = TRUE) / nwt
  ## Normal-approximation half-width plus continuity correction 1/(2N).
  aux <- 1.96 * sqrt(ph * (1 - ph) / nwt) + (1 / (2 * nwt))
  c(rounde(nwt, digits = 0),
    rounde(c(ph, max(0, ph - aux), min(1, ph + aux)) * 100, digits = 1))
}
#### With oedema (for all weight-related indicators)
prevnh <- function(a, x, w, f) {
  ## Weighted prevalence of z-scores BELOW cut-off `a`, with approximate 95%
  ## CI, adjusted for oedema (used for all weight-related indicators).
  ## Oedema cases (f == "y") count as below the cut-off even though their
  ## z-score is NA, because (NA | TRUE) evaluates to TRUE in the numerator;
  ## they are likewise included in the denominator.
  ##
  ## a: z-score cut-off;  x: z-scores (NA = missing);  w: sampling weights;
  ## f: oedema flag, "y"/"n" (coerced to character).
  ##
  ## Returns c(N, prevalence %, lower 95% CI %, upper 95% CI %).
  ##
  ## Fixes vs. original: TRUE instead of T; the weighted denominator is
  ## computed once instead of four times; redundant return() dropped.
  f <- as.character(f)
  nwt <- sum(((!is.na(x)) | (f == "y")) * w, na.rm = TRUE)
  ph <- sum((x < a | f == "y") * w, na.rm = TRUE) / nwt
  ## Normal-approximation half-width plus continuity correction 1/(2N).
  aux <- 1.96 * sqrt(ph * (1 - ph) / nwt) + (1 / (2 * nwt))
  c(rounde(nwt, digits = 0),
    rounde(c(ph, max(0, ph - aux), min(1, ph + aux)) * 100, digits = 1))
}
###########################################
#### Weighted mean and standard deviation
###########################################
wmean <- function(x, w) {
  ## Weighted mean of z-scores x with sampling weights w, rounded to two
  ## decimals via rounde().  The denominator sums weights only where x is
  ## observed, so missing z-scores drop out of numerator and denominator
  ## alike.  Fix vs. original: TRUE instead of the reassignable T.
  rounde(sum(x * w, na.rm = TRUE) / sum(w[!is.na(x)]), digits = 2)
}
wsd <- function(x, w) {
  ## Weighted standard deviation of z-scores x (sampling weights w), rounded
  ## to two decimals via rounde(); NA when there are no non-missing values.
  ##
  ## Fixes vs. original: TRUE instead of T; scalar if()/else replaces
  ## ifelse() on a scalar condition (which needlessly evaluated the rounde()
  ## branch even when NA was returned); the weighted N is computed once.
  nwt <- sum((!is.na(x)) * w, na.rm = TRUE)  # weighted number of observations
  mh <- sum(x * w, na.rm = TRUE) / nwt       # weighted mean
  if (sum(!is.na(x)) > 0) {
    ## Weighted variance with the usual (N - 1) denominator.
    rounde(sqrt(sum(((x - mh)^2) * w, na.rm = TRUE) / (nwt - 1)), digits = 2)
  } else {
    NA
  }
}
###########################################################################################
#### Rounding function - SPlus default rounding function uses the nearest even number rule
###########################################################################################
rounde <- function(x, digits = 0) {
  ## Round half AWAY from zero, unlike base round(), which follows the
  ## IEC 60559 round-half-to-even rule.  Vectorised over x; NA propagates.
  ##
  ## x:      numeric vector to round
  ## digits: number of decimal places to keep (default 0)
  scale <- 10^digits
  shifted <- abs(x * scale)        # work on magnitudes, restore sign at the end
  whole <- floor(shifted)
  ## Bump up whenever the fractional part reaches one half.
  bump <- (shifted - whole) >= 0.5
  sign(x * scale) * (whole + bump) / scale
}
######################################################################################
### Function for calculating individual height-for-age z-scores
######################################################################################
calc.zhfa<-function(mat,hfawho2007){
## Compute individual height-for-age z-scores for children aged 61 to <229
## completed months (5-19 years), writing the result into mat$zhfa.
##
## mat        : data frame with columns age.mo (age in months), sex (1 = male,
##              2 = female), height (cm); mat$zhfa is filled in here.
## hfawho2007 : WHO 2007 height-for-age reference table with columns age,
##              sex, l, m, s (LMS parameters at integer months).
## Returns mat with mat$zhfa set (NA when age is missing or outside the
## 61-228 month window).
for(i in 1:length(mat$age.mo)) {
if(!is.na(mat$age.mo[i]) & mat$age.mo[i]>=61 & mat$age.mo[i]<229) {
### Interpolated l,m,s values
## Bracketing integer months and the fractional part of the age, used as
## the linear-interpolation weight between the two reference rows.
low.age<-trunc(mat$age.mo[i])
upp.age<-trunc(mat$age.mo[i]+1)
diff.age<-(mat$age.mo[i]-low.age)
if(diff.age>0) {
## Non-integer age: interpolate each LMS parameter linearly between the
## reference rows at low.age and upp.age, matched on the child's sex.
l.val<-hfawho2007$l[hfawho2007$age==low.age & hfawho2007$sex==mat$sex[i]]+diff.age*( hfawho2007$l[hfawho2007$age==upp.age & hfawho2007$sex==mat$sex[i]]-hfawho2007$l[hfawho2007$age==low.age & hfawho2007$sex==mat$sex[i]] )
m.val<-hfawho2007$m[hfawho2007$age==low.age & hfawho2007$sex==mat$sex[i]]+diff.age*( hfawho2007$m[hfawho2007$age==upp.age & hfawho2007$sex==mat$sex[i]]-hfawho2007$m[hfawho2007$age==low.age & hfawho2007$sex==mat$sex[i]] )
s.val<-hfawho2007$s[hfawho2007$age==low.age & hfawho2007$sex==mat$sex[i]]+diff.age*( hfawho2007$s[hfawho2007$age==upp.age & hfawho2007$sex==mat$sex[i]]-hfawho2007$s[hfawho2007$age==low.age & hfawho2007$sex==mat$sex[i]] )
} else {
## Exact integer age: take the reference row directly, no interpolation.
l.val<-hfawho2007$l[hfawho2007$age==low.age & hfawho2007$sex==mat$sex[i]]
m.val<-hfawho2007$m[hfawho2007$age==low.age & hfawho2007$sex==mat$sex[i]]
s.val<-hfawho2007$s[hfawho2007$age==low.age & hfawho2007$sex==mat$sex[i]]
}
## LMS z-score: z = ((height/M)^L - 1) / (S * L).
mat$zhfa[i]<-(((mat$height[i]/m.val)^l.val)-1)/(s.val*l.val)
} else mat$zhfa[i]<- NA
}
return(mat)
}
######################################################################################
### Function for calculating individual weight-for-age z-scores
######################################################################################
calc.zwei<-function(mat,wfawho2007){
## Compute individual weight-for-age z-scores for children aged 61 to <121
## completed months (5-10 years), writing the result into mat$zwfa.
## Children flagged with oedema get NA (their weight is not interpretable).
##
## mat        : data frame with columns age.mo, sex (1/2), weight (kg),
##              oedema ("y"/"n"); mat$zwfa is filled in here.
## wfawho2007 : WHO 2007 weight-for-age reference table with columns age,
##              sex, l, m, s (LMS parameters at integer months).
## Returns mat with mat$zwfa set (NA when age is missing, outside the
## 61-120 month window, or when oedema == "y").
for(i in 1:length(mat$age.mo)) {
if(!is.na(mat$age.mo[i]) & mat$age.mo[i]>=61 & mat$age.mo[i]<121 & mat$oedema[i]!="y") {
### Interpolated l,m,s values
## Bracketing integer months and the fractional part of the age, used as
## the linear-interpolation weight between the two reference rows.
low.age<-trunc(mat$age.mo[i])
upp.age<-trunc(mat$age.mo[i]+1)
diff.age<-(mat$age.mo[i]-low.age)
if(diff.age>0) {
## Non-integer age: interpolate each LMS parameter linearly between the
## reference rows at low.age and upp.age, matched on the child's sex.
l.val<-wfawho2007$l[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]+diff.age*( wfawho2007$l[wfawho2007$age==upp.age & wfawho2007$sex==mat$sex[i]]-wfawho2007$l[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]] )
m.val<-wfawho2007$m[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]+diff.age*( wfawho2007$m[wfawho2007$age==upp.age & wfawho2007$sex==mat$sex[i]]-wfawho2007$m[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]] )
s.val<-wfawho2007$s[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]+diff.age*( wfawho2007$s[wfawho2007$age==upp.age & wfawho2007$sex==mat$sex[i]]-wfawho2007$s[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]] )
} else {
## Exact integer age: take the reference row directly, no interpolation.
l.val<-wfawho2007$l[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]
m.val<-wfawho2007$m[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]
s.val<-wfawho2007$s[wfawho2007$age==low.age & wfawho2007$sex==mat$sex[i]]
}
## LMS z-score: z = ((weight/M)^L - 1) / (S * L).
mat$zwfa[i]<-(((mat$weight[i]/m.val)^l.val)-1)/(s.val*l.val)
## Restricted adjustment above +3: re-express the score on a linear scale
## anchored at the +3 SD value, with slope given by the gap between the
## +2 and +3 SD curves of the reference.
if(!is.na(mat$zwfa[i]) & mat$zwfa[i]>3) {
sd3pos<- m.val*((1+l.val*s.val*3)^(1/l.val))
sd23pos<- sd3pos- m.val*((1+l.val*s.val*2)^(1/l.val))
mat$zwfa[i]<- 3+((mat$weight[i]-sd3pos)/sd23pos)
}
## Symmetric restricted adjustment below -3, anchored at the -3 SD value
## with slope from the gap between the -2 and -3 SD curves.
if(!is.na(mat$zwfa[i]) & mat$zwfa[i]< (-3)) {
sd3neg<- m.val*((1+l.val*s.val*(-3))**(1/l.val))
sd23neg<- m.val*((1+l.val*s.val*(-2))**(1/l.val))-sd3neg
mat$zwfa[i]<- (-3)+((mat$weight[i]-sd3neg)/sd23neg)
}
} else mat$zwfa[i]<-NA
}
return(mat)
}
######################################################################################
### Function for calculating individual BMI-for-age z-scores
######################################################################################
calc.zbmi<-function(mat,bfawho2007){
## Compute individual BMI-for-age z-scores for children aged 61 to <229
## completed months (5-19 years), writing the result into mat$zbfa.
## Children flagged with oedema get NA (their weight-based BMI is not
## interpretable).
##
## mat        : data frame with columns age.mo, sex (1/2), cbmi (BMI computed
##              upstream as weight/height^2), oedema ("y"/"n"); mat$zbfa is
##              filled in here.
## bfawho2007 : WHO 2007 BMI-for-age reference table with columns age, sex,
##              l, m, s (LMS parameters at integer months).
## Returns mat with mat$zbfa set (NA when age is missing, outside the
## 61-228 month window, or when oedema == "y").
for(i in 1:length(mat$age.mo)) {
if(!is.na(mat$age.mo[i]) & mat$age.mo[i]>=61 & mat$age.mo[i]<229 & mat$oedema[i]!="y") {
### Interpolated l,m,s values
## Bracketing integer months and the fractional part of the age, used as
## the linear-interpolation weight between the two reference rows.
low.age<-trunc(mat$age.mo[i])
upp.age<-trunc(mat$age.mo[i]+1)
diff.age<-(mat$age.mo[i]-low.age)
if(diff.age>0) {
## Non-integer age: interpolate each LMS parameter linearly between the
## reference rows at low.age and upp.age, matched on the child's sex.
l.val<-bfawho2007$l[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]+diff.age*( bfawho2007$l[bfawho2007$age==upp.age & bfawho2007$sex==mat$sex[i]]-bfawho2007$l[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]] )
m.val<-bfawho2007$m[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]+diff.age*( bfawho2007$m[bfawho2007$age==upp.age & bfawho2007$sex==mat$sex[i]]-bfawho2007$m[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]] )
s.val<-bfawho2007$s[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]+diff.age*( bfawho2007$s[bfawho2007$age==upp.age & bfawho2007$sex==mat$sex[i]]-bfawho2007$s[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]] )
} else {
## Exact integer age: take the reference row directly, no interpolation.
l.val<-bfawho2007$l[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]
m.val<-bfawho2007$m[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]
s.val<-bfawho2007$s[bfawho2007$age==low.age & bfawho2007$sex==mat$sex[i]]
}
## LMS z-score: z = ((BMI/M)^L - 1) / (S * L).
mat$zbfa[i]<-(((mat$cbmi[i]/m.val)^l.val)-1)/(s.val*l.val)
## Restricted adjustment above +3: re-express the score on a linear scale
## anchored at the +3 SD value, with slope given by the gap between the
## +2 and +3 SD curves of the reference.
if(!is.na(mat$zbfa[i]) & mat$zbfa[i]>3) {
sd3pos<- m.val*((1+l.val*s.val*3)^(1/l.val))
sd23pos<- sd3pos- m.val*((1+l.val*s.val*2)^(1/l.val))
mat$zbfa[i]<- 3+((mat$cbmi[i]-sd3pos)/sd23pos)
}
## Symmetric restricted adjustment below -3, anchored at the -3 SD value
## with slope from the gap between the -2 and -3 SD curves.
if(!is.na(mat$zbfa[i]) & mat$zbfa[i]< (-3)) {
sd3neg<- m.val*((1+l.val*s.val*(-3))**(1/l.val))
sd23neg<- m.val*((1+l.val*s.val*(-2))**(1/l.val))-sd3neg
mat$zbfa[i]<- (-3)+((mat$cbmi[i]-sd3neg)/sd23neg)
}
} else mat$zbfa[i]<-NA
}
return(mat)
}
###################################################################################
#### Main function starts here: who2007
###################################################################################
###############################################################################################################################################
#### This function can be used to:
#### 1. Calculate the z-scores for the indicators: height-for-age, weight-for-age and body mass index-for-age
#### The output file with z-score values is exported to an Excel spreadsheet (see readme file);
#### 2. Calculate the prevalence rates of stunting, underweight, wasting and overweight, and z-scores means and standard deviations. Results
#### are exported to an Excel spreadsheet, displayed by age group.
###############################################################################################################################################
#############################################################################
##### Function for calculating the z-scores for all indicators
#############################################################################
who2007 <- function(FileLab="Temp",FilePath=getwd(),mydf,sex,age,weight,height,oedema=rep("n",dim(mydf)[1]),sw=rep(1,dim(mydf)[1])) {
#############################################################################
########### Calculating the z-scores for all indicators
#############################################################################
old <- options(warn=(-1))
# sex.x<-as.character(get(deparse(substitute(mydf)))[,deparse(substitute(sex))])
# age.x<-as.double(get(deparse(substitute(mydf)))[,deparse(substitute(age))])
# weight.x<-as.double(get(deparse(substitute(mydf)))[,deparse(substitute(weight))])
# height.x<-as.double(get(deparse(substitute(mydf)))[,deparse(substitute(height))])
# if(!missing(oedema)) oedema.vec<-as.character(get(deparse(substitute(mydf)))[,deparse(substitute(oedema))]) else oedema.vec<-oedema
# if(!missing(sw)) sw<-as.double(get(deparse(substitute(mydf)))[,deparse(substitute(sw))]) else sw<-as.double(sw)
sex.x<-as.character(mydf[[sex]])
age.x<-as.double(mydf[[age]])
weight.x<-as.double(mydf[[weight]])
height.x<-as.double(mydf[[height]])
if(!missing(oedema)) oedema.vec<-as.character(mydf[[oedema]]) else oedema.vec<-oedema
if(!missing(sw)) sw<-as.double(mydf[[sw]]) else sw<-as.double(sw)
sw<-ifelse(is.na(sw),0,sw)
sex.vec<-NULL
sex.vec<-ifelse(sex.x!="NA" & (sex.x=="m" | sex.x=="M" | sex.x=="1"),1,ifelse(sex.x!="NA" & (sex.x=="f" | sex.x=="F" | sex.x=="2"),2,NA))
age.vec<-age.x
height.vec<-height.x
oedema.vec<-ifelse(oedema.vec=="n" | oedema.vec=="N","n",ifelse(oedema.vec=="y" | oedema.vec=="Y","y","n"))
mat<-cbind.data.frame(age.x,as.double(sex.vec),weight.x,height.x,oedema.vec,sw,stringsAsFactors=F)
names(mat)<-c("age.mo","sex","weight","height","oedema","sw")
mat$cbmi<-mat$weight/((height.vec/100)^2)
mat$zhfa<-NULL
mat$fhfa<-NULL
mat$zwfa<-NULL
mat$fwfa<-NULL
mat$zbfa<-NULL
mat$fbfa<-NULL
#############################################################################
########### Calculating the z-scores for all indicators
#############################################################################
# cat("Please wait while calculating z-scores...\n")
### Height-for-age z-score
mat<-calc.zhfa(mat,hfawho2007)
### Weight-for-age z-score
mat<-calc.zwei(mat,wfawho2007)
### BMI-for-age z-score
mat<-calc.zbmi(mat,bfawho2007)
#### Rounding the z-scores to two decimals
mat$zhfa<-rounde(mat$zhfa,digits=2)
mat$zwfa<-rounde(mat$zwfa,digits=2)
mat$zbfa<-rounde(mat$zbfa,digits=2)
#### Flagging z-score values for individual indicators
mat$fhfa<-ifelse(abs(mat$zhfa) > 6,1,0)
mat$fwfa<-ifelse(mat$zwfa > 5 | mat$zwfa < (-6),1,0)
mat$fbfa<-ifelse(abs(mat$zbfa) > 5,1,0)
if(is.na(mat$age.mo) & mat$oedema=="y") {
mat$fhfa<-NA
mat$zwfa<-NA
mat$zbfa<-NA
}
mat<-cbind.data.frame(mydf,mat[,-c(2:6)])
###################################################################################################
######### Export data frame with z-scores and flag variables
###################################################################################################
return(mat)
# assign("matz",mat,envir = .GlobalEnv)
# write.table(matz, file=paste(FilePath,"\\",FileLab,"_z.csv",sep=""),na="",row.names = FALSE,sep=",",quote = TRUE)
# cat(paste("Z-scores calculated and exported to ",FilePath,"\\",FileLab,"_z.csv\n\n",sep=""))
#######################################################################################################
#### Calculating prevalences and summary statistics.
#######################################################################################################
if(any(sw <0)) stop("Negative weights are not allowed and program will stop. Prevalence tables will not be produced.")
mat.out<-mat
mat.out$sw.vec<-sw
mat.out$sex.vec<-as.double(sex.vec)
mat.out$oedema.vec<-as.character(oedema.vec)
mat.out$oedema.vec1<-as.character(oedema.vec)
mat.out<-mat.out[!is.na(mat.out$age.mo) & mat.out$age.mo>=61 & mat.out$age.mo<229,]
####################################################
#### Creating age group variable in completed years
####################################################
mat.out$agegr <- floor(mat.out$age.mo/12)
##############################################
#### Make z-score as missing if it is flagged
##############################################
mat.out$zhfa<-ifelse(!is.na(mat.out$fhfa) & mat.out$fhfa!=0,NA,mat.out$zhfa)
mat.out$zwfa<-ifelse(!is.na(mat.out$fwfa) & mat.out$fwfa!=0,NA,mat.out$zwfa)
mat.out$zbfa<-ifelse(!is.na(mat.out$fbfa) & mat.out$fbfa!=0,NA,mat.out$zbfa)
if(dim(mat.out)[1]==0) stop("\n\nNo non-missing z-score values are available for calculating prevalences. Program will stop!\n\n.")
##############################################
#### Include all levels of age group variable
##############################################
mat.aux<-as.data.frame(cbind(array(rep(NA,((dim(mat.out)[2]-1)*15)),dim=c(15,(dim(mat.out)[2]-1)) ),seq(5,19,1)))
names(mat.aux)<-names(mat.out)
mat.out<-rbind(mat.out,mat.aux)
cat(" starting ")
########################################################################################################
#### Make Oedema variable to be "n" if age smaller than 61 mo or greater than 120 mo (for weight-for-age)
#### or smaller than 61 mo or greater than 228 mo (for bmi-for-age).
#### This is because children with oedema counts in the prevalence even if z-score is missing
#### for weight related indicators.
########################################################################################################
mat.out$oedema.vec<-ifelse((!is.na(mat.out$age.mo) & (mat.out$age.mo<61 | mat.out$age.mo>=229)) | mat.out$oedema.vec=="NA","n",mat.out$oedema.vec)
mat.out$oedema.vec1<-ifelse((!is.na(mat.out$age.mo) & (mat.out$age.mo<61 | mat.out$age.mo>=121)) | mat.out$oedema.vec=="NA","n",mat.out$oedema.vec)
#####################################################################################################################################################
#### Creating matrix with estimated prevalences, confidence intervals, and means and standard deviations of z-scores and exporting it to Excel file.
#####################################################################################################################################################
cat("\nPlease wait while calculating prevalences and z-score summary statistics...\n")
#### Sexes combined
#### % < -3 SD for all the indicators
mat<- t(cbind.data.frame(#
prevnh(-3,mat.out$zwfa,mat.out$sw.vec,mat.out$oedema.vec1),lapply(split(mat.out, mat.out$agegr),function(z) prevnh(-3, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevnh.L(-3,mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr), function(z) prevnh.L(-3, z$zhfa, z$sw.vec)),#
prevnh(-3,mat.out$zbfa,mat.out$sw.vec,mat.out$oedema.vec),lapply(split(mat.out, mat.out$agegr),function(z) prevnh(-3, z$zbfa, z$sw.vec, z$oedema.vec))))
#### % < -2 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevnh(-2,mat.out$zwfa,mat.out$sw.vec,mat.out$oedema.vec1),lapply(split(mat.out, mat.out$agegr),function(z) prevnh(-2, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevnh.L(-2,mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr), function(z) prevnh.L(-2, z$zhfa, z$sw.vec)),#
prevnh(-2,mat.out$zbfa,mat.out$sw.vec,mat.out$oedema.vec),lapply(split(mat.out, mat.out$agegr),function(z) prevnh(-2, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +1 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(1,mat.out$zwfa,mat.out$sw.vec,mat.out$oedema.vec1),lapply(split(mat.out, mat.out$agegr),function(z) prevph(1, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(1,mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr), function(z) prevph.L(1, z$zhfa, z$sw.vec)),#
prevph(1,mat.out$zbfa,mat.out$sw.vec,mat.out$oedema.vec),lapply(split(mat.out, mat.out$agegr),function(z) prevph(1, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +2 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(2,mat.out$zwfa,mat.out$sw.vec,mat.out$oedema.vec1),lapply(split(mat.out, mat.out$agegr),function(z) prevph(2, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(2,mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr), function(z) prevph.L(2, z$zhfa, z$sw.vec)),#
prevph(2,mat.out$zbfa,mat.out$sw.vec,mat.out$oedema.vec),lapply(split(mat.out, mat.out$agegr),function(z) prevph(2, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +3 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(3,mat.out$zwfa,mat.out$sw.vec,mat.out$oedema.vec1),lapply(split(mat.out, mat.out$agegr),function(z) prevph(3, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(3,mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr), function(z) prevph.L(3, z$zhfa, z$sw.vec)),#
prevph(3,mat.out$zbfa,mat.out$sw.vec,mat.out$oedema.vec),lapply(split(mat.out, mat.out$agegr),function(z) prevph(3, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### Means of z-scores for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
wmean(mat.out$zwfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wmean(z$zwfa,z$sw.vec)),#
wmean(mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wmean(z$zhfa,z$sw.vec)),#
wmean(mat.out$zbfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wmean(z$zbfa,z$sw.vec)))))
#### Standard deviations of z-scores for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
wsd(mat.out$zwfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wsd(z$zwfa,z$sw.vec)),#
wsd(mat.out$zhfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wsd(z$zhfa,z$sw.vec)),#
wsd(mat.out$zbfa,mat.out$sw.vec),lapply(split(mat.out, mat.out$agegr),function(z) wsd(z$zbfa,z$sw.vec)))))
####################################################################################################################
##### Exporting matrix to Excel file
rm(mat1)
mat1<-rbind(c("Set 1:","Sexes","combined",rep("",15)),
c("Weight","-for-","age",rep("",15)),
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[1:16,],#
c("Height","-for-","age",rep("",15)),#
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[17:32,],#
c("BMI","-for-","age",rep("",15)),
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[33:48,])
for(j in 1:dim(mat1)[2]) mat1[,j]<-ifelse(mat1[,j]=="NA" | mat1[,j]=="NaN","",mat1[,j])
mat1<-cbind(c("",rep(c("","Age","5-19",as.character(5:19)),3)),mat1)
####################################################################################################################
##### For boys and girls
## Repeat the whole prevalence/mean/SD computation separately for each sex
## (i == 1 presumably males, i == 2 females, matching the labels used for
## mat2 below -- TODO confirm coding against the data dictionary).
for(i in 1:2) {
## Subset to the current sex, dropping records with missing sex.
mat.out.sex<-mat.out[!is.na(mat.out$sex.vec) & mat.out$sex.vec==i,]
## Append 15 all-NA dummy rows, one per age year 5-19, so that split() below
## yields every age group even when this sex has no subjects in some group.
## The final column is filled with 5:19; this assumes the age-group variable
## is the last column of mat.out.sex -- TODO confirm.
mat.aux<-as.data.frame(cbind(array(rep(NA,((dim(mat.out.sex)[2]-1)*15)),dim=c(15,(dim(mat.out.sex)[2]-1)) ),seq(5,19,1)))
names(mat.aux)<-names(mat.out.sex)
mat.out.sex<-rbind.data.frame(mat.out.sex,mat.aux)
## Force oedema to "n" outside the 61-228 month window and where it is the
## literal string "NA" (the data appear to code missing oedema as the text
## "NA" rather than a real NA -- NOTE(review): verify against the input file).
mat.out.sex$oedema.vec<-ifelse((!is.na(mat.out.sex$age.mo) & (mat.out.sex$age.mo<61 | mat.out.sex$age.mo>=229)) | mat.out.sex$oedema.vec=="NA","n",mat.out.sex$oedema.vec)
#### % < -3 SD for all the indicators
## Each section builds, per indicator, the overall figure followed by one
## figure per age group; t() turns the column-wise cbind into table rows.
## prevnh/prevph (and their .L variants) are prevalence helpers defined
## earlier in this file -- TODO confirm their contracts there.
mat<- t(cbind.data.frame(#
prevnh(-3,mat.out.sex$zwfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec1),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevnh(-3, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevnh.L(-3,mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr), function(z) prevnh.L(-3, z$zhfa, z$sw.vec)),#
prevnh(-3,mat.out.sex$zbfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevnh(-3, z$zbfa, z$sw.vec, z$oedema.vec))))
#### % < -2 SD for all the indicators
## [,-1] drops the duplicated N column that each prevalence call repeats.
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevnh(-2,mat.out.sex$zwfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec1),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevnh(-2, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevnh.L(-2,mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr), function(z) prevnh.L(-2, z$zhfa, z$sw.vec)),#
prevnh(-2,mat.out.sex$zbfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevnh(-2, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +1 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(1,mat.out.sex$zwfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec1),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(1, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(1,mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr), function(z) prevph.L(1, z$zhfa, z$sw.vec)),#
prevph(1,mat.out.sex$zbfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(1, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +2 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(2,mat.out.sex$zwfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec1),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(2, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(2,mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr), function(z) prevph.L(2, z$zhfa, z$sw.vec)),#
prevph(2,mat.out.sex$zbfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(2, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### % > +3 SD for all the indicators
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
prevph(3,mat.out.sex$zwfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec1),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(3, z$zwfa, z$sw.vec, z$oedema.vec1)),#
prevph.L(3,mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr), function(z) prevph.L(3, z$zhfa, z$sw.vec)),#
prevph(3,mat.out.sex$zbfa,mat.out.sex$sw.vec,mat.out.sex$oedema.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) prevph(3, z$zbfa, z$sw.vec, z$oedema.vec))))[,-1])
#### Means of z-scores for all the indicators
## Weighted means (wmean) appended per indicator, overall then by age group.
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
wmean(mat.out.sex$zwfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wmean(z$zwfa,z$sw.vec)),#
wmean(mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wmean(z$zhfa,z$sw.vec)),#
wmean(mat.out.sex$zbfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wmean(z$zbfa,z$sw.vec)))))
#### Standard deviations of z-scores for all the indicators
## Weighted SDs (wsd), same layout as the means above.
mat<-cbind.data.frame(mat,t(cbind.data.frame(#
wsd(mat.out.sex$zwfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wsd(z$zwfa,z$sw.vec)),#
wsd(mat.out.sex$zhfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wsd(z$zhfa,z$sw.vec)),#
wsd(mat.out.sex$zbfa,mat.out.sex$sw.vec),lapply(split(mat.out.sex, mat.out.sex$agegr),function(z) wsd(z$zbfa,z$sw.vec)))))
####################################################################################################################
##### Exporting matrix to Excel file
## Build this sex's block with the same layout as the combined table:
## title row ("Set 2: Males" / "Set 3: Females"), then per indicator a
## sub-header row, a column-label row, and the 16 corresponding rows of mat.
mat2<-rbind.data.frame(c(paste("Set ",i+1,":",sep=""),c("Males","Females")[i],rep("",16)),
c("Weight","-for-","age",rep("",15)),
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[1:16,],#
c("Height","-for-","age",rep("",15)),#
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[17:32,],#
c("BMI","-for-","age",rep("",15)),
c("N","% < -3 SD","95%", "C.I.","% < -2 SD","95%", "C.I.","% > +1 SD","95%", "C.I.","% > +2 SD","95%", "C.I.","% > +3 SD","95%", "C.I.","Mean","SD"),mat[33:48,])
## Blank out textual "NA"/"NaN" cells, add the row-label column, then
## append this sex's block underneath the running output table mat1.
for(j in 1:dim(mat2)[2]) mat2[,j]<-ifelse(mat2[,j]=="NA" | mat2[,j]=="NaN","",mat2[,j])
mat2<-cbind(c("",rep(c("","Age","5-19",as.character(5:19)),3)),mat2)
names(mat2)<-names(mat1)
mat1<-rbind(mat1,mat2)
} #### End of loop for sex
## Drop rows 11-19 of each of the three sets (the same relative positions,
## offset by the 55-row set length). These look like the weight-for-age rows
## for ages 11-19, where the WHO 2007 reference provides no WFA standard --
## NOTE(review): confirm against the reference tables.
mat1<-mat1[-c(11:19,66:74,121:129),]
###################################################################################################################
######### Export table with prevalence values and their confidence intervals, and mean and SD of the z-scores
###################################################################################################################
## Side effect: publishes the result as `matprev` in the global environment
## (part of the macro's documented interface; callers read it after the run).
assign("matprev",mat1,envir = .GlobalEnv)
## Write the CSV. FilePath/FileLab are function arguments (declared above,
## outside this excerpt). Fixed: col.names=F used the reassignable alias F;
## spelled out as FALSE. paste0() replaces paste(..., sep="") -- the output
## path and message bytes are unchanged.
write.table(matprev, file=paste0(FilePath,"\\",FileLab,"_prev.csv"),na=" ",row.names = FALSE,col.names = FALSE,sep=",",quote = TRUE)
cat(paste0("Prevalences and z-score summary statistics calculated and exported to ",FilePath,"\\",FileLab,"_prev.csv\n"))
## Restore options saved in `old` earlier in the function when it returns.
## NOTE(review): registering on.exit this late means an early error skips the
## restore; consider moving it next to the options() call -- left as-is here.
on.exit(options(old))
## Return nothing visibly; results go out via the CSV and `matprev`.
invisible()
} #### End of main function who2007
# wfawho2007<-read.table("D:\\References 5-20y\\Macro R\\who2007_R\\wfawho2007.txt",header=T,sep="",skip=0)
# hfawho2007<-read.table("D:\\References 5-20y\\Macro R\\who2007_R\\hfawho2007.txt",header=T,sep="",skip=0)
# bfawho2007<-read.table("D:\\References 5-20y\\Macro R\\who2007_R\\bfawho2007.txt",header=T,sep="",skip=0)
# survey.who2007<-read.csv("D:\\References 5-20y\\Macro R\\who2007_R\\survey_who2007.csv",header=T,sep=",",skip=0,na.strings="")
# source("D:\\References 5-20y\\Macro R\\who2007_R\\who2007.r")
# who2007(FileLab = "survey_who2007", FilePath = "D:\\References 5-20y\\Macro R\\who2007_R", mydf = survey.who2007,sex = sex, age = agemons, weight = weight, height = height, sw=sw, oedema=oedema)
|
# Sample data: a small vector of integers, several of them negative.
random_numbers <- c(-2, -8, 5, -3, 5, 10, 13, 1, 1)
# Logical mask marking the strictly positive entries (TRUE where value > 0).
positive_numbers <- sign(random_numbers) == 1
# Auto-print the mask.
positive_numbers
|
/1 - Basic/2 - Vectors/14.personalizado.R
|
no_license
|
TopicosSelectos/tutoriales-2019-2-abel-rodriguez
|
R
| false
| false
| 108
|
r
|
# Sample data: a small vector of integers, several of them negative.
random_numbers <- c(-2, -8, 5, -3, 5, 10, 13, 1, 1)
# Logical mask marking the strictly positive entries (TRUE where value > 0).
positive_numbers <- sign(random_numbers) == 1
# Auto-print the mask.
positive_numbers
|
\alias{gtkWindowResize}
\name{gtkWindowResize}
\title{gtkWindowResize}
\description{Resizes the window as if the user had done so, obeying geometry
constraints. The default geometry constraint is that windows may
not be smaller than their size request; to override this
constraint, call \code{\link{gtkWidgetSetSizeRequest}} to set the window's
request to a smaller value.}
\usage{gtkWindowResize(object, width, height)}
\arguments{
\item{\code{object}}{[\code{\link{GtkWindow}}] a \code{\link{GtkWindow}}}
\item{\code{width}}{[integer] width in pixels to resize the window to}
\item{\code{height}}{[integer] height in pixels to resize the window to}
}
\details{If \code{\link{gtkWindowResize}} is called before showing a window for the
first time, it overrides any default size set with
\code{\link{gtkWindowSetDefaultSize}}.
Windows may not be resized smaller than 1 by 1 pixels. }
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/man/gtkWindowResize.Rd
|
no_license
|
cran/RGtk2.10
|
R
| false
| false
| 962
|
rd
|
\alias{gtkWindowResize}
\name{gtkWindowResize}
\title{gtkWindowResize}
\description{Resizes the window as if the user had done so, obeying geometry
constraints. The default geometry constraint is that windows may
not be smaller than their size request; to override this
constraint, call \code{\link{gtkWidgetSetSizeRequest}} to set the window's
request to a smaller value.}
\usage{gtkWindowResize(object, width, height)}
\arguments{
\item{\code{object}}{[\code{\link{GtkWindow}}] a \code{\link{GtkWindow}}}
\item{\code{width}}{[integer] width in pixels to resize the window to}
\item{\code{height}}{[integer] height in pixels to resize the window to}
}
\details{If \code{\link{gtkWindowResize}} is called before showing a window for the
first time, it overrides any default size set with
\code{\link{gtkWindowSetDefaultSize}}.
Windows may not be resized smaller than 1 by 1 pixels. }
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.