content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Financial Analytics Case Study
# Finding how people take decisions to buy products
# Creating Decision Tree
library(rpart)       # recursive partitioning (CART) models
library(rpart.plot)  # tree plotting (fixed typo: was 'rplart.plot', which errors at load time)
# Students: Gender - (Male & Female) buy a product
# Row names: synthetic student roll numbers S1..S1000
rollno <- paste0("S", 1:1000)
rollno
# Variable: gender, a balanced 50/50 draw (seed fixed for reproducibility)
set.seed(100)
gender <- sample(c("Male", "Female"), size = 1000, replace = TRUE, prob = c(0.5, 0.5))
head(gender)
table(gender)
# Variable: buy decision, slightly more NotBuy than Buy
set.seed(3000)
buy <- sample(c("Buy", "NotBuy"), size = 1000, replace = TRUE, prob = c(0.49, 0.51))
head(buy)
table(buy)
prop.table(table(buy))
# Assemble the data frame, one row per student
students1 <- data.frame(gender, buy)
rownames(students1) <- rollno
head(students1)
# Cross-tabulations of gender vs. buy (counts, proportions, with margins)
table(students1)
prop.table(table(students1))
addmargins(prop.table(table(students1)))
(t1 <- table(students1$gender, students1$buy))
addmargins(t1)
prop.table(t1)
addmargins(prop.table(t1))
# Model 1: classification tree predicting buy from gender alone
fit1 = rpart(buy ~ gender, data=students1,minsplit=4, minbucket=2)
# NOTE: 'minsplit' (default 20) = min obs a node must have before a split is attempted;
# 'minbucket' = min obs allowed in any terminal leaf (default is minsplit/3)
fit1 #print(fit1)
table(students1$gender, students1$buy)
students1
library(rpart.plot)
# nn=T shows node numbers; extra=104 adds class probabilities + % of observations
rpart.plot(fit1, main='Classification Tree', nn=T, type=4, extra=104)
fit1
# Predictions on new data: default returns class probabilities, type='class' the label
predict(fit1, newdata = data.frame(gender='Female'))
predict(fit1, newdata = data.frame(gender='Female'), type='class')
predict(fit1, newdata = data.frame(gender=c('Male','Female','Male')), type='class')
#---- Part -2 Add another column: marital status ----
set.seed(5000)
married = sample(x=c('Married','Single'), size=1000, replace=T, prob=c(0.5,0.5) )
table(married)
# Rebuild the data frame with the extra predictor
students2 = data.frame(gender, married, buy)
rownames(students2) = rollno
head(students2)
str(students2)
prop.table(ftable(students2))  # flat 3-way contingency table as proportions
#addmargins(prop.table(ftable(students2)))
#write.csv(students2, 'dtdata.csv')
# Model2: tree on both predictors; minsplit=12 relaxes the default of 20
#library(rpart)
fit2 = rpart(buy ~ gender + married, data=students2, minsplit=12)
summary(fit2)
fit2
rpart.plot(fit2,type=2,extra=104, tweak=1.2, under=T, shadow=c('brown', 'green','red'), nn=T)
fit2
# prp() is rpart.plot's lower-level plotting interface; many cosmetic options below
prp(fit2)
prp(fit2, main="An Example",
type=4, fallen=T, branch=.3, round=0, leaf.round=9,
clip.right.labs=F, under.cex=1,
box.palette="GnYlRd",
prefix="Student\n", branch.col="gray", branch.lwd=2,
extra=101, under=T, lt=" < ", ge=" >= ", cex.main=1.5)
prp(fit2, branch.type=5)
labels(fit2)  # split labels of the tree
new.tree <- prp(fit2, snip=TRUE)$obj # interactively trim the tree (needs an interactive device)
prp(new.tree) # display the new tree
#Plot---- alternative renderings of the fit2 tree ----
library(RColorBrewer)
#library(rattle)
rpart.plot::rpart.plot(fit2, main='Classification Tree')
rpart.plot::rpart.plot(fit2, extra=104, box.palette="GnBu", branch.lty=3, shadow.col="gray", nn=TRUE)
rpart.plot::prp(fit2,fallen.leaves = F)
prp(fit2, type=2)
#Predict: same new observation, different 'type' outputs
#
predict(fit2, newdata = data.frame(gender='Male', married='Married'), type='prob')
predict(fit2, newdata = data.frame(gender='Male', married='Married'), type='class')
predict(fit2, newdata = data.frame(gender='Male', married='Married'), type='vector')
predict(fit2, newdata = data.frame(gender='Male', married='Married'))
?predict
# Predict for all four gender x married combinations
testdata = data.frame(gender=c('Male','Male','Female','Female'), married=c('Married','Single','Married','Single'))
testdata
(p1 = predict(fit2, newdata = testdata, type='vector')) #node/level
#play=2, notplay=1
(p2 = predict(fit2, newdata = testdata, type='class')) #factor
(p3 = predict(fit2, newdata = testdata, type='prob')) # prob
cbind(testdata, p1, p2, p3)
#level number, class frequencies, probabilities
predict(fit2, newdata= testdata, type = "matrix")
head(students2)
#Parameters Setting : CP (complexity parameter controlling tree size / pruning)
printcp(fit2)
printcp(fit2, digits = getOption("digits") - 5)
plotcp(fit2)  # cross-validated relative error vs cp
names(fit2)
# fit2$where: for each training row, the row number in fit2$frame of its terminal node.
# (Removed a stray '?' on its own line -- it swallowed the next expression, turning
#  'fit2$where' into a help query -- and '?fit2$where', which is an invalid help call
#  on a list component; see ?rpart.object for documentation of these components.)
fit2$where #which row at which nodeno
students2[1:5,]
# Attach the terminal-node number (rownames of the frame) to each observation
cbind(students2, nodeno=rownames(fit2$frame)[fit2$where])
pfit <- prune(fit2, cp=0.077) # cp value read off printcp(fit2)'s cptable
pfit
rpart.plot(pfit)
#do some changes to pfit
#--------------------------------------------------------
#add column with 3 classes and numeric and logical
set.seed(105)
education = sample(x=c('school','graduate', 'pg'), size=1000, replace=T, prob=c(0.3,0.4,.3) )
education; table(education)
set.seed(106)
hostel = sample(x=c(TRUE, FALSE), size=1000, replace=T, prob=c(.3,.7))
table(hostel)
set.seed(107)
marks = floor(runif(1000,50,100))  # uniform integer marks in [50, 99]
mean(marks)
students3 = data.frame(gender, married, education, hostel,marks,buy)
with(students3, ftable(education, hostel, gender, married,buy))
# Model3: tree on all predictors (formula 'buy ~ .')
fit3a = rpart(buy ~ ., data=students3)
fit3a
rpart.plot::rpart.plot(fit3a, main='Classification Tree')
#Model3b : change some parameters minbucket, minsplit to grow a deeper tree
fit3b = rpart(buy ~ ., data=students3, minsplit=4, minbucket=2) #control= rpart.control(cp=0.00001))#use low cp
fit3b
rpart.plot::rpart.plot(fit3b, main='Classification Tree', cex=.6, type=3, extra=104, nn=T)
rpart.plot::prp(fit3b)
#rattle::fancyRpartPlot(model = fit3b, main = "Final CART Regression Tree", cex = 0.6, sub = "Model3")
# Colour node boxes by predicted class (frame$yval indexes the colour vector)
prp(fit3b,box.col=c("Grey", "Orange")[fit3b$frame$yval],varlen=0,faclen=0, type=1,extra=4,under=TRUE, tweak=1.2)
#Lets see CP
plotcp(fit3b)
printcp(fit3b, digits = getOption("digits") - 5)
# cp of the subtree with the lowest cross-validated error
(bestcp= fit3b$cptable[which.min(fit3b$cptable[,'xerror']),'CP'])
#but this is at root node only, select next better
bestcp = 0.01
prp(fit3b)
fit3b2 = rpart(buy ~ ., data=students3, minsplit=4, minbucket=2, control= rpart.control(cp=0.001))
fit3b2
prp(fit3b2)
# Prune fit3b back at the chosen complexity parameter
fit3b.pruned = prune(fit3b, cp=bestcp)
fit3b.pruned
prp(fit3b.pruned)
rpart.plot(fit3b.pruned,cex=.6, extra=101, type=1)
#Predict Model3
fit3b.pruned$where  # terminal-node row (in $frame) for each training observation
fit3b.pruned
# Print the decision path leading to the given node numbers
path.rpart(fit3b.pruned, nodes=c(1,4,10,43), pretty = 0, print.it = TRUE)
testdata1= data.frame(gender='Male', married='Married', education='school', hostel=TRUE, marks=60)
testdata1
predict(fit3b.pruned, newdata = testdata1 )
# now practise with Marketing Data
| /CLS/CART/dt-rpart-du1.R | no_license | mohitmirajkar/analytics-1 | R | false | false | 5,837 | r | # Financial Analytics Case Study
# Finding how people take decisions to buy products
# Creating Decision Tree
library(rpart)
library(rpart.plot)  # fixed typo: was 'rplart.plot', which errors at load time
#Students : Gender - (Male & Female) buy a product
#Rownames
rollno = paste('S',1:1000, sep='')
rollno
#Variable Gender
set.seed(100)
gender = sample(x=c('Male','Female'), size=1000, replace=T, prob=c(0.5,0.5) )
head(gender)
table(gender)
#Variable- Buy : Decision
set.seed(3000)
buy = sample(x=c('Buy','NotBuy'), size=1000, replace=T, prob=c(.49,.51) )
head(buy)
table(buy)
prop.table(table(buy))
#create Data Frame
students1 = data.frame(gender, buy)
rownames(students1) = rollno
head(students1)
#Table
table(students1)
prop.table(table(students1))
addmargins(prop.table(table(students1)))
(t1= table(students1$gender, students1$buy))
addmargins(t1)
prop.table(table(students1$gender, students1$buy))
addmargins(prop.table(table(students1$gender, students1$buy))
)
#Model1
fit1 = rpart(buy ~ gender, data=students1,minsplit=4, minbucket=2)
#'minsplit' is 20 and determines the minimal number of observations per leaf ('minbucket')
fit1 #print(fit1)
table(students1$gender, students1$buy)
students1
library(rpart.plot)
rpart.plot(fit1, main='Classification Tree', nn=T, type=4, extra=104)
fit1
predict(fit1, newdata = data.frame(gender='Female'))
predict(fit1, newdata = data.frame(gender='Female'), type='class')
predict(fit1, newdata = data.frame(gender=c('Male','Female','Male')), type='class')
#---- Part -2 Add another column
set.seed(5000)
married = sample(x=c('Married','Single'), size=1000, replace=T, prob=c(0.5,0.5) )
table(married)
students2 = data.frame(gender, married, buy)
rownames(students2) = rollno
head(students2)
str(students2)
prop.table(ftable(students2))
#addmargins(prop.table(ftable(students2)))
#write.csv(students2, 'dtdata.csv')
# Model2
#library(rpart)
fit2 = rpart(buy ~ gender + married, data=students2, minsplit=12)
summary(fit2)
fit2
rpart.plot(fit2,type=2,extra=104, tweak=1.2, under=T, shadow=c('brown', 'green','red'), nn=T)
fit2
prp(fit2)
prp(fit2, main="An Example",
type=4, fallen=T, branch=.3, round=0, leaf.round=9,
clip.right.labs=F, under.cex=1,
box.palette="GnYlRd",
prefix="Student\n", branch.col="gray", branch.lwd=2,
extra=101, under=T, lt=" < ", ge=" >= ", cex.main=1.5)
prp(fit2, branch.type=5)
labels(fit2)
new.tree <- prp(fit2, snip=TRUE)$obj # interactively trim the tree
prp(new.tree) # display the new tree
#Plot----
library(RColorBrewer)
#library(rattle)
rpart.plot::rpart.plot(fit2, main='Classification Tree')
rpart.plot::rpart.plot(fit2, extra=104, box.palette="GnBu", branch.lty=3, shadow.col="gray", nn=TRUE)
rpart.plot::prp(fit2,fallen.leaves = F)
prp(fit2, type=2)
#Predict
#
predict(fit2, newdata = data.frame(gender='Male', married='Married'), type='prob')
predict(fit2, newdata = data.frame(gender='Male', married='Married'), type='class')
predict(fit2, newdata = data.frame(gender='Male', married='Married'), type='vector')
predict(fit2, newdata = data.frame(gender='Male', married='Married'))
?predict
testdata = data.frame(gender=c('Male','Male','Female','Female'), married=c('Married','Single','Married','Single'))
testdata
(p1 = predict(fit2, newdata = testdata, type='vector')) #node/level
#play=2, notplay=1
(p2 = predict(fit2, newdata = testdata, type='class')) #factor
(p3 = predict(fit2, newdata = testdata, type='prob')) # prob
cbind(testdata, p1, p2, p3)
#level number, class frequencies, probabilities
predict(fit2, newdata= testdata, type = "matrix")
head(students2)
#Parameters Setting : CP
printcp(fit2)
printcp(fit2, digits = getOption("digits") - 5)
plotcp(fit2)
names(fit2)
# (Removed a stray '?' that swallowed the next expression, and '?fit2$where' --
#  help() works on documented topics, not list components; see ?rpart.object.)
fit2$where #which row at which nodeno
students2[1:5,]
cbind(students2, nodeno=rownames(fit2$frame) [ fit2$where])
pfit= prune(fit2, cp=0.077) # from cptable
pfit
rpart.plot(pfit)
#do some changes to pfit
#--------------------------------------------------------
#add column with 3 classes and numeric and logical
set.seed(105)
education = sample(x=c('school','graduate', 'pg'), size=1000, replace=T, prob=c(0.3,0.4,.3) )
education; table(education)
set.seed(106)
hostel = sample(x=c(TRUE, FALSE), size=1000, replace=T, prob=c(.3,.7))
table(hostel)
set.seed(107)
marks = floor(runif(1000,50,100))
mean(marks)
students3 = data.frame(gender, married, education, hostel,marks,buy)
with(students3, ftable(education, hostel, gender, married,buy))
# Model3
fit3a = rpart(buy ~ ., data=students3)
fit3a
rpart.plot::rpart.plot(fit3a, main='Classification Tree')
#Model3b : change some parameters minbucket, minsplit
fit3b = rpart(buy ~ ., data=students3, minsplit=4, minbucket=2) #control= rpart.control(cp=0.00001))#use low cp
fit3b
rpart.plot::rpart.plot(fit3b, main='Classification Tree', cex=.6, type=3, extra=104, nn=T)
rpart.plot::prp(fit3b)
#rattle::fancyRpartPlot(model = fit3b, main = "Final CART Regression Tree", cex = 0.6, sub = "Model3")
prp(fit3b,box.col=c("Grey", "Orange")[fit3b$frame$yval],varlen=0,faclen=0, type=1,extra=4,under=TRUE, tweak=1.2)
#Lets see CP
plotcp(fit3b)
printcp(fit3b, digits = getOption("digits") - 5)
(bestcp= fit3b$cptable[which.min(fit3b$cptable[,'xerror']),'CP'])
#but this is at root node only, select next better
bestcp = 0.01
prp(fit3b)
fit3b2 = rpart(buy ~ ., data=students3, minsplit=4, minbucket=2, control= rpart.control(cp=0.001))
fit3b2
prp(fit3b2)
fit3b.pruned = prune(fit3b, cp=bestcp)
fit3b.pruned
prp(fit3b.pruned)
rpart.plot(fit3b.pruned,cex=.6, extra=101, type=1)
#Predict Model3
fit3b.pruned$where
fit3b.pruned
path.rpart(fit3b.pruned, nodes=c(1,4,10,43), pretty = 0, print.it = TRUE)
testdata1= data.frame(gender='Male', married='Married', education='school', hostel=TRUE, marks=60)
testdata1
predict(fit3b.pruned, newdata = testdata1 )
# now practise with Marketing Data
|
library(hbsae)
### Name: sae-class
### Title: S3 class for the fitted model and SAE outcomes.
### Aliases: cAIC coef.sae COR COV CV EST fitted.sae MSE R2 raneff
### raneff.se relSE residuals.sae sae-class SE se2 sv2 synthetic vcov.sae
### wDirect
### ** Examples
# Simulated survey sample (d$sam) and population covariate totals (d$Xpop)
d <- generateFakeData()
# compute small area estimates (area-level random effects model via fSAE)
sae <- fSAE(y0 ~ x + area2, data=d$sam, area="area", popdata=d$Xpop)
# Accessor functions on the fitted 'sae' object:
coef(sae) # fixed effects
raneff(sae) # random effects
sv2(sae) # between-area variance
se2(sae) # within-area variance
cAIC(sae) # conditional AIC
| /data/genthat_extracted_code/hbsae/examples/sae-class.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 553 | r | library(hbsae)
### Name: sae-class
### Title: S3 class for the fitted model and SAE outcomes.
### Aliases: cAIC coef.sae COR COV CV EST fitted.sae MSE R2 raneff
### raneff.se relSE residuals.sae sae-class SE se2 sv2 synthetic vcov.sae
### wDirect
### ** Examples
d <- generateFakeData()
# compute small area estimates
sae <- fSAE(y0 ~ x + area2, data=d$sam, area="area", popdata=d$Xpop)
coef(sae) # fixed effects
raneff(sae) # random effects
sv2(sae) # between-area variance
se2(sae) # within-area variance
cAIC(sae) # conditional AIC
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rl_occ_country.R, R/rl_search.R
\name{rl_occ_country_}
\alias{rl_occ_country_}
\alias{rl_search}
\alias{rl_search_}
\title{Search by taxon name, IUCN id, and region}
\usage{
rl_occ_country_(name = NULL, id = NULL, region = NULL, key = NULL, ...)
rl_search(name = NULL, id = NULL, region = NULL, key = NULL,
parse = TRUE, ...)
rl_search_(name = NULL, id = NULL, region = NULL, key = NULL, ...)
}
\arguments{
\item{name}{(character) A taxonomic name}
\item{id}{(character) An IUCN identifier}
\item{region}{(character) A region name, see \code{\link{rl_regions}} for
acceptable region identifiers (use the entries in the \code{identifier}
column)}
\item{key}{A IUCN API token. See
\url{http://apiv3.iucnredlist.org/api/v3/token} to get a token}
\item{...}{Curl options passed to \code{\link[crul]{HttpClient}}}
\item{parse}{(logical) Whether to parse to list (\code{FALSE}) or
data.frame (\code{TRUE}). Default: \code{TRUE}}
}
\value{
A list, with the data in the \code{result} slot, unless using
a function with a trailing underscore, in which case json as character
string is returned.
}
\description{
Search by taxon name, IUCN id, and region
}
\examples{
\dontrun{
rl_search('Fratercula arctica')
rl_search('Fratercula arctica', region = 'europe')
rl_search(id = 12392)
rl_search(id = 22694927, region = 'europe')
rl_search('Fratercula arctica', parse = FALSE)
rl_search_('Fratercula arctica')
rl_search_('Fratercula arctica', region = 'europe')
}
}
\references{
API docs at \url{http://apiv3.iucnredlist.org/api/v3/docs}
}
| /man/rl_search.Rd | permissive | MoisesExpositoAlonso/rredlist | R | false | true | 1,614 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rl_occ_country.R, R/rl_search.R
\name{rl_occ_country_}
\alias{rl_occ_country_}
\alias{rl_search}
\alias{rl_search_}
\title{Search by taxon name, IUCN id, and region}
\usage{
rl_occ_country_(name = NULL, id = NULL, region = NULL, key = NULL, ...)
rl_search(name = NULL, id = NULL, region = NULL, key = NULL,
parse = TRUE, ...)
rl_search_(name = NULL, id = NULL, region = NULL, key = NULL, ...)
}
\arguments{
\item{name}{(character) A taxonomic name}
\item{id}{(character) An IUCN identifier}
\item{region}{(character) A region name, see \code{\link{rl_regions}} for
acceptable region identifiers (use the entries in the \code{identifier}
column)}
\item{key}{A IUCN API token. See
\url{http://apiv3.iucnredlist.org/api/v3/token} to get a token}
\item{...}{Curl options passed to \code{\link[crul]{HttpClient}}}
\item{parse}{(logical) Whether to parse to list (\code{FALSE}) or
data.frame (\code{TRUE}). Default: \code{TRUE}}
}
\value{
A list, with the data in the \code{result} slot, unless using
a function with a trailing underscore, in which case json as character
string is returned.
}
\description{
Search by taxon name, IUCN id, and region
}
\examples{
\dontrun{
rl_search('Fratercula arctica')
rl_search('Fratercula arctica', region = 'europe')
rl_search(id = 12392)
rl_search(id = 22694927, region = 'europe')
rl_search('Fratercula arctica', parse = FALSE)
rl_search_('Fratercula arctica')
rl_search_('Fratercula arctica', region = 'europe')
}
}
\references{
API docs at \url{http://apiv3.iucnredlist.org/api/v3/docs}
}
|
# source by "ui.R"
| /ui/tcga_ctrp_ui.R | permissive | COMODr/GSCALite | R | false | false | 19 | r | # source by "ui.R"
|
##### 08/10/2020 - ETHZ - Fabio Benedetti © UP Group, IBP, ETH Zürich
##### Script for :
# - Loading the phytoplankton and zooplankton niche traits from Script#27.6
# - Check their overall distribution and then per group
# - Test variance in niche traits across groups, and phyto vs. zooplankton for common variables
# - Ordinate phytoplankton and zooplankton species in niche traits space?
# - Try to compute and plot a group-level mean response curves
### Last update: 21/10/2020
# --------------------------------------------------------------------------------------------------------------------------------
library("tidyverse")
library("reshape2")
library("biomod2")
library("viridis")
library("vegan")
library("FactoMineR")
# Remember the launch directory: the script setwd()s into data folders below and
# saves figures back into WD later
WD <- getwd()
# Coastline polygons for maps (0-360 and -180/180 longitude conventions)
world2 <- map_data("world2")
world <- map_data("world")
### 1°) Phytoplankton
# Go to working directories
setwd("/net/kryo/work/fabioben/OVERSEE/data/biology/data_for_group_studies/phytoplankton/group_background/resp_curves/niche_traits/")
files <- dir()[grep("niche_traits_GAM",dir())] # files
# Concatenate all niche traits
require("parallel")
res <- mclapply(files, function(f) {
t <- get(load(f))
return(t)
}, mc.cores = 20
) # eo mclapply - f in files
# Rbind
traits_phy <- bind_rows(res)
rm(res,files) ; gc()
head(traits_phy) ; dim(traits_phy)
### Plot distrbution of niche traits (center and center2, width and width2)
ggplot(aes(x = factor(group), y = center, fill = factor(group)), data = traits_phy[traits_phy$var == "SST",]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "") +
xlab("") + ylab("SST niche center (°C)") + theme_classic()
ggplot(aes(x = factor(group), y = width, fill = factor(group)), data = traits_phy[traits_phy$var == "SST",]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "") +
xlab("") + ylab("SST niche width (°C)") + theme_classic()
ggplot(aes(x = center, y = width, fill = factor(group)), data = traits_phy[traits_phy$var == "SST",]) +
geom_point(colour = "black", pch = 21) + scale_fill_brewer(name = "", palette = "Spectral") +
ylab("SST niche width (°C)") + xlab("SST niche center (°C)") + theme_classic()
### Print those plots per variable
vars <- unique(traits_phy$var) ; vars
v <- "SST"
for(v in vars) {
message(paste("Plotting niche traits ditsribution across groups for ",v, sep = ""))
p1 <- ggplot(aes(x = factor(group), y = center2, fill = factor(group)), data = traits_phy[traits_phy$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste(v," niche center",sep="")) + scale_x_discrete(labels = NULL) +
theme_classic()
#
p2 <- ggplot(aes(x = factor(group), y = width2, fill = factor(group)), data = traits_phy[traits_phy$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste(v," niche width",sep="")) + scale_x_discrete(labels = NULL) +
theme_classic()
p3 <- ggplot(aes(x = center2, y = width2, fill = factor(group)), data = traits_phy[traits_phy$var == v,]) +
geom_point(colour = "black", pch = 21) + scale_fill_brewer(name = "", palette = "Paired") +
ylab(paste(v," niche width",sep="")) + xlab(paste(v," niche center",sep="")) + theme_classic()
p4 <- ggplot(aes(x = factor(group), y = prob.range, fill = factor(group)), data = traits_phy[traits_phy$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste("HSI range (importance)",sep="")) + scale_x_discrete(labels = NULL) +
scale_y_continuous(limits = c(0,1)) + theme_classic()
require("ggpubr")
panel <- ggarrange(p1,p2,p3,p4, ncol = 2, nrow = 2, align = "hv")
# panel
setwd(WD)
ggsave(plot = panel, filename = paste("panel_niche_traits_phyto_",v,".jpg", sep = ""), width = 8, height = 5, dpi = 300)
} # eo v in vars
### Summarize variations in those niche traits (let's keep center2 and width2) in a PCA
### But first...need to reformat the data.frame so columns look like: SST_center; SST_width; etc. etc.
### Need to use melt and dcast I guess
#head(traits_phy)
# Long format: one row per (group, species, env. variable, trait measurement)
m_traits_phy <- melt(traits_phy, id.vars = c("group","species","var"))
unique(m_traits_phy$variable)
# Create every combination of var x variable in a factor column and dcast
m_traits_phy$new.col <- factor(paste(m_traits_phy$var ,m_traits_phy$variable, sep = "_"))
unique(m_traits_phy$new.col)
# Dcast to put those new.col as columns (return to wide format, one row per species)
d_traits_phy <- dcast(m_traits_phy[,c(1,2,5,6)], group + species ~ new.col, value.var = "value")
# Select the columns corresponding to: center2, width2 and weight.pca (weights to give to the columns, not the individuals)
cols2keep <- c(colnames(d_traits_phy)[grep("center2", colnames(d_traits_phy))],
colnames(d_traits_phy)[grep("width2", colnames(d_traits_phy))],
colnames(d_traits_phy)[grep("weight.pca", colnames(d_traits_phy))]
) # eo cols2keep
cols2keep
# Retain columns used for PCA
data4pca <- d_traits_phy[,c("group","species",cols2keep)]
dim(data4pca) ; head(data4pca)
colnames(data4pca) <- str_replace(as.character(colnames(data4pca)), "center2", "center")
colnames(data4pca) <- str_replace(as.character(colnames(data4pca)), "width2", "width")
# In a lapply, perform non parametric tests of variance (K-W) and return pvalue and Chi2 values
res.tests <- lapply(colnames(data4pca)[c(3:18)], function(v) {
# v <- "SST_center"
test <- kruskal.test(get(v) ~ factor(group), data = data4pca) # str(summary(test))
# test$p.value
return( data.frame(niche.trait = v, Chi2 = test$stat, pval = test$p.value) )
} # eo fun
) # eo lapply
tests <- bind_rows(res.tests) ; rm(res.tests) ; gc()
rownames(tests) <- NULL
tests
### And select those with pval < 0.01
tests[tests$pval <= 0.01,]
### Check how to add weights properly in PCA
# ?PCA
# Determine the predictors mean weight.pca (from)
vars.weights <- data.frame(traits_phy %>% group_by(var) %>% summarize(w = mean(weight.pca), probs = mean(prob.range)) )
vars.weights
vars.weights$w2 <- vars.weights$w/max(vars.weights$w)
#vars.weights[order(vars.weights$w2, decreasing = T),] # OK
### colnames in data4pca and variables in vars.weight$w2 follow the same order, so just double the vars.weight$w2 vector
col.weights <- c(vars.weights$w2,vars.weights$w2) ; col.weights
pca.phy <- PCA(data4pca[,c(3:18)], scale.unit = T, ncp = 10, graph = F, col.w = col.weights)
summary(pca.phy)
str(pca.phy)
# Provide the PC coords to data4pca
data4pca[,paste("PC",c(1:5),sep="")] <- pca.phy$ind$coord[,c(1:5)]
summary(data4pca)
eig <- data.frame(perc = pca.phy$eig[,"percentage of variance"], nb = c(1:nrow(pca.phy$eig)) ) # eig
pca1 <- paste0("PC1 (",floor(eig$perc[1]*100)/100,"%)")
pca2 <- paste0("PC2 (",floor(eig$perc[2]*100)/100,"%)")
pca3 <- paste0("PC3 (",floor(eig$perc[3]*100)/100,"%)")
pca4 <- paste0("PC4 (",floor(eig$perc[4]*100)/100,"%)")
pca5 <- paste0("PC5 (",floor(eig$perc[5]*100)/100,"%)")
ggplot(aes(x = factor(group), y = PC1, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(pca1) + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC2, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(pca2) + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC3, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(pca3) + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC4, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(pca4) + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
### Compute the groups' centroid in PCA space
groups.coords <- data.frame(data4pca %>% group_by(group) %>% summarize(PC1 = mean(PC1), PC2 = mean(PC2), PC3 = mean(PC3), PC4 = mean(PC4)) )
groups.coords
### PC plot: plot the species coordinates in PC space and report the centroid of the 3 groups
ggplot() + geom_point(aes(x = PC1, y = PC2, fill = factor(group)), data = data4pca, pch = 21, colour = "black", alpha = .5) +
geom_point(aes(x = PC1, y = PC2, fill = factor(group)), data = groups.coords, pch = 21, colour = "black", size = 4) +
scale_fill_brewer(name = "", palette = "Paired") + xlab(pca1) + ylab(pca2) +
geom_hline(yintercept = 0, linetype = "dashed") + geom_vline(xintercept = 0, linetype = "dashed") +
theme_classic()
#
### Same with PC3 and PC4
ggplot() + geom_point(aes(x = PC3, y = PC4, fill = factor(group)), data = data4pca, pch = 21, colour = "black", alpha = .5) +
geom_point(aes(x = PC3, y = PC4, fill = factor(group)), data = groups.coords, pch = 21, colour = "black", size = 4) +
scale_fill_brewer(name = "", palette = "Paired") + xlab(pca3) + ylab(pca4) +
geom_hline(yintercept = 0, linetype = "dashed") + geom_vline(xintercept = 0, linetype = "dashed") +
theme_classic()
### 09/10/2020: Replot PCA space with variables
library("ggrepel")
library("broom")
### Make sure the dims argument is as long as the nb of PCs retained
# Tidy the loadings ("col") or scores (any other value of 'which') of a
# FactoMineR::PCA fit into a data.frame with one row per variable/individual.
# x     : object returned by FactoMineR::PCA()
# dims  : which principal components to keep (columns of the $coord matrices)
# which : "col" -> variables (var + quanti.sup); else individuals (ind + quali.sup)
# Returns a data.frame with the selected Dim columns, a 'type' column naming the
# source element and a 'var' column holding the row labels.
augment.PCA <- function(x, dims = 1:6, which = "col") {
  .get <- function(x, element, dims) {
    # Elements such as 'quanti.sup' only exist when supplementary variables were
    # supplied to PCA(); return NULL instead of erroring on NULL$coord[,dims]
    if (is.null(x[[element]]) || is.null(x[[element]]$coord)) {
      return(NULL)
    }
    # drop = FALSE keeps a data.frame shape even for a single dimension
    y <- as.data.frame(x[[element]]$coord[, dims, drop = FALSE])
    if (nrow(y) == 0) {
      y <- NULL
    } else {
      y$type <- element
    }
    return(y)
  }
  if (which == "col") {
    y <- rbind(.get(x, "var", dims), .get(x, "quanti.sup", dims))
  } else {
    y <- rbind(.get(x, "ind", dims), .get(x, "quali.sup", dims))
  }
  y$var <- row.names(y)
  row.names(y) <- NULL
  return(y)
}
# Tidy variable loadings and draw correlation-circle biplots
pcad <- augment.PCA(pca.phy)
pcad$var # Re-name properly (underscores -> spaces for labelling)
pcad$var <- str_replace_all(pcad$var, "_", " ")
#pcad$var <- str_replace_all(pcad$var, "center2", "center")
#pcad$var <- str_replace_all(pcad$var, "width2", "width")
# Dim1 x Dim2: only label arrows of length > 0.2 to avoid clutter
ggplot(pcad) +
coord_fixed() + scale_x_continuous(breaks=0) + scale_y_continuous(breaks=0) +
annotate(geom="path", colour="black", x=cos(seq(0, 2*pi, length.out=100)), y=sin(seq(0, 2*pi, length.out=100)), colour = "grey30") +
geom_segment(aes(x=0, xend = Dim.1, y=0, yend = Dim.2), arrow=arrow(angle=20, length=unit(0.01, "npc"))) +
scale_colour_manual(name = "", values = c("#4d9221","#c51b7d"), guide = F) +
geom_text_repel(aes(x=Dim.1, y=Dim.2, label=var),
data=filter(pcad, (Dim.1^2+Dim.2^2) > 0.2^2), segment.alpha=0.5) +
xlab(pca1) + ylab(pca2) + theme_bw()
# Same for Dim2 x Dim3
ggplot(pcad) +
coord_fixed() + scale_x_continuous(breaks=0) + scale_y_continuous(breaks=0) +
annotate(geom="path", colour="black", x=cos(seq(0, 2*pi, length.out=100)), y=sin(seq(0, 2*pi, length.out=100)), colour = "grey30") +
geom_segment(aes(x=0, xend = Dim.2, y=0, yend = Dim.3), arrow=arrow(angle=20, length=unit(0.01,"npc"))) +
scale_colour_manual(name = "", values = c("#4d9221","#c51b7d"), guide = F) +
geom_text_repel(aes(x=Dim.2, y=Dim.3, label = var),
data=filter(pcad, (Dim.2^2+Dim.3^2) > 0.2^2), segment.alpha=0.5) +
xlab(pca3) + ylab(pca4) + theme_bw()
# ----------------------------------------------------------------
### 2°) Zooplankton -- same workflow as the phytoplankton section above
setwd("/net/kryo/work/fabioben/OVERSEE/data/biology/data_for_group_studies/zooplankton/group_background/resp_curves/niche_traits/")
files <- dir()[grep("niche_traits_GAM",dir())] ; files
require("parallel")
res <- mclapply(files, function(f) {
t <- get(load(f))
return(t)
}, mc.cores = 20
) # eo mclapply - f in files
# Rbind
traits_zoo <- bind_rows(res)
rm(res,files) ; gc()
head(traits_zoo) ; dim(traits_zoo)
### Print those plots per variable (4-panel jpeg per env. variable, saved in WD)
vars <- unique(traits_zoo$var) ; vars
v <- "SST"  # loop variable initialised for interactive line-by-line runs
for(v in vars) {
message(paste("Plotting niche traits ditsribution for ",v, sep = ""))
p1 <- ggplot(aes(x = factor(group), y = center2, fill = factor(group)), data = traits_zoo[traits_zoo$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste(v," niche center",sep="")) + scale_x_discrete(labels = NULL) +
theme_classic()
#
p2 <- ggplot(aes(x = factor(group), y = width2, fill = factor(group)), data = traits_zoo[traits_zoo$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste(v," niche width",sep="")) + scale_x_discrete(labels = NULL) +
theme_classic()
p3 <- ggplot(aes(x = center2, y = width2, fill = factor(group)), data = traits_zoo[traits_zoo$var == v,]) +
geom_point(colour = "black", pch = 21) + scale_fill_brewer(name = "", palette = "Paired") +
ylab(paste(v," niche width",sep="")) + xlab(paste(v," niche center",sep="")) + theme_classic()
p4 <- ggplot(aes(x = factor(group), y = prob.range, fill = factor(group)), data = traits_zoo[traits_zoo$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste("HSI range (importance)",sep="")) + scale_x_discrete(labels = NULL) +
scale_y_continuous(limits = c(0,1)) + theme_classic()
require("ggpubr")
panel <- ggarrange(p1,p2,p3,p4, ncol = 2, nrow = 2, align = "hv")
# panel
setwd(WD)
ggsave(plot = panel, filename = paste("panel_niche_traits_zoo_",v,".jpg", sep = ""), width = 10, height = 6, dpi = 300)
} # eo v in vars
### Plot range if HSI per var and group (facet per vars)
# head(traits_zoo)
# ggplot(aes(x = factor(group), y = weight.pca, fill = factor(group)), data = traits_zoo) +
#     geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Spectral") +
#     ylab("Importance in constraining HSI") + xlab("") + theme_classic() +
#     scale_x_discrete(labels = NULL) + scale_y_continuous(limits = c(0,1)) +
#     facet_wrap(~factor(var), nrow = 2, ncol = 4, scales = "fixed")
### Prepare data for PCA (analysis in multivariate env space); same melt/dcast as phyto
m_traits_zoo <- melt(traits_zoo, id.vars = c("group","species","var"))
# Create every combination of var x variable in a factor column and dcast
m_traits_zoo$new.col <- factor(paste(m_traits_zoo$var ,m_traits_zoo$variable, sep = "_"))
unique(m_traits_zoo$new.col)
# Dcast to put those new.col as columns (return to wide format)
d_traits_zoo <- dcast(m_traits_zoo[,c(1,2,5,6)], group + species ~ new.col, value.var = "value")
# Select the columns corresponding to: center2, width2 and weight.pca (weights to give to the columns, not the individuals)
cols2keep <- c(colnames(d_traits_zoo)[grep("center2", colnames(d_traits_zoo))],
colnames(d_traits_zoo)[grep("width2", colnames(d_traits_zoo))],
colnames(d_traits_zoo)[grep("weight.pca", colnames(d_traits_zoo))]
) # eo cols2keep
cols2keep
data4pca <- d_traits_zoo[,c("group","species",cols2keep)]
dim(data4pca)
head(data4pca)
### 21/10/2020: Use 'data4pca' to test variations of niche traits across groups of zooplankton
# Drop the '2' suffix so trait columns read e.g. 'SST_center'
colnames(data4pca) <- str_replace(as.character(colnames(data4pca)), "center2", "center")
colnames(data4pca) <- str_replace(as.character(colnames(data4pca)), "width2", "width")
# In a lapply, perform non parametric tests of variance (K-W) and return pvalue and Chi2 values
res.tests <- lapply(colnames(data4pca)[c(3:18)], function(v) {
# v <- "SST_center"
test <- kruskal.test(get(v) ~ factor(group), data = data4pca) # str(summary(test))
# test$p.value
return( data.frame(niche.trait = v, Chi2 = test$stat, pval = test$p.value) )
} # eo fun
) # eo lapply
tests <- bind_rows(res.tests) ; rm(res.tests) ; gc()
rownames(tests) <- NULL
tests
### And select those with pval < 0.01 (note: threshold used below is 0.001)
tests[tests$pval <= 0.001,]
### Check how to add weights properly in PCA (same variable weighting as phyto)
vars.weights <- data.frame(traits_zoo %>% group_by(var) %>% summarize(w = mean(weight.pca)) )
vars.weights$w2 <- vars.weights$w/max(vars.weights$w)
#vars.weights[order(vars.weights$w2, decreasing = T),] # OK
### colnames in data4pca and variables in vars.weight$w2 follow the same order, so just double the vars.weight$w2 vector
col.weights <- c(vars.weights$w2,vars.weights$w2) ; col.weights
# Weighted PCA on the 16 zooplankton trait columns
pca.zoo <- PCA(data4pca[,c(3:18)], scale.unit = T, ncp = 10, graph = F, col.w = col.weights)
summary(pca.zoo)
# Axis labels carrying the % of variance explained
eig <- data.frame(perc = pca.zoo$eig[,"percentage of variance"], nb = c(1:nrow(pca.zoo$eig)) ) # eig
pca1 <- paste0("PC1 (",floor(eig$perc[1]*100)/100,"%)")
pca2 <- paste0("PC2 (",floor(eig$perc[2]*100)/100,"%)")
pca3 <- paste0("PC3 (",floor(eig$perc[3]*100)/100,"%)")
pca4 <- paste0("PC4 (",floor(eig$perc[4]*100)/100,"%)")
pca5 <- paste0("PC5 (",floor(eig$perc[5]*100)/100,"%)")
# Provide the PC coords to data4pca
data4pca[,paste("PC",c(1:5),sep="")] <- pca.zoo$ind$coord[,c(1:5)]
summary(data4pca)
# Boxplots of PC scores per zooplankton group
ggplot(aes(x = factor(group), y = PC1, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab("PC1") + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC2, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab("PC2") + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC3, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab("PC3") + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC4, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab("PC4") + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
### Compute the groups' centroid in PCA space
groups.coords <- data.frame(data4pca %>% group_by(group) %>% summarize(PC1 = mean(PC1), PC2 = mean(PC2), PC3 = mean(PC3), PC4 = mean(PC4)) )
groups.coords
### PC plot: plot the species coordinates in PC space and report the centroid of the 3 groups
ggplot() + geom_point(aes(x = PC1, y = PC2, fill = factor(group)), data = data4pca, pch = 21, colour = "black", alpha = .5) +
scale_fill_brewer(name = "", palette = "Paired") +
geom_point(aes(x = PC1, y = PC2, fill = factor(group)), data = groups.coords, pch = 21, colour = "black", size = 4) +
xlab("PC1") + ylab("PC2") + geom_hline(yintercept = 0, linetype = "dashed") + geom_vline(xintercept = 0, linetype = "dashed") +
theme_classic()
### Same with PC3 and PC4
ggplot() + geom_point(aes(x = PC3, y = PC4, fill = factor(group)), data = data4pca, pch = 21, colour = "black", alpha = .5) +
scale_fill_brewer(name = "", palette = "Paired") +
geom_point(aes(x = PC3, y = PC4, fill = factor(group)), data = groups.coords, pch = 21, colour = "black", size = 4) +
xlab("PC3") + ylab("PC4") + geom_hline(yintercept = 0, linetype = "dashed") + geom_vline(xintercept = 0, linetype = "dashed") +
theme_classic()
###
pcad <- augment.PCA(pca.zoo)
pcad$var # Re-name properly
pcad$var <- str_replace_all(pcad$var, "_", " ")
pcad$var <- str_replace_all(pcad$var, "center2", "center")
pcad$var <- str_replace_all(pcad$var, "width2", "width")
ggplot(pcad) +
coord_fixed() + scale_x_continuous(breaks=0) + scale_y_continuous(breaks=0) +
annotate(geom="path", colour="black", x=cos(seq(0, 2*pi, length.out=100)), y=sin(seq(0, 2*pi, length.out=100)), colour = "grey30") +
geom_segment(aes(x=0, xend = Dim.1, y=0, yend = Dim.2), arrow=arrow(angle=20, length=unit(0.01, "npc"))) +
scale_colour_manual(name = "", values = c("#4d9221","#c51b7d"), guide = F) +
geom_text_repel(aes(x=Dim.1, y=Dim.2, label=var),
data=filter(pcad, (Dim.1^2+Dim.2^2) > 0.2^2), segment.alpha=0.5) +
xlab(pca1) + ylab(pca2) + theme_bw()
#
ggplot(pcad) +
coord_fixed() + scale_x_continuous(breaks=0) + scale_y_continuous(breaks=0) +
annotate(geom="path", colour="black", x=cos(seq(0, 2*pi, length.out=100)), y=sin(seq(0, 2*pi, length.out=100)), colour = "grey30") +
geom_segment(aes(x=0, xend = Dim.2, y=0, yend = Dim.3), arrow=arrow(angle=20, length=unit(0.01,"npc"))) +
scale_colour_manual(name = "", values = c("#4d9221","#c51b7d"), guide = F) +
geom_text_repel(aes(x=Dim.2, y=Dim.3, label = var),
data=filter(pcad, (Dim.2^2+Dim.3^2) > 0.2^2), segment.alpha=0.5) +
xlab(pca3) + ylab(pca4) + theme_bw()
# --------------------------------------------------------------------------------------------------------------------------------
### 21/10/2020:
setwd("/net/kryo/work/fabioben/OVERSEE/data/biology/data_for_group_studies/phytoplankton/group_background/resp_curves/niche_traits/")
files <- dir()[grep("resp_curves_GAM",dir())] # files
# Concatenate all niche traits
require("parallel")
res <- mclapply(files, function(f) {
t <- get(load(f))
return(t)
}, mc.cores = 20
) # eo mclapply - f in files
# Rbind
traits_phy <- bind_rows(res)
rm(res,files) ; gc()
head(traits_phy) ; dim(traits_phy)
### Make sur SST (or another variable) ranges are the same across species
unique(traits_phy$species)
summary(traits_phy[traits_phy$species == "Chaetoceros_laciniosus","SST"])
summary(traits_phy[traits_phy$species == "Tripos_pulchellus","SST"])
| /Script#27.7_OVERSEE_groups_niche_traits_analyze.R | permissive | benfabio/Benedetti-et-al.-Putting-the-diversity-of-plankton-functional-groups-on-the-global-map-in-prep.- | R | false | false | 22,526 | r |
##### 08/10/2020 - ETHZ - Fabio Benedetti © UP Group, IBP, ETH Zürich
##### Script for :
# - Loading the phytoplankton and zooplankton niche traits from Script#27.6
# - Check their overall distribution and then per group
# - Test variance in niche traits across groups, and phyto vs. zooplankton for common variables
# - Ordinate phytoplankton and zooplankton specie sin niche traits space?
# - Try to compute and plot a group-level mean response curves
### Last update: 21/10/2020
# --------------------------------------------------------------------------------------------------------------------------------
library("tidyverse")
library("reshape2")
library("biomod2")
library("viridis")
library("vegan")
library("FactoMineR")
WD <- getwd()
world2 <- map_data("world2")
world <- map_data("world")
# --------------------------------------------------------------------------------------------------------------------------------
### 1°) Phytoplankton
# Go to working directories
# NOTE(review): hard-coded server path — the script assumes it runs on the kryo cluster; confirm before reuse
setwd("/net/kryo/work/fabioben/OVERSEE/data/biology/data_for_group_studies/phytoplankton/group_background/resp_curves/niche_traits/")
files <- dir()[grep("niche_traits_GAM",dir())] # files
# Concatenate all niche traits
# Load every matching .RData file in parallel; get(load(f)) returns the object
# stored inside each file (presumably one niche-traits data.frame per species — TODO confirm)
require("parallel")
res <- mclapply(files, function(f) {
t <- get(load(f))
return(t)
}, mc.cores = 20
) # eo mclapply - f in files
# Rbind
traits_phy <- bind_rows(res)
rm(res,files) ; gc()
head(traits_phy) ; dim(traits_phy)
### Plot distribution of niche traits (center and center2, width and width2)
ggplot(aes(x = factor(group), y = center, fill = factor(group)), data = traits_phy[traits_phy$var == "SST",]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "") +
xlab("") + ylab("SST niche center (°C)") + theme_classic()
ggplot(aes(x = factor(group), y = width, fill = factor(group)), data = traits_phy[traits_phy$var == "SST",]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "") +
xlab("") + ylab("SST niche width (°C)") + theme_classic()
ggplot(aes(x = center, y = width, fill = factor(group)), data = traits_phy[traits_phy$var == "SST",]) +
geom_point(colour = "black", pch = 21) + scale_fill_brewer(name = "", palette = "Spectral") +
ylab("SST niche width (°C)") + xlab("SST niche center (°C)") + theme_classic()
### Print those plots per variable
# For each environmental variable v, save a 2x2 panel:
# p1 niche center per group, p2 niche width per group, p3 center vs width, p4 HSI range (variable importance)
vars <- unique(traits_phy$var) ; vars
v <- "SST"
for(v in vars) {
message(paste("Plotting niche traits ditsribution across groups for ",v, sep = ""))
p1 <- ggplot(aes(x = factor(group), y = center2, fill = factor(group)), data = traits_phy[traits_phy$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste(v," niche center",sep="")) + scale_x_discrete(labels = NULL) +
theme_classic()
#
p2 <- ggplot(aes(x = factor(group), y = width2, fill = factor(group)), data = traits_phy[traits_phy$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste(v," niche width",sep="")) + scale_x_discrete(labels = NULL) +
theme_classic()
p3 <- ggplot(aes(x = center2, y = width2, fill = factor(group)), data = traits_phy[traits_phy$var == v,]) +
geom_point(colour = "black", pch = 21) + scale_fill_brewer(name = "", palette = "Paired") +
ylab(paste(v," niche width",sep="")) + xlab(paste(v," niche center",sep="")) + theme_classic()
p4 <- ggplot(aes(x = factor(group), y = prob.range, fill = factor(group)), data = traits_phy[traits_phy$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste("HSI range (importance)",sep="")) + scale_x_discrete(labels = NULL) +
scale_y_continuous(limits = c(0,1)) + theme_classic()
require("ggpubr")
panel <- ggarrange(p1,p2,p3,p4, ncol = 2, nrow = 2, align = "hv")
# panel
# Write the panel back into the directory the script was started from (WD set at the top of the file)
setwd(WD)
ggsave(plot = panel, filename = paste("panel_niche_traits_phyto_",v,".jpg", sep = ""), width = 8, height = 5, dpi = 300)
} # eo v in vars
### Summarize variations in those niche traits (let's keep center2 and width2) in a PCA
### But first...need to reformat the data.frame so columns look like: SST_center; SST_width; etc. etc.
### Need to use melt and dcast I guess
#head(traits_phy)
m_traits_phy <- melt(traits_phy, id.vars = c("group","species","var"))
unique(m_traits_phy$variable)
# Create every combination of var x variable in a factor column and dcast
m_traits_phy$new.col <- factor(paste(m_traits_phy$var ,m_traits_phy$variable, sep = "_"))
unique(m_traits_phy$new.col)
# Dcast to put those new.col as columns (return to wide format)
d_traits_phy <- dcast(m_traits_phy[,c(1,2,5,6)], group + species ~ new.col, value.var = "value")
# Select the columns corresponding to: center2, width2 and weight.pca (weights to give to the columns, not the individuals)
# NOTE(review): grep patterns are regexes, so the "." in "weight.pca" matches any character — harmless here, but verify no other column names can match
cols2keep <- c(colnames(d_traits_phy)[grep("center2", colnames(d_traits_phy))],
colnames(d_traits_phy)[grep("width2", colnames(d_traits_phy))],
colnames(d_traits_phy)[grep("weight.pca", colnames(d_traits_phy))]
) # eo cols2keep
cols2keep
# Retain columns used for PCA
data4pca <- d_traits_phy[,c("group","species",cols2keep)]
dim(data4pca) ; head(data4pca)
### 21/10/2020: Use data4pca to test variations of niche traits across groups
colnames(data4pca) <- str_replace(as.character(colnames(data4pca)), "center2", "center")
colnames(data4pca) <- str_replace(as.character(colnames(data4pca)), "width2", "width")
# In a lapply, perform non parametric tests of variance (K-W) and return pvalue and Chi2 values
# Columns 3:18 are assumed to be the 8 centers + 8 widths — TODO confirm against colnames(data4pca)
res.tests <- lapply(colnames(data4pca)[c(3:18)], function(v) {
# v <- "SST_center"
test <- kruskal.test(get(v) ~ factor(group), data = data4pca) # str(summary(test))
# test$p.value
# NOTE(review): test$stat relies on partial name matching for test$statistic
return( data.frame(niche.trait = v, Chi2 = test$stat, pval = test$p.value) )
} # eo fun
) # eo lapply
tests <- bind_rows(res.tests) ; rm(res.tests) ; gc()
rownames(tests) <- NULL
tests
### And select those with pval < 0.01
tests[tests$pval <= 0.01,]
### Check how to add weights properly in PCA
# ?PCA
# Determine the predictors mean weight.pca (from)
vars.weights <- data.frame(traits_phy %>% group_by(var) %>% summarize(w = mean(weight.pca), probs = mean(prob.range)) )
vars.weights
# Rescale the weights relative to the most important variable (max weight -> 1)
vars.weights$w2 <- vars.weights$w/max(vars.weights$w)
#vars.weights[order(vars.weights$w2, decreasing = T),] # OK
### colnames in data4pca and variables in vars.weight$w2 follow the same order, so just double the vars.weight$w2 vector
col.weights <- c(vars.weights$w2,vars.weights$w2) ; col.weights
# Weighted, scaled PCA on the 16 niche-trait columns; col.w weights the variables (columns)
pca.phy <- PCA(data4pca[,c(3:18)], scale.unit = T, ncp = 10, graph = F, col.w = col.weights)
summary(pca.phy)
str(pca.phy)
# Provide the PC coords to data4pca
data4pca[,paste("PC",c(1:5),sep="")] <- pca.phy$ind$coord[,c(1:5)]
summary(data4pca)
# Axis labels with explained variance, truncated to 2 decimals via floor(x*100)/100
eig <- data.frame(perc = pca.phy$eig[,"percentage of variance"], nb = c(1:nrow(pca.phy$eig)) ) # eig
pca1 <- paste0("PC1 (",floor(eig$perc[1]*100)/100,"%)")
pca2 <- paste0("PC2 (",floor(eig$perc[2]*100)/100,"%)")
pca3 <- paste0("PC3 (",floor(eig$perc[3]*100)/100,"%)")
pca4 <- paste0("PC4 (",floor(eig$perc[4]*100)/100,"%)")
pca5 <- paste0("PC5 (",floor(eig$perc[5]*100)/100,"%)")
### Boxplots of the species' PC scores (PC1-PC4) per phytoplankton group
ggplot(aes(x = factor(group), y = PC1, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(pca1) + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC2, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(pca2) + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC3, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(pca3) + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC4, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(pca4) + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
### Compute the groups' centroid in PCA space
groups.coords <- data.frame(data4pca %>% group_by(group) %>% summarize(PC1 = mean(PC1), PC2 = mean(PC2), PC3 = mean(PC3), PC4 = mean(PC4)) )
groups.coords
### PC plot: plot the species coordinates in PC space and report the centroid of the 3 groups
# Small transparent points = species; large opaque points = group centroids
ggplot() + geom_point(aes(x = PC1, y = PC2, fill = factor(group)), data = data4pca, pch = 21, colour = "black", alpha = .5) +
geom_point(aes(x = PC1, y = PC2, fill = factor(group)), data = groups.coords, pch = 21, colour = "black", size = 4) +
scale_fill_brewer(name = "", palette = "Paired") + xlab(pca1) + ylab(pca2) +
geom_hline(yintercept = 0, linetype = "dashed") + geom_vline(xintercept = 0, linetype = "dashed") +
theme_classic()
#
### Same with PC3 and PC4
ggplot() + geom_point(aes(x = PC3, y = PC4, fill = factor(group)), data = data4pca, pch = 21, colour = "black", alpha = .5) +
geom_point(aes(x = PC3, y = PC4, fill = factor(group)), data = groups.coords, pch = 21, colour = "black", size = 4) +
scale_fill_brewer(name = "", palette = "Paired") + xlab(pca3) + ylab(pca4) +
geom_hline(yintercept = 0, linetype = "dashed") + geom_vline(xintercept = 0, linetype = "dashed") +
theme_classic()
### 09/10/2020: Replot PCA space with variables
library("ggrepel")
library("broom")
### Make sure the 'dims' argument is as long as the nb of PCs retained
# Tidy the coordinates of a FactoMineR::PCA object into one data frame.
#
# x     : a PCA object (a list with elements such as var, ind, quanti.sup,
#         quali.sup, each holding a $coord matrix)
# dims  : indices of the dimensions (columns of $coord) to keep; must not
#         exceed the number of PCs retained in the PCA (ncp)
# which : "col" returns variable loadings (var + quanti.sup);
#         anything else returns individual scores (ind + quali.sup)
# Returns a data.frame with one row per variable/individual, the selected
# Dim.* columns, a 'type' column (element of origin) and a 'var' column
# (row name, i.e. the variable/individual label).
augment.PCA <- function(x, dims = c(1:6), which="col") {
  # Extract one element's coordinate matrix as a tagged data frame,
  # or NULL when the element is absent or empty.
  .get <- function(x, element, dims) {
    if (is.null(x[[element]])) {
      # e.g. a PCA fitted without supplementary variables has no quanti.sup
      return(NULL)
    }
    # drop = FALSE keeps a one-row coord matrix a matrix; without it,
    # [ , dims] collapses a single row to a vector and as.data.frame()
    # would turn it into a long one-column data frame (wrong shape).
    y <- as.data.frame(x[[element]]$coord[, dims, drop = FALSE])
    if (nrow(y) == 0) {
      y <- NULL
    } else {
      y$type <- element
    }
    return(y)
  }
  if (which == "col") {
    # variable loadings (+ supplementary quantitative variables, if any)
    y <- rbind(.get(x, "var", dims), .get(x, "quanti.sup", dims))
  } else {
    # individual scores (+ supplementary qualitative factors, if any)
    y <- rbind(.get(x, "ind", dims), .get(x, "quali.sup", dims))
  }
  # Promote the row names (variable / individual labels) to a proper column
  y$var <- row.names(y)
  row.names(y) <- NULL
  return(y)
}
# Correlation-circle plots of the variable loadings (unit circle + arrows + labels)
pcad <- augment.PCA(pca.phy)
pcad$var # Re-name properly
pcad$var <- str_replace_all(pcad$var, "_", " ")
#pcad$var <- str_replace_all(pcad$var, "center2", "center")
#pcad$var <- str_replace_all(pcad$var, "width2", "width")
# NOTE(review): 'colour' is passed twice to annotate() below ("black" and "grey30");
# only one can take effect — consider removing the duplicate
ggplot(pcad) +
coord_fixed() + scale_x_continuous(breaks=0) + scale_y_continuous(breaks=0) +
annotate(geom="path", colour="black", x=cos(seq(0, 2*pi, length.out=100)), y=sin(seq(0, 2*pi, length.out=100)), colour = "grey30") +
geom_segment(aes(x=0, xend = Dim.1, y=0, yend = Dim.2), arrow=arrow(angle=20, length=unit(0.01, "npc"))) +
scale_colour_manual(name = "", values = c("#4d9221","#c51b7d"), guide = F) +
geom_text_repel(aes(x=Dim.1, y=Dim.2, label=var),
data=filter(pcad, (Dim.1^2+Dim.2^2) > 0.2^2), segment.alpha=0.5) +
xlab(pca1) + ylab(pca2) + theme_bw()
# NOTE(review): this second plot draws Dim.2 vs Dim.3 but labels the axes with
# pca3/pca4 ("PC3"/"PC4") — looks like an axis/label mismatch, verify intent
ggplot(pcad) +
coord_fixed() + scale_x_continuous(breaks=0) + scale_y_continuous(breaks=0) +
annotate(geom="path", colour="black", x=cos(seq(0, 2*pi, length.out=100)), y=sin(seq(0, 2*pi, length.out=100)), colour = "grey30") +
geom_segment(aes(x=0, xend = Dim.2, y=0, yend = Dim.3), arrow=arrow(angle=20, length=unit(0.01,"npc"))) +
scale_colour_manual(name = "", values = c("#4d9221","#c51b7d"), guide = F) +
geom_text_repel(aes(x=Dim.2, y=Dim.3, label = var),
data=filter(pcad, (Dim.2^2+Dim.3^2) > 0.2^2), segment.alpha=0.5) +
xlab(pca3) + ylab(pca4) + theme_bw()
# ----------------------------------------------------------------
### 2°) Zooplankton
# Same workflow as for phytoplankton: load per-species niche traits in parallel,
# concatenate, then save a 2x2 panel per environmental variable
setwd("/net/kryo/work/fabioben/OVERSEE/data/biology/data_for_group_studies/zooplankton/group_background/resp_curves/niche_traits/")
files <- dir()[grep("niche_traits_GAM",dir())] ; files
require("parallel")
res <- mclapply(files, function(f) {
t <- get(load(f))
return(t)
}, mc.cores = 20
) # eo mclapply - f in files
# Rbind
traits_zoo <- bind_rows(res)
rm(res,files) ; gc()
head(traits_zoo) ; dim(traits_zoo)
### Print those plots per variable
# p1 niche center per group, p2 niche width per group, p3 center vs width, p4 HSI range (variable importance)
vars <- unique(traits_zoo$var) ; vars
v <- "SST"
for(v in vars) {
message(paste("Plotting niche traits ditsribution for ",v, sep = ""))
p1 <- ggplot(aes(x = factor(group), y = center2, fill = factor(group)), data = traits_zoo[traits_zoo$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste(v," niche center",sep="")) + scale_x_discrete(labels = NULL) +
theme_classic()
#
p2 <- ggplot(aes(x = factor(group), y = width2, fill = factor(group)), data = traits_zoo[traits_zoo$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste(v," niche width",sep="")) + scale_x_discrete(labels = NULL) +
theme_classic()
p3 <- ggplot(aes(x = center2, y = width2, fill = factor(group)), data = traits_zoo[traits_zoo$var == v,]) +
geom_point(colour = "black", pch = 21) + scale_fill_brewer(name = "", palette = "Paired") +
ylab(paste(v," niche width",sep="")) + xlab(paste(v," niche center",sep="")) + theme_classic()
p4 <- ggplot(aes(x = factor(group), y = prob.range, fill = factor(group)), data = traits_zoo[traits_zoo$var == v,]) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab(paste("HSI range (importance)",sep="")) + scale_x_discrete(labels = NULL) +
scale_y_continuous(limits = c(0,1)) + theme_classic()
require("ggpubr")
panel <- ggarrange(p1,p2,p3,p4, ncol = 2, nrow = 2, align = "hv")
# panel
setwd(WD)
ggsave(plot = panel, filename = paste("panel_niche_traits_zoo_",v,".jpg", sep = ""), width = 10, height = 6, dpi = 300)
} # eo v in vars
### Plot range of HSI per var and group (facet per vars)
# head(traits_zoo)
# ggplot(aes(x = factor(group), y = weight.pca, fill = factor(group)), data = traits_zoo) +
# geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Spectral") +
# ylab("Importance in constraining HSI") + xlab("") + theme_classic() +
# scale_x_discrete(labels = NULL) + scale_y_continuous(limits = c(0,1)) +
# facet_wrap(~factor(var), nrow = 2, ncol = 4, scales = "fixed")
### Prepare data for PCA (analysis in multivariate env space)
# Long format, then build "var_variable" column names (e.g. SST_center2) and dcast back to wide
m_traits_zoo <- melt(traits_zoo, id.vars = c("group","species","var"))
# Create every combination of var x variable in a factor column and dcast
m_traits_zoo$new.col <- factor(paste(m_traits_zoo$var ,m_traits_zoo$variable, sep = "_"))
unique(m_traits_zoo$new.col)
# Dcast to put those new.col as columns (return to wide format)
d_traits_zoo <- dcast(m_traits_zoo[,c(1,2,5,6)], group + species ~ new.col, value.var = "value")
# Select the columns corresponding to: center2, width2 and weight.pca (weights to give to the columns, not the individuals)
cols2keep <- c(colnames(d_traits_zoo)[grep("center2", colnames(d_traits_zoo))],
colnames(d_traits_zoo)[grep("width2", colnames(d_traits_zoo))],
colnames(d_traits_zoo)[grep("weight.pca", colnames(d_traits_zoo))]
) # eo cols2keep
cols2keep
data4pca <- d_traits_zoo[,c("group","species",cols2keep)]
dim(data4pca)
head(data4pca)
### 21/10/2020: Use 'data4pca' to test variations of niche traits across groups of zooplankton
# NOTE(review): this overwrites the phytoplankton 'data4pca' built above
colnames(data4pca) <- str_replace(as.character(colnames(data4pca)), "center2", "center")
colnames(data4pca) <- str_replace(as.character(colnames(data4pca)), "width2", "width")
# In a lapply, perform non parametric tests of variance (K-W) and return pvalue and Chi2 values
res.tests <- lapply(colnames(data4pca)[c(3:18)], function(v) {
# v <- "SST_center"
test <- kruskal.test(get(v) ~ factor(group), data = data4pca) # str(summary(test))
# test$p.value
# NOTE(review): test$stat relies on partial name matching for test$statistic
return( data.frame(niche.trait = v, Chi2 = test$stat, pval = test$p.value) )
} # eo fun
) # eo lapply
tests <- bind_rows(res.tests) ; rm(res.tests) ; gc()
rownames(tests) <- NULL
tests
### And select those with pval <= 0.001 (stricter threshold than the 0.01 used for phytoplankton)
tests[tests$pval <= 0.001,]
### Check how to add weights properly in PCA
vars.weights <- data.frame(traits_zoo %>% group_by(var) %>% summarize(w = mean(weight.pca)) )
# Rescale the weights relative to the most important variable (max weight -> 1)
vars.weights$w2 <- vars.weights$w/max(vars.weights$w)
#vars.weights[order(vars.weights$w2, decreasing = T),] # OK
### colnames in data4pca and variables in vars.weight$w2 follow the same order, so just double the vars.weight$w2 vector
col.weights <- c(vars.weights$w2,vars.weights$w2) ; col.weights
# Weighted, scaled PCA on the 16 niche-trait columns; col.w weights the variables (columns)
pca.zoo <- PCA(data4pca[,c(3:18)], scale.unit = T, ncp = 10, graph = F, col.w = col.weights)
summary(pca.zoo)
# Axis labels with explained variance, truncated to 2 decimals via floor(x*100)/100
eig <- data.frame(perc = pca.zoo$eig[,"percentage of variance"], nb = c(1:nrow(pca.zoo$eig)) ) # eig
pca1 <- paste0("PC1 (",floor(eig$perc[1]*100)/100,"%)")
pca2 <- paste0("PC2 (",floor(eig$perc[2]*100)/100,"%)")
pca3 <- paste0("PC3 (",floor(eig$perc[3]*100)/100,"%)")
pca4 <- paste0("PC4 (",floor(eig$perc[4]*100)/100,"%)")
pca5 <- paste0("PC5 (",floor(eig$perc[5]*100)/100,"%)")
# Provide the PC coords to data4pca
data4pca[,paste("PC",c(1:5),sep="")] <- pca.zoo$ind$coord[,c(1:5)]
summary(data4pca)
### Boxplots of the species' PC scores (PC1-PC4) per zooplankton group
# NOTE(review): y-axis labels here are the literal strings "PC1".."PC4", unlike the
# phytoplankton section which uses the pca1..pca4 labels carrying explained variance
ggplot(aes(x = factor(group), y = PC1, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab("PC1") + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC2, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab("PC2") + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC3, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab("PC3") + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
#
ggplot(aes(x = factor(group), y = PC4, fill = factor(group)), data = data4pca) +
geom_boxplot(colour = "black") + scale_fill_brewer(name = "", palette = "Paired") +
xlab("") + ylab("PC4") + scale_x_discrete(labels = NULL) +
geom_hline(yintercept = 0, linetype = "dashed") + theme_classic()
### Compute the groups' centroid in PCA space
groups.coords <- data.frame(data4pca %>% group_by(group) %>% summarize(PC1 = mean(PC1), PC2 = mean(PC2), PC3 = mean(PC3), PC4 = mean(PC4)) )
groups.coords
### PC plot: plot the species coordinates in PC space and report the centroid of the 3 groups
# Small transparent points = species; large opaque points = group centroids
ggplot() + geom_point(aes(x = PC1, y = PC2, fill = factor(group)), data = data4pca, pch = 21, colour = "black", alpha = .5) +
scale_fill_brewer(name = "", palette = "Paired") +
geom_point(aes(x = PC1, y = PC2, fill = factor(group)), data = groups.coords, pch = 21, colour = "black", size = 4) +
xlab("PC1") + ylab("PC2") + geom_hline(yintercept = 0, linetype = "dashed") + geom_vline(xintercept = 0, linetype = "dashed") +
theme_classic()
### Same with PC3 and PC4
ggplot() + geom_point(aes(x = PC3, y = PC4, fill = factor(group)), data = data4pca, pch = 21, colour = "black", alpha = .5) +
scale_fill_brewer(name = "", palette = "Paired") +
geom_point(aes(x = PC3, y = PC4, fill = factor(group)), data = groups.coords, pch = 21, colour = "black", size = 4) +
xlab("PC3") + ylab("PC4") + geom_hline(yintercept = 0, linetype = "dashed") + geom_vline(xintercept = 0, linetype = "dashed") +
theme_classic()
### Correlation-circle plots of the zooplankton variable loadings
pcad <- augment.PCA(pca.zoo)
pcad$var # Re-name properly
pcad$var <- str_replace_all(pcad$var, "_", " ")
pcad$var <- str_replace_all(pcad$var, "center2", "center")
pcad$var <- str_replace_all(pcad$var, "width2", "width")
# NOTE(review): 'colour' is passed twice to annotate() below ("black" and "grey30"); only one can take effect
ggplot(pcad) +
coord_fixed() + scale_x_continuous(breaks=0) + scale_y_continuous(breaks=0) +
annotate(geom="path", colour="black", x=cos(seq(0, 2*pi, length.out=100)), y=sin(seq(0, 2*pi, length.out=100)), colour = "grey30") +
geom_segment(aes(x=0, xend = Dim.1, y=0, yend = Dim.2), arrow=arrow(angle=20, length=unit(0.01, "npc"))) +
scale_colour_manual(name = "", values = c("#4d9221","#c51b7d"), guide = F) +
geom_text_repel(aes(x=Dim.1, y=Dim.2, label=var),
data=filter(pcad, (Dim.1^2+Dim.2^2) > 0.2^2), segment.alpha=0.5) +
xlab(pca1) + ylab(pca2) + theme_bw()
#
# NOTE(review): this plot draws Dim.2 vs Dim.3 but labels the axes with pca3/pca4 — verify intent
ggplot(pcad) +
coord_fixed() + scale_x_continuous(breaks=0) + scale_y_continuous(breaks=0) +
annotate(geom="path", colour="black", x=cos(seq(0, 2*pi, length.out=100)), y=sin(seq(0, 2*pi, length.out=100)), colour = "grey30") +
geom_segment(aes(x=0, xend = Dim.2, y=0, yend = Dim.3), arrow=arrow(angle=20, length=unit(0.01,"npc"))) +
scale_colour_manual(name = "", values = c("#4d9221","#c51b7d"), guide = F) +
geom_text_repel(aes(x=Dim.2, y=Dim.3, label = var),
data=filter(pcad, (Dim.2^2+Dim.3^2) > 0.2^2), segment.alpha=0.5) +
xlab(pca3) + ylab(pca4) + theme_bw()
# --------------------------------------------------------------------------------------------------------------------------------
### 21/10/2020:
# Load the phytoplankton GAM response curves (not the niche traits this time)
setwd("/net/kryo/work/fabioben/OVERSEE/data/biology/data_for_group_studies/phytoplankton/group_background/resp_curves/niche_traits/")
files <- dir()[grep("resp_curves_GAM",dir())] # files
# Concatenate all niche traits
require("parallel")
res <- mclapply(files, function(f) {
t <- get(load(f))
return(t)
}, mc.cores = 20
) # eo mclapply - f in files
# Rbind
# NOTE(review): 'traits_phy' is re-used here for the response curves, clobbering the
# niche-traits table loaded in section 1°) — consider a distinct name (e.g. curves_phy)
traits_phy <- bind_rows(res)
rm(res,files) ; gc()
head(traits_phy) ; dim(traits_phy)
### Make sure SST (or another variable) ranges are the same across species
unique(traits_phy$species)
summary(traits_phy[traits_phy$species == "Chaetoceros_laciniosus","SST"])
summary(traits_phy[traits_phy$species == "Tripos_pulchellus","SST"])
|
#' Construct a transmission network from a phylogenetic tree (phylo object)
#' Using the terminal branch lengths of the leaves (tips), join leaves (tips) with approximately equal branch lengths
#'
#'
#' @param phylo.tree a phylo object
#' @param epsilon cut-off value: maximum absolute difference between two terminal
#'   branch lengths for the corresponding tips to be connected (assumed >= 0)
#' @return an undirected igraph graph whose vertices are the tips of the tree
#'   (note: the former doc said "a transmission matrix"; the function returns a graph)
#' @examples
#' transm.mat <- ConnectNearBy(phylo.tree = tree)
#' @import igraph
#' @import ape
#' @import phytools
ConnectNearBy <- function(phylo.tree = tree, epsilon = 0.1/2){
  n <- length(phylo.tree$tip.label)
  # Terminal branch length of each tip: in a 'phylo' object the tips are the
  # nodes numbered 1..n, so take the edge whose child node (edge[, 2]) is the tip.
  tip.edges <- match(seq_len(n), phylo.tree$edge[, 2])
  branch.len <- phylo.tree$edge.length[tip.edges]
  # Vectorized pairwise comparison of terminal branch lengths; replaces the
  # former O(n^2) double loop that grew six vectors and rebuilt data frames at
  # every iteration. The former explicit equality clause was redundant since
  # |x - x| = 0 <= epsilon for any non-negative epsilon.
  # Self-pairs (i == j) always match; they are kept on purpose so that every
  # tip becomes a vertex of the graph, and the resulting self-loops are
  # removed by simplify() below.
  near <- which(abs(outer(branch.len, branch.len, "-")) <= epsilon, arr.ind = TRUE)
  # Edge list in tip labels; both (i, j) and (j, i) are generated and later
  # collapsed into a single undirected edge.
  edge.list <- cbind(phylo.tree$tip.label[near[, 1]], phylo.tree$tip.label[near[, 2]])
  f <- graph.edgelist(edge.list)
  g <- simplify(f, remove.loops = TRUE) # remove loops
  l <- as.undirected(g, mode = "collapse") # remove bi-directions
  return(l)
}
| /Misc/ConnectNearBy.R | no_license | niyongabirejunior/arteta | R | false | false | 2,614 | r | #' Construct a transmission network from a phylogenetic tree (phylo object)
#' Using the terminal branch lengths of the leaves (tips), join leaves (tips) with approximately equal branch lengths
#'
#'
#' @param phylo.tree a phylo object
#' @param epsilon cut-off value: maximum absolute difference between two terminal
#'   branch lengths for the corresponding tips to be connected (assumed >= 0)
#' @return an undirected igraph graph whose vertices are the tips of the tree
#'   (note: the former doc said "a transmission matrix"; the function returns a graph)
#' @examples
#' transm.mat <- ConnectNearBy(phylo.tree = tree)
#' @import igraph
#' @import ape
#' @import phytools
ConnectNearBy <- function(phylo.tree = tree, epsilon = 0.1/2){
  n <- length(phylo.tree$tip.label)
  # Terminal branch length of each tip: in a 'phylo' object the tips are the
  # nodes numbered 1..n, so take the edge whose child node (edge[, 2]) is the tip.
  tip.edges <- match(seq_len(n), phylo.tree$edge[, 2])
  branch.len <- phylo.tree$edge.length[tip.edges]
  # Vectorized pairwise comparison of terminal branch lengths; replaces the
  # former O(n^2) double loop that grew six vectors and rebuilt data frames at
  # every iteration. The former explicit equality clause was redundant since
  # |x - x| = 0 <= epsilon for any non-negative epsilon.
  # Self-pairs (i == j) always match; they are kept on purpose so that every
  # tip becomes a vertex of the graph, and the resulting self-loops are
  # removed by simplify() below.
  near <- which(abs(outer(branch.len, branch.len, "-")) <= epsilon, arr.ind = TRUE)
  # Edge list in tip labels; both (i, j) and (j, i) are generated and later
  # collapsed into a single undirected edge.
  edge.list <- cbind(phylo.tree$tip.label[near[, 1]], phylo.tree$tip.label[near[, 2]])
  f <- graph.edgelist(edge.list)
  g <- simplify(f, remove.loops = TRUE) # remove loops
  l <- as.undirected(g, mode = "collapse") # remove bi-directions
  return(l)
}
library(sn);library(mvtnorm); library(scatterplot3d); library(EnvStats); library(plotly)
library(mdatools); library(fitdistrplus); library(xtable); library(in2extRemes)
library(evd); library(SimCop); library(copula); library(mgpd)
# E2.1, nine para BEV models ====
# Plot the dependence function A(w) (evd::abvevd) for the nine parametric
# bivariate extreme-value models, three parameter settings each
# (solid / dashed / dotted).  A(w) closer to 1 means weaker dependence.
par(mfrow = c(2,2)); par(mar = rep(2, 4))
# Logistic model: symmetric; governed by a single `dep` parameter.
abvevd(x = seq(0,1,by=0.01), dep = 0.99, model = "log", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, model = "log", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, model = "log", plot =T, add = T, lty = 3)
# Asymmetric logistic: asy = c(0.8, 0.2) skews the curve.
abvevd(x = seq(0,1,by=0.01), dep = 0.99, asy = c(0.8,0.2), model = "alog", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, asy = c(0.8,0.2), model = "alog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, asy = c(0.8,0.2), model = "alog", plot =T, add = T, lty = 3)
# Husler-Reiss model.
abvevd(x = seq(0,1,by=0.01), dep = 0.99, model = "hr", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, model = "hr", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, model = "hr", plot =T, add = T, lty = 3)
# Negative logistic model.
abvevd(x = seq(0,1,by=0.01), dep = 0.99, model = "neglog", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, model = "neglog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, model = "neglog", plot =T, add = T, lty = 3)
# Asymmetric negative logistic model.
abvevd(x = seq(0,1,by=0.01), dep = 0.99, asy = c(0.8,0.2), model = "aneglog", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, asy = c(0.8,0.2), model = "aneglog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, asy = c(0.8,0.2), model = "aneglog", plot =T, add = T, lty = 3)
# Bilogistic model: two parameters (alpha, beta); first panel varies alpha.
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.5, model = "bilog", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.5, model = "bilog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.1, beta = 0.5, model = "bilog", plot =T, add = T, lty = 3)
# Bilogistic again, now varying beta.
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.9, model = "bilog", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.5, model = "bilog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.1, model = "bilog", plot =T, add = T, lty = 3)
# Negative bilogistic model.
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.9, model = "negbilog", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.5, model = "negbilog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.1, model = "negbilog", plot =T, add = T, lty = 3)
# Coles-Tawn model.
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.9, model = "ct", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.5, model = "ct", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.1, model = "ct", plot =T, add = T, lty = 3)
# Asymmetric mixed model.
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.05, model = "amix", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.2, model = "amix", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.1, model = "amix", plot =T, add = T, lty = 3)
# Parameterization summary:
# dep
# log, alog - asy, hr, neglog, aneglog - asy
# alpha beta
# bilog, negbilog, ct, amix
# Coles-Tawn and Aneglog seem like interesting models, so they are chosen.
# Re-plot the two selected models for a closer look.
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.9, model = "ct", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.5, model = "ct", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.1, model = "ct", plot =T, add = T, lty = 3)
abvevd(x = seq(0,1,by=0.01), dep = 0.99, asy = c(0.8,0.2), model = "aneglog", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, asy = c(0.8,0.2), model = "aneglog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, asy = c(0.8,0.2), model = "aneglog", plot =T, add = T, lty = 3)
# E2.2 Choose 2 and generate 200 observation ====
# CT
# Dependence function A(w) of the Coles-Tawn model, varying one parameter
# at a time.  Four curves per panel with line types 1:4.
par(mfrow = c(1,2)); par(mar = rep(2, 4))
# Varying beta
abvevd(x = seq(0,1,by=0.01), alpha = 0.8, beta = 20, model = "ct", plot =T, main = "A(w) function of beta, alpha = 0.8", col = "blue")
abvevd(x = seq(0,1,by=0.01), alpha = 0.8, beta = 5, model = "ct", plot =T, add = T, lty = 2, col = "black")
abvevd(x = seq(0,1,by=0.01), alpha = 0.8, beta = 0.7, model = "ct", plot =T, add = T, lty = 3, col = "red")
abvevd(x = seq(0,1,by=0.01), alpha = 0.8, beta = 0.05, model = "ct", plot =T, add = T, lty = 4, col = "green")
# Fixed: last curve uses beta = 0.05 (legend previously said 0.5) and the
# plotted line types are 1:4, not 1:2.
legend("bottomright", legend=c("beta = 20", "beta = 5", "beta = 0.7", "beta = 0.05"), col=c("blue", "black", "red", "green"), lty=1:4, cex=0.8)
# Varying alpha
abvevd(x = seq(0,1,by=0.01), alpha = 20, beta = 0.8, model = "ct", plot =T, main = "A(w) function of alpha, beta = 0.8", col = "blue")
abvevd(x = seq(0,1,by=0.01), alpha = 5, beta = 0.8, model = "ct", plot =T, add = T, lty = 2, col = "black")
abvevd(x = seq(0,1,by=0.01), alpha = 0.7, beta = 0.8, model = "ct", plot =T, add = T, lty = 3, col = "red")
abvevd(x = seq(0,1,by=0.01), alpha = 0.05, beta = 0.8, model = "ct", plot =T, add = T, lty = 4, col = "green")
# Fixed: last curve uses alpha = 0.05 (legend previously said 0.5); lty 1:4.
legend("bottomright", legend=c("alpha = 20", "alpha = 5", "alpha = 0.7", "alpha = 0.05"), col=c("blue", "black", "red", "green"), lty=1:4, cex=0.8)
# Aneglog
# Dependence function A(w) of the asymmetric negative logistic model,
# varying dep, asy2 and asy1 in turn.  Four curves per panel (lty 1:4).
par(mfrow = c(1,3)); par(mar = rep(2, 4))
# Varying dep
abvevd(x = seq(0,1,by=0.01), dep = 1, asy = c(0.8,0.8), mod = "aneglog", plot =T, main = "A(w) function of dep, asy1, asy2 = 0.8", col = "blue")
abvevd(x = seq(0,1,by=0.01), dep = 0.66, asy = c(0.8,0.8), mod = "aneglog", plot =T, add = T, lty = 2, col = "black")
abvevd(x = seq(0,1,by=0.01), dep = 0.33, asy = c(0.8,0.8), mod = "aneglog", plot =T, add = T, lty = 3, col = "red")
abvevd(x = seq(0,1,by=0.01), dep = 0.01, asy = c(0.8,0.8), mod = "aneglog", plot =T, add = T, lty = 4, col = "green")
# Fixed: legend line types now match the four plotted curves (1:4).
legend("bottomright", legend=c("dep = 1", "dep = 0.66", "dep = 0.33", "dep = 0.01"), col=c("blue", "black", "red", "green"), lty=1:4, cex=1)
# Varying asy2
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.8,1), mod = "aneglog", plot =T, main = "A(w) function of asy2, dep = 0.5 asy1 = 0.8", col = "blue")
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.8,0.66), mod = "aneglog", plot =T, add = T, lty = 2, col = "black")
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.8,0.33), mod = "aneglog", plot =T, add = T, lty = 3, col = "red")
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.8,0.01), mod = "aneglog", plot =T, add = T, lty = 4, col = "green")
legend("bottomright", legend=c("asy2 = 1", "asy2 = 0.66", "asy2 = 0.33", "asy2 = 0.01"), col=c("blue", "black", "red", "green"), lty=1:4, cex=1)
# Varying asy1
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(1,0.8), mod = "aneglog", plot =T, main = "A(w) function of asy1, dep = 0.5 asy2 = 0.8", col = "blue")
# Fixed: plot =T added for consistency with every sibling call in this file.
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.66,0.8), mod = "aneglog", plot =T, add = T, lty = 2, col = "black")
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.33,0.8), mod = "aneglog", plot =T, add = T, lty = 3, col = "red")
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.01,0.8), mod = "aneglog", plot =T, add = T, lty = 4, col = "green")
legend("bottomright", legend=c("asy1 = 1", "asy1 = 0.66", "asy1 = 0.33", "asy1 = 0.01"), col=c("blue", "black", "red", "green"), lty=1:4, cex=1)
# Simulate n = 200 bivariate observations from each chosen model and
# scatter-plot them.  NOTE(review): no set.seed() here, so the draws are
# not reproducible across runs — confirm that is acceptable.
n = 200
# Coles-Tawn samples: alpha fixed at 0.8, beta decreasing 20 -> 0.05.
r.ct_1 <- rbvevd(n, alpha = 0.8, beta = 20, mod = "ct")
r.ct_2 <- rbvevd(n, alpha = 0.8, beta = 5, mod = "ct")
r.ct_3 <- rbvevd(n, alpha = 0.8, beta = 0.7, mod = "ct")
r.ct_4 <- rbvevd(n, alpha = 0.8, beta = 0.05, mod = "ct")
par(mfrow = c(2,2)); par(mar = rep(2, 4))
plot(r.ct_1, main = "Coles-Tawn model, a = 0.8, b = 20", col = "black", cex = 0.5)
plot(r.ct_2, main = "CT, a = 0.8, b = 5", col = "black", cex = 0.5)
plot(r.ct_3, main = "CT, a = 0.8, b = 0.7", col = "black", cex = 0.5)
plot(r.ct_4, main = "CT, a = 0.8, b = 0.05", col = "black", cex = 0.5)
# Aneglog
abvevd(x = seq(0,1,by=0.01), dep = 0.99, asy = c(0.8,0.2), model = "aneglog", plot =T)
# Aneglog samples: asymmetry fixed, dep swept 0.5 -> 500.
r.anlo_1 <- rbvevd(n, dep = 0.5, asy = c(0.8,0.2), mod = "aneglog")
r.anlo_2 <- rbvevd(n, dep = 5, asy = c(0.8,0.2), mod = "aneglog")
r.anlo_3 <- rbvevd(n, dep = 50, asy = c(0.8,0.2), mod = "aneglog")
r.anlo_4 <- rbvevd(n, dep = 500, asy = c(0.8,0.2), mod = "aneglog")
par(mfrow = c(2,2)); par(mar = rep(2, 4))
plot(r.anlo_1, main = " dep = 0.5, asy = c(0.8,0.2)", col = "black", cex = 0.5)
plot(r.anlo_2, main = " dep = 5, asy = c(0.8,0.2)", col = "black", cex = 0.5)
plot(r.anlo_3, main = " dep = 50, asy = c(0.8,0.2)", col = "black", cex = 0.5)
plot(r.anlo_4, main = " dep = 500, asy = c(0.8,0.2)", col = "black", cex = 0.5)
# Correlation matrices for every simulated sample under the three standard
# methods.  Same values and same print order as listing each of the 24
# cor() calls individually: all CT samples per method first, then all
# aneglog samples per method.
cor_methods <- c("pearson", "kendall", "spearman")
ct_samples <- list(r.ct_1, r.ct_2, r.ct_3, r.ct_4)
anlo_samples <- list(r.anlo_1, r.anlo_2, r.anlo_3, r.anlo_4)
for (method in cor_methods) {
  for (smp in ct_samples) {
    # print() needed: auto-printing does not happen inside a loop.
    print(cor(smp, method = method))
  }
}
for (method in cor_methods) {
  for (smp in anlo_samples) {
    print(cor(smp, method = method))
  }
}
# E2.4 ==== Generate from bivariate asymmetric logistic copula
# r > 1, th > 0, 1 > phi > 0
# NOTE(review): cop_7 (phi = 0) and cop_9 (phi = 1) sit on the boundary of
# the stated constraint 1 > phi > 0 — confirm SimCop accepts these values.
n = 200
# cop_1..3: vary r (2, 20, 2000) with theta = 1, phi = 0.5.
cop_1 <- NewBEVAsyLogisticCopula(r = 2,theta=1, phi= 0.5)
cop_2 <- NewBEVAsyLogisticCopula(r = 20,theta=1, phi= 0.5)
cop_3 <- NewBEVAsyLogisticCopula(r = 2000,theta=1, phi= 0.5)
# cop_4..6: vary theta (0.5, 5, 500) with r = 2, phi = 0.5.
cop_4 <- NewBEVAsyLogisticCopula(r = 2,theta=0.5, phi= 0.5)
cop_5 <- NewBEVAsyLogisticCopula(r = 2,theta=5, phi= 0.5)
cop_6 <- NewBEVAsyLogisticCopula(r = 2,theta=500, phi= 0.5)
# cop_7..9: vary phi (0, 0.5, 1) with r = 2, theta = 1.
cop_7 <- NewBEVAsyLogisticCopula(r = 2,theta=1, phi= 0)
cop_8 <- NewBEVAsyLogisticCopula(r = 2,theta=1, phi= 0.5)
cop_9 <- NewBEVAsyLogisticCopula(r = 2,theta=1, phi= 1)
# Build the sampling approximation for each copula.
approx_1 <- GetApprox(cop_1)
approx_2 <- GetApprox(cop_2)
approx_3 <- GetApprox(cop_3)
approx_4 <- GetApprox(cop_4)
approx_5 <- GetApprox(cop_5)
approx_6 <- GetApprox(cop_6)
approx_7 <- GetApprox(cop_7)
approx_8 <- GetApprox(cop_8)
approx_9 <- GetApprox(cop_9)
# Draw n observations from each approximation.
sample_1 <- GenerateRV(approx_1, n)
sample_2 <- GenerateRV(approx_2, n)
sample_3 <- GenerateRV(approx_3, n)
sample_4 <- GenerateRV(approx_4, n)
sample_5 <- GenerateRV(approx_5, n)
sample_6 <- GenerateRV(approx_6, n)
sample_7 <- GenerateRV(approx_7, n)
sample_8 <- GenerateRV(approx_8, n)
sample_9 <- GenerateRV(approx_9, n)
# 3x3 grid of scatter plots, one per parameter setting.
par(mfrow = c(3,3)); par(mar = rep(2, 4))
plot(sample_1)
plot(sample_2)
plot(sample_3)
plot(sample_4)
plot(sample_5)
plot(sample_6)
plot(sample_7)
plot(sample_8)
plot(sample_9)
# E3 Preprocessing ====
# Load the Fremantle and Port Pirie annual-maximum sea level data sets
# (sourced as plain R scripts) and align them on the common years.
source("fremantle.R.txt")
source("portpirie.R.txt")
max(fremantle[,2])
max(portpirie[,2])
# Inner join on Year keeps only years present in both records.
slMerged <- merge(fremantle, portpirie, by = "Year")
# Columns of slMerged: 1 = Year, 2 = fremantle level, 4 = portpirie level
# (column 3 is presumably the SOI covariate from fremantle — see E3.9).
sl <- data.frame(year = slMerged[,1], fremantle=slMerged[,2], Portpirie = slMerged[,4])
max(sl[,2]); max(sl[,3])
# Scatter plots
par(mfrow = c(1,2))#; par(mar = rep(2, 4))
# Horizontal reference lines mark the thresholds used later (1.7 m, 4.2 m).
plot(sl[,1], sl[,2], xlab = "year", ylab = "Sea level [M]") # Fremantle
abline(h = 1.7)
plot(sl[,1], sl[,3], xlab = "year", ylab = "Sea level [M]") # Portpirie
abline(h = 4.2)
# FML
# Full maximum likelihood: fit three parametric BEV models jointly
# (margins + dependence) to the paired annual maxima.
M1 <- fbvevd(sl[,2:3], model = "ct")
M2 <- fbvevd(sl[,2:3], model = "aneglog")
M3 <- fbvevd(sl[,2:3], model = "bilog")
round(cbind(Estimates = fitted(M1), StandardErrors = std.errors(M1)),3)
round(cbind(Estimates = fitted(M2), StandardErrors = std.errors(M2)),3)
round(cbind(Estimates = fitted(M3), StandardErrors = std.errors(M3)),3)
# IFM - Estimate parameters Marginal distributions.
# Step 1 of inference-functions-for-margins: univariate GEV fits.
f1 <- fgev(sl[,2])
f2 <- fgev(sl[,3])
f1$est
f2$est
# IFM - Estimate parameters in dependence function.
# Step 2: dependence parameters with margins held fixed at the GEV fits.
M1.IFM <- fbvevd(sl[,2:3], model = "ct", loc1 = f1$est[1], scale1 = f1$est[2],
shape1 = f1$est[3], loc2 = f2$est[1], scale2 = f2$est[2], shape2 = f2$est[3])
M2.IFM <- fbvevd(sl[,2:3], model = "aneglog", loc1 = f1$est[1], scale1 = f1$est[2],
shape1 = f1$est[3], loc2 = f2$est[1], scale2 = f2$est[2], shape2 = f2$est[3])
M3.IFM <- fbvevd(sl[,2:3], model = "bilog", loc1 = f1$est[1], scale1 = f1$est[2],
shape1 = f1$est[3], loc2 = f2$est[1], scale2 = f2$est[2], shape2 = f2$est[3])
# Evaluation of parametric models.
# Pick the model with the smallest AIC among the six fits.
all.AIC <- AIC(M1,M2,M3, M1.IFM, M2.IFM,M3.IFM)
all.AIC[min(all.AIC[,2]) == all.AIC[,2],]
# Non-parametric fit
# CFG estimator of the dependence function; epmar toggles empirical vs
# parametric margins, convex toggles the convex-minorant correction.
N1 <- abvnonpar(data = sl[,2:3], epmar = F, method = "cfg", convex = F, plot =T, col = "blue",
main = "Dependence function estimation using cfg")
N2 <- abvnonpar(data = sl[,2:3], epmar = F, method = "cfg", convex = T, add = T, col = "black")
N3 <- abvnonpar(data = sl[,2:3], epmar = T, method = "cfg", convex = F, add = T, col = "red")
N4 <- abvnonpar(data = sl[,2:3], epmar = T, method = "cfg", convex = T, add = T, col = "green")
legend("bottomright", legend=c("Parametric transformation","Convex hull of parametric transformation", "Empiric transformation", "Convex hull of Empiric transformation"), col=c("blue", "black", "red", "green"), lty=1:4, cex=1.0)
# E3.6 and 3.7 Est prob ====
# Parametric estimation of probabilities
# Joint exceedance probabilities from the fitted bilogistic model.
# P(X > x0, Y > y0) is obtained by inclusion-exclusion:
#   1 - F(x0, inf) - F(inf, y0) + F(x0, y0),
# where the values 30 and 50 act as effectively infinite upper bounds
# for the two margins (both far above any observed sea level).
sl_am <- as.matrix(sl[,2:3])  # NOTE(review): appears unused below
# M3 <- fbvevd(sl_am, model = "bilog")
# M3$estimate
# f1$e / M3.IFM$e rely on partial matching of the `estimate` component.
mar1 = c(f1$e[1],f1$e[2],f1$e[3])
mar2 = c(f2$e[1],f2$e[2],f2$e[3])
a <- M3.IFM$e[1]
b <- M3.IFM$e[2]
# a <- M1.IFM$e[1]
# b <- M1.IFM$e[2]
# P1
# p1 = P(Fremantle > 1.7, Portpirie > 4.2)
x <- c(30,4.2)
p_CD <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
x <- c(1.7,50)
p_BD <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
x <- c(1.7,4.2)
p_D <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
p_A = 1 - p_CD - p_BD + p_D
round(p_A,4)
# P2
# p2 = P(Fremantle > 1.8, Portpirie > 4.4)
x <- c(30,4.4)
p_CD <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
x <- c(1.8,50)
p_BD <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
x <- c(1.8,4.4)
p_D <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
p_A = 1 - p_CD - p_BD + p_D
round(p_A,5)
# P3
# p3 = P(not both below (1.478, 3.85)) = 1 - F(1.478, 3.85)
x <- c(1.478,3.85)
p_14_38 <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
p_14_38 = 1 - p_14_38
round(p_14_38,4)
# Parametric estimation of p4
# p4 = P(not both below (1.95, 4.8) | not both below (1.478, 3.85)),
# computed from the bilogistic model with IFM marginal estimates.
# Robustness fix: use the full component name `estimate` instead of the
# fragile `$e` partial match.
mar1 = c(f1$estimate[1],f1$estimate[2],f1$estimate[3])
mar2 = c(f2$estimate[1],f2$estimate[2],f2$estimate[3])
a <- M3.IFM$estimate[1]
b <- M3.IFM$estimate[2]
# a <- M1.IFM$estimate[1]
# b <- M1.IFM$estimate[2]
x <- c(1.478,3.850)
p_D_1 <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
p_D_1
x <- c(1.95,4.8)
# Bug fix: alpha was passed as `b` (alpha = b), so p_D_2 used the wrong
# dependence parameter; both probabilities must use the same (a, b) pair.
p_D_2 <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
p_D_2
p = ((1-p_D_1) - (1-p_D_2))/(1-p_D_1)
p
# 3.6 and 3.7 Non-Parametric estimation.
# Parametric transformation
# Joint CDF via the BEV representation
#   F(y1, y2) = exp( log(G1*G2) * A(w) ),  w = log(G2) / log(G1*G2),
# with A read off the non-parametric estimate N2 on its grid of x-values.
# NOTE(review): the index ceiling(w*100) assumes N2$x = seq(0, 1, by = 0.01)
# (101 points) — confirm, and note a possible off-by-one at w = 0.
# Estimatation of p1
# Vectors bundle the three inclusion-exclusion corner points.
y1 = c(3, 1.7, 1.7)
y2 = c(4.2, 5, 4.2)
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p <- exp(log(G1*G2)*N2$y[ceiling(log(G2)/log(G1*G2)*100)])
p1 = 1 + p[3] - p[1] - p[2]
round(p1,4)
# Estimation of p2
y1 = c(3, 1.8, 1.8)
y2 = c(4.4, 5, 4.4)
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p <- exp(log(G1*G2)*N2$y[ceiling(log(G2)/log(G1*G2)*100)])
p2 = 1 + p[3] - p[1] - p[2]
round(p2,4)
# Estimation of p3
y1 = 1.478
y2 = 3.85
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p <- exp(log(G1*G2)*N2$y[ceiling(log(G2)/log(G1*G2)*100)])
p3 <- 1-p
round(p3,3)
# Estimation of p4
# Conditional probability built from two joint-CDF evaluations.
y1 = 1.478
y2 = 3.85
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p_D_1 <- exp(log(G1*G2)*N2$y[ceiling(log(G2)/log(G1*G2)*100)])
y1 = 1.95
y2 = 4.8
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p_D_2 <- exp(log(G1*G2)*N2$y[ceiling(log(G2)/log(G1*G2)*100)])
p4 = ((1-p_D_1) - (1-p_D_2))/(1-p_D_1)
p4
# Empirical transformation
# Same four probabilities as the "parametric transformation" section above,
# but using the convex estimate with empirical margins (N4).
# Refactor: the CDF evaluation was copy-pasted eight times; factor it into
# a documented helper.  Results are identical to the inlined version.
#
# bv_cdf_nonpar: joint CDF F(y1, y2) = exp(log(G1*G2) * A(w)) with
# w = log(G2)/log(G1*G2) and A read off A_fit$y; assumes A_fit$x is
# seq(0, 1, by = 0.01) as produced by abvnonpar above.
bv_cdf_nonpar <- function(y1, y2, A_fit) {
  G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
  G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
  exp(log(G1 * G2) * A_fit$y[ceiling(log(G2) / log(G1 * G2) * 100)])
}
# Estimatation of p1 (inclusion-exclusion over three corner points)
p <- bv_cdf_nonpar(c(3, 1.7, 1.7), c(4.2, 5, 4.2), N4)
p1 <- 1 + p[3] - p[1] - p[2]
round(p1,4)
# Estimation of p2
p <- bv_cdf_nonpar(c(3, 1.8, 1.8), c(4.4, 5, 4.4), N4)
p2 <- 1 + p[3] - p[1] - p[2]
round(p2,4)
# Estimation of p3
p3 <- 1 - bv_cdf_nonpar(1.478, 3.85, N4)
round(p3,3)
# Estimation of p4 (conditional probability from two CDF evaluations)
p_D_1 <- bv_cdf_nonpar(1.478, 3.85, N4)
p_D_2 <- bv_cdf_nonpar(1.95, 4.8, N4)
p4 <- ((1-p_D_1) - (1-p_D_2))/(1-p_D_1)
p4
# E3.8 plotting ====
# Create an rbvevd object: simulate 63 observations (the sample size of
# the merged data) from the fitted bilogistic model, refit, and draw
# quantile-curve plots (which = 5).
# NOTE(review): M1 is reassigned here, clobbering the Coles-Tawn fit from
# E3, and M2 below is the aneglog fit on the real data — confirm the
# intended comparison.
par(mfrow = c(1,2)); par(mar = rep(2, 4))
n = 63
bvdata <- rbvevd(n, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
M1 <- fbvevd(bvdata, model = "bilog")
plot(M1, which = 5, p = c(0.75, 0.9, 0.95), col = "blue", xlim = c(1.2,2), ylim = c(3.5,5))
plot(M2, which = 5, p = c(0.75, 0.9, 0.95), col ="green", xlim = c(1.2,2), ylim = c(3.5,5))
# E3.9 Linear trend ====
# Refit the bilogistic model with non-stationary location parameters:
# a normalized-year trend on margin 1 and the SOI covariate on margin 2,
# then compare against the stationary fits via likelihood-ratio tests.
slMerged <- merge(fremantle, portpirie, by = "Year")
sl_all <- data.frame(year = slMerged[,1], fremantle=slMerged[,2], SOI = slMerged[,3], Portpirie = slMerged[,4])
# Normalize year to [-1, 1].
# Generalization: use nrow(sl_all) instead of the hardcoded row count 63,
# so the script survives a change in the number of overlapping years.
sl_all[,1] = 2*(sl_all[,1]-sl_all[1,1])/(sl_all[nrow(sl_all),1]-sl_all[1,1]) -1
M_trend.IFM <- fbvevd(sl[,2:3], model = "bilog", nsloc1 = sl_all[,1], nsloc2 = sl_all[,3], loc1 = f1$est[1], scale1 = f1$est[2],
shape1 = f1$est[3], loc2 = f2$est[1], scale2 = f2$est[2], shape2 = f2$est[3])
M_trend_FML <- fbvevd(sl[,2:3], nsloc1 = sl_all[,1], nsloc2 = sl_all[,3], model = "bilog")
anova(M_trend_FML, M_trend.IFM)
# To be compared to M3.IFM and M3
# LR tests (df = 2: two added trend coefficients).
x = logLik(M_trend_FML)
y = logLik(M3)
lr.test(x = x, y = y, alpha = 0.05, df = 2)
y = logLik(M_trend.IFM)
x = logLik(M3.IFM)
lr.test(x = x, y = y, alpha = 0.05, df = 2)
library(mdatools); library(fitdistrplus); library(xtable); library(in2extRemes)
library(evd); library(SimCop); library(copula); library(mgpd)
# E2.1, nine para BEV models ====
par(mfrow = c(2,2)); par(mar = rep(2, 4))
abvevd(x = seq(0,1,by=0.01), dep = 0.99, model = "log", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, model = "log", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, model = "log", plot =T, add = T, lty = 3)
abvevd(x = seq(0,1,by=0.01), dep = 0.99, asy = c(0.8,0.2), model = "alog", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, asy = c(0.8,0.2), model = "alog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, asy = c(0.8,0.2), model = "alog", plot =T, add = T, lty = 3)
abvevd(x = seq(0,1,by=0.01), dep = 0.99, model = "hr", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, model = "hr", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, model = "hr", plot =T, add = T, lty = 3)
abvevd(x = seq(0,1,by=0.01), dep = 0.99, model = "neglog", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, model = "neglog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, model = "neglog", plot =T, add = T, lty = 3)
abvevd(x = seq(0,1,by=0.01), dep = 0.99, asy = c(0.8,0.2), model = "aneglog", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, asy = c(0.8,0.2), model = "aneglog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, asy = c(0.8,0.2), model = "aneglog", plot =T, add = T, lty = 3)
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.5, model = "bilog", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.5, model = "bilog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.1, beta = 0.5, model = "bilog", plot =T, add = T, lty = 3)
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.9, model = "bilog", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.5, model = "bilog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.1, model = "bilog", plot =T, add = T, lty = 3)
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.9, model = "negbilog", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.5, model = "negbilog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.1, model = "negbilog", plot =T, add = T, lty = 3)
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.9, model = "ct", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.5, model = "ct", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.1, model = "ct", plot =T, add = T, lty = 3)
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.05, model = "amix", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.2, model = "amix", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.1, model = "amix", plot =T, add = T, lty = 3)
# dep
# log, alog - asy, hr, neglog, aneglog - asy
# alpha beta
# bilog, negbilog, ct, amix
# Coles-Tawn and Aneglog seems like interesting models so they are choosen.
abvevd(x = seq(0,1,by=0.01), alpha = 0.9, beta = 0.9, model = "ct", plot =T)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.5, model = "ct", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), alpha = 0.5, beta = 0.1, model = "ct", plot =T, add = T, lty = 3)
abvevd(x = seq(0,1,by=0.01), dep = 0.99, asy = c(0.8,0.2), model = "aneglog", plot =T)
abvevd(x = seq(0,1,by=0.01), dep = 0.8, asy = c(0.8,0.2), model = "aneglog", plot =T, add = T, lty = 2)
abvevd(x = seq(0,1,by=0.01), dep = 0.4, asy = c(0.8,0.2), model = "aneglog", plot =T, add = T, lty = 3)
# E2.2 Choose 2 and generate 200 observation ====
# CT
# Dependence function A(w) of the Coles-Tawn model, varying one parameter
# at a time.  Four curves per panel with line types 1:4.
par(mfrow = c(1,2)); par(mar = rep(2, 4))
# Varying beta
abvevd(x = seq(0,1,by=0.01), alpha = 0.8, beta = 20, model = "ct", plot =T, main = "A(w) function of beta, alpha = 0.8", col = "blue")
abvevd(x = seq(0,1,by=0.01), alpha = 0.8, beta = 5, model = "ct", plot =T, add = T, lty = 2, col = "black")
abvevd(x = seq(0,1,by=0.01), alpha = 0.8, beta = 0.7, model = "ct", plot =T, add = T, lty = 3, col = "red")
abvevd(x = seq(0,1,by=0.01), alpha = 0.8, beta = 0.05, model = "ct", plot =T, add = T, lty = 4, col = "green")
# Fixed: last curve uses beta = 0.05 (legend previously said 0.5); lty 1:4.
legend("bottomright", legend=c("beta = 20", "beta = 5", "beta = 0.7", "beta = 0.05"), col=c("blue", "black", "red", "green"), lty=1:4, cex=0.8)
# Varying alpha
abvevd(x = seq(0,1,by=0.01), alpha = 20, beta = 0.8, model = "ct", plot =T, main = "A(w) function of alpha, beta = 0.8", col = "blue")
abvevd(x = seq(0,1,by=0.01), alpha = 5, beta = 0.8, model = "ct", plot =T, add = T, lty = 2, col = "black")
abvevd(x = seq(0,1,by=0.01), alpha = 0.7, beta = 0.8, model = "ct", plot =T, add = T, lty = 3, col = "red")
abvevd(x = seq(0,1,by=0.01), alpha = 0.05, beta = 0.8, model = "ct", plot =T, add = T, lty = 4, col = "green")
# Fixed: last curve uses alpha = 0.05 (legend previously said 0.5); lty 1:4.
legend("bottomright", legend=c("alpha = 20", "alpha = 5", "alpha = 0.7", "alpha = 0.05"), col=c("blue", "black", "red", "green"), lty=1:4, cex=0.8)
# Aneglog
# Dependence function A(w) of the asymmetric negative logistic model,
# varying dep, asy2 and asy1 in turn.  Four curves per panel (lty 1:4).
par(mfrow = c(1,3)); par(mar = rep(2, 4))
# Varying dep
abvevd(x = seq(0,1,by=0.01), dep = 1, asy = c(0.8,0.8), mod = "aneglog", plot =T, main = "A(w) function of dep, asy1, asy2 = 0.8", col = "blue")
abvevd(x = seq(0,1,by=0.01), dep = 0.66, asy = c(0.8,0.8), mod = "aneglog", plot =T, add = T, lty = 2, col = "black")
abvevd(x = seq(0,1,by=0.01), dep = 0.33, asy = c(0.8,0.8), mod = "aneglog", plot =T, add = T, lty = 3, col = "red")
abvevd(x = seq(0,1,by=0.01), dep = 0.01, asy = c(0.8,0.8), mod = "aneglog", plot =T, add = T, lty = 4, col = "green")
# Fixed: legend line types now match the four plotted curves (1:4).
legend("bottomright", legend=c("dep = 1", "dep = 0.66", "dep = 0.33", "dep = 0.01"), col=c("blue", "black", "red", "green"), lty=1:4, cex=1)
# Varying asy2
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.8,1), mod = "aneglog", plot =T, main = "A(w) function of asy2, dep = 0.5 asy1 = 0.8", col = "blue")
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.8,0.66), mod = "aneglog", plot =T, add = T, lty = 2, col = "black")
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.8,0.33), mod = "aneglog", plot =T, add = T, lty = 3, col = "red")
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.8,0.01), mod = "aneglog", plot =T, add = T, lty = 4, col = "green")
legend("bottomright", legend=c("asy2 = 1", "asy2 = 0.66", "asy2 = 0.33", "asy2 = 0.01"), col=c("blue", "black", "red", "green"), lty=1:4, cex=1)
# Varying asy1
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(1,0.8), mod = "aneglog", plot =T, main = "A(w) function of asy1, dep = 0.5 asy2 = 0.8", col = "blue")
# Fixed: plot =T added for consistency with every sibling call in this file.
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.66,0.8), mod = "aneglog", plot =T, add = T, lty = 2, col = "black")
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.33,0.8), mod = "aneglog", plot =T, add = T, lty = 3, col = "red")
abvevd(x = seq(0,1,by=0.01), dep = 0.5, asy = c(0.01,0.8), mod = "aneglog", plot =T, add = T, lty = 4, col = "green")
legend("bottomright", legend=c("asy1 = 1", "asy1 = 0.66", "asy1 = 0.33", "asy1 = 0.01"), col=c("blue", "black", "red", "green"), lty=1:4, cex=1)
n = 200
r.ct_1 <- rbvevd(n, alpha = 0.8, beta = 20, mod = "ct")
r.ct_2 <- rbvevd(n, alpha = 0.8, beta = 5, mod = "ct")
r.ct_3 <- rbvevd(n, alpha = 0.8, beta = 0.7, mod = "ct")
r.ct_4 <- rbvevd(n, alpha = 0.8, beta = 0.05, mod = "ct")
par(mfrow = c(2,2)); par(mar = rep(2, 4))
plot(r.ct_1, main = "Coles-Tawn model, a = 0.8, b = 20", col = "black", cex = 0.5)
plot(r.ct_2, main = "CT, a = 0.8, b = 5", col = "black", cex = 0.5)
plot(r.ct_3, main = "CT, a = 0.8, b = 0.7", col = "black", cex = 0.5)
plot(r.ct_4, main = "CT, a = 0.8, b = 0.05", col = "black", cex = 0.5)
# Aneglog
abvevd(x = seq(0,1,by=0.01), dep = 0.99, asy = c(0.8,0.2), model = "aneglog", plot =T)
r.anlo_1 <- rbvevd(n, dep = 0.5, asy = c(0.8,0.2), mod = "aneglog")
r.anlo_2 <- rbvevd(n, dep = 5, asy = c(0.8,0.2), mod = "aneglog")
r.anlo_3 <- rbvevd(n, dep = 50, asy = c(0.8,0.2), mod = "aneglog")
r.anlo_4 <- rbvevd(n, dep = 500, asy = c(0.8,0.2), mod = "aneglog")
par(mfrow = c(2,2)); par(mar = rep(2, 4))
plot(r.anlo_1, main = " dep = 0.5, asy = c(0.8,0.2)", col = "black", cex = 0.5)
plot(r.anlo_2, main = " dep = 5, asy = c(0.8,0.2)", col = "black", cex = 0.5)
plot(r.anlo_3, main = " dep = 50, asy = c(0.8,0.2)", col = "black", cex = 0.5)
plot(r.anlo_4, main = " dep = 500, asy = c(0.8,0.2)", col = "black", cex = 0.5)
cor(r.ct_1, method = "pearson")
cor(r.ct_2, method = "pearson")
cor(r.ct_3, method = "pearson")
cor(r.ct_4, method = "pearson")
cor(r.ct_1, method = "kendall")
cor(r.ct_2, method = "kendall")
cor(r.ct_3, method = "kendall")
cor(r.ct_4, method = "kendall")
cor(r.ct_1, method = "spearman")
cor(r.ct_2, method = "spearman")
cor(r.ct_3, method = "spearman")
cor(r.ct_4, method = "spearman")
cor(r.anlo_1, method = "pearson")
cor(r.anlo_2, method = "pearson")
cor(r.anlo_3, method = "pearson")
cor(r.anlo_4, method = "pearson")
cor(r.anlo_1, method = "kendall")
cor(r.anlo_2, method = "kendall")
cor(r.anlo_3, method = "kendall")
cor(r.anlo_4, method = "kendall")
cor(r.anlo_1, method = "spearman")
cor(r.anlo_2, method = "spearman")
cor(r.anlo_3, method = "spearman")
cor(r.anlo_4, method = "spearman")
# E2.4 ==== Generate from bivariate asymmetric logistic copula
# r > 1, th > 0, 1 > phi > 0
n = 200
cop_1 <- NewBEVAsyLogisticCopula(r = 2,theta=1, phi= 0.5)
cop_2 <- NewBEVAsyLogisticCopula(r = 20,theta=1, phi= 0.5)
cop_3 <- NewBEVAsyLogisticCopula(r = 2000,theta=1, phi= 0.5)
cop_4 <- NewBEVAsyLogisticCopula(r = 2,theta=0.5, phi= 0.5)
cop_5 <- NewBEVAsyLogisticCopula(r = 2,theta=5, phi= 0.5)
cop_6 <- NewBEVAsyLogisticCopula(r = 2,theta=500, phi= 0.5)
cop_7 <- NewBEVAsyLogisticCopula(r = 2,theta=1, phi= 0)
cop_8 <- NewBEVAsyLogisticCopula(r = 2,theta=1, phi= 0.5)
cop_9 <- NewBEVAsyLogisticCopula(r = 2,theta=1, phi= 1)
approx_1 <- GetApprox(cop_1)
approx_2 <- GetApprox(cop_2)
approx_3 <- GetApprox(cop_3)
approx_4 <- GetApprox(cop_4)
approx_5 <- GetApprox(cop_5)
approx_6 <- GetApprox(cop_6)
approx_7 <- GetApprox(cop_7)
approx_8 <- GetApprox(cop_8)
approx_9 <- GetApprox(cop_9)
sample_1 <- GenerateRV(approx_1, n)
sample_2 <- GenerateRV(approx_2, n)
sample_3 <- GenerateRV(approx_3, n)
sample_4 <- GenerateRV(approx_4, n)
sample_5 <- GenerateRV(approx_5, n)
sample_6 <- GenerateRV(approx_6, n)
sample_7 <- GenerateRV(approx_7, n)
sample_8 <- GenerateRV(approx_8, n)
sample_9 <- GenerateRV(approx_9, n)
par(mfrow = c(3,3)); par(mar = rep(2, 4))
plot(sample_1)
plot(sample_2)
plot(sample_3)
plot(sample_4)
plot(sample_5)
plot(sample_6)
plot(sample_7)
plot(sample_8)
plot(sample_9)
# E3 Preprocessing ====
source("fremantle.R.txt")
source("portpirie.R.txt")
max(fremantle[,2])
max(portpirie[,2])
slMerged <- merge(fremantle, portpirie, by = "Year")
sl <- data.frame(year = slMerged[,1], fremantle=slMerged[,2], Portpirie = slMerged[,4])
max(sl[,2]); max(sl[,3])
# Scatter plots
par(mfrow = c(1,2))#; par(mar = rep(2, 4))
plot(sl[,1], sl[,2], xlab = "year", ylab = "Sea level [M]") # Fremantle
abline(h = 1.7)
plot(sl[,1], sl[,3], xlab = "year", ylab = "Sea level [M]") # Portpirie
abline(h = 4.2)
# FML
M1 <- fbvevd(sl[,2:3], model = "ct")
M2 <- fbvevd(sl[,2:3], model = "aneglog")
M3 <- fbvevd(sl[,2:3], model = "bilog")
round(cbind(Estimates = fitted(M1), StandardErrors = std.errors(M1)),3)
round(cbind(Estimates = fitted(M2), StandardErrors = std.errors(M2)),3)
round(cbind(Estimates = fitted(M3), StandardErrors = std.errors(M3)),3)
# IFM - Estimate parameters Marginal distributions.
f1 <- fgev(sl[,2])
f2 <- fgev(sl[,3])
f1$est
f2$est
# IFM - Estimate parameters in dependence function.
M1.IFM <- fbvevd(sl[,2:3], model = "ct", loc1 = f1$est[1], scale1 = f1$est[2],
shape1 = f1$est[3], loc2 = f2$est[1], scale2 = f2$est[2], shape2 = f2$est[3])
M2.IFM <- fbvevd(sl[,2:3], model = "aneglog", loc1 = f1$est[1], scale1 = f1$est[2],
shape1 = f1$est[3], loc2 = f2$est[1], scale2 = f2$est[2], shape2 = f2$est[3])
M3.IFM <- fbvevd(sl[,2:3], model = "bilog", loc1 = f1$est[1], scale1 = f1$est[2],
shape1 = f1$est[3], loc2 = f2$est[1], scale2 = f2$est[2], shape2 = f2$est[3])
# Evaluation of parametric models.
all.AIC <- AIC(M1,M2,M3, M1.IFM, M2.IFM,M3.IFM)
all.AIC[min(all.AIC[,2]) == all.AIC[,2],]
# Non-parametric fit
N1 <- abvnonpar(data = sl[,2:3], epmar = F, method = "cfg", convex = F, plot =T, col = "blue",
main = "Dependence function estimation using cfg")
N2 <- abvnonpar(data = sl[,2:3], epmar = F, method = "cfg", convex = T, add = T, col = "black")
N3 <- abvnonpar(data = sl[,2:3], epmar = T, method = "cfg", convex = F, add = T, col = "red")
N4 <- abvnonpar(data = sl[,2:3], epmar = T, method = "cfg", convex = T, add = T, col = "green")
legend("bottomright", legend=c("Parametric transformation","Convex hull of parametric transformation", "Empiric transformation", "Convex hull of Empiric transformation"), col=c("blue", "black", "red", "green"), lty=1:4, cex=1.0)
# E3.6 and 3.7 Est prob ====
# Parametric estimation of probabilities
sl_am <- as.matrix(sl[,2:3])
# M3 <- fbvevd(sl_am, model = "bilog")
# M3$estimate
mar1 = c(f1$e[1],f1$e[2],f1$e[3])
mar2 = c(f2$e[1],f2$e[2],f2$e[3])
a <- M3.IFM$e[1]
b <- M3.IFM$e[2]
# a <- M1.IFM$e[1]
# b <- M1.IFM$e[2]
# P1
x <- c(30,4.2)
p_CD <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
x <- c(1.7,50)
p_BD <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
x <- c(1.7,4.2)
p_D <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
p_A = 1 - p_CD - p_BD + p_D
round(p_A,4)
# P2
x <- c(30,4.4)
p_CD <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
x <- c(1.8,50)
p_BD <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
x <- c(1.8,4.4)
p_D <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
p_A = 1 - p_CD - p_BD + p_D
round(p_A,5)
# P3
x <- c(1.478,3.85)
p_14_38 <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
p_14_38 = 1 - p_14_38
round(p_14_38,4)
# Parametric estimation of p4
# p4 = P(not both below (1.95, 4.8) | not both below (1.478, 3.85)),
# computed from the bilogistic model with IFM marginal estimates.
# Robustness fix: use the full component name `estimate` instead of the
# fragile `$e` partial match.
mar1 = c(f1$estimate[1],f1$estimate[2],f1$estimate[3])
mar2 = c(f2$estimate[1],f2$estimate[2],f2$estimate[3])
a <- M3.IFM$estimate[1]
b <- M3.IFM$estimate[2]
# a <- M1.IFM$estimate[1]
# b <- M1.IFM$estimate[2]
x <- c(1.478,3.850)
p_D_1 <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
p_D_1
x <- c(1.95,4.8)
# Bug fix: alpha was passed as `b` (alpha = b), so p_D_2 used the wrong
# dependence parameter; both probabilities must use the same (a, b) pair.
p_D_2 <- pbvevd(q = x, alpha = a, beta = b, model = "bilog",
mar1 = mar1, mar2 = mar2)
p_D_2
p = ((1-p_D_1) - (1-p_D_2))/(1-p_D_1)
p
# 3.6 and 3.7 Non-Parametric estimation.
# Parametric transformation
# Joint probabilities are rebuilt from the Pickands representation
#   G(x, y) = exp( log(G1*G2) * A( log(G2) / log(G1*G2) ) )
# where A is the convex CFG estimate with parametric margins (N2).  A is
# looked up by indexing its stored grid of values; 'ceiling(... * 100)'
# assumes the grid holds about 100 equally spaced points -- NOTE(review):
# confirm against the length of N2$x / N2$y.
# Estimation of p1: thresholds (1.7, 4.2).  The triples evaluate the CDF at
# (large, y), (x, large) and (x, y) so inclusion-exclusion applies below.
y1 = c(3, 1.7, 1.7)
y2 = c(4.2, 5, 4.2)
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p <- exp(log(G1*G2)*N2$y[ceiling(log(G2)/log(G1*G2)*100)])
# P(X > 1.7, Y > 4.2) by inclusion-exclusion
p1 = 1 + p[3] - p[1] - p[2]
round(p1,4)
# Estimation of p2: thresholds (1.8, 4.4)
y1 = c(3, 1.8, 1.8)
y2 = c(4.4, 5, 4.4)
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p <- exp(log(G1*G2)*N2$y[ceiling(log(G2)/log(G1*G2)*100)])
p2 = 1 + p[3] - p[1] - p[2]
round(p2,4)
# Estimation of p3: 1 - G(1.478, 3.85), i.e. exceed at least one threshold
y1 = 1.478
y2 = 3.85
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p <- exp(log(G1*G2)*N2$y[ceiling(log(G2)/log(G1*G2)*100)])
p3 <- 1-p
round(p3,3)
# Estimation of p4: ratio of the survival-type probabilities at
# (1.478, 3.85) and (1.95, 4.8), mirroring the parametric p4 above
y1 = 1.478
y2 = 3.85
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p_D_1 <- exp(log(G1*G2)*N2$y[ceiling(log(G2)/log(G1*G2)*100)])
y1 = 1.95
y2 = 4.8
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p_D_2 <- exp(log(G1*G2)*N2$y[ceiling(log(G2)/log(G1*G2)*100)])
p4 = ((1-p_D_1) - (1-p_D_2))/(1-p_D_1)
p4
# Empirical transformation
# Same four probability estimates as above, but the dependence function is
# the convex CFG estimate with EMPIRICAL margins (N4) instead of N2.  The
# grid lookup again assumes ~100 evaluation points -- NOTE(review): confirm.
# Estimation of p1: thresholds (1.7, 4.2)
y1 = c(3, 1.7, 1.7)
y2 = c(4.2, 5, 4.2)
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p <- exp(log(G1*G2)*N4$y[ceiling(log(G2)/log(G1*G2)*100)])
p1 = 1 + p[3] - p[1] - p[2]
round(p1,4)
# Estimation of p2: thresholds (1.8, 4.4)
y1 = c(3, 1.8, 1.8)
y2 = c(4.4, 5, 4.4)
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p <- exp(log(G1*G2)*N4$y[ceiling(log(G2)/log(G1*G2)*100)])
p2 = 1 + p[3] - p[1] - p[2]
round(p2,4)
# Estimation of p3: 1 - G(1.478, 3.85)
y1 = 1.478
y2 = 3.85
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p <- exp(log(G1*G2)*N4$y[ceiling(log(G2)/log(G1*G2)*100)])
p3 <- 1-p
round(p3,3)
# Estimation of p4: conditional-style ratio, as in the parametric case
y1 = 1.478
y2 = 3.85
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p_D_1 <- exp(log(G1*G2)*N4$y[ceiling(log(G2)/log(G1*G2)*100)])
y1 = 1.95
y2 = 4.8
G1 <- pgev(y1, mar1[1], mar1[2], mar1[3])
G2 <- pgev(y2, mar2[1], mar2[2], mar2[3])
p_D_2 <- exp(log(G1*G2)*N4$y[ceiling(log(G2)/log(G1*G2)*100)])
p4 = ((1-p_D_1) - (1-p_D_2))/(1-p_D_1)
p4
# E3.8 plotting ====
# Create an rbvevd sample from the fitted model and compare quantile curves
par(mfrow = c(1,2)); par(mar = rep(2, 4))
n = 63
# Simulate n observations from the fitted bilogistic model (n matches the
# number of merged years used below).
bvdata <- rbvevd(n, alpha = a, beta = b, model = "bilog",
                 mar1 = mar1, mar2 = mar2)
M1 <- fbvevd(bvdata, model = "bilog")
# which = 5 selects the quantile-curves diagnostic at probabilities p.
# NOTE(review): M2 is not defined in this excerpt -- presumably an earlier
# bilogistic fit to the observed data; confirm before running standalone.
plot(M1, which = 5, p = c(0.75, 0.9, 0.95), col = "blue", xlim = c(1.2,2), ylim = c(3.5,5))
plot(M2, which = 5, p = c(0.75, 0.9, 0.95), col ="green", xlim = c(1.2,2), ylim = c(3.5,5))
# E3.9 Linear trend ====
# Merge the two annual-maximum series on Year; the resulting columns are
# year, fremantle sea level, SOI, and portpirie sea level.
slMerged <- merge(fremantle, portpirie, by = "Year")
sl_all <- data.frame(year = slMerged[,1], fremantle=slMerged[,2], SOI = slMerged[,3], Portpirie = slMerged[,4])
# Normalize year to [-1, 1]; the hard-coded 63 is the number of merged rows.
sl_all[,1] = 2*(sl_all[,1]-sl_all[1,1])/(sl_all[63,1]-sl_all[1,1]) -1
# Bilogistic fits with nonstationary location terms.  nsloc1 is the
# normalized year; nsloc2 uses the SOI column rather than year --
# NOTE(review): confirm this asymmetry is intended.  The IFM variant fixes
# the marginal parameters at the univariate estimates f1/f2.
M_trend.IFM <- fbvevd(sl[,2:3], model = "bilog", nsloc1 = sl_all[,1], nsloc2 = sl_all[,3], loc1 = f1$est[1], scale1 = f1$est[2],
                      shape1 = f1$est[3], loc2 = f2$est[1], scale2 = f2$est[2], shape2 = f2$est[3])
M_trend_FML <- fbvevd(sl[,2:3], nsloc1 = sl_all[,1], nsloc2 = sl_all[,3], model = "bilog")
anova(M_trend_FML, M_trend.IFM)
# To be compared to M3.IFM and M3
# Likelihood-ratio tests for the two added trend parameters (df = 2)
x = logLik(M_trend_FML)
y = logLik(M3)
lr.test(x = x, y = y, alpha = 0.05, df = 2)
y = logLik(M_trend.IFM)
x = logLik(M3.IFM)
lr.test(x = x, y = y, alpha = 0.05, df = 2)
#' Plot a slice of a raster stack
#'
#' This function will plot a slice of data at a single point location from a
#' list of prism files
#'
#' @param location a vector of a single location in the form of long,lat
#'
#' @param prismfile a vector of output from [ls_prism_data()]`[,1]` giving a
#' list of prism files to extract data from and plot
#'
#' @return a ggplot2 plot of the requested slice
#' @details the list of prism files should be from a continuous data set.
#' Otherwise the plot will look erratic and incorrect.
#'
#' @examples \dontrun{
#' ### Assumes you have a clean prism directory
#' get_prism_dailys(type="tmean", minDate = "2013-06-01", maxDate = "2013-06-14", keepZip=FALSE)
#' p <- prism_slice(c(-73.2119,44.4758),ls_prism_data())
#' print(p)
#' }
#'
#' @import raster ggplot2
#'
#' @export
prism_slice <- function(location, prismfile) {
  if (!is.null(dim(prismfile))) {
    stop("You must enter a vector of file names, not a data frame, try ls_prism_data()[,1]")
  }
  # Recover the dates and the parameter name from the file metadata.
  meta_d <- unlist(prism_md(prismfile, returnDate = TRUE))
  meta_names <- unlist(prism_md(prismfile))[1]
  param_name <- strsplit(meta_names, "-")[[1]][3]
  # Stack the rasters and extract the cell values at the requested point.
  pstack <- prism_stack(prismfile)
  data <- unlist(extract(pstack, matrix(location, nrow = 1), buffer = 10))
  data <- as.data.frame(data)
  data$date <- as.Date(meta_d)
  ## Re order chronologically
  data <- data[order(data$date), ]
  # Pick a unit label for the y axis based on the parameter type.
  if (grepl("tmin|tmax|tmean", rownames(data)[1])) {
    u <- "(C)"
  } else if (grepl("ppt", rownames(data)[1])) {
    u <- "(mm)"
  } else if (grepl("tdmean|vpdmax|vpdmin", rownames(data)[1])) {
    u <- "hPA"
  } else {
    # BUG FIX: 'u' was previously left undefined when no pattern matched,
    # causing "object 'u' not found" while building the y-axis label.
    u <- ""
  }
  out <- ggplot(data, aes(x = date, y = data)) + geom_path() + geom_point() +
    xlab("Date") + ylab(paste(param_name, u, sep = " "))
  return(out)
}
| /R/prism_slice.R | no_license | zejiang-unsw/prism | R | false | false | 1,726 | r | #' Plot a slice of a raster stack
#'
#' This function will plot a slice of data at a single point location from a
#' list of prism files
#'
#' @param location a vector of a single location in the form of long,lat
#'
#' @param prismfile a vector of output from [ls_prism_data()]`[,1]` giving a
#' list of prism files to extract data from and plot
#'
#' @return a ggplot2 plot of the requested slice
#' @details the list of prism files should be from a continuous data set.
#' Otherwise the plot will look erratic and incorrect.
#'
#' @examples \dontrun{
#' ### Assumes you have a clean prism directory
#' get_prism_dailys(type="tmean", minDate = "2013-06-01", maxDate = "2013-06-14", keepZip=FALSE)
#' p <- prism_slice(c(-73.2119,44.4758),ls_prism_data())
#' print(p)
#' }
#'
#' @import raster ggplot2
#'
#' @export
prism_slice <- function(location,prismfile){
if(!is.null(dim(prismfile))){
stop("You must enter a vector of file names, not a data frame, try ls_prism_data()[,1]")
}
meta_d <- unlist(prism_md(prismfile,returnDate=T))
meta_names <- unlist(prism_md(prismfile))[1]
param_name <- strsplit(meta_names,"-")[[1]][3]
pstack <- prism_stack(prismfile)
data <- unlist(extract(pstack,matrix(location,nrow=1),buffer=10))
data <- as.data.frame(data)
data$date <- as.Date(meta_d)
## Re order
data <- data[order(data$date),]
if(grepl("tmin|tmax|tmean",rownames(data)[1])){
u <- "(C)"
} else if(grepl("ppt",rownames(data)[1])){
u <- "(mm)"
} else if(grepl("tdmean|vpdmax|vpdmin",rownames(data)[1])){
u <- "hPA"
}
out <- ggplot(data,aes(x=date,y=data))+geom_path()+geom_point()+xlab("Date") + ylab(paste(param_name,u,sep=" "))
return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateCI2IFC.R
\name{generateCI2IFC}
\alias{generateCI2IFC}
\title{Generates 2IFC classification image}
\usage{
generateCI2IFC(
stimuli,
responses,
baseimage,
rdata,
save_as_png = TRUE,
filename = "",
targetpath = "./cis",
antiCI = FALSE,
scaling = "independent",
constant = 0.1
)
}
\arguments{
\item{stimuli}{Vector with stimulus numbers (should be numeric) that were presented in the order of the response vector. Stimulus numbers must match those in file name of the generated stimuli.}
\item{responses}{Vector specifying the responses in the same order of the stimuli vector, coded 1 for original stimulus selected and -1 for inverted stimulus selected.}
\item{baseimage}{String specifying which base image was used. Not the file name, but the key used in the list of base images at time of generating the stimuli.}
\item{rdata}{String pointing to .RData file that was created when stimuli were generated. This file contains the contrast parameters of all generated stimuli.}
\item{save_as_png}{Boolean stating whether to additionally save the CI as PNG image.}
\item{filename}{Optional string to specify a file name for the PNG image.}
\item{targetpath}{Optional string specifying path to save PNGs to (default: ./cis).}
\item{antiCI}{Optional boolean specifying whether antiCI instead of CI should be computed.}
\item{scaling}{Optional string specifying scaling method: \code{none}, \code{constant}, \code{matched}, or \code{independent} (default).}
\item{constant}{Optional number specifying the value used as constant scaling factor for the noise (only works for \code{scaling='constant'}).}
}
\value{
List of pixel matrix of classification noise only, scaled classification noise only, base image only and combined.
}
\description{
Generate classification image for 2 images forced choice reverse correlation task. This function exists for backwards compatibility. You can also just use \code{generateCI()}, which this function wraps.
}
\details{
This function saves the classification image as PNG to a folder and returns the CI. Your choice of scaling
matters. The default is \code{'independent'}. With \code{'matched'} scaling, the range of the intensity of
the pixels is matched to the range of the base image pixels. This scaling is non linear and depends on the range of both base image
and noise pattern. It is truly suboptimal, because it shifts the 0 point of the noise (that is, pixels that would
have not changed base image at all before scaling may change the base image after scaling and vice versa). It is
however the quick and dirty way to see how the CI noise affects the base image.
For more control, use \code{'constant'} scaling, where the scaling is independent of
the base image and noise range, but where the choice of constant is arbitrary (provided by the user with
the \code{constant} parameter). The noise is then scaled as follows: \code{scaled <- (ci + constant) / (2*constant)}.
Note that pixels can take intensity values between 0 and 1. If your scaled noise exceeds those values,
a warning will be given. You should pick a higher constant (but do so consistently for different classification images
that you want to compare). The higher the constant, the less visible the noise will be in the resulting image.
When creating multiple classification images a good strategy is to find the lowest constant that works for all
classification images. This can be automatized using the \code{autoscale} function.
}
| /man/generateCI2IFC.Rd | no_license | rdotsch/rcicr | R | false | true | 3,528 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateCI2IFC.R
\name{generateCI2IFC}
\alias{generateCI2IFC}
\title{Generates 2IFC classification image}
\usage{
generateCI2IFC(
stimuli,
responses,
baseimage,
rdata,
save_as_png = TRUE,
filename = "",
targetpath = "./cis",
antiCI = FALSE,
scaling = "independent",
constant = 0.1
)
}
\arguments{
\item{stimuli}{Vector with stimulus numbers (should be numeric) that were presented in the order of the response vector. Stimulus numbers must match those in file name of the generated stimuli.}
\item{responses}{Vector specifying the responses in the same order of the stimuli vector, coded 1 for original stimulus selected and -1 for inverted stimulus selected.}
\item{baseimage}{String specifying which base image was used. Not the file name, but the key used in the list of base images at time of generating the stimuli.}
\item{rdata}{String pointing to .RData file that was created when stimuli were generated. This file contains the contrast parameters of all generated stimuli.}
\item{save_as_png}{Boolean stating whether to additionally save the CI as PNG image.}
\item{filename}{Optional string to specify a file name for the PNG image.}
\item{targetpath}{Optional string specifying path to save PNGs to (default: ./cis).}
\item{antiCI}{Optional boolean specifying whether antiCI instead of CI should be computed.}
\item{scaling}{Optional string specifying scaling method: \code{none}, \code{constant}, \code{matched}, or \code{independent} (default).}
\item{constant}{Optional number specifying the value used as constant scaling factor for the noise (only works for \code{scaling='constant'}).}
}
\value{
List of pixel matrix of classification noise only, scaled classification noise only, base image only and combined.
}
\description{
Generate classification image for 2 images forced choice reverse correlation task. This function exists for backwards compatibility. You can also just use \code{generateCI()}, which this function wraps.
}
\details{
This function saves the classification image as PNG to a folder and returns the CI. Your choice of scaling
matters. The default is \code{'independent'}. With \code{'matched'} scaling, the range of the intensity of
the pixels is matched to the range of the base image pixels. This scaling is non linear and depends on the range of both base image
and noise pattern. It is truly suboptimal, because it shifts the 0 point of the noise (that is, pixels that would
have not changed base image at all before scaling may change the base image after scaling and vice versa). It is
however the quick and dirty way to see how the CI noise affects the base image.
For more control, use \code{'constant'} scaling, where the scaling is independent of
the base image and noise range, but where the choice of constant is arbitrary (provided by the user with
the \code{constant} parameter). The noise is then scaled as follows: \code{scaled <- (ci + constant) / (2*constant)}.
Note that pixels can take intensity values between 0 and 1. If your scaled noise exceeds those values,
a warning will be given. You should pick a higher constant (but do so consistently for different classification images
that you want to compare). The higher the constant, the less visible the noise will be in the resulting image.
When creating multiple classification images a good strategy is to find the lowest constant that works for all
classification images. This can be automatized using the \code{autoscale} function.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bracket.women.2017.R
\name{bracket.women.2017}
\alias{bracket.women.2017}
\title{2017 Women's March Madness bracket}
\format{character vector of length 64}
\description{
This dataset contains the ESPN team ids of the 64 teams in the
2017 March Madness women's bracket. The teams are ordered by overall seed,
such that the first four team ids correspond to the four #1 seeds.
}
| /man/bracket.women.2017.Rd | no_license | HennenD/mRchmadness | R | false | true | 459 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bracket.women.2017.R
\name{bracket.women.2017}
\alias{bracket.women.2017}
\title{2017 Women's March Madness bracket}
\format{character vector of length 64}
\description{
This dataset contains the ESPN team ids of the 64 teams in the
2017 March Madness women's bracket. The teams are ordered by overall seed,
such that the first four team ids correspond to the four #1 seeds.
}
|
/aiaR/myfirstR/AI-D3-2.R | no_license | mosabeker/AIA | R | false | false | 17,782 | r | ||
#####################################################################
#
# xchr.R
#
# copyright (c) 2004-2010, Karl W Broman
# last modified Nov, 2010
# first written Apr, 2004
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License,
# version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose. See the GNU
# General Public License, version 3, for more details.
#
# A copy of the GNU General Public License, version 3, is available
# at http://www.r-project.org/Licenses/GPL-3
#
# Part of the R/qtl package
# Contains: Utilities for dealing with the X chromosome.
# getsex, getgenonames, reviseXdata, scanoneXnull
# revisecovar, dropXcol
# [See also fixXgeno.bc & fixXgeno.f2 in read.cross.R]
#
######################################################################
# get sex and pgm columns from phenotype data
# Extract the 'sex' and 'pgm' phenotype columns from a cross object.
#
# Returns list(sex = ..., pgm = ...) with sex coded 0 = female / 1 = male
# and pgm coded 0/1.  Either element is NULL when the corresponding column
# is absent or cannot be interpreted.  Character/factor sex columns are
# decoded from their levels ("F"/"M", "female"/"male", etc.).  Missing
# values in an otherwise valid column are imputed (with a warning) to the
# majority value, defaulting to female / pgm==0 when the column is mixed.
getsex <-
  function(cross)
{
  phe.names <- names(cross$pheno)

  # Locate the sex/pgm columns by case-insensitive exact name match.
  sex.column <- grep("^[Ss][Ee][Xx]$", phe.names)
  pgm.column <- grep("^[Pp][Gg][Mm]$", phe.names)

  if(length(sex.column)==0) { # no sex included
    sex <- NULL
  }
  else {
    if(length(sex.column)>1)
      warning("'sex' included multiple times. Using the first one.")
    temp <- cross$pheno[,sex.column[1]]
    if(is.numeric(temp)) {
      if(any(!is.na(temp) & temp != 0 & temp != 1)) {
        warning("Sex column should be coded as 0=female 1=male; sex ignored.")
        sex <- NULL
      }
      else sex <- temp
    }
    else {
      if(!is.factor(temp)) temp <- as.factor(temp)
      if(length(levels(temp)) == 1) {
        # single-level factor: everyone is the same sex
        if(levels(temp) == "F" || levels(temp)=="f" ||
           toupper(levels(temp)) == "FEMALE") sex <- rep(0,nind(cross))
        else if(levels(temp) == "M" || levels(temp)=="m" ||
                toupper(levels(temp)) == "MALE") sex <- rep(1,nind(cross))
        else {
          warning("Sex column should be coded as 0=female 1=male; sex ignored.")
          # BUG FIX: 'sex' was previously left unassigned on this path,
          # causing "object 'sex' not found" at the end of the function.
          sex <- NULL
        }
      }
      else if(length(levels(temp)) > 2) {
        warning("Sex column should be coded as a two-level factor; sex ignored.")
        sex <- NULL
      }
      else { # is a factor with two levels
        lev <- levels(temp)
        if(length(grep("^[Ff]",lev))>0 &&
           length(males <- grep("^[Mm]",lev))>0) {
          temp <- as.character(temp)
          sex <- rep(0,length(temp))
          sex[is.na(temp)] <- NA
          sex[!is.na(temp) & temp==lev[males]] <- 1
        }
        else {
          warning("Don't understand levels in sex column; sex ignored.")
          # BUG FIX: ensure 'sex' is defined on this path as well.
          sex <- NULL
        }
      }
    }
  }

  if(length(pgm.column)==0) { # no pgm included
    pgm <- NULL
  }
  else {
    if(length(pgm.column)>1)
      warning("'pgm' included multiple times. Using the first one.")
    temp <- cross$pheno[,pgm.column[1]]
    if(!is.numeric(temp))
      temp <- as.numeric(temp)-1  # factor levels -> 0/1
    if(any(!is.na(temp) & temp != 0 & temp != 1)) {
      warning("pgm column should be coded as 0/1; pgm ignored.")
      pgm <- NULL
    }
    else pgm <- temp
  }

  # Impute missing sex values: follow the observed values when they are all
  # the same; otherwise default to female.
  if(!is.null(sex) && any(is.na(sex))) {
    if(all(sex[!is.na(sex)]==1)) {
      warning(sum(is.na(sex)), " individuals with missing sex; assuming they're male like the others")
      sex[is.na(sex)] <- 1
    }
    else if(all(sex[!is.na(sex)]==0)) {
      warning(sum(is.na(sex)), " individuals with missing sex; assuming they're female like the others")
      sex[is.na(sex)] <- 0
    }
    else {
      warning(sum(is.na(sex)), " individuals with missing sex; assuming they're female")
      sex[is.na(sex)] <- 0
    }
  }

  # Impute missing pgm values analogously, defaulting to pgm==0.
  if(!is.null(pgm) && any(is.na(pgm))) {
    if(all(pgm[!is.na(pgm)]==1)) {
      warning(sum(is.na(pgm)), " individuals with missing pgm; assuming pgm==1 like the others")
      pgm[is.na(pgm)] <- 1
    }
    else if(all(pgm[!is.na(pgm)]==0)) {
      warning(sum(is.na(pgm)), " individuals with missing pgm; assuming pgm==0 like the others")
      pgm[is.na(pgm)] <- 0
    }
    else {
      warning(sum(is.na(pgm)), " individuals with missing pgm; assuming pgm==0")
      pgm[is.na(pgm)] <- 0
    }
  }

  list(sex=sex,pgm=pgm)
}
# get names of genotypes
# used in discan, effectplot, plot.pxg, scanone, scantwo, vbscan, reviseXdata
# cross.attr gives the cross attributes
# Build the vector of genotype names for a given cross type and chromosome
# type.  For the X chromosome the names depend on the sexes and cross
# directions actually present (sexpgm$sex, sexpgm$pgm) and on how much the
# genotype coding is to be expanded ('simple', 'standard' or 'full'; see the
# tables in the comments below).  Allele letters come from
# cross.attr$alleles when available, otherwise default to A/B (A/B/C/D for a
# 4-way cross).
getgenonames <-
  function(type=c("f2","bc","riself","risib","4way","dh","special"),
           chrtype=c("A","X"), expandX=c("simple","standard","full"),
           sexpgm, cross.attr)
{
  type <- match.arg(type)
  chrtype <- match.arg(chrtype)
  expandX <- match.arg(expandX)

  if(chrtype=="X") {
    sex <- sexpgm$sex
    pgm <- sexpgm$pgm
  }

  # 'special' crosses carry their genotype names explicitly.
  if(type=="special") return(cross.attr$genotypes)

  if(missing(cross.attr) || !("alleles" %in% names(cross.attr))) {
    if(type == "4way") alleles <- LETTERS[1:4]
    else alleles <- LETTERS[1:2]
  }
  else
    alleles <- cross.attr$alleles

  # tempgn holds the five building blocks: AA, AB, BB, AY, BY
  tempgn <- c(paste(rep(alleles[1],2),collapse=""),
              paste(alleles,collapse=""),
              paste(rep(alleles[2],2),collapse=""),
              paste(alleles[1],"Y",sep=""),
              paste(alleles[2],"Y",sep=""))

  # get rid of missing sex and pgm values, if there are any
  if(chrtype=="X") {
    if(length(sex)>0) sex <- sex[!is.na(sex)]
    if(length(pgm)>0) pgm <- pgm[!is.na(pgm)]
  }

  if(type=="riself" || type=="risib" || type=="dh")
    gen.names <- tempgn[c(1,3)]

  else if(type == "4way") {
    if(chrtype=="A")
      gen.names <- c(paste(alleles[1],alleles[3],sep=""),
                     paste(alleles[2],alleles[3],sep=""),
                     paste(alleles[1],alleles[4],sep=""),
                     paste(alleles[2],alleles[4],sep=""))
    else
      gen.names <- c(paste(alleles[1],alleles[3],sep=""),
                     paste(alleles[2],alleles[3],sep=""),
                     paste(alleles[1],"Y",sep=""),
                     paste(alleles[2],"Y",sep=""))
  }

  else if(type == "bc") {
    if(chrtype=="A") # autosome
      gen.names <- tempgn[1:2] # AA AB
    else { # X chromosome
      #                 simple     standard    full
      #  -both sexes    A-/AB/BY   AA/AB/AY/BY same as std
      #  -all females   AA/AB      same        same
      #  -all males     AY/BY      same        same
      if(length(sex)==0 || all(sex==0)) # all females
        gen.names <- tempgn[1:2] # AA AB
      else if(all(sex==1)) # all males
        gen.names <- tempgn[4:5] # AY BY
      else { # some of each
        if(expandX == "simple")
          gen.names <- c(paste(alleles[1], "-", sep=""),
                         tempgn[c(2,5)]) # A-, AB, BY
        else gen.names <- tempgn[c(1,2,4,5)] # AA,AB,AY,BY
      }
    }
  }

  else { # intercross
    if(chrtype == "A") # autosomal
      gen.names <- tempgn[1:3]
    else { # X chromsome
      #  both crosses   simple     standard        full
      #  -both sexes    A-/AB/B-   AA/AB/BB/AY/BY  AA/AB1/AB2/BB/AY/BY
      #  -all females   AA/AB/BB   same as simple  AA/AB1/AB2/BB
      #  -all males     AY/BY      same            same
      #  forw cross
      #  -both sexes    A-/AB/BY   AA/AB/AY/BY     same as std
      #  -all females   AA/AB      same            same
      #  -all males     AY/BY      same            same
      #  backw cross
      #  -both sexes    B-/AB/AY   BB/AB/AY/BY     same as std
      #  -all females   BB/AB      same            same
      #  -all males     AY/BY      same            same
      if(length(sex)==0 || all(sex==0)) { # all females
        if(length(pgm)==0 || all(pgm==0)) # all forw dir
          gen.names <- tempgn[1:2] # AA AB
        else if(all(pgm==1)) # all backw dir
          gen.names <- tempgn[3:2] # BB AB
        else { # some of each direction
          if(expandX=="full")
            # heterozygotes are split by direction: ABf vs ABr
            gen.names <- c(tempgn[1],
                           paste(tempgn[2],c("f","r"), sep=""),
                           tempgn[3])
          else gen.names <- tempgn[1:3]
        }
      }
      else if(all(sex==1)) # all males
        gen.names <- tempgn[4:5]
      else { # some of each sex
        if(length(pgm)==0 || all(pgm==0)) { # all forw
          if(expandX=="simple")
            gen.names <- c(paste(alleles[1],"-", sep=""),
                           tempgn[c(2,5)])
          else gen.names <- tempgn[c(1,2,4,5)]
        }
        else if (all(pgm==1)) { # all backw
          if(expandX=="simple")
            gen.names <- c(paste(alleles[2], "-",sep=""),
                           tempgn[c(2,4)])
          else gen.names <- tempgn[c(3,2,4,5)]
        }
        else { # some of each dir
          if(expandX=="simple")
            gen.names <- c(paste(alleles[1],"-",sep=""),
                           tempgn[2],
                           paste(alleles[2],"-",sep=""))
          else if(expandX=="standard")
            gen.names <- tempgn
          else
            gen.names <- c(tempgn[1],
                           paste(tempgn[2],c("f","r"),sep=""),
                           tempgn[3:5])
        }
      }
    }
  }

  gen.names
}
# revise genotype data, probabilities or imputations for the X chromosome
# Recode X-chromosome data so that genotype codes line up with the expanded,
# sex/direction-specific genotype names produced by getgenonames().
#
# Exactly one of geno (genotype matrix), prob (genotype-probability array),
# draws (imputation array) or pairprob (pairwise-probability array) must be
# supplied; the same object is returned with its codes / slices remapped.
# Raw X data use codes 1/2 within each sex and cross direction; this
# function shifts male and reverse-direction codes into the positions of
# the expanded genotype list ('simple', 'standard' or 'full').
# 'force' forces the both-sexes / both-directions recoding even when only
# one sex or direction is present.
reviseXdata <-
  function(type=c("f2","bc"), expandX=c("simple","standard","full"),
           sexpgm, geno, prob, draws, pairprob, cross.attr, force=FALSE)
{
  type <- match.arg(type)
  expandX <- match.arg(expandX)

  sex <- sexpgm$sex
  pgm <- sexpgm$pgm

  # exactly one of the four data arguments must be given
  notmissing <- (!missing(geno)) + (!missing(prob)) + (!missing(draws)) +
    (!missing(pairprob))
  if(notmissing == 0)
    stop("Provide one of geno, prob, draws, pairprob.")
  if(notmissing > 1)
    stop("Provide just one of geno, prob, draws, pairprob.")

  # get genonames
  genonames <- getgenonames(type, "X", expandX, sexpgm, cross.attr)

  if(type == "bc") { # backcross
    if(length(sex)==0 || ((all(sex==0) || all(sex==1)) && !force)) { # all one sex
      # no changes necessary
      if(!missing(geno)) return(geno)
      else if(!missing(prob)) {
        dimnames(prob)[[3]] <- genonames
        return(prob)
      }
      else if(!missing(draws))
        return(draws)
      else # pairprob
        return(pairprob)
    }
    else { # both sexes
      # males: shift codes 1/2 into the AY/BY slots of the expanded list
      if(!missing(geno)) {
        gmale <- geno[sex==1,]
        if(expandX=="simple")
          gmale[!is.na(gmale) & gmale==2] <- 3
        else {
          gmale[!is.na(gmale) & gmale==1] <- 3
          gmale[!is.na(gmale) & gmale==2] <- 4
        }
        geno[sex==1,] <- gmale
        return(geno)
      }
      else if(!missing(draws)) {
        gmale <- draws[sex==1,,]
        if(expandX=="simple")
          gmale[gmale==2] <- 3
        else {
          gmale[gmale==1] <- 3
          gmale[gmale==2] <- 4
        }
        draws[sex==1,,] <- gmale
        return(draws)
      }
      else if(!missing(prob)) {
        # rebuild the probability array with one slice per expanded genotype
        dimprob <- dim(prob)
        dimprob[3] <- length(genonames)
        newprob <- array(0,dim=dimprob)
        dimnames(newprob) <- c(dimnames(prob)[1:2],list(genonames))
        newprob[sex==0,,1:2] <- prob[sex==0,,1:2]
        if(expandX=="simple") {
          newprob[sex==1,,1] <- prob[sex==1,,1]
          newprob[sex==1,,3] <- prob[sex==1,,2]
        }
        else {
          newprob[sex==1,,3] <- prob[sex==1,,1]
          newprob[sex==1,,4] <- prob[sex==1,,2]
        }
        return(newprob)
      }
      else { # pairprob
        dimpairprob <- dim(pairprob)
        dimpairprob[3] <- dimpairprob[4] <- length(genonames)
        newpairprob <- array(0,dim=dimpairprob)
        newpairprob[sex==0,,1:2,1:2] <- pairprob[sex==0,,,]
        if(expandX=="simple") {
          newpairprob[sex==1,,1,1] <- pairprob[sex==1,,1,1]
          newpairprob[sex==1,,1,3] <- pairprob[sex==1,,1,2]
          newpairprob[sex==1,,3,1] <- pairprob[sex==1,,2,1]
          newpairprob[sex==1,,3,3] <- pairprob[sex==1,,2,2]
        }
        else {
          newpairprob[sex==1,,3,3] <- pairprob[sex==1,,1,1]
          newpairprob[sex==1,,3,4] <- pairprob[sex==1,,1,2]
          newpairprob[sex==1,,4,3] <- pairprob[sex==1,,2,1]
          newpairprob[sex==1,,4,4] <- pairprob[sex==1,,2,2]
        }
        return(newpairprob)
      }
    } # end of "both sexes" / backcross
  } # end of backcross

  else { # intercross
    if(length(sex)==0 || all(sex==0)) { # all females
      if(length(pgm)==0 || ((all(pgm==0) || all(pgm==1)) && !force)) { # one dir, females
        if(!missing(geno)) return(geno)
        else if(!missing(draws)) return(draws)
        else if(!missing(pairprob)) return(pairprob)
        else {
          dimnames(prob)[[3]] <- genonames
          return(prob)
        }
      }
      else { # both dir, females
        # reverse-direction females: code 1 is the B homozygote
        if(!missing(geno)) {
          gback <- geno[pgm==1,]
          if(expandX!="full") {
            gback[!is.na(gback) & gback==1] <- 3
            geno[pgm==1,] <- gback
          }
          else {
            gback[!is.na(gback) & gback==1] <- 4
            gback[!is.na(gback) & gback==2] <- 3
            geno[pgm==1,] <- gback
          }
          return(geno)
        }
        else if(!missing(draws)) {
          gback <- draws[pgm==1,,]
          if(expandX!="full") {
            gback[!is.na(gback) & gback==1] <- 3
          }
          else {
            gback[!is.na(gback) & gback==1] <- 4
            gback[!is.na(gback) & gback==2] <- 3
          }
          draws[pgm==1,,] <- gback
          return(draws)
        }
        else if(!missing(prob)) {
          dimprob <- dim(prob)
          dimprob[3] <- length(genonames)
          newprob <- array(0,dim=dimprob)
          dimnames(newprob) <- c(dimnames(prob)[1:2],list(genonames))
          newprob[pgm==0,,1:2] <- prob[pgm==0,,1:2]
          if(expandX!="full") { # simple/standard
            newprob[pgm==1,,3] <- prob[pgm==1,,1]
            newprob[pgm==1,,2] <- prob[pgm==1,,2]
          }
          else {
            newprob[pgm==1,,4] <- prob[pgm==1,,1]
            newprob[pgm==1,,3] <- prob[pgm==1,,2]
          }
          return(newprob)
        }
        else { # pairprob
          dimpairprob <- dim(pairprob)
          dimpairprob[3] <- dimpairprob[4] <- length(genonames)
          newpairprob <- array(0,dim=dimpairprob)
          newpairprob[pgm==0,,1:2,1:2] <- pairprob[pgm==0,,,]
          if(expandX!="full") { # simple/standard
            newpairprob[pgm==1,,3,3] <- pairprob[pgm==1,,1,1]
            newpairprob[pgm==1,,3,2] <- pairprob[pgm==1,,1,2]
            newpairprob[pgm==1,,2,3] <- pairprob[pgm==1,,2,1]
            newpairprob[pgm==1,,2,2] <- pairprob[pgm==1,,2,2]
          }
          else {
            newpairprob[pgm==1,,4,4] <- pairprob[pgm==1,,1,1]
            newpairprob[pgm==1,,4,3] <- pairprob[pgm==1,,1,2]
            newpairprob[pgm==1,,3,4] <- pairprob[pgm==1,,2,1]
            newpairprob[pgm==1,,3,3] <- pairprob[pgm==1,,2,2]
          }
          return(newpairprob)
        }
      }
    }
    else if(all(sex==1) && !force) { # all males
      if(!missing(geno)) return(geno)
      else if(!missing(draws)) return(draws)
      else if(!missing(pairprob)) return(pairprob)
      else {
        dimnames(prob)[[3]] <- genonames
        return(prob)
      }
    }
    else { # both sexes
      if(length(pgm)==0 || all(pgm==0)) { # both sexes, forw dir
        if(!missing(geno)) {
          gmale <- geno[sex==1,]
          if(expandX=="simple")
            gmale[!is.na(gmale) & gmale==2] <- 3
          else {
            gmale[!is.na(gmale) & gmale==1] <- 3
            gmale[!is.na(gmale) & gmale==2] <- 4
          }
          geno[sex==1,] <- gmale
          return(geno)
        }
        else if(!missing(draws)) {
          gmale <- draws[sex==1,,]
          if(expandX=="simple")
            gmale[gmale==2] <- 3
          else {
            gmale[gmale==1] <- 3
            gmale[gmale==2] <- 4
          }
          draws[sex==1,,] <- gmale
          return(draws)
        }
        else if(!missing(prob)) {
          dimprob <- dim(prob)
          dimprob[3] <- length(genonames)
          newprob <- array(0,dim=dimprob)
          dimnames(newprob) <- c(dimnames(prob)[1:2],list(genonames))
          newprob[sex==0,,1:2] <- prob[sex==0,,1:2]
          if(expandX=="simple") {
            newprob[sex==1,,1] <- prob[sex==1,,1]
            newprob[sex==1,,3] <- prob[sex==1,,2]
          }
          else {
            newprob[sex==1,,3] <- prob[sex==1,,1]
            newprob[sex==1,,4] <- prob[sex==1,,2]
          }
          return(newprob)
        }
        else { # pairprob
          dimpairprob <- dim(pairprob)
          dimpairprob[3] <- dimpairprob[4] <- length(genonames)
          newpairprob <- array(0,dim=dimpairprob)
          newpairprob[sex==0,,1:2,1:2] <- pairprob[sex==0,,,]
          if(expandX=="simple") {
            newpairprob[sex==1,,1,1] <- pairprob[sex==1,,1,1]
            newpairprob[sex==1,,1,3] <- pairprob[sex==1,,1,2]
            newpairprob[sex==1,,3,1] <- pairprob[sex==1,,2,1]
            newpairprob[sex==1,,3,3] <- pairprob[sex==1,,2,2]
          }
          else {
            newpairprob[sex==1,,3,3] <- pairprob[sex==1,,1,1]
            newpairprob[sex==1,,3,4] <- pairprob[sex==1,,1,2]
            newpairprob[sex==1,,4,3] <- pairprob[sex==1,,2,1]
            newpairprob[sex==1,,4,4] <- pairprob[sex==1,,2,2]
          }
          return(newpairprob)
        }
      } # both sexes, forw dir
      # NOTE: every branch above returns, so this 'if' is only reached when
      # the forward-direction case did not apply.
      if(all(pgm==1) && !force) { # both sexes, backw dir
        if(!missing(geno)) {
          gmale <- geno[sex==1,]
          if(expandX!="full") {
            gmale[!is.na(gmale) & gmale==1] <- 3
            gmale[!is.na(gmale) & gmale==2] <- 1
          }
          else {
            gmale[!is.na(gmale) & gmale==1] <- 3
            gmale[!is.na(gmale) & gmale==2] <- 4
          }
          geno[sex==1,] <- gmale
          return(geno)
        }
        else if(!missing(draws)) {
          gmale <- draws[sex==1,,]
          if(expandX!="full") {
            gmale[gmale==1] <- 3
            gmale[gmale==2] <- 1
          }
          else {
            gmale[gmale==1] <- 3
            gmale[gmale==2] <- 4
          }
          draws[sex==1,,] <- gmale
          return(draws)
        }
        else if(!missing(prob)) {
          dimprob <- dim(prob)
          dimprob[3] <- length(genonames)
          newprob <- array(0,dim=dimprob)
          dimnames(newprob) <- c(dimnames(prob)[1:2],list(genonames))
          newprob[sex==0,,1:2] <- prob[sex==0,,1:2]
          if(expandX=="simple") {
            newprob[sex==1,,3] <- prob[sex==1,,1]
            newprob[sex==1,,1] <- prob[sex==1,,2]
          }
          else {
            newprob[sex==1,,3] <- prob[sex==1,,1]
            newprob[sex==1,,4] <- prob[sex==1,,2]
          }
          return(newprob)
        }
        else { # pairprob
          dimpairprob <- dim(pairprob)
          dimpairprob[3] <- dimpairprob[4] <- length(genonames)
          newpairprob <- array(0,dim=dimpairprob)
          newpairprob[sex==0,,1:2,1:2] <- pairprob[sex==0,,,]
          if(expandX=="simple") {
            newpairprob[sex==1,,3,3] <- pairprob[sex==1,,1,1]
            newpairprob[sex==1,,1,3] <- pairprob[sex==1,,2,1]
            newpairprob[sex==1,,3,1] <- pairprob[sex==1,,1,2]
            newpairprob[sex==1,,1,1] <- pairprob[sex==1,,2,2]
          }
          else {
            newpairprob[sex==1,,3,3] <- pairprob[sex==1,,1,1]
            newpairprob[sex==1,,3,4] <- pairprob[sex==1,,1,2]
            newpairprob[sex==1,,4,3] <- pairprob[sex==1,,2,1]
            newpairprob[sex==1,,4,4] <- pairprob[sex==1,,2,2]
          }
          return(newpairprob)
        }
      } # both sexes, backw dir
      else { # both dir, both sexes
        # three groups must be remapped: males, and reverse-dir females
        if(!missing(geno)) {
          gmale <- geno[sex==1,]
          gfemaler <- geno[sex==0 & pgm==1,]
          if(expandX=="simple") {
            gmale[!is.na(gmale) & gmale==2] <- 3
            gfemaler[!is.na(gfemaler) & gfemaler==1] <- 3
          }
          else if(expandX=="standard") {
            gmale[!is.na(gmale) & gmale==1] <- 4
            gmale[!is.na(gmale) & gmale==2] <- 5
            gfemaler[!is.na(gfemaler) & gfemaler==1] <- 3
          }
          else {
            gmale[!is.na(gmale) & gmale==1] <- 5
            gmale[!is.na(gmale) & gmale==2] <- 6
            gfemaler[!is.na(gfemaler) & gfemaler==1] <- 4
            gfemaler[!is.na(gfemaler) & gfemaler==2] <- 3
          }
          geno[sex==1,] <- gmale
          geno[sex==0 & pgm==1,] <- gfemaler
          return(geno)
        }
        else if(!missing(draws)) {
          gmale <- draws[sex==1,,]
          gfemaler <- draws[sex==0 & pgm==1,,]
          if(expandX=="simple") {
            gmale[gmale==2] <- 3
            gfemaler[gfemaler==1] <- 3
          }
          else if(expandX=="standard") {
            gmale[gmale==1] <- 4
            gmale[gmale==2] <- 5
            gfemaler[gfemaler==1] <- 3
          }
          else {
            gmale[gmale==1] <- 5
            gmale[gmale==2] <- 6
            gfemaler[gfemaler==1] <- 4
            gfemaler[gfemaler==2] <- 3
          }
          draws[sex==1,,] <- gmale
          draws[sex==0 & pgm==1,,] <- gfemaler
          return(draws)
        }
        else if(!missing(prob)) {
          dimprob <- dim(prob)
          dimprob[3] <- length(genonames)
          newprob <- array(0,dim=dimprob)
          dimnames(newprob) <- c(dimnames(prob)[1:2],list(genonames))
          newprob[sex==0 & pgm==0,,1:2] <- prob[sex==0 & pgm==0,,1:2]
          if(expandX=="simple") {
            newprob[sex==1,,1] <- prob[sex==1,,1]
            newprob[sex==1,,3] <- prob[sex==1,,2]
            newprob[sex==0 & pgm==1,,3] <- prob[sex==0 & pgm==1,,1]
            newprob[sex==0 & pgm==1,,2] <- prob[sex==0 & pgm==1,,2]
          }
          else if(expandX=="standard") {
            newprob[sex==1,,4] <- prob[sex==1,,1]
            newprob[sex==1,,5] <- prob[sex==1,,2]
            newprob[sex==0 & pgm==1,,3] <- prob[sex==0 & pgm==1,,1]
            newprob[sex==0 & pgm==1,,2] <- prob[sex==0 & pgm==1,,2]
          }
          else {
            newprob[sex==1,,5] <- prob[sex==1,,1]
            newprob[sex==1,,6] <- prob[sex==1,,2]
            newprob[sex==0 & pgm==1,,4] <- prob[sex==0 & pgm==1,,1]
            newprob[sex==0 & pgm==1,,3] <- prob[sex==0 & pgm==1,,2]
          }
          return(newprob)
        }
        else { # pairprob
          dimpairprob <- dim(pairprob)
          dimpairprob[3] <- dimpairprob[4] <- length(genonames)
          newpairprob <- array(0,dim=dimpairprob)
          newpairprob[sex==0 & pgm==0,,1:2,1:2] <- pairprob[sex==0 & pgm==0,,,]
          male <- (sex==1)
          femaler <- (sex==0) & (pgm==1)
          if(expandX=="simple") {
            newpairprob[male,,1,1] <- pairprob[male,,1,1]
            newpairprob[male,,1,3] <- pairprob[male,,1,2]
            newpairprob[male,,3,1] <- pairprob[male,,2,1]
            newpairprob[male,,3,3] <- pairprob[male,,2,2]
            newpairprob[femaler,,3,3] <- pairprob[femaler,,1,1]
            newpairprob[femaler,,3,2] <- pairprob[femaler,,1,2]
            newpairprob[femaler,,2,3] <- pairprob[femaler,,2,1]
            newpairprob[femaler,,2,2] <- pairprob[femaler,,2,2]
          }
          else if(expandX=="standard") {
            newpairprob[male,,4,4] <- pairprob[male,,1,1]
            newpairprob[male,,4,5] <- pairprob[male,,1,2]
            newpairprob[male,,5,4] <- pairprob[male,,2,1]
            newpairprob[male,,5,5] <- pairprob[male,,2,2]
            newpairprob[femaler,,3,3] <- pairprob[femaler,,1,1]
            newpairprob[femaler,,3,2] <- pairprob[femaler,,1,2]
            newpairprob[femaler,,2,3] <- pairprob[femaler,,2,1]
            newpairprob[femaler,,2,2] <- pairprob[femaler,,2,2]
          }
          else {
            newpairprob[male,,5,5] <- pairprob[male,,1,1]
            newpairprob[male,,5,6] <- pairprob[male,,1,2]
            newpairprob[male,,6,5] <- pairprob[male,,2,1]
            newpairprob[male,,6,6] <- pairprob[male,,2,2]
            newpairprob[femaler,,4,4] <- pairprob[femaler,,1,1]
            newpairprob[femaler,,4,3] <- pairprob[femaler,,1,2]
            newpairprob[femaler,,3,4] <- pairprob[femaler,,2,1]
            newpairprob[femaler,,3,3] <- pairprob[femaler,,2,2]
          }
          return(newpairprob)
        }
      }
    }
  } # end of intercross
}
######################################################################
# scanoneXnull
#
# figure out null hypothesis business for scanone on X chromosome
######################################################################
# scanoneXnull: work out the null-hypothesis structure for a scanone()
# on the X chromosome.
#
# type    cross type; RIL ("risib"/"riself") and doubled-haploid ("dh")
#         crosses are treated like a backcross
# sexpgm  list(sex=0/1 vector, pgm=0/1 vector) as produced by getsex()
#
# Returns list(adjustX, parX0, sexpgmcovar, sexpgmcovar.alt):
#   adjustX         whether the null model needs sex/pgm covariates
#   parX0           number of parameters in the null model
#   sexpgmcovar     covariate matrix for the null model (or NULL)
#   sexpgmcovar.alt group codes (1-based) for the alternative model (or NULL)
scanoneXnull <-
function(type, sexpgm)
{
  # RIL and doubled-haploid crosses behave like a backcross here.
  if(type == "risib" || type == "riself" || type == "dh") type <- "bc"

  sex <- sexpgm$sex
  pgm <- sexpgm$pgm

  # Characterize the sex pattern.
  no.sex    <- (length(sex) == 0)
  allfemale <- no.sex || all(sex == 0)
  allmale   <- !no.sex && all(sex == 1)
  bothsex   <- !(allfemale || allmale)
  onesex    <- !bothsex

  # Characterize the cross-direction pattern.
  onedir  <- (length(pgm) == 0 || all(pgm == 0) || all(pgm == 1))
  bothdir <- !onedir

  # Null hypothesis cases:
  # (1) one homogeneous group -> no adjustment needed
  if((type == "bc" && onesex) ||
     (type == "f2" && ((onedir && onesex) || (bothdir && allmale)))) {
    out <- list(adjustX = FALSE, parX0 = 1,
                sexpgmcovar = NULL, sexpgmcovar.alt = NULL)
  }
  # (2) groups split by sex
  else if((type == "bc" && bothsex) ||
          (type == "f2" && onedir && bothsex)) {
    out <- list(adjustX = TRUE, parX0 = 2,
                sexpgmcovar = cbind(sex),
                sexpgmcovar.alt = sex + 1)
  }
  # (3) all-female intercross with both directions -> split by direction
  else if(type == "f2" && bothdir && allfemale) {
    out <- list(adjustX = TRUE, parX0 = 2,
                sexpgmcovar = cbind(pgm),
                sexpgmcovar.alt = pgm + 1)
  }
  # (4) intercross, both directions and both sexes -> three groups:
  #     forward females (1), backward females (2), males (3)
  else {
    grp <- rep(3, length(sex))
    grp[sex == 0 & pgm == 0] <- 1
    grp[sex == 0 & pgm == 1] <- 2
    out <- list(adjustX = TRUE, parX0 = 3,
                sexpgmcovar = cbind(sex, as.numeric(sex == 0 & pgm == 1)),
                sexpgmcovar.alt = grp)
  }

  out
}
######################################################################
# revisecovar
#
# Drop sex and pgm and their interxn as covariates for the X chr.
######################################################################
# revisecovar: drop covariates that are confounded with sex/pgm on the
# X chromosome (i.e. linearly dependent on the sex/direction design).
#
# sexpgm  list(sex=0/1 vector, pgm=0/1 vector) as produced by getsex()
# covar   covariate matrix (or something coercible via as.matrix), or NULL
#
# Returns the covariate matrix with confounded columns removed (numeric(0)
# if all were dropped), carrying an "n.dropped" attribute with the count.
revisecovar <-
function(sexpgm, covar)
{
  # Nothing to revise when there are no covariates or no sex/pgm info.
  if(is.null(covar) || (is.null(sexpgm$sex) && is.null(sexpgm$pgm))) {
    if(!is.null(covar)) attr(covar, "n.dropped") <- 0
    return(covar)
  }

  covar <- as.matrix(covar)
  sex <- sexpgm$sex
  pgm <- sexpgm$pgm

  # Collapse pgm to NULL when only one direction is present.
  if(!is.null(pgm) && length(unique(pgm)) == 1) pgm <- NULL

  # Collapse sex to NULL when only one sex is present, remembering which.
  allfemale <- is.null(sex) || all(sex == 0)
  if(!is.null(sex) && length(unique(sex)) == 1) sex <- NULL

  # Build the sex/direction design matrix X that covariates are tested
  # against for confounding.
  if(is.null(pgm)) {                      # all of one direction
    X <- if(is.null(sex)) cbind(rep(1, nrow(covar)))
         else cbind(sex, 1 - sex)
  }
  else if(is.null(sex)) {                 # both directions, one sex
    X <- if(allfemale) cbind(1 - pgm, pgm)
         else cbind(rep(1, nrow(covar)))
  }
  else {                                  # both directions, both sexes
    X <- cbind(femf = as.numeric(pgm == 0 & sex == 0),
               femr = as.numeric(pgm == 1 & sex == 0),
               mal = sex)
  }

  # Keep a covariate only if adding it to X increases the rank,
  # i.e. it is not a linear combination of the sex/pgm columns.
  base.rank <- ncol(X)
  keep <- vapply(seq_len(ncol(covar)),
                 function(j) qr(cbind(X, covar[, j]))$rank > base.rank,
                 logical(1))

  covar <- if(any(keep)) covar[, keep, drop = FALSE] else numeric(0)
  attr(covar, "n.dropped") <- sum(!keep)
  covar
}
######################################################################
# dropXcol: for use with scantwo() for the X chromosome:
# figure out what columns to drop...both for the full model
# and for the additive model.
######################################################################
# dropXcol: for scantwo() on the X chromosome, return a 0/1 vector
# flagging which model-matrix columns to drop, based on how many
# genotype classes the "full" X-chromosome expansion yields.
#
# type        cross type, "f2" or "bc"
# sexpgm      list(sex, pgm) as produced by getsex()
# cross.attr  cross attributes (passed through to getgenonames)
dropXcol <-
function(type=c("f2","bc"), sexpgm, cross.attr)
{
  type <- match.arg(type)

  # The number of genotype classes under the "full" expansion
  # determines the hard-coded drop pattern.
  gn <- getgenonames(type, "X", "full", sexpgm, cross.attr)
  ng <- length(gn)

  if(ng == 2) {
    return(rep(0, 4))                       # 2 classes: keep all 4 columns
  }
  if(ng == 4) {
    # 16-column layout: drop columns 6 and 9..15
    return(c(rep(0, 5), 1, 0, 0, rep(1, 7), 0))
  }
  if(ng == 6) {
    # 36-column layout: start from 11 keeps + 25 drops, then adjust
    pattern <- c(rep(0, 11), rep(1, 25))
    pattern[c(8, 10)] <- 1
    pattern[11 + c(1, 13, 25)] <- 0
    return(pattern)
  }

  rep(0, ng^2)                              # default: keep everything
}
# end of xchr.R
| /R/xchr.R | no_license | amanicha/qtl | R | false | false | 28,801 | r | #####################################################################
#
# xchr.R
#
# copyright (c) 2004-2010, Karl W Broman
# last modified Nov, 2010
# first written Apr, 2004
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License,
# version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose. See the GNU
# General Public License, version 3, for more details.
#
# A copy of the GNU General Public License, version 3, is available
# at http://www.r-project.org/Licenses/GPL-3
#
# Part of the R/qtl package
# Contains: Utilities for dealing with the X chromosome.
# getsex, getgenonames, reviseXdata, scanoneXnull
# revisecovar, dropXcol
# [See also fixXgeno.bc & fixXgeno.f2 in read.cross.R]
#
######################################################################
# get sex and pgm columns from phenotype data
# Extract the 'sex' and 'pgm' phenotype columns from a cross object and
# coerce them to numeric codes: sex 0=female/1=male, pgm 0/1 (direction of
# the cross). Returns list(sex=..., pgm=...); an element is NULL when the
# column is absent or cannot be interpreted (a warning is issued then).
# Missing values are imputed with a warning describing the assumption.
getsex <-
function(cross)
{
  phe.names <- names(cross$pheno)

  # Case-insensitive match of the literal column names "sex" and "pgm".
  sex.column <- grep("^[Ss][Ee][Xx]$", phe.names)
  pgm.column <- grep("^[Pp][Gg][Mm]$", phe.names)

  if(length(sex.column)==0) { # no sex included
    sex <- NULL
  }
  else {
    if(length(sex.column)>1)
      warning("'sex' included multiple times. Using the first one.")
    temp <- cross$pheno[,sex.column[1]]
    if(is.numeric(temp)) {
      if(any(!is.na(temp) & temp != 0 & temp != 1)) {
        warning("Sex column should be coded as 0=female 1=male; sex ignored.")
        sex <- NULL
      }
      else sex <- temp
    }
    else {
      if(!is.factor(temp)) temp <- as.factor(temp)
      if(length(levels(temp)) == 1) {
        # single level: interpret as all-female or all-male if recognizable
        if(levels(temp) == "F" || levels(temp)=="f" ||
           toupper(levels(temp)) == "FEMALE") sex <- rep(0,nind(cross))
        else if(levels(temp) == "M" || levels(temp)=="m" ||
                toupper(levels(temp)) == "MALE") sex <- rep(1,nind(cross))
        else {
          warning("Sex column should be coded as 0=female 1=male; sex ignored.")
          sex <- NULL  # BUGFIX: previously left 'sex' unbound, causing
                       # "object 'sex' not found" further down
        }
      }
      else if(length(levels(temp)) > 2) {
        warning("Sex column should be coded as a two-level factor; sex ignored.")
        sex <- NULL
      }
      else { # is a factor with two levels
        lev <- levels(temp)
        if(length(grep("^[Ff]",lev))>0 &&
           length(males <- grep("^[Mm]",lev))>0) {
          temp <- as.character(temp)
          sex <- rep(0,length(temp))
          sex[is.na(temp)] <- NA
          sex[!is.na(temp) & temp==lev[males]] <- 1
        }
        else {
          warning("Don't understand levels in sex column; sex ignored.")
          sex <- NULL  # BUGFIX: previously left 'sex' unbound, causing
                       # "object 'sex' not found" further down
        }
      }
    }
  }

  if(length(pgm.column)==0) { # no pgm included
    pgm <- NULL
  }
  else {
    if(length(pgm.column)>1)
      warning("'pgm' included multiple times. Using the first one.")
    temp <- cross$pheno[,pgm.column[1]]
    # NOTE(review): for a factor this maps the level index to 0/1; for a
    # character column as.numeric() would parse the strings instead --
    # confirm pgm is always read as numeric or factor.
    if(!is.numeric(temp))
      temp <- as.numeric(temp)-1
    if(any(!is.na(temp) & temp != 0 & temp != 1)) {
      warning("pgm column should be coded as 0/1; pgm ignored.")
      pgm <- NULL
    }
    else pgm <- temp
  }

  # Impute missing sex values, warning about the assumption made.
  if(!is.null(sex) && any(is.na(sex))) {
    if(all(sex[!is.na(sex)]==1)) {
      warning(sum(is.na(sex)), " individuals with missing sex; assuming they're male like the others")
      sex[is.na(sex)] <- 1
    }
    else if(all(sex[!is.na(sex)]==0)) {
      warning(sum(is.na(sex)), " individuals with missing sex; assuming they're female like the others")
      sex[is.na(sex)] <- 0
    }
    else {
      warning(sum(is.na(sex)), " individuals with missing sex; assuming they're female")
      sex[is.na(sex)] <- 0
    }
  }

  # Impute missing pgm values similarly.
  if(!is.null(pgm) && any(is.na(pgm))) {
    if(all(pgm[!is.na(pgm)]==1)) {
      warning(sum(is.na(pgm)), " individuals with missing pgm; assuming pgm==1 like the others")
      pgm[is.na(pgm)] <- 1
    }
    else if(all(pgm[!is.na(pgm)]==0)) {
      warning(sum(is.na(pgm)), " individuals with missing pgm; assuming pgm==0 like the others")
      pgm[is.na(pgm)] <- 0
    }
    else {
      warning(sum(is.na(pgm)), " individuals with missing pgm; assuming pgm==0")
      pgm[is.na(pgm)] <- 0
    }
  }

  list(sex=sex,pgm=pgm)
}
# get names of genotypes
# used in discan, effectplot, plot.pxg, scanone, scantwo, vbscan, reviseXdata
# cross.attr gives the cross attributes
# getgenonames: build human-readable genotype labels (e.g. "AA","AB","BB",
# "AY","BY") for a given cross type and chromosome type.
#
# type       cross type; "special" returns cross.attr$genotypes verbatim
# chrtype    "A" (autosome) or "X" (X chromosome)
# expandX    how far to expand the X-chromosome genotype classes when both
#            sexes and/or both cross directions are present (see the
#            tables in the comments below)
# sexpgm     list(sex=0/1, pgm=0/1) as produced by getsex(); only used
#            when chrtype=="X"
# cross.attr cross attributes; may carry an "alleles" element with the
#            allele letters (defaults to LETTERS otherwise)
getgenonames <-
function(type=c("f2","bc","riself","risib","4way","dh","special"),
chrtype=c("A","X"), expandX=c("simple","standard","full"),
sexpgm, cross.attr)
{
type <- match.arg(type)
chrtype <- match.arg(chrtype)
expandX <- match.arg(expandX)
# sex/pgm are only relevant on the X chromosome
if(chrtype=="X") {
sex <- sexpgm$sex
pgm <- sexpgm$pgm
}
# "special" crosses carry their genotype labels directly in the attributes
if(type=="special") return(cross.attr$genotypes)
# fall back to generic allele letters when none are recorded
if(missing(cross.attr) || !("alleles" %in% names(cross.attr))) {
if(type == "4way") alleles <- LETTERS[1:4]
else alleles <- LETTERS[1:2]
}
else
alleles <- cross.attr$alleles
# tempgn = c(AA, AB, BB, AY, BY) built from the first two allele letters
tempgn <- c(paste(rep(alleles[1],2),collapse=""),
paste(alleles,collapse=""),
paste(rep(alleles[2],2),collapse=""),
paste(alleles[1],"Y",sep=""),
paste(alleles[2],"Y",sep=""))
# get rid of missing sex and pgm values, if there are any
if(chrtype=="X") {
if(length(sex)>0) sex <- sex[!is.na(sex)]
if(length(pgm)>0) pgm <- pgm[!is.na(pgm)]
}
if(type=="riself" || type=="risib" || type=="dh")
gen.names <- tempgn[c(1,3)] # homozygotes only
else if(type == "4way") {
if(chrtype=="A")
gen.names <- c(paste(alleles[1],alleles[3],sep=""),
paste(alleles[2],alleles[3],sep=""),
paste(alleles[1],alleles[4],sep=""),
paste(alleles[2],alleles[4],sep=""))
else
gen.names <- c(paste(alleles[1],alleles[3],sep=""),
paste(alleles[2],alleles[3],sep=""),
paste(alleles[1],"Y",sep=""),
paste(alleles[2],"Y",sep=""))
}
else if(type == "bc") {
if(chrtype=="A") # autosome
gen.names <- tempgn[1:2] # AA AB
else { # X chromosome
# simple standard full
# -both sexes A-/AB/BY AA/AB/AY/BY same as std
# -all females AA/AB same same
# -all males AY/BY same same
if(length(sex)==0 || all(sex==0)) # all females
gen.names <- tempgn[1:2] # AA AB
else if(all(sex==1)) # all males
gen.names <- tempgn[4:5] # AY BY
else { # some of each
if(expandX == "simple")
gen.names <- c(paste(alleles[1], "-", sep=""),
tempgn[c(2,5)]) # A-, AB, BY
else gen.names <- tempgn[c(1,2,4,5)] # AA,AB,AY,BY
}
}
}
else { # intercross
if(chrtype == "A") # autosomal
gen.names <- tempgn[1:3]
else { # X chromosome
# both crosses simple standard full
# -both sexes A-/AB/B- AA/AB/BB/AY/BY AA/AB1/AB2/BB/AY/BY
# -all females AA/AB/BB same as simple AA/AB1/AB2/BB
# -all males AY/BY same same
# forw cross
# -both sexes A-/AB/BY AA/AB/AY/BY same as std
# -all females AA/AB same same
# -all males AY/BY same same
# backw cross
# -both sexes B-/AB/AY BB/AB/AY/BY same as std
# -all females BB/AB same same
# -all males AY/BY same same
if(length(sex)==0 || all(sex==0)) { # all females
if(length(pgm)==0 || all(pgm==0)) # all forw dir
gen.names <- tempgn[1:2] # AA AB
else if(all(pgm==1)) # all backw dir
gen.names <- tempgn[3:2] # BB AB
else { # some of each direction
if(expandX=="full")
gen.names <- c(tempgn[1],
paste(tempgn[2],c("f","r"), sep=""),
tempgn[3])
else gen.names <- tempgn[1:3]
}
}
else if(all(sex==1)) # all males
gen.names <- tempgn[4:5]
else { # some of each sex
if(length(pgm)==0 || all(pgm==0)) { # all forw
if(expandX=="simple")
gen.names <- c(paste(alleles[1],"-", sep=""),
tempgn[c(2,5)])
else gen.names <- tempgn[c(1,2,4,5)]
}
else if (all(pgm==1)) { # all backw
if(expandX=="simple")
gen.names <- c(paste(alleles[2], "-",sep=""),
tempgn[c(2,4)])
else gen.names <- tempgn[c(3,2,4,5)]
}
else { # some of each dir
if(expandX=="simple")
gen.names <- c(paste(alleles[1],"-",sep=""),
tempgn[2],
paste(alleles[2],"-",sep=""))
else if(expandX=="standard")
gen.names <- tempgn
else # "full": split AB by direction into ABf/ABr
gen.names <- c(tempgn[1],
paste(tempgn[2],c("f","r"),sep=""),
tempgn[3:5])
}
}
}
}
gen.names
}
# revise genotype data, probabilities or imputations for the X chromosome
reviseXdata <-
function(type=c("f2","bc"), expandX=c("simple","standard","full"),
sexpgm, geno, prob, draws, pairprob, cross.attr, force=FALSE)
{
type <- match.arg(type)
expandX <- match.arg(expandX)
sex <- sexpgm$sex
pgm <- sexpgm$pgm
notmissing <- (!missing(geno)) + (!missing(prob)) + (!missing(draws)) +
(!missing(pairprob))
if(notmissing == 0)
stop("Provide one of geno, prob, draws, pairprob.")
if(notmissing > 1)
stop("Provide just one of geno, prob, draws, pairprob.")
# get genonames
genonames <- getgenonames(type, "X", expandX, sexpgm, cross.attr)
if(type == "bc") { # backcross
if(length(sex)==0 || ((all(sex==0) || all(sex==1)) && !force)) { # all one sex
# no changes necessary
if(!missing(geno)) return(geno)
else if(!missing(prob)) {
dimnames(prob)[[3]] <- genonames
return(prob)
}
else if(!missing(draws))
return(draws)
else # pairprob
return(pairprob)
}
else { # both sexes
if(!missing(geno)) {
gmale <- geno[sex==1,]
if(expandX=="simple")
gmale[!is.na(gmale) & gmale==2] <- 3
else {
gmale[!is.na(gmale) & gmale==1] <- 3
gmale[!is.na(gmale) & gmale==2] <- 4
}
geno[sex==1,] <- gmale
return(geno)
}
else if(!missing(draws)) {
gmale <- draws[sex==1,,]
if(expandX=="simple")
gmale[gmale==2] <- 3
else {
gmale[gmale==1] <- 3
gmale[gmale==2] <- 4
}
draws[sex==1,,] <- gmale
return(draws)
}
else if(!missing(prob)) {
dimprob <- dim(prob)
dimprob[3] <- length(genonames)
newprob <- array(0,dim=dimprob)
dimnames(newprob) <- c(dimnames(prob)[1:2],list(genonames))
newprob[sex==0,,1:2] <- prob[sex==0,,1:2]
if(expandX=="simple") {
newprob[sex==1,,1] <- prob[sex==1,,1]
newprob[sex==1,,3] <- prob[sex==1,,2]
}
else {
newprob[sex==1,,3] <- prob[sex==1,,1]
newprob[sex==1,,4] <- prob[sex==1,,2]
}
return(newprob)
}
else { # pairprob
dimpairprob <- dim(pairprob)
dimpairprob[3] <- dimpairprob[4] <- length(genonames)
newpairprob <- array(0,dim=dimpairprob)
newpairprob[sex==0,,1:2,1:2] <- pairprob[sex==0,,,]
if(expandX=="simple") {
newpairprob[sex==1,,1,1] <- pairprob[sex==1,,1,1]
newpairprob[sex==1,,1,3] <- pairprob[sex==1,,1,2]
newpairprob[sex==1,,3,1] <- pairprob[sex==1,,2,1]
newpairprob[sex==1,,3,3] <- pairprob[sex==1,,2,2]
}
else {
newpairprob[sex==1,,3,3] <- pairprob[sex==1,,1,1]
newpairprob[sex==1,,3,4] <- pairprob[sex==1,,1,2]
newpairprob[sex==1,,4,3] <- pairprob[sex==1,,2,1]
newpairprob[sex==1,,4,4] <- pairprob[sex==1,,2,2]
}
return(newpairprob)
}
} # end of "both sexes" / backcross
} # end of backcross
else { # intercross
if(length(sex)==0 || all(sex==0)) { # all females
if(length(pgm)==0 || ((all(pgm==0) || all(pgm==1)) && !force)) { # one dir, females
if(!missing(geno)) return(geno)
else if(!missing(draws)) return(draws)
else if(!missing(pairprob)) return(pairprob)
else {
dimnames(prob)[[3]] <- genonames
return(prob)
}
}
else { # both dir, females
if(!missing(geno)) {
gback <- geno[pgm==1,]
if(expandX!="full") {
gback[!is.na(gback) & gback==1] <- 3
geno[pgm==1,] <- gback
}
else {
gback[!is.na(gback) & gback==1] <- 4
gback[!is.na(gback) & gback==2] <- 3
geno[pgm==1,] <- gback
}
return(geno)
}
else if(!missing(draws)) {
gback <- draws[pgm==1,,]
if(expandX!="full") {
gback[!is.na(gback) & gback==1] <- 3
}
else {
gback[!is.na(gback) & gback==1] <- 4
gback[!is.na(gback) & gback==2] <- 3
}
draws[pgm==1,,] <- gback
return(draws)
}
else if(!missing(prob)) {
dimprob <- dim(prob)
dimprob[3] <- length(genonames)
newprob <- array(0,dim=dimprob)
dimnames(newprob) <- c(dimnames(prob)[1:2],list(genonames))
newprob[pgm==0,,1:2] <- prob[pgm==0,,1:2]
if(expandX!="full") { # simple/standard
newprob[pgm==1,,3] <- prob[pgm==1,,1]
newprob[pgm==1,,2] <- prob[pgm==1,,2]
}
else {
newprob[pgm==1,,4] <- prob[pgm==1,,1]
newprob[pgm==1,,3] <- prob[pgm==1,,2]
}
return(newprob)
}
else { # pairprob
dimpairprob <- dim(pairprob)
dimpairprob[3] <- dimpairprob[4] <- length(genonames)
newpairprob <- array(0,dim=dimpairprob)
newpairprob[pgm==0,,1:2,1:2] <- pairprob[pgm==0,,,]
if(expandX!="full") { # simple/standard
newpairprob[pgm==1,,3,3] <- pairprob[pgm==1,,1,1]
newpairprob[pgm==1,,3,2] <- pairprob[pgm==1,,1,2]
newpairprob[pgm==1,,2,3] <- pairprob[pgm==1,,2,1]
newpairprob[pgm==1,,2,2] <- pairprob[pgm==1,,2,2]
}
else {
newpairprob[pgm==1,,4,4] <- pairprob[pgm==1,,1,1]
newpairprob[pgm==1,,4,3] <- pairprob[pgm==1,,1,2]
newpairprob[pgm==1,,3,4] <- pairprob[pgm==1,,2,1]
newpairprob[pgm==1,,3,3] <- pairprob[pgm==1,,2,2]
}
return(newpairprob)
}
}
}
else if(all(sex==1) && !force) { # all males
if(!missing(geno)) return(geno)
else if(!missing(draws)) return(draws)
else if(!missing(pairprob)) return(pairprob)
else {
dimnames(prob)[[3]] <- genonames
return(prob)
}
}
else { # both sexes
if(length(pgm)==0 || all(pgm==0)) { # both sexes, forw dir
if(!missing(geno)) {
gmale <- geno[sex==1,]
if(expandX=="simple")
gmale[!is.na(gmale) & gmale==2] <- 3
else {
gmale[!is.na(gmale) & gmale==1] <- 3
gmale[!is.na(gmale) & gmale==2] <- 4
}
geno[sex==1,] <- gmale
return(geno)
}
else if(!missing(draws)) {
gmale <- draws[sex==1,,]
if(expandX=="simple")
gmale[gmale==2] <- 3
else {
gmale[gmale==1] <- 3
gmale[gmale==2] <- 4
}
draws[sex==1,,] <- gmale
return(draws)
}
else if(!missing(prob)) {
dimprob <- dim(prob)
dimprob[3] <- length(genonames)
newprob <- array(0,dim=dimprob)
dimnames(newprob) <- c(dimnames(prob)[1:2],list(genonames))
newprob[sex==0,,1:2] <- prob[sex==0,,1:2]
if(expandX=="simple") {
newprob[sex==1,,1] <- prob[sex==1,,1]
newprob[sex==1,,3] <- prob[sex==1,,2]
}
else {
newprob[sex==1,,3] <- prob[sex==1,,1]
newprob[sex==1,,4] <- prob[sex==1,,2]
}
return(newprob)
}
else { # pairprob
dimpairprob <- dim(pairprob)
dimpairprob[3] <- dimpairprob[4] <- length(genonames)
newpairprob <- array(0,dim=dimpairprob)
newpairprob[sex==0,,1:2,1:2] <- pairprob[sex==0,,,]
if(expandX=="simple") {
newpairprob[sex==1,,1,1] <- pairprob[sex==1,,1,1]
newpairprob[sex==1,,1,3] <- pairprob[sex==1,,1,2]
newpairprob[sex==1,,3,1] <- pairprob[sex==1,,2,1]
newpairprob[sex==1,,3,3] <- pairprob[sex==1,,2,2]
}
else {
newpairprob[sex==1,,3,3] <- pairprob[sex==1,,1,1]
newpairprob[sex==1,,3,4] <- pairprob[sex==1,,1,2]
newpairprob[sex==1,,4,3] <- pairprob[sex==1,,2,1]
newpairprob[sex==1,,4,4] <- pairprob[sex==1,,2,2]
}
return(newpairprob)
}
} # both sexes, forw dir
if(all(pgm==1) && !force) { # both sexes, backw dir
if(!missing(geno)) {
gmale <- geno[sex==1,]
if(expandX!="full") {
gmale[!is.na(gmale) & gmale==1] <- 3
gmale[!is.na(gmale) & gmale==2] <- 1
}
else {
gmale[!is.na(gmale) & gmale==1] <- 3
gmale[!is.na(gmale) & gmale==2] <- 4
}
geno[sex==1,] <- gmale
return(geno)
}
else if(!missing(draws)) {
gmale <- draws[sex==1,,]
if(expandX!="full") {
gmale[gmale==1] <- 3
gmale[gmale==2] <- 1
}
else {
gmale[gmale==1] <- 3
gmale[gmale==2] <- 4
}
draws[sex==1,,] <- gmale
return(draws)
}
else if(!missing(prob)) {
dimprob <- dim(prob)
dimprob[3] <- length(genonames)
newprob <- array(0,dim=dimprob)
dimnames(newprob) <- c(dimnames(prob)[1:2],list(genonames))
newprob[sex==0,,1:2] <- prob[sex==0,,1:2]
if(expandX=="simple") {
newprob[sex==1,,3] <- prob[sex==1,,1]
newprob[sex==1,,1] <- prob[sex==1,,2]
}
else {
newprob[sex==1,,3] <- prob[sex==1,,1]
newprob[sex==1,,4] <- prob[sex==1,,2]
}
return(newprob)
}
else { # pairprob
dimpairprob <- dim(pairprob)
dimpairprob[3] <- dimpairprob[4] <- length(genonames)
newpairprob <- array(0,dim=dimpairprob)
newpairprob[sex==0,,1:2,1:2] <- pairprob[sex==0,,,]
if(expandX=="simple") {
newpairprob[sex==1,,3,3] <- pairprob[sex==1,,1,1]
newpairprob[sex==1,,1,3] <- pairprob[sex==1,,2,1]
newpairprob[sex==1,,3,1] <- pairprob[sex==1,,1,2]
newpairprob[sex==1,,1,1] <- pairprob[sex==1,,2,2]
}
else {
newpairprob[sex==1,,3,3] <- pairprob[sex==1,,1,1]
newpairprob[sex==1,,3,4] <- pairprob[sex==1,,1,2]
newpairprob[sex==1,,4,3] <- pairprob[sex==1,,2,1]
newpairprob[sex==1,,4,4] <- pairprob[sex==1,,2,2]
}
return(newpairprob)
}
} # both sexes, backw dir
else { # both dir, both sexes
if(!missing(geno)) {
gmale <- geno[sex==1,]
gfemaler <- geno[sex==0 & pgm==1,]
if(expandX=="simple") {
gmale[!is.na(gmale) & gmale==2] <- 3
gfemaler[!is.na(gfemaler) & gfemaler==1] <- 3
}
else if(expandX=="standard") {
gmale[!is.na(gmale) & gmale==1] <- 4
gmale[!is.na(gmale) & gmale==2] <- 5
gfemaler[!is.na(gfemaler) & gfemaler==1] <- 3
}
else {
gmale[!is.na(gmale) & gmale==1] <- 5
gmale[!is.na(gmale) & gmale==2] <- 6
gfemaler[!is.na(gfemaler) & gfemaler==1] <- 4
gfemaler[!is.na(gfemaler) & gfemaler==2] <- 3
}
geno[sex==1,] <- gmale
geno[sex==0 & pgm==1,] <- gfemaler
return(geno)
}
else if(!missing(draws)) {
gmale <- draws[sex==1,,]
gfemaler <- draws[sex==0 & pgm==1,,]
if(expandX=="simple") {
gmale[gmale==2] <- 3
gfemaler[gfemaler==1] <- 3
}
else if(expandX=="standard") {
gmale[gmale==1] <- 4
gmale[gmale==2] <- 5
gfemaler[gfemaler==1] <- 3
}
else {
gmale[gmale==1] <- 5
gmale[gmale==2] <- 6
gfemaler[gfemaler==1] <- 4
gfemaler[gfemaler==2] <- 3
}
draws[sex==1,,] <- gmale
draws[sex==0 & pgm==1,,] <- gfemaler
return(draws)
}
else if(!missing(prob)) {
dimprob <- dim(prob)
dimprob[3] <- length(genonames)
newprob <- array(0,dim=dimprob)
dimnames(newprob) <- c(dimnames(prob)[1:2],list(genonames))
newprob[sex==0 & pgm==0,,1:2] <- prob[sex==0 & pgm==0,,1:2]
if(expandX=="simple") {
newprob[sex==1,,1] <- prob[sex==1,,1]
newprob[sex==1,,3] <- prob[sex==1,,2]
newprob[sex==0 & pgm==1,,3] <- prob[sex==0 & pgm==1,,1]
newprob[sex==0 & pgm==1,,2] <- prob[sex==0 & pgm==1,,2]
}
else if(expandX=="standard") {
newprob[sex==1,,4] <- prob[sex==1,,1]
newprob[sex==1,,5] <- prob[sex==1,,2]
newprob[sex==0 & pgm==1,,3] <- prob[sex==0 & pgm==1,,1]
newprob[sex==0 & pgm==1,,2] <- prob[sex==0 & pgm==1,,2]
}
else {
newprob[sex==1,,5] <- prob[sex==1,,1]
newprob[sex==1,,6] <- prob[sex==1,,2]
newprob[sex==0 & pgm==1,,4] <- prob[sex==0 & pgm==1,,1]
newprob[sex==0 & pgm==1,,3] <- prob[sex==0 & pgm==1,,2]
}
return(newprob)
}
else { # pairprob
dimpairprob <- dim(pairprob)
dimpairprob[3] <- dimpairprob[4] <- length(genonames)
newpairprob <- array(0,dim=dimpairprob)
newpairprob[sex==0 & pgm==0,,1:2,1:2] <- pairprob[sex==0 & pgm==0,,,]
male <- (sex==1)
femaler <- (sex==0) & (pgm==1)
if(expandX=="simple") {
newpairprob[male,,1,1] <- pairprob[male,,1,1]
newpairprob[male,,1,3] <- pairprob[male,,1,2]
newpairprob[male,,3,1] <- pairprob[male,,2,1]
newpairprob[male,,3,3] <- pairprob[male,,2,2]
newpairprob[femaler,,3,3] <- pairprob[femaler,,1,1]
newpairprob[femaler,,3,2] <- pairprob[femaler,,1,2]
newpairprob[femaler,,2,3] <- pairprob[femaler,,2,1]
newpairprob[femaler,,2,2] <- pairprob[femaler,,2,2]
}
else if(expandX=="standard") {
newpairprob[male,,4,4] <- pairprob[male,,1,1]
newpairprob[male,,4,5] <- pairprob[male,,1,2]
newpairprob[male,,5,4] <- pairprob[male,,2,1]
newpairprob[male,,5,5] <- pairprob[male,,2,2]
newpairprob[femaler,,3,3] <- pairprob[femaler,,1,1]
newpairprob[femaler,,3,2] <- pairprob[femaler,,1,2]
newpairprob[femaler,,2,3] <- pairprob[femaler,,2,1]
newpairprob[femaler,,2,2] <- pairprob[femaler,,2,2]
}
else {
newpairprob[male,,5,5] <- pairprob[male,,1,1]
newpairprob[male,,5,6] <- pairprob[male,,1,2]
newpairprob[male,,6,5] <- pairprob[male,,2,1]
newpairprob[male,,6,6] <- pairprob[male,,2,2]
newpairprob[femaler,,4,4] <- pairprob[femaler,,1,1]
newpairprob[femaler,,4,3] <- pairprob[femaler,,1,2]
newpairprob[femaler,,3,4] <- pairprob[femaler,,2,1]
newpairprob[femaler,,3,3] <- pairprob[femaler,,2,2]
}
return(newpairprob)
}
}
}
} # end of intercross
}
######################################################################
# scanoneXnull
#
# figure out null hypothesis business for scanone on X chromosome
######################################################################
scanoneXnull <-
function(type, sexpgm)
{
sex <- sexpgm$sex
pgm <- sexpgm$pgm
if(type == "risib" || type=="riself" || type=="dh") type <- "bc"
### first figure out sex/pgm pattern
# sex
if(length(sex)==0 || all(sex==0)) { # all female
onesex <- allfemale <- TRUE
}
else if(all(sex==1)) { # all male
onesex <- TRUE
allfemale <- FALSE
}
else { # both sexes
onesex <- allfemale <- FALSE
}
# pgm
if(length(pgm)==0 || all(pgm==0) || all(pgm==1)) # one direction
onedir <- TRUE
else onedir <- FALSE
allmale <- onesex && !allfemale
bothsex <- !onesex
bothdir <- !onedir
### now figure out the null hypothesis and pull out appropriate
### covariates for the null
# backcross, one sex
# OR intercross, one dir and one sex
# OR intercross, both dir and all male
if((type=="bc" && onesex) ||
(type=="f2" && ((onedir && onesex) || (bothdir && allmale)))) {
adjustX <- FALSE
parX0 <- 1
sexpgmcovar <- sexpgmcovar.alt <- NULL
}
# backcross, both sexes
# OR intercross, one direction and both sexes
else if((type=="bc" && bothsex) ||
(type=="f2" && onedir && bothsex)) {
adjustX <- TRUE
parX0 <- 2
sexpgmcovar <- cbind(sex)
sexpgmcovar.alt <- sex+1
}
# intercross, both dir and all female
else if(type=="f2" && bothdir && allfemale) {
adjustX <- TRUE
parX0 <- 2
sexpgmcovar <- cbind(pgm)
sexpgmcovar.alt <- pgm+1
}
# intercross, both dir and both sexes
else {
adjustX <- TRUE
parX0 <- 3
sexpgmcovar <- cbind(sex,as.numeric(sex==0 & pgm==1))
sexpgmcovar.alt <- rep(3,length(sex))
sexpgmcovar.alt[sex==0 & pgm==0] <- 1
sexpgmcovar.alt[sex==0 & pgm==1] <- 2
}
list(adjustX=adjustX, parX0=parX0, sexpgmcovar=sexpgmcovar,
sexpgmcovar.alt=sexpgmcovar.alt)
}
######################################################################
# revisecovar
#
# Drop sex and pgm and their interxn as covariates for the X chr.
######################################################################
revisecovar <-
function(sexpgm, covar)
{
if(is.null(covar) || (is.null(sexpgm$sex) && is.null(sexpgm$pgm))) {
if(!is.null(covar)) attr(covar, "n.dropped") <- 0
return(covar)
}
covar <- as.matrix(covar)
sex <- sexpgm$sex
pgm <- sexpgm$pgm
if(!is.null(pgm) && length(unique(pgm))==1) pgm <- NULL
allfemale <- FALSE
if(is.null(sex)) allfemale <- TRUE
else {
if(all(sex==0)) {
allfemale <- TRUE
sex <- NULL
}
else if(all(sex==1)) {
allfemale <- FALSE
sex <- NULL
}
}
if(!is.null(pgm)) { # some of each direction
if(!is.null(sex)) { # some of each sex
femf <- as.numeric(pgm==0 & sex==0)
femr <- as.numeric(pgm==1 & sex==0)
mal <- sex
X <- cbind(femf, femr, mal)
}
else { # all of one sex
if(allfemale)
X <- cbind(1-pgm, pgm)
else
X <- cbind(rep(1, nrow(covar)))
}
}
else { # all of one direction
if(!is.null(sex)) # some of each sex
X <- cbind(sex, 1-sex)
else X <- cbind(rep(1, nrow(covar)))
}
nc <- ncol(X)
keep <- rep(TRUE,ncol(covar))
for(i in 1:ncol(covar)) {
if(qr(cbind(X,covar[,i]))$rank <= nc)
keep[i] <- FALSE
}
if(!any(keep))
covar <- numeric(0)
else
covar <- covar[,keep,drop=FALSE]
attr(covar, "n.dropped") <- sum(!keep)
covar
}
######################################################################
# dropXcol: for use with scantwo() for the X chromosome:
# figure out what columns to drop...both for the full model
# and for the additive model.
######################################################################
dropXcol <-
function(type=c("f2","bc"), sexpgm, cross.attr)
{
type <- match.arg(type)
gn <- getgenonames(type, "X", "full", sexpgm, cross.attr)
if(length(gn)==2) return(rep(0,4))
if(length(gn)==4) return( c(0,0,0,0,0,1,0, 0,1,1,1,1,1,1,1,0) )
if(length(gn)==6) {
todrop <- c(rep(0,11), rep(1,25))
todrop[c(8,10)] <- 1
todrop[11+c(1,13,25)] <- 0
return(todrop)
}
return(rep(0,length(gn)^2))
}
# end of xchr.R
|
#' Shipments
#'
#' A time series of 15 shipment observations, indexed 1 through 15.
#'
#' @format Time series data
#' @source Makridakis, Wheelwright and Hyndman (1998) \emph{Forecasting:
#' methods and applications}, John Wiley & Sons: New York. Exercise 3.1
#' @keywords datasets
#' @examples
#' plot(shipex)
#' @export
shipex <- stats::ts(c(42, 69, 100, 115, 132, 141, 154, 171, 180, 204,
                      228, 247, 291, 337, 391),
                    # spell out argument names (was partial-matched s=1, f=1)
                    start = 1, frequency = 1)
| /timeseries/R/shipex.R | no_license | somnath1077/ML | R | false | false | 390 | r |
#' Shipments
#'
#' Shipments
#'
#'
#' @format Time series data
#' @source Makridakis, Wheelwright and Hyndman (1998) \emph{Forecasting:
#' methods and applications}, John Wiley & Sons: New York. Exercise 3.1
#' @keywords datasets
#' @examples
#' plot(shipex)
#' @export
shipex <- stats::ts(c(42, 69, 100, 115, 132, 141, 154, 171, 180, 204,
228, 247, 291, 337, 391),s=1,f=1)
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/credentials.R
\name{saveCredentials}
\alias{credentialsExist}
\alias{removeCredentials}
\alias{saveCredentials}
\title{manage database credentials}
\usage{
saveCredentials(user, pwd, host, db, path)
}
\value{
\code{credentialsExist},\code{removeCredentials}, and \code{saveCredentials} return \code{TRUE} if successful
}
\description{
manage database credentials for easier database access
}
\details{
\code{saveCredentials} saves the specified credentials to a default location unless you specify a custom path. This information can be used by \code{\link{dbcon}} and \code{\link{dbq}} to connect to and query the database.
Currently, you can store credentials for different hosts and for different users within a host.
\code{removeCredentials} removes the credentials file.
\code{credentialsExist} checks whether credentials are saved for a specified host.
}
\section{Warning}{
Credentials are stored in plain text in a hidden file in your home directory. Passwords are saved obfuscated. The obfuscation is settings dependent so it is not possible to use the same credentials file on another machine.
}
\examples{
saveCredentials(user = 'user_a', pwd = 'pwd_a', host = 'localhost')
saveCredentials('user_b', 'pass_b', host = 'localhost')
removeCredentials()
}
\seealso{
\code{\link{dbcon}},\code{\link{dbq}}
}
| /man/saveCredentials.Rd | no_license | alrutten/sdb | R | false | false | 1,381 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/credentials.R
\name{saveCredentials}
\alias{credentialsExist}
\alias{removeCredentials}
\alias{saveCredentials}
\title{manage database credentials}
\usage{
saveCredentials(user, pwd, host, db, path)
}
\value{
\code{credentialsExist},\code{removeCredentials}, and \code{saveCredentials} return \code{TRUE} if successful
}
\description{
manage database credentials for easier database access
}
\details{
saveCredentials saves the specified credentials to a default location unless you specify a custom path. This information can be used by \code{\link{dbcon}} and \code{\link{dbq}} to connect to and query the database.
Currently, you can store credentials for different hosts and for different users within a host.
\code{removeCredentials} removes the credentials file.
credentialsExist checks if credentials are saved for a specified host.
}
\section{Warning}{
Credentials are stored in plain text in a hidden file in your home directory. Passwords are saved obfuscated. The obfuscation is settings dependent so it is not possible to use the same credentials file on another machine.
}
\examples{
saveCredentials(user = 'user_a', pwd = 'pwd_a', host = 'localhost')
saveCredentials('user_b', 'pass_b', host = 'localhost')
removeCredentials()
}
\seealso{
\code{\link{dbcon}},\code{\link{dbq}}
}
|
library(dplyr)
# For each state (ESTADO), build per-registration-day summaries of
# confirmed COVID cases and deaths.
# NOTE(review): this script mutates pre-existing global lists
# (datos_resumidos_edo, reg_diarios_positivos_edo, reg_diarios_decesos_edo,
# decesos_registrados_edo) that must be created elsewhere before running.
for (k in unique(datos_resumidos$ESTADO)){
# subset the national table to this state
datos_resumidos_edo[[k]]<-filter(datos_resumidos,ESTADO==k)
# "9999-99-99" marks a missing death date, i.e. the case is not deceased
x<-which(datos_resumidos_edo[[k]]$FECHA_DEF=="9999-99-99")
datos_resumidos_edo[[k]]$Def<-0
# NOTE(review): if no "9999-99-99" rows exist, x is empty and [-x]
# selects NOTHING, leaving Def all 0 instead of all 1 -- verify.
datos_resumidos_edo[[k]]$Def[-x]<-1
# RESULTADO==1 -> positive test; Def==1 -> deceased
reg_diarios_positivos_edo[[k]]<-filter(datos_resumidos_edo[[k]],RESULTADO==1)
reg_diarios_decesos_edo[[k]]<-filter(reg_diarios_positivos_edo[[k]],Def==1)
# count deaths per (registration day, death date) pair
por_fechas<-group_by(reg_diarios_decesos_edo[[k]],Dia_registro,FECHA_DEF)
decesos_registrados_edo[[k]] <- summarise(por_fechas,n())
names(decesos_registrados_edo[[k]])[3]<-"Decesos_contados"
# desfase = reporting lag: registration day minus death day
# (death day measured from the 2020-04-11 reference date)
decesos_registrados_edo[[k]]$desfase<-
decesos_registrados_edo[[k]]$Dia_registro-as.integer(as.Date(decesos_registrados_edo[[k]]$FECHA_DEF)-
as.Date("2020-04-11"))
# keep only deaths after 2020-03-17 and index them from that date
decesos_registrados_edo[[k]]<-filter(decesos_registrados_edo[[k]],as.Date(FECHA_DEF)>"2020-03-17")
decesos_registrados_edo[[k]]$Dia_Def<-
as.numeric(as.Date(decesos_registrados_edo[[k]]$FECHA_DEF)-as.Date("2020-03-17"))
}
# duno<-21
# diferencias<-numeric()
#
# for (k in unique(decesos_registrados$Dia_Def)){
# tempo <- filter(decesos_registrados,Dia_Def==k)$Decesos_contados
# if (length(tempo)>duno){
# diferencias<-append(diferencias,(tempo[duno+1]-tempo[duno])/tempo[duno])
# }
#
# }
# print(summary(diferencias))
#
# decesos_estados<-list()
# for (k in unique(datos_resumidos$ESTADO)){
# decesos_estados[[k]]<-filter(reg_diarios_decesos,ESTADO==k)
#
# }
#rm(x,tempo,duno) | /hacer_registros_por_dia_estados.R | permissive | plumeriopipichas/Covid_MX | R | false | false | 1,645 | r | library(dplyr)
# For each state, split the national line list, flag deaths, and build a
# per-day table of registered deaths together with their reporting lag.
# NOTE(review): assumes `datos_resumidos` and the result lists
# (`datos_resumidos_edo`, `reg_diarios_positivos_edo`,
# `reg_diarios_decesos_edo`, `decesos_registrados_edo`) already exist in the
# calling environment -- confirm they are initialised upstream.
for (k in unique(datos_resumidos$ESTADO)) {
  datos_resumidos_edo[[k]] <- filter(datos_resumidos, ESTADO == k)
  # FECHA_DEF == "9999-99-99" is the sentinel for "no death recorded".
  # BUG FIX: the original did `Def[-which(...)] <- 1`; when which() returns
  # integer(0), `Def[-integer(0)]` selects *nothing*, so every record would
  # wrongly keep Def == 0. A logical mask has no such edge case.
  sin_defuncion <- datos_resumidos_edo[[k]]$FECHA_DEF == "9999-99-99"
  datos_resumidos_edo[[k]]$Def <- ifelse(sin_defuncion, 0, 1)
  reg_diarios_positivos_edo[[k]] <- filter(datos_resumidos_edo[[k]], RESULTADO == 1)
  reg_diarios_decesos_edo[[k]] <- filter(reg_diarios_positivos_edo[[k]], Def == 1)
  # Count deaths per (registration day, death date) pair.
  por_fechas <- group_by(reg_diarios_decesos_edo[[k]], Dia_registro, FECHA_DEF)
  decesos_registrados_edo[[k]] <- summarise(por_fechas, n())
  names(decesos_registrados_edo[[k]])[3] <- "Decesos_contados"
  # Reporting lag: registration day minus the death date, the latter
  # converted to days since the 2020-04-11 reference date.
  decesos_registrados_edo[[k]]$desfase <-
    decesos_registrados_edo[[k]]$Dia_registro -
    as.integer(as.Date(decesos_registrados_edo[[k]]$FECHA_DEF) - as.Date("2020-04-11"))
  decesos_registrados_edo[[k]] <- filter(decesos_registrados_edo[[k]], as.Date(FECHA_DEF) > "2020-03-17")
  # Death date expressed as days since 2020-03-17.
  decesos_registrados_edo[[k]]$Dia_Def <-
    as.numeric(as.Date(decesos_registrados_edo[[k]]$FECHA_DEF) - as.Date("2020-03-17"))
}
# duno<-21
# diferencias<-numeric()
#
# for (k in unique(decesos_registrados$Dia_Def)){
# tempo <- filter(decesos_registrados,Dia_Def==k)$Decesos_contados
# if (length(tempo)>duno){
# diferencias<-append(diferencias,(tempo[duno+1]-tempo[duno])/tempo[duno])
# }
#
# }
# print(summary(diferencias))
#
# decesos_estados<-list()
# for (k in unique(datos_resumidos$ESTADO)){
# decesos_estados[[k]]<-filter(reg_diarios_decesos,ESTADO==k)
#
# }
#rm(x,tempo,duno) |
#############################################################
# Example Query : Gridded fishing effort
# Description:
# This query demonstrates how to extract valid positions and
# calculate fishing hours.
##############################################################
##############################################################
# Sub-query 1: Identify only good segments for vessel positions
# Sub-query 2: Identify "fishing" positions using neural net score
# Sub-query 3: Calculate fishing hours using neural net score/hours
# Aggregate hours and fishing hours to a 0.1 degree grid
##############################################################
## Setup
library(bigrquery)
billing_project <- "proj_code" # Update with project code
## Define the query
# BigQuery Standard SQL: grid daily fishing effort at 0.1 degree resolution.
# Fixes relative to the original draft:
#   * dialect directive must be exactly `#standardSQL` (was `###standardSQL`)
#   * table-name typo `gfw_resesarch` -> `gfw_research` in Subquery2
#   * missing comma after `lat_bin / 10 as lat_bin` in Subquery3 (syntax error)
sql <- "
#standardSQL
WITH
######################################
# Subquery 1: keep only good, non-overlapping segments
good_segments AS (
SELECT
seg_id
FROM
`gfw_research.pipe_v20190502_segs`
WHERE
good_seg
AND positions > 10
AND NOT overlapping_and_short),
######################################
# Subquery 2: positions on good segments; fishing hours via nnet score
fishing AS (
SELECT
ssvid,
FLOOR(lat * 10) as lat_bin,
FLOOR(lon * 10) as lon_bin,
EXTRACT(date FROM date) as date,
hours,
IF(nnet_score2 > 0.5, hours, 0) as fishing_hours
FROM
`gfw_research.pipe_v20190502_fishing`
WHERE date = '2018-11-20'
AND seg_id IN (
SELECT
seg_id
FROM
good_segments)),
######################################
# Subquery 3: aggregate hours to the 0.1-degree grid
fishing_binned AS (
SELECT
date,
lat_bin / 10 as lat_bin,
lon_bin / 10 as lon_bin,
SUM(hours) as hours,
SUM(fishing_hours) as fishing_hours
FROM fishing
GROUP BY date, lat_bin, lon_bin)
######################################
SELECT
*
FROM
fishing_binned
"
## Run the query
#gridded_effort <- bq_project_query(billing_project, sql) | /practice_scripts/fishing_hours_gridded.R | no_license | saraorofino/gfw-sql-practice | R | false | false | 1,900 | r | #############################################################
# Example Query : Gridded fishing effort
# Description:
# This query demonstrates how to extract valid positions and
# calculate fishing hours.
##############################################################
##############################################################
# Sub-query 1: Identify only good segments for vessel positions
# Sub-query 2: Identify "fishing" positions using neural net score
# Sub-query 3: Calculate fishing hours using neural net score/hours
# Aggregate hours and fishing hours to a 0.1 degree grid
##############################################################
## Setup
library(bigrquery)
billing_project <- "proj_code" # Update with project code
## Define the query
# BigQuery Standard SQL: grid daily fishing effort at 0.1 degree resolution.
# Fixes relative to the original draft:
#   * dialect directive must be exactly `#standardSQL` (was `###standardSQL`)
#   * table-name typo `gfw_resesarch` -> `gfw_research` in Subquery2
#   * missing comma after `lat_bin / 10 as lat_bin` in Subquery3 (syntax error)
sql <- "
#standardSQL
WITH
######################################
# Subquery 1: keep only good, non-overlapping segments
good_segments AS (
SELECT
seg_id
FROM
`gfw_research.pipe_v20190502_segs`
WHERE
good_seg
AND positions > 10
AND NOT overlapping_and_short),
######################################
# Subquery 2: positions on good segments; fishing hours via nnet score
fishing AS (
SELECT
ssvid,
FLOOR(lat * 10) as lat_bin,
FLOOR(lon * 10) as lon_bin,
EXTRACT(date FROM date) as date,
hours,
IF(nnet_score2 > 0.5, hours, 0) as fishing_hours
FROM
`gfw_research.pipe_v20190502_fishing`
WHERE date = '2018-11-20'
AND seg_id IN (
SELECT
seg_id
FROM
good_segments)),
######################################
# Subquery 3: aggregate hours to the 0.1-degree grid
fishing_binned AS (
SELECT
date,
lat_bin / 10 as lat_bin,
lon_bin / 10 as lon_bin,
SUM(hours) as hours,
SUM(fishing_hours) as fishing_hours
FROM fishing
GROUP BY date, lat_bin, lon_bin)
######################################
SELECT
*
FROM
fishing_binned
"
## Run the query
#gridded_effort <- bq_project_query(billing_project, sql) |
\name{tableau_color_pal}
\alias{tableau_color_pal}
\title{Tableau Color Palettes (discrete)}
\usage{
tableau_color_pal(palette = "tableau10")
}
\arguments{
\item{palette}{Palette name}
}
\description{
Color palettes used by
\href{http://www.tableausoftware.com/}{Tableau}.
}
\examples{
library(scales)
show_col(tableau_color_pal("tableau20")(20))
show_col(tableau_color_pal("tableau10")(10))
show_col(tableau_color_pal("tableau10medium")(10))
show_col(tableau_color_pal("tableau10light")(10))
show_col(tableau_color_pal("colorblind10")(10))
show_col(tableau_color_pal("trafficlight")(10))
show_col(tableau_color_pal("purplegray12")(12))
show_col(tableau_color_pal("bluered12")(12))
show_col(tableau_color_pal("greenorange12")(12))
}
\references{
\url{http://vis.stanford.edu/color-names/analyzer/}
Maureen Stone, "Designing Colors for Data" (slides), at
the International Symposium on Computational Aesthetics
in Graphics, Visualization, and Imaging, Banff, AB,
Canada, June 22, 2007
\url{http://www.stonesc.com/slides/CompAe\%202007.pdf}.
Heer, Jeffrey and Maureen Stone, 2012 "Color Naming
Models for Color Selection, Image Editing and Palette
Design", ACM Human Factors in Computing Systems (CHI)
\url{http://vis.stanford.edu/files/2012-ColorNameModels-CHI.pdf}.
}
| /man/tableau_color_pal.Rd | no_license | jknowles/ggplotJrnold | R | false | false | 1,301 | rd | \name{tableau_color_pal}
\alias{tableau_color_pal}
\title{Tableau Color Palettes (discrete)}
\usage{
tableau_color_pal(palette = "tableau10")
}
\arguments{
\item{palette}{Palette name}
}
\description{
Color palettes used by
\href{http://www.tableausoftware.com/}{Tableau}.
}
\examples{
library(scales)
show_col(tableau_color_pal("tableau20")(20))
show_col(tableau_color_pal("tableau10")(10))
show_col(tableau_color_pal("tableau10medium")(10))
show_col(tableau_color_pal("tableau10light")(10))
show_col(tableau_color_pal("colorblind10")(10))
show_col(tableau_color_pal("trafficlight")(10))
show_col(tableau_color_pal("purplegray12")(12))
show_col(tableau_color_pal("bluered12")(12))
show_col(tableau_color_pal("greenorange12")(12))
}
\references{
\url{http://vis.stanford.edu/color-names/analyzer/}
Maureen Stone, "Designing Colors for Data" (slides), at
the International Symposium on Computational Aesthetics
in Graphics, Visualization, and Imaging, Banff, AB,
Canada, June 22, 2007
\url{http://www.stonesc.com/slides/CompAe\%202007.pdf}.
Heer, Jeffrey and Maureen Stone, 2012 "Color Naming
Models for Color Selection, Image Editing and Palette
Design", ACM Human Factors in Computing Systems (CHI)
\url{http://vis.stanford.edu/files/2012-ColorNameModels-CHI.pdf}.
}
|
# Run this script only once to install all packages necessary for the workshop.
# install.packages() is vectorised: a single call installs every package and
# resolves shared dependencies once instead of eight separate times.
install.packages(c('tidyverse', 'knitr', 'DT', 'readxl',
                   'sf', 'mapview', 'plotly'))
install.packages('lubridate') | /installPackages.R | no_license | EmmaVJones/NCTC_Processing-Benthic-Macroinvertebrate-Data-in-R | R | false | false | 293 | r | # Run this script only once to install all packages necessary for the workshop
# install.packages() is vectorised: a single call installs every package and
# resolves shared dependencies once instead of eight separate times.
install.packages(c('tidyverse', 'knitr', 'DT', 'readxl',
                   'sf', 'mapview', 'plotly'))
install.packages('lubridate') |
# Log-likelihood for a Clayton survival copula with Royston-Parmar
# (flexsurv spline) margins, for semi-competing-risks style data.
#
# Args:
#   para   : c(spline coefficients for X (k+2 values),
#              spline coefficients for Y (k+2 values),
#              theta), where theta is the Clayton association parameter.
#   X, Y   : observed (possibly censored) times for the two events.
#   d1, d2 : event indicators for X and Y (1 = event observed, 0 = censored).
#   k      : number of knots in each spline margin; fixes the length of para.
#   knotsx, knotsy : knot locations for the X and Y spline margins.
#
# Returns the scalar log-likelihood summed over all observations.
clayton_loglik <- function(para, X, Y, d1, d2, k = 2, knotsx, knotsy){
  # k determines how para is partitioned between the two margins
  gammax <- para[1:(k + 2)]
  gammay <- para[(k + 3):(2*(k + 2))]
  # last value in para is the association parameter
  theta <- para[2*(k + 2) + 1]
  # marginal survival probabilities S_X(X), S_Y(Y)
  u = flexsurv::psurvspline(q = X, gamma = gammax, knots = knotsx, lower.tail = FALSE)
  v = flexsurv::psurvspline(q = Y, gamma = gammay, knots = knotsy, lower.tail = FALSE)
  # marginal densities
  du = flexsurv::dsurvspline(x = X, gamma = gammax, knots = knotsx)
  dv = flexsurv::dsurvspline(x = Y, gamma = gammay, knots = knotsy)
  C <- (u^(-theta) + v^(-theta) - 1)^(-1/theta) # Clayton copula evaluated at (u, v)
  # Likelihood contribution for each of the four censoring patterns:
  part1 <- ifelse(d1*d2==1,log(1+theta)+(1+2*theta)*log(C) - (theta+1)*log(u)-(theta+1)*log(v) + log(du) + log(dv),0) #both events observed
  part2 <- ifelse(d1*(1-d2)==1, (theta+1)*log(C) - (theta+1)*log(u) + log(du),0) #non-terminal event only
  part3 <- ifelse((1-d1)*d2==1, (theta+1)*log(C) - (theta+1)*log(v) + log(dv),0) #terminal event only
  part4 <- ifelse((1-d1)*(1-d2)==1,log(C),0) #both events censored
  loglik <- sum(part1+part2+part3+part4)
  return(loglik)
}
# Log-likelihood for a Frank copula with Royston-Parmar (flexsurv spline)
# margins. Argument layout is identical to clayton_loglik(); the last
# element of `para` is the Frank association parameter theta.
#
# Returns the scalar log-likelihood summed over all observations.
frank_loglik <- function(para, X, Y, d1, d2, k = 2, knotsx, knotsy){
  # k determines how para is partitioned between the two margins
  gammax <- para[1:(k + 2)]
  gammay <- para[(k + 3):(2*(k + 2))]
  # last value in para is the association parameter
  theta <- para[2*(k + 2) + 1]
  # marginal survival probabilities
  u = flexsurv::psurvspline(q = X, gamma = gammax, knots = knotsx, lower.tail = FALSE)
  v = flexsurv::psurvspline(q = Y, gamma = gammay, knots = knotsy, lower.tail = FALSE)
  # marginal densities
  du = flexsurv::dsurvspline(x = X, gamma = gammax, knots = knotsx)
  dv = flexsurv::dsurvspline(x = Y, gamma = gammay, knots = knotsy)
  # Frank copula evaluated at (u, v)
  C <- (-1/theta)*log(((1-exp(-theta)-(1-exp(-theta*u))*(1-exp(-theta*v))))/(1-exp(-theta)))
  # Likelihood contribution for each of the four censoring patterns
  # (same decomposition as clayton_loglik):
  part1 <- ifelse(d1*d2==1,(log(theta)+theta*C+log(exp(theta*C)-1)-log(exp(theta*u)-1)-log(exp(theta*v)-1)+log(du)+log(dv)),0)
  part2 <- ifelse(d1*(1-d2)==1,log((1-exp(theta*C))/(1-exp(theta*u)))+log(du),0)
  part3 <- ifelse(((1-d1)*(d2))==1,(log((1-exp(theta*C))/(1-exp(theta*v)))+log(dv)),0)
  part4 <- ifelse(((1-d1)*(1-d2))==1,log(C),0)
  loglik <- sum(part1+part2+part3+part4)
  return(loglik)
}
# Log-likelihood for a Gumbel copula with Royston-Parmar (flexsurv spline)
# margins. Argument layout is identical to clayton_loglik(); the last
# element of `para` is the Gumbel association parameter theta.
#
# Returns the scalar log-likelihood summed over all observations.
gumbel_loglik <- function(para, X, Y, d1, d2, k = 2, knotsx, knotsy){
  # k determines how para is partitioned between the two margins
  gammax <- para[1:(k + 2)]
  gammay <- para[(k + 3):(2*(k + 2))]
  # last value in para is the association parameter
  theta <- para[2*(k + 2) + 1]
  # marginal survival probabilities
  u = flexsurv::psurvspline(q = X, gamma = gammax, knots = knotsx, lower.tail = FALSE)
  v = flexsurv::psurvspline(q = Y, gamma = gammay, knots = knotsy, lower.tail = FALSE)
  # marginal densities
  du = flexsurv::dsurvspline(x = X, gamma = gammax, knots = knotsx)
  dv = flexsurv::dsurvspline(x = Y, gamma = gammay, knots = knotsy)
  # Gumbel copula evaluated at (u, v)
  C <- exp(-((-log(u))^(theta)+(-log(v))^(theta))^(1/theta))
  # Likelihood contribution for each of the four censoring patterns
  # (same decomposition as clayton_loglik):
  part1 <- ifelse(d1*d2==1,log(C)+(theta-1)*log(-log(u))+(theta-1)*log(-log(v))+
                    log(theta-1-log(C))-log(u)-log(v)-(2*theta-1)*log(-log(C))+
                    log(dv)+log(du),0)
  part2 <- ifelse(d1*(1-d2)==1,log(C)+(theta-1)*log(-log(u))-log(u)-(theta-1)*log(-log(C))+log(du),0)
  part3 <- ifelse(((1-d1)*(d2))==1,(log(C)+(theta-1)*log(-log(v))-log(v)-(theta-1)*log(-log(C))+log(dv)),0)
  part4 <- ifelse(((1-d1)*(1-d2))==1,log(C),0)
  loglik <- sum(part1+part2+part3+part4)
  return(loglik)
}
#' @importFrom stats pnorm qnorm
# Log-likelihood for a Gaussian (normal) copula with Royston-Parmar spline
# margins. Data layout matches the Archimedean versions above, except that
# the last element of `para` is an unconstrained value mapped into (-1, 1)
# via (exp(z) - 1)/(exp(z) + 1) to give the correlation rho.
#
# NOTE(review): part1 omits `timescale = "log"` while parts 2-4 pass it
# explicitly; this is only consistent if "log" is flexsurv's default
# timescale -- confirm against the installed flexsurv version.
normal_loglik <- function(para, X, Y, d1, d2, k = 2, knotsx, knotsy){
  # k determines how para is partitioned between the two margins
  gammax <- para[1:(k + 2)]
  gammay <- para[(k + 3):(2*(k + 2))]
  # last value in para is the transformed correlation parameter
  rho <- (exp(para[2*(k + 2) + 1]) - 1)/(exp(para[2*(k + 2) + 1]) + 1)
  # Masks for the four censoring patterns:
  df.1 <- d1 & d2 #case part 1: both events observed
  df.2 <- d1 & (!d2) #case part 2: only the first event observed
  df.3 <- (!d1)&d2; #case part 3: only the second event observed
  df.4 <- (!d1)&(!d2) #case part 4: both events censored
  df = data.frame(X, Y)
  # Split the observed times by censoring pattern:
  X.1 <- df[df.1,1]
  Y.1 <- df[df.1,2]
  X.2 <- df[df.2,1]
  Y.2 <- df[df.2,2]
  X.3 <- df[df.3,1]
  Y.3 <- df[df.3,2]
  X.4 <- df[df.4,1]
  Y.4 <- df[df.4,2]
  # Part 1: bivariate Gaussian copula density term plus both marginal
  # log-densities (guarded so an empty subset contributes 0, since sum()
  # over length-0 input inside ifelse() would still be evaluated).
  part1 <-
    ifelse((sum(df.1) > 0), sum(
      -0.5 * log(1 - rho ^ 2) + (((
        2 * rho * qnorm(flexsurv::psurvspline(
          q = X.1, gamma = gammax, knots = knotsx
        )) * qnorm(flexsurv::psurvspline(
          q = Y.1, gamma = gammay, knots = knotsy
        )) -
          rho ^ 2 * (qnorm(
            flexsurv::psurvspline(q = X.1, gamma = gammax, knots = knotsx)
          ) ^ 2 + qnorm(
            flexsurv::psurvspline(q = Y.1, gamma = gammay, knots = knotsy)
          ) ^ 2)
      )) / ((2 * (
        1 - rho ^ 2
      ))))
      + log(flexsurv::dsurvspline(
        x = X.1, gamma = gammax, knots = knotsx
      )) + log(flexsurv::dsurvspline(
        x = Y.1, gamma = gammay, knots = knotsy
      ))
    ), 0)
  # Part 2: first event observed, second censored -- conditional normal
  # survival of Y given X, times the marginal density of X.
  # NOTE(review): `lower.tail = F` uses the T/F shorthand; prefer FALSE.
  part2 <-
    ifelse((sum(df.2) > 0), sum(log(
      pnorm(
        qnorm(
          flexsurv::psurvspline(
            q = Y.2,
            gamma = gammay,
            knots = knotsy,
            timescale = "log"
          )
        ),
        mean = rho * qnorm(
          flexsurv::psurvspline(
            q = X.2,
            gamma = gammax,
            knots = knotsx,
            timescale = "log"
          )
        ),
        sd = sqrt(1 - rho ^ 2),
        lower.tail = F
      )
    ) + log(
      flexsurv::dsurvspline(
        x = X.2,
        gamma = gammax,
        knots = knotsx,
        timescale = "log"
      )
    )), 0)
  # Part 3: mirror image of part 2 (second event observed, first censored).
  part3 <-
    ifelse((sum(df.3) > 0), sum(log(
      pnorm(
        qnorm(
          flexsurv::psurvspline(
            q = X.3,
            gamma = gammax,
            knots = knotsx,
            timescale = "log"
          )
        ),
        mean = rho * qnorm(
          flexsurv::psurvspline(
            q = Y.3,
            gamma = gammay,
            knots = knotsy,
            timescale = "log"
          )
        ),
        sd = sqrt(1 - rho ^ 2),
        lower.tail = F
      )
    ) + log(
      flexsurv::dsurvspline(
        x = Y.3,
        gamma = gammay,
        knots = knotsy,
        timescale = "log"
      )
    )), 0)
  # Part 4: both censored -- bivariate normal upper-orthant probability
  # (lower = V, upper = Inf) evaluated row by row via mvtnorm::pmvnorm.
  cov_matrix <- matrix(c(1,rho,rho,1),nrow=2)
  normal_cdf <- function(V,sigma){
    return(mvtnorm::pmvnorm(lower = V,upper=Inf, sigma=sigma,mean=c(0,0))[1])
  }
  part4 <- ifelse((sum(df.4) > 0), sum(log(apply(
    qnorm(cbind(
      flexsurv::psurvspline(
        q = X.4,
        gamma = gammax,
        knots = knotsx,
        timescale = "log"
      ),
      flexsurv::psurvspline(
        q = Y.4,
        gamma = gammay,
        knots = knotsy,
        timescale = "log"
      )
    )), 1, normal_cdf, cov_matrix
  ))), 0)
  # Each part is already a sum over its subset, so the total is a plain sum
  # of the four scalars (unlike the Archimedean versions above).
  loglik <- (part1+part2+part3+part4)
  return(loglik)
}
| /R/log_likelihoods.R | no_license | cran/Surrogate | R | false | false | 6,863 | r | clayton_loglik <- function(para, X, Y, d1, d2, k = 2, knotsx, knotsy){
#k is the number of knots in the model, this determines the length of para
gammax <- para[1:(k + 2)]
gammay <- para[(k + 3):(2*(k + 2))]
#last value in para is the association parameter
theta <- para[2*(k + 2) + 1]
#survival probabilities
u = flexsurv::psurvspline(q = X, gamma = gammax, knots = knotsx, lower.tail = FALSE)
v = flexsurv::psurvspline(q = Y, gamma = gammay, knots = knotsy, lower.tail = FALSE)
#densities
du = flexsurv::dsurvspline(x = X, gamma = gammax, knots = knotsx)
dv = flexsurv::dsurvspline(x = Y, gamma = gammay, knots = knotsy)
C <- (u^(-theta) + v^(-theta) - 1)^(-1/theta) #copula
part1 <- ifelse(d1*d2==1,log(1+theta)+(1+2*theta)*log(C) - (theta+1)*log(u)-(theta+1)*log(v) + log(du) + log(dv),0) #both events
part2 <- ifelse(d1*(1-d2)==1, (theta+1)*log(C) - (theta+1)*log(u) + log(du),0) #non-terminal event only
part3 <- ifelse((1-d1)*d2==1, (theta+1)*log(C) - (theta+1)*log(v) + log(dv),0) #terminal event only
part4 <- ifelse((1-d1)*(1-d2)==1,log(C),0) #both events censored
loglik <- sum(part1+part2+part3+part4)
return(loglik)
}
frank_loglik <- function(para, X, Y, d1, d2, k = 2, knotsx, knotsy){
#k is the number of knots in the model, this determines the length of para
gammax <- para[1:(k + 2)]
gammay <- para[(k + 3):(2*(k + 2))]
#last value in para is the association parameter
theta <- para[2*(k + 2) + 1]
#survival probabilities
u = flexsurv::psurvspline(q = X, gamma = gammax, knots = knotsx, lower.tail = FALSE)
v = flexsurv::psurvspline(q = Y, gamma = gammay, knots = knotsy, lower.tail = FALSE)
#densities
du = flexsurv::dsurvspline(x = X, gamma = gammax, knots = knotsx)
dv = flexsurv::dsurvspline(x = Y, gamma = gammay, knots = knotsy)
#copula
C <- (-1/theta)*log(((1-exp(-theta)-(1-exp(-theta*u))*(1-exp(-theta*v))))/(1-exp(-theta)))
part1 <- ifelse(d1*d2==1,(log(theta)+theta*C+log(exp(theta*C)-1)-log(exp(theta*u)-1)-log(exp(theta*v)-1)+log(du)+log(dv)),0)
part2 <- ifelse(d1*(1-d2)==1,log((1-exp(theta*C))/(1-exp(theta*u)))+log(du),0)
part3 <- ifelse(((1-d1)*(d2))==1,(log((1-exp(theta*C))/(1-exp(theta*v)))+log(dv)),0)
part4 <- ifelse(((1-d1)*(1-d2))==1,log(C),0)
loglik <- sum(part1+part2+part3+part4)
return(loglik)
}
gumbel_loglik <- function(para, X, Y, d1, d2, k = 2, knotsx, knotsy){
#k is the number of knots in the model, this determines the length of para
gammax <- para[1:(k + 2)]
gammay <- para[(k + 3):(2*(k + 2))]
#last value in para is the association parameter
theta <- para[2*(k + 2) + 1]
#survival probabilities
u = flexsurv::psurvspline(q = X, gamma = gammax, knots = knotsx, lower.tail = FALSE)
v = flexsurv::psurvspline(q = Y, gamma = gammay, knots = knotsy, lower.tail = FALSE)
#densities
du = flexsurv::dsurvspline(x = X, gamma = gammax, knots = knotsx)
dv = flexsurv::dsurvspline(x = Y, gamma = gammay, knots = knotsy)
#copula
C <- exp(-((-log(u))^(theta)+(-log(v))^(theta))^(1/theta))
part1 <- ifelse(d1*d2==1,log(C)+(theta-1)*log(-log(u))+(theta-1)*log(-log(v))+
log(theta-1-log(C))-log(u)-log(v)-(2*theta-1)*log(-log(C))+
log(dv)+log(du),0)
part2 <- ifelse(d1*(1-d2)==1,log(C)+(theta-1)*log(-log(u))-log(u)-(theta-1)*log(-log(C))+log(du),0)
part3 <- ifelse(((1-d1)*(d2))==1,(log(C)+(theta-1)*log(-log(v))-log(v)-(theta-1)*log(-log(C))+log(dv)),0)
part4 <- ifelse(((1-d1)*(1-d2))==1,log(C),0)
loglik <- sum(part1+part2+part3+part4)
return(loglik)
}
#' @importFrom stats pnorm qnorm
normal_loglik <- function(para, X, Y, d1, d2, k = 2, knotsx, knotsy){
#k is the number of knots in the model, this determines the length of para
gammax <- para[1:(k + 2)]
gammay <- para[(k + 3):(2*(k + 2))]
#last value in para is rho
rho <- (exp(para[2*(k + 2) + 1]) - 1)/(exp(para[2*(k + 2) + 1]) + 1)
df.1 <- d1 & d2 #case part 1
df.2 <- d1 & (!d2) #case part 2
df.3 <- (!d1)&d2; #case part 3
df.4 <- (!d1)&(!d2) #case part 4
df = data.frame(X, Y)
X.1 <- df[df.1,1]
Y.1 <- df[df.1,2]
X.2 <- df[df.2,1]
Y.2 <- df[df.2,2]
X.3 <- df[df.3,1]
Y.3 <- df[df.3,2]
X.4 <- df[df.4,1]
Y.4 <- df[df.4,2]
part1 <-
ifelse((sum(df.1) > 0), sum(
-0.5 * log(1 - rho ^ 2) + (((
2 * rho * qnorm(flexsurv::psurvspline(
q = X.1, gamma = gammax, knots = knotsx
)) * qnorm(flexsurv::psurvspline(
q = Y.1, gamma = gammay, knots = knotsy
)) -
rho ^ 2 * (qnorm(
flexsurv::psurvspline(q = X.1, gamma = gammax, knots = knotsx)
) ^ 2 + qnorm(
flexsurv::psurvspline(q = Y.1, gamma = gammay, knots = knotsy)
) ^ 2)
)) / ((2 * (
1 - rho ^ 2
))))
+ log(flexsurv::dsurvspline(
x = X.1, gamma = gammax, knots = knotsx
)) + log(flexsurv::dsurvspline(
x = Y.1, gamma = gammay, knots = knotsy
))
), 0)
part2 <-
ifelse((sum(df.2) > 0), sum(log(
pnorm(
qnorm(
flexsurv::psurvspline(
q = Y.2,
gamma = gammay,
knots = knotsy,
timescale = "log"
)
),
mean = rho * qnorm(
flexsurv::psurvspline(
q = X.2,
gamma = gammax,
knots = knotsx,
timescale = "log"
)
),
sd = sqrt(1 - rho ^ 2),
lower.tail = F
)
) + log(
flexsurv::dsurvspline(
x = X.2,
gamma = gammax,
knots = knotsx,
timescale = "log"
)
)), 0)
part3 <-
ifelse((sum(df.3) > 0), sum(log(
pnorm(
qnorm(
flexsurv::psurvspline(
q = X.3,
gamma = gammax,
knots = knotsx,
timescale = "log"
)
),
mean = rho * qnorm(
flexsurv::psurvspline(
q = Y.3,
gamma = gammay,
knots = knotsy,
timescale = "log"
)
),
sd = sqrt(1 - rho ^ 2),
lower.tail = F
)
) + log(
flexsurv::dsurvspline(
x = Y.3,
gamma = gammay,
knots = knotsy,
timescale = "log"
)
)), 0)
cov_matrix <- matrix(c(1,rho,rho,1),nrow=2)
normal_cdf <- function(V,sigma){
return(mvtnorm::pmvnorm(lower = V,upper=Inf, sigma=sigma,mean=c(0,0))[1])
}
part4 <- ifelse((sum(df.4) > 0), sum(log(apply(
qnorm(cbind(
flexsurv::psurvspline(
q = X.4,
gamma = gammax,
knots = knotsx,
timescale = "log"
),
flexsurv::psurvspline(
q = Y.4,
gamma = gammay,
knots = knotsy,
timescale = "log"
)
)), 1, normal_cdf, cov_matrix
))), 0)
loglik <- (part1+part2+part3+part4)
return(loglik)
}
|
library(tidyverse)
library(openxlsx)
library(dplyr)
library(readxl)
# Avoid scientific notation when printing numbers.
options(scipen=100)
setwd("/xxx/")  # placeholder working directory -- replace before running
getwd()
# NOTE: the path separators must be corrected to "/" (e.g. when copying a
# Windows path that uses backslashes).
path = "/xxxxxx/"
# Collect the full paths of every file in the target directory.
path_list <- list.files(path, full.names=T)
# Read every file and stack them row-wise into a single data frame.
df <- do.call(rbind, lapply(path_list, read_excel)) | /すべてのファイルパスを取得してファイルを読み込み縦に結合する.R | no_license | hwpwk/R | R | false | false | 429 | r | library(tidyverse)
library(openxlsx)
library(dplyr)
library(readxl)
# 指数表記を避けるため
options(scipen=100)
setwd("/xxx/")
getwd()
# パスは「/」から「/」に修正する必要あり
path = "/xxxxxx/"
# すべてのファイルパスを取得
path_list <- list.files(path, full.names=T)
# すべてのファイルを読み込み縦に結合
df <- do.call(rbind, lapply(path_list, read_excel)) |
#' Figure 4
#'
#' Code to reproduce figure 4: overall classification performance versus the
#' number of bio-tracers combined for LDA (a), NBC (b) and MLP (c), with the
#' PCA-based results overlaid as a point-and-line series on each panel.
#'
#' @param path Path to results files for LDA and NBC.
#' @param file_ml Path to results file (a rds file) for MLP.
#' @param files_pca A vector of 3 rds files (1 per method) corresponding to the results obtained once a PCA was applied to the data set.
#'
#' @export
scr_fig4 <- function(path = "output/res_lda_nb/nbio/non_pca", file_ml = "output/res_f/res_combn_ml_nbio.rds",
  files_pca = c("output/res_f/nbio_pca_lda_01.rds", "output/res_f/nbio_pca_nb_01.rds", "output/res_f/ml_nbio_pca.rds")) {

  ## Helper functions
  # Overall accuracy per replicate: mean of the diagonal of each confusion
  # matrix slice along the 3rd array dimension.
  get_res_bb <- function(file) {
    raw <- readRDS(file)
    lapply(raw, function(x) apply(x$mean, 3, function(x) mean(diag(x))))
  }
  # Mean confusion matrix across replicates (PCA results).
  get_res_pca <- function(file) {
    raw <- readRDS(file)
    lapply(raw, function(x) apply(x, c(1, 2), mean))
  }
  # Mean standard deviation across replicates.
  # NOTE(review): defined but never called below -- confirm it can be removed.
  get_res_sd <- function(file) {
    raw <- readRDS(file)
    lapply(raw, function(x) apply(x$sd, c(1, 2), mean))
  }
  # Panel letter written in the top margin.
  addlet <- function(let, x = 1) mtext(let, 3, at = x, cex = 1, font = 2)
  # Shared axis styling for the three panels.
  addaxes <- function() {
    axis(1, at = seq(2, 16, 2), labels = NA, lwd = 0, lwd.ticks = 0.25, tck = -0.025)
    axis(1, at = seq(1, 17, 2), labels = seq(1, 17, 2), lwd = 0, lwd.ticks = 0.5)
    axis(2, lwd = 0, lwd.ticks = 0.5)
    box()
  }

  ## Read files
  files_lda <- sprintf(paste0(path, "/nbio_lda_%02d.rds"), 1)
  # TODO(review): this points at the *LDA* result file, so panels (a) and
  # (b) draw identical boxplots -- should the pattern be "/nbio_nb_%02d.rds"?
  files_nb <- sprintf(paste0(path, "/nbio_lda_%02d.rds"), 1)
  pca_lda <- unlist(lapply(get_res_pca(files_pca[1L]),
    function(x) mean(diag(x))))
  pca_nb <- unlist(lapply(get_res_pca(files_pca[2L]),
    function(x) mean(diag(x))))

  # MLP: keep rows where the predicted region matches the true region, then
  # average the assignment probability per number of bio-tracers.
  tmp <- readRDS(files_pca[3L])
  res_ml <- tmp[tmp$id_reg_test == tmp$id_reg_true, ]
  ml_pca <- aggregate(prob ~ nbio, mean, data = res_ml)
  # TODO(review): `file_ml` is accepted as an argument but never used; this
  # hard-coded path (a different file name) is read instead -- confirm which
  # is intended.
  tmp2 <- readRDS("output/res_f/res_ml_nbio.rds")
  res_ml2 <- tmp2[tmp2$id_reg_test == tmp2$id_reg_true, ]
  ml_reg <- aggregate(prob ~ nbio * id_comb, mean, data = res_ml2)
  pca_ml <- split(ml_reg$prob, ml_reg$nbio)

  sq_bt <- seq_len(17)
  output_dir("output/figs")
  msgInfo("Creating figure 4")
  png("output/figs/fig4.png", width = 183, height = 72, units = "mm", res = 600)
  par(mfrow = c(1, 3), las = 1, mar = c(4, 3.2, 1.5, 0.4),
    mgp = c(2.25, 0.6, 0))

  # Panel (a): LDA
  plot(range(sq_bt), c(0.33, 1), type = "n", axes = FALSE, xlab = "",
    ylab = "Overall performance")
  boxplot(get_res_bb(files_lda[[1L]]), col = "grey95", add = TRUE, pch = 19, border = "grey55",
    lwd = 0.8, cex = 0.5, axes = FALSE)
  points(sq_bt, pca_lda, col = 1, pch = 19, cex = 1)
  lines(sq_bt, pca_lda, col = 1, lwd = 0.7)
  addaxes()
  addlet("a")

  # Panel (b): NBC
  plot(range(sq_bt), c(0.33, 1), type = "n", xlab = "Number of bio-tracers combined",
    ylab = "", axes = FALSE)
  # BUG FIX: the original passed `xes = FALSE` (typo for `axes`), which was
  # silently swallowed by boxplot()'s `...`, so default axes were drawn on
  # top of the custom ones added by addaxes().
  boxplot(get_res_bb(files_nb[[1L]]), col = "grey95", add = TRUE, axes = FALSE,
    pch = 19, border = "grey55", lwd = 0.8, cex = 0.5)
  points(sq_bt, pca_nb, col = 1, pch = 19, cex = 1)
  lines(sq_bt, pca_nb, col = 1, lwd = 0.7)
  addaxes()
  addlet("b")

  # Panel (c): MLP
  plot(range(sq_bt), c(0.33, 1), type = "n", xlab = "", ylab = "", axes = FALSE)
  boxplot(pca_ml, col = "grey95", add = TRUE, pch = 19, border = "grey55", lwd = 0.8,
    cex = 0.5, axes = FALSE)
  points(sq_bt, ml_pca[, 2], col = 1, pch = 19, cex = 1)
  lines(sq_bt, ml_pca[, 2], col = 1, lwd = 0.7)
  addaxes()
  addlet("c")

  dev.off()
  msgSuccess_fig("4", "output/figs")
  invisible(0)
}
| /R/scr_fig4.R | permissive | McCannLab/spatial_fingerprints | R | false | false | 3,330 | r | #' Figure 4
#'
#' Code to reproduce figure 4.
#'
#' @param path Path to results files for LDA and NBC.
#' @param file_ml Path to results file (a rds file) for MLP.
#' @param files_pca A vector of 3 rds files (1 per method) corresponding to the results obtained once a PCA was applied to the data set.
#'
#' @export
scr_fig4 <- function(path = "output/res_lda_nb/nbio/non_pca", file_ml = "output/res_f/res_combn_ml_nbio.rds",
files_pca = c("output/res_f/nbio_pca_lda_01.rds", "output/res_f/nbio_pca_nb_01.rds", "output/res_f/ml_nbio_pca.rds")) {
## Helper functions
get_res_bb <- function(file) {
raw <- readRDS(file)
lapply(raw, function(x) apply(x$mean, 3, function(x) mean(diag(x))))
}
get_res_pca <- function(file) {
raw <- readRDS(file)
lapply(raw, function(x) apply(x, c(1, 2), mean))
}
get_res_sd <- function(file) {
raw <- readRDS(file)
lapply(raw, function(x) apply(x$sd, c(1, 2), mean))
}
addlet <- function(let, x = 1) mtext(let, 3, at = x, cex = 1, font = 2)
addaxes <- function() {
axis(1, at = seq(2, 16, 2), labels = NA, lwd = 0, lwd.ticks = 0.25, tck = -0.025)
axis(1, at = seq(1, 17, 2), labels = seq(1, 17, 2), lwd = 0, lwd.ticks = 0.5)
axis(2, lwd = 0, lwd.ticks = 0.5)
box()
}
## Read files
files_lda <- sprintf(paste0(path, "/nbio_lda_%02d.rds"), 1)
files_nb <- sprintf(paste0(path, "/nbio_lda_%02d.rds"), 1)
pca_lda <- unlist(lapply(get_res_pca(files_pca[1L]),
function(x) mean(diag(x))))
pca_nb <- unlist(lapply(get_res_pca(files_pca[2L]),
function(x) mean(diag(x))))
# MLP
tmp <- readRDS(files_pca[3L])
res_ml <- tmp[tmp$id_reg_test == tmp$id_reg_true, ]
ml_pca <- aggregate(prob ~ nbio, mean, data = res_ml)
tmp2 <- readRDS("output/res_f/res_ml_nbio.rds")
res_ml2 <- tmp2[tmp2$id_reg_test == tmp2$id_reg_true, ]
ml_reg <- aggregate(prob ~ nbio * id_comb, mean, data = res_ml2)
pca_ml <- split(ml_reg$prob, ml_reg$nbio)
sq_bt <- seq_len(17)
output_dir("output/figs")
msgInfo("Creating figure 4")
png("output/figs/fig4.png", width = 183, height = 72, units = "mm", res = 600)
par(mfrow = c(1, 3), las = 1, mar = c(4, 3.2, 1.5, 0.4),
mgp = c(2.25, 0.6, 0))
plot(range(sq_bt), c(0.33, 1), type = "n", axes = FALSE, xlab = "",
ylab = "Overall performance")
boxplot(get_res_bb(files_lda[[1L]]), col = "grey95", add = TRUE, pch = 19, border = "grey55",
lwd = 0.8, cex = 0.5, axes = FALSE)
points(sq_bt, pca_lda, col = 1, pch = 19, cex = 1)
lines(sq_bt, pca_lda, col = 1, lwd = 0.7)
addaxes()
addlet("a")
plot(range(sq_bt), c(0.33, 1), type = "n", xlab = "Number of bio-tracers combined",
ylab = "", axes = FALSE)
boxplot(get_res_bb(files_nb[[1L]]), col = "grey95", add = TRUE, xes = FALSE,
pch = 19, border = "grey55", lwd = 0.8, cex = 0.5)
points(sq_bt, pca_nb, col = 1, pch = 19, cex = 1)
lines(sq_bt, pca_nb, col = 1, lwd = 0.7)
addaxes()
addlet("b")
plot(range(sq_bt), c(0.33, 1), type = "n", xlab = "", ylab = "", axes = FALSE)
boxplot(pca_ml, col = "grey95", add = TRUE, pch = 19, border = "grey55", lwd = 0.8,
cex = 0.5, axes = FALSE)
points(sq_bt, ml_pca[, 2], col = 1, pch = 19, cex = 1)
lines(sq_bt, ml_pca[, 2], col = 1, lwd = 0.7)
addaxes()
addlet("c")
dev.off()
msgSuccess_fig("4", "output/figs")
invisible(0)
}
|
# Appears to be an auto-generated fuzz-test input (RcppDeepState/valgrind
# style harness): extreme, infinite and NaN argument values probe the
# compiled routine for crashes rather than checking numeric results.
testlist <- list(doy = c(-Inf, 0), latitude = c(-6.93132091139805e-107, 1.86807199752012e+112, -Inf, 2.00994342527714e-162, 1.81554993050676e-79, 7.89363005545926e+139, 2.3317908961407e-93, NaN, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158))
# Invoke the (non-exported) compiled routine with the fuzzed arguments.
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615833039-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 421 | r | testlist <- list(doy = c(-Inf, 0), latitude = c(-6.93132091139805e-107, 1.86807199752012e+112, -Inf, 2.00994342527714e-162, 1.81554993050676e-79, 7.89363005545926e+139, 2.3317908961407e-93, NaN, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
#' load_data UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_load_data_ui <- function(id){
  ns <- NS(id)
  tagList(
    # File chooser restricted to CSV uploads.
    fileInput(ns("csv_input"), "Select CSV File to Import", accept = ".csv"),
    br(),
    # Interactive table preview of the uploaded data.
    DT::DTOutput(ns("data_input"))
  )
}
#' load_data Server Functions
#'
#' @noRd
mod_load_data_server <- function(id){
  moduleServer( id, function(input, output, session){
    ns <- session$ns

    # Reactive holding the parsed upload; req() suspends evaluation until a
    # file has actually been selected.
    data_input <- reactive({
      req(input$csv_input)
      # NOTE(review): rio::import() infers the format from the datapath's
      # file extension -- confirm the temp file keeps ".csv", or pass
      # `format = "csv"` explicitly.
      rio::import(input$csv_input$datapath)
    })

    # Render the uploaded data as an interactive table.
    output$data_input <- DT::renderDataTable({
      data_input()
    })
  })
}
## To be copied in the UI
# mod_load_data_ui("load_data_1")
## To be copied in the server
# mod_load_data_server("load_data_1")
| /R/mod_load_data.R | permissive | jixing475/shiy4tools | R | false | false | 865 | r | #' load_data UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_load_data_ui <- function(id){
ns <- NS(id)
tagList(
fileInput(ns("csv_input"), "Select CSV File to Import", accept = ".csv"),
br(),
DT::DTOutput(ns("data_input"))
)
}
#' load_data Server Functions
#'
#' @noRd
mod_load_data_server <- function(id){
moduleServer( id, function(input, output, session){
ns <- session$ns
data_input <- reactive({
req(input$csv_input)
rio::import(input$csv_input$datapath)
})
output$data_input <- DT::renderDataTable({
data_input()
})
})
}
## To be copied in the UI
# mod_load_data_ui("load_data_1")
## To be copied in the server
# mod_load_data_server("load_data_1")
|
\name{plot.fasp}
\alias{plot.fasp}
\title{Plot a Function Array}
\description{
Plots an array of summary functions, usually associated with a
point pattern, stored in an object of class \code{"fasp"}.
A method for \code{plot}.
}
\usage{
\method{plot}{fasp}(x,formule=NULL, \dots,
subset=NULL, title=NULL, banner=TRUE,
transpose=FALSE,
samex=FALSE, samey=FALSE,
mar.panel=NULL,
outerlabels=TRUE, cex.outerlabels=1.25,
legend=FALSE)
}
\arguments{
\item{x}{An object of class \code{"fasp"} representing a
function array.
}
\item{formule}{
A formula or list of formulae indicating what
variables are to be plotted against what variable. Each formula is
either an R language formula object, or a string that can be parsed
as a formula. If \code{formule} is a list, its \eqn{k^{th}}{k-th} component
should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th}
plot where \code{x$which[i,j]=k}. If the formula is left
as \code{NULL}, then \code{plot.fasp} attempts to use the component
\code{default.formula} of \code{x}. If that component is NULL
as well, it gives up.
}
\item{\dots}{
Arguments passed to \code{\link{plot.fv}} to control
the individual plot panels.
}
\item{subset}{
A logical vector, or a vector of indices, or an
expression or a character string, or a \bold{list} of such,
indicating a subset of the data to be included in each plot.
If \code{subset} is a list, its \eqn{k^{th}}{k-th} component
should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot
where \code{x$which[i,j]=k}.
}
\item{title}{
Overall title for the plot.
}
\item{banner}{
Logical. If \code{TRUE}, the overall title is plotted.
If \code{FALSE}, the overall title is not plotted
and no space is allocated for it.
}
\item{transpose}{
Logical. If \code{TRUE}, rows and columns will be exchanged.
}
\item{samex,samey}{
Logical values indicating whether all individual plot panels should have the
same x axis limits and the same y axis limits, respectively.
This makes it easier to compare the plots.
}
\item{mar.panel}{
Vector of length 4 giving the value of the
graphics parameter \code{mar} controlling the size of plot margins
for each individual plot panel. See \code{\link{par}}.
}
\item{outerlabels}{Logical.
If \code{TRUE}, the row and column names of the array of functions
are plotted in the margins of the array of plot panels.
If \code{FALSE}, each individual plot panel is labelled by its
row and column name.
}
\item{cex.outerlabels}{
Character expansion factor for row and column labels of array.
}
\item{legend}{
Logical flag determining whether to plot a legend in each panel.
}
}
\details{
An object of class \code{"fasp"} represents
an array of summary functions, usually associated with a point
pattern. See \code{\link{fasp.object}} for details.
Such an object is created, for example, by \code{\link{alltypes}}.
The function \code{plot.fasp} is
a method for \code{plot}. It calls \code{\link{plot.fv}} to plot the
individual panels.
For information about the interpretation of the
arguments \code{formule} and \code{subset},
see \code{\link{plot.fv}}.
Arguments that are often passed through \code{...} include
\code{col} to control the colours of the different lines in a panel,
and \code{lty} and \code{lwd} to control the line type and line width
of the different lines in a panel. The argument \code{shade}
can also be used to display confidence intervals or significance bands
as filled grey shading. See \code{\link{plot.fv}}.
The argument \code{title}, if present, will determine the
overall title of the plot. If it is absent, it defaults to \code{x$title}.
Titles for the individual plot panels will be taken from
\code{x$titles}.
}
\value{None.}
\section{Warnings}{
(Each component of) the \code{subset} argument may be a
logical vector (of the same length as the vectors of data which
are extracted from \code{x}), or a vector of indices, or an
\bold{expression} such as \code{expression(r<=0.2)}, or a text string,
such as \code{"r<=0.2"}.
Attempting a syntax such as \code{subset = r<=0.2} (without
wrapping \code{r<=0.2} either in quote marks or in \code{expression()})
will cause this function to fall over.
Variables referred to in any formula must exist in the data frames
stored in \code{x}. What the names of these variables are will
of course depend upon the nature of \code{x}.
}
\seealso{
\code{\link{alltypes}},
\code{\link{plot.fv}},
\code{\link{fasp.object}}
}
\examples{
\dontrun{
# Bramble Canes data.
data(bramblecanes)
X.G <- alltypes(bramblecanes,"G",dataname="Bramblecanes",verb=TRUE)
plot(X.G)
plot(X.G,subset="r<=0.2")
plot(X.G,formule=asin(sqrt(cbind(km,theo))) ~ asin(sqrt(theo)))
plot(X.G,fo=cbind(km,theo) - theo~r,subset="r<=0.2")
# Simulated data.
pp <- runifpoint(350, owin(c(0,1),c(0,1)))
pp <- pp \%mark\% factor(c(rep(1,50),rep(2,100),rep(3,200)))
X.K <- alltypes(pp,"K",verb=TRUE,dataname="Fake Data")
plot(X.K,fo=cbind(border,theo)~theo,subset="theo<=0.75")
}
}
\author{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}
and Rolf Turner \email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{hplot}
| /man/plot.fasp.Rd | no_license | chenjiaxun9/spatstat | R | false | false | 5,539 | rd | \name{plot.fasp}
\alias{plot.fasp}
\title{Plot a Function Array}
\description{
Plots an array of summary functions, usually associated with a
point pattern, stored in an object of class \code{"fasp"}.
A method for \code{plot}.
}
\usage{
\method{plot}{fasp}(x,formule=NULL, \dots,
subset=NULL, title=NULL, banner=TRUE,
transpose=FALSE,
samex=FALSE, samey=FALSE,
mar.panel=NULL,
outerlabels=TRUE, cex.outerlabels=1.25,
legend=FALSE)
}
\arguments{
\item{x}{An object of class \code{"fasp"} representing a
function array.
}
\item{formule}{
A formula or list of formulae indicating what
variables are to be plotted against what variable. Each formula is
either an R language formula object, or a string that can be parsed
as a formula. If \code{formule} is a list, its \eqn{k^{th}}{k-th} component
should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th}
plot where \code{x$which[i,j]=k}. If the formula is left
as \code{NULL}, then \code{plot.fasp} attempts to use the component
\code{default.formula} of \code{x}. If that component is NULL
as well, it gives up.
}
\item{\dots}{
Arguments passed to \code{\link{plot.fv}} to control
the individual plot panels.
}
\item{subset}{
A logical vector, or a vector of indices, or an
expression or a character string, or a \bold{list} of such,
indicating a subset of the data to be included in each plot.
If \code{subset} is a list, its \eqn{k^{th}}{k-th} component
should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot
where \code{x$which[i,j]=k}.
}
\item{title}{
Overall title for the plot.
}
\item{banner}{
Logical. If \code{TRUE}, the overall title is plotted.
If \code{FALSE}, the overall title is not plotted
and no space is allocated for it.
}
\item{transpose}{
Logical. If \code{TRUE}, rows and columns will be exchanged.
}
\item{samex,samey}{
Logical values indicating whether all individual plot panels should have the
same x axis limits and the same y axis limits, respectively.
This makes it easier to compare the plots.
}
\item{mar.panel}{
Vector of length 4 giving the value of the
graphics parameter \code{mar} controlling the size of plot margins
for each individual plot panel. See \code{\link{par}}.
}
\item{outerlabels}{Logical.
If \code{TRUE}, the row and column names of the array of functions
are plotted in the margins of the array of plot panels.
If \code{FALSE}, each individual plot panel is labelled by its
row and column name.
}
\item{cex.outerlabels}{
Character expansion factor for row and column labels of array.
}
\item{legend}{
Logical flag determining whether to plot a legend in each panel.
}
}
\details{
An object of class \code{"fasp"} represents
an array of summary functions, usually associated with a point
pattern. See \code{\link{fasp.object}} for details.
Such an object is created, for example, by \code{\link{alltypes}}.
The function \code{plot.fasp} is
a method for \code{plot}. It calls \code{\link{plot.fv}} to plot the
individual panels.
For information about the interpretation of the
arguments \code{formule} and \code{subset},
see \code{\link{plot.fv}}.
Arguments that are often passed through \code{...} include
\code{col} to control the colours of the different lines in a panel,
and \code{lty} and \code{lwd} to control the line type and line width
of the different lines in a panel. The argument \code{shade}
can also be used to display confidence intervals or significance bands
as filled grey shading. See \code{\link{plot.fv}}.
The argument \code{title}, if present, will determine the
overall title of the plot. If it is absent, it defaults to \code{x$title}.
Titles for the individual plot panels will be taken from
\code{x$titles}.
}
\value{None.}
\section{Warnings}{
(Each component of) the \code{subset} argument may be a
logical vector (of the same length as the vectors of data which
are extracted from \code{x}), or a vector of indices, or an
\bold{expression} such as \code{expression(r<=0.2)}, or a text string,
such as \code{"r<=0.2"}.
Attempting a syntax such as \code{subset = r<=0.2} (without
wrapping \code{r<=0.2} either in quote marks or in \code{expression()})
will cause this function to fall over.
Variables referred to in any formula must exist in the data frames
stored in \code{x}. What the names of these variables are will
of course depend upon the nature of \code{x}.
}
\seealso{
\code{\link{alltypes}},
\code{\link{plot.fv}},
\code{\link{fasp.object}}
}
\examples{
\dontrun{
# Bramble Canes data.
data(bramblecanes)
X.G <- alltypes(bramblecanes,"G",dataname="Bramblecanes",verb=TRUE)
plot(X.G)
plot(X.G,subset="r<=0.2")
plot(X.G,formule=asin(sqrt(cbind(km,theo))) ~ asin(sqrt(theo)))
plot(X.G,fo=cbind(km,theo) - theo~r,subset="r<=0.2")
# Simulated data.
pp <- runifpoint(350, owin(c(0,1),c(0,1)))
pp <- pp \%mark\% factor(c(rep(1,50),rep(2,100),rep(3,200)))
X.K <- alltypes(pp,"K",verb=TRUE,dataname="Fake Data")
plot(X.K,fo=cbind(border,theo)~theo,subset="theo<=0.75")
}
}
\author{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}
and Rolf Turner \email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{hplot}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/langmuir4LM.R
\name{langmuir4.LM}
\alias{langmuir4.LM}
\title{Langmuir Isotherm Fourth Linear Form Analysis}
\usage{
langmuir4.LM(Ce, Qe)
}
\arguments{
\item{Ce}{the numerical value for the equilibrium capacity}
\item{Qe}{the numerical value for the adsorbed capacity}
}
\value{
the parameters for the Langmuir isotherm (fourth form), model error analysis,
and linear regression analysis
}
\description{
The Langmuir isotherm is described to be the most useful and
simplest isotherm for both chemical adsorption and physical adsorption. It
assumes that there is uniform adsorption energy onto the monolayer surface
and that there would be no interaction between the adsorbate and the surface.
}
\examples{
Ce <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)
Qe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)
langmuir4.LM(Ce,Qe)
}
\references{
Langmuir, I. (1918). <doi:10.1021/ja01269a066> The adsorption of
gases on plane surfaces of glass, mica and platinum. Journal of the American
Chemical Society, 1361-1403.
Chen, X. (2015) <doi:10.3390/info6010014> Modeling of Experimental
Adsorption Isotherm Data. 14-22.
}
\author{
Keith T. Ostan
Chester C. Deocaris
}
| /man/langmuir4.LM.Rd | no_license | cran/PUPAIM | R | false | true | 1,321 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/langmuir4LM.R
\name{langmuir4.LM}
\alias{langmuir4.LM}
\title{Langmuir Isotherm Fourth Linear Form Analysis}
\usage{
langmuir4.LM(Ce, Qe)
}
\arguments{
\item{Ce}{the numerical value for the equilibrium capacity}
\item{Qe}{the numerical value for the adsorbed capacity}
}
\value{
the parameters for the Langmuir isotherm (fourth form), model error analysis,
and linear regression analysis
}
\description{
The Langmuir isotherm is described to be the most useful and
simplest isotherm for both chemical adsorption and physical adsorption. It
assumes that there is uniform adsorption energy onto the monolayer surface
and that there would be no interaction between the adsorbate and the surface.
}
\examples{
Ce <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)
Qe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)
langmuir4.LM(Ce,Qe)
}
\references{
Langmuir, I. (1918). <doi:/10.1021/ja01269a066> The adsorption of
gases on plane surfaces of glass, mics and platinum. Journal of the American
Chemical Society, 1361-1403.
Chen, X. (2015) <doi:/10.3390/info6010014> Modeling of Experimental
Adsorption Isotherm Data. 14-22.
}
\author{
Keith T. Ostan
Chester C. Deocaris
}
|
# Create plot for MSE measures: points per trial count, log-scaled y, one
# facet panel per noise level.
ggplot(data = tScenario1_MSE, aes(x = as.factor(Trials), y = MSE, color = Method)) +
geom_point() +
scale_y_log10() +
facet_wrap(~ as.factor(NoiseLevel), nrow = 2) +
theme_minimal() +
labs(title = "MSE Comparison Between Methods as Function of Number of Trials
Faceted by Noise Level for Scenario 1", x = "Number of Trials", y = "MSE", color = "Method") +
theme(plot.title = element_text(hjust = 0.5))
# Create plot for MSE breakdown: dodged bars of each component's share of MSE
ggplot(data = tScenario1_allGsvdError, aes(x = as.factor(Trials), y = PercentError, fill = Partial)) +
geom_bar(stat = "identity", position = "dodge") +
facet_wrap(~ as.factor(NoiseLevel), nrow = 2) +
theme_minimal() +
labs(title = "Breakdown of MSE by Trial Size Faceted by Noise Level for Scenario 1", x = "Number of Trials",
y = " Percent of MSE", fill = "Part of MSE") +
# NOTE(review): hjust = 0.2 differs from the 0.5 used by every other plot in
# this file -- confirm the off-center title is intentional.
theme(plot.title = element_text(hjust = 0.2))
# Create SDI (Signal Distortion Index) plot, colored by noise level
ggplot(data = tScenario1_allGsvdSDI, aes(x = as.factor(Trials), y = SDI, color = as.factor(NoiseLevel))) +
geom_point() +
scale_y_log10()+
theme_minimal() +
labs(title = "Signal Distortion Index in Scenario 1", x = "Number of Trials",
y = "Signal Distortion Index", color = "Noise Level") +
theme(plot.title = element_text(hjust = 0.5))
# Create Noise Reduction Factor plot, colored by noise level
ggplot(data = tScenario1_allGsvdNRfactor, aes(x = as.factor(Trials), y = NoiseReduction, color = as.factor(NoiseLevel))) +
geom_point() +
scale_y_log10() +
theme_minimal() +
labs(title = "Noise Reduction in Scenario 1", x = "Number of Trials",
y = "Noise Reduction Factor", color = "Noise Level") +
theme(plot.title = element_text(hjust = 0.5))
# Create SNR plot, faceted by noise level
ggplot(data = tScenario1_allSNR, aes(x = as.factor(Trials), y = SNR, color = Condition)) +
geom_point() +
scale_y_log10() +
facet_wrap(~ as.factor(NoiseLevel), nrow = 2) +
theme_minimal() +
labs(title = "Signal-to-Noise Ratio Comparison Between Methods as a
Function of Number of Trials and Faceted by Noise Level for Scenario 1", x = "Number of Trials", y = "SNR", color = "Condition") +
theme(plot.title = element_text(hjust = 0.5))
| /rCode/Figures/Alt_signal_plots.R | no_license | aleslab/optimalEegFiltering | R | false | false | 2,172 | r | # create plot for MSE measures
# Create plot for MSE measures: points per trial count, log-scaled y, one
# facet panel per noise level.
ggplot(data = tScenario1_MSE, aes(x = as.factor(Trials), y = MSE, color = Method)) +
geom_point() +
scale_y_log10() +
facet_wrap(~ as.factor(NoiseLevel), nrow = 2) +
theme_minimal() +
labs(title = "MSE Comparison Between Methods as Function of Number of Trials
Faceted by Noise Level for Scenario 1", x = "Number of Trials", y = "MSE", color = "Method") +
theme(plot.title = element_text(hjust = 0.5))
# Create plot for MSE breakdown: dodged bars of each component's share of MSE
ggplot(data = tScenario1_allGsvdError, aes(x = as.factor(Trials), y = PercentError, fill = Partial)) +
geom_bar(stat = "identity", position = "dodge") +
facet_wrap(~ as.factor(NoiseLevel), nrow = 2) +
theme_minimal() +
labs(title = "Breakdown of MSE by Trial Size Faceted by Noise Level for Scenario 1", x = "Number of Trials",
y = " Percent of MSE", fill = "Part of MSE") +
# NOTE(review): hjust = 0.2 differs from the 0.5 used by every other plot in
# this file -- confirm the off-center title is intentional.
theme(plot.title = element_text(hjust = 0.2))
# Create SDI (Signal Distortion Index) plot, colored by noise level
ggplot(data = tScenario1_allGsvdSDI, aes(x = as.factor(Trials), y = SDI, color = as.factor(NoiseLevel))) +
geom_point() +
scale_y_log10()+
theme_minimal() +
labs(title = "Signal Distortion Index in Scenario 1", x = "Number of Trials",
y = "Signal Distortion Index", color = "Noise Level") +
theme(plot.title = element_text(hjust = 0.5))
# Create Noise Reduction Factor plot, colored by noise level
ggplot(data = tScenario1_allGsvdNRfactor, aes(x = as.factor(Trials), y = NoiseReduction, color = as.factor(NoiseLevel))) +
geom_point() +
scale_y_log10() +
theme_minimal() +
labs(title = "Noise Reduction in Scenario 1", x = "Number of Trials",
y = "Noise Reduction Factor", color = "Noise Level") +
theme(plot.title = element_text(hjust = 0.5))
# Create SNR plot, faceted by noise level
ggplot(data = tScenario1_allSNR, aes(x = as.factor(Trials), y = SNR, color = Condition)) +
geom_point() +
scale_y_log10() +
facet_wrap(~ as.factor(NoiseLevel), nrow = 2) +
theme_minimal() +
labs(title = "Signal-to-Noise Ratio Comparison Between Methods as a
Function of Number of Trials and Faceted by Noise Level for Scenario 1", x = "Number of Trials", y = "SNR", color = "Condition") +
theme(plot.title = element_text(hjust = 0.5))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_crossValidation.R
\name{DTD_cv_lambda_R}
\alias{DTD_cv_lambda_R}
\title{Cross-validation for digital tissue deconvolution}
\usage{
DTD_cv_lambda_R(
lambda.seq = "none",
tweak.start,
n.folds = 5,
lambda.length = 10,
train.data.list,
cv.verbose = TRUE,
warm.start = FALSE,
F.GRAD.FUN,
EVAL.FUN,
...
)
}
\arguments{
\item{lambda.seq}{numeric vector or NULL or "none": Over this series of lambdas the
FISTA will be optimized. If 'lambda.seq' is set to NULL, a generic series of
lambdas - depending on the dimensions of the training set -
will be generated. If 'lambda.seq' is "none", no cross validation is done.
Only one model with lambda = 0 is trained on the complete data set.}
\item{tweak.start}{numeric vector, starting vector for the DTD algorithm.}
\item{n.folds}{integer, number of buckets in the cross validation.}
\item{lambda.length}{integer, how many lambdas will be generated
(only used if lambda.seq is NULL)}
\item{train.data.list}{list, with two entries, a numeric matrix each,
named 'mixtures' and 'quantities'
Within this list the train/test cross validation will be done.
(see Vignette `browseVignettes("DTD")` for details)}
\item{cv.verbose}{logical, should information about the cv process be
printed to the screen?}
\item{warm.start}{logical, should the solution of a previous model of
the cross validation be used as start in the next model.
Notice, that the warm.start starts with the most unpenalized model.}
\item{F.GRAD.FUN}{gradient function, see
\code{\link{descent_generalized_fista}}
The 'F.GRAD.FUN' and 'EVAL.FUN' parameters are only present in the
R cross validation. With these parameters an alternativ gradient,
and evaluation function can be provided. Both functions are called
using only the tweak vector as first argument.}
\item{EVAL.FUN}{evaluation function,
see \code{\link{descent_generalized_fista}}}
\item{...}{all parameters that are passed to the
\code{\link{descent_generalized_fista}} function.
E.g. 'maxiter', 'NORM.FUN', 'cycles' etc ...}
}
\value{
list of length 2:
\itemize{
\item 'cv.obj', list of lists. DTD model for each lambda, and every folds.
\item 'best.model', list. DTD model optimized on the complete data set
with the best lambda from the cross validation.
}
}
\description{
Our descent generalized FISTA implementation includes a l1 regularization
term (see \code{\link{train_deconvolution_model}}).
This function performs a 'n.folds'-fold cross validation to find the
best fitting regularization parameter.
}
\details{
For an example see `browseVignettes("DTD")`.
Notice, there is an R and a C++ implementation of our optimizer.
Hence, there are two cross validation implementations,
calling either the R or C++ implementation:
\code{\link{DTD_cv_lambda_R}} and \code{\link{DTD_cv_lambda_cxx}}.
}
| /man/DTD_cv_lambda_R.Rd | permissive | MarianSchoen/DTD | R | false | true | 2,889 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_crossValidation.R
\name{DTD_cv_lambda_R}
\alias{DTD_cv_lambda_R}
\title{Cross-validation for digital tissue deconvolution}
\usage{
DTD_cv_lambda_R(
lambda.seq = "none",
tweak.start,
n.folds = 5,
lambda.length = 10,
train.data.list,
cv.verbose = TRUE,
warm.start = FALSE,
F.GRAD.FUN,
EVAL.FUN,
...
)
}
\arguments{
\item{lambda.seq}{numeric vector or NULL or "none": Over this series of lambdas the
FISTA will be optimized. If 'lambda.seq' is set to NULL, a generic series of
lambdas - depending on the dimensions of the training set -
will be generated. If 'lambda.seq' is "none", no cross validation is done.
Only one model with lambda = 0 is trained on the complete data set.}
\item{tweak.start}{numeric vector, starting vector for the DTD algorithm.}
\item{n.folds}{integer, number of buckets in the cross validation.}
\item{lambda.length}{integer, how many lambdas will be generated
(only used if lambda.seq is NULL)}
\item{train.data.list}{list, with two entries, a numeric matrix each,
named 'mixtures' and 'quantities'
Within this list the train/test cross validation will be done.
(see Vignette `browseVignettes("DTD")` for details)}
\item{cv.verbose}{logical, should information about the cv process be
printed to the screen?}
\item{warm.start}{logical, should the solution of a previous model of
the cross validation be used as start in the next model.
Notice, that the warm.start starts with the most unpenalized model.}
\item{F.GRAD.FUN}{gradient function, see
\code{\link{descent_generalized_fista}}
The 'F.GRAD.FUN' and 'EVAL.FUN' parameters are only present in the
R cross validation. With these parameters an alternativ gradient,
and evaluation function can be provided. Both functions are called
using only the tweak vector as first argument.}
\item{EVAL.FUN}{evaluation function,
see \code{\link{descent_generalized_fista}}}
\item{...}{all parameters that are passed to the
\code{\link{descent_generalized_fista}} function.
E.g. 'maxiter', 'NORM.FUN', 'cycles' etc ...}
}
\value{
list of length 2:
\itemize{
\item 'cv.obj', list of lists. DTD model for each lambda, and every folds.
\item 'best.model', list. DTD model optimized on the complete data set
with the best lambda from the cross validation.
}
}
\description{
Our descent generalized FISTA implementation includes a l1 regularization
term (see \code{\link{train_deconvolution_model}}).
This function performs a 'n.folds'-fold cross validation to find the
best fitting regularization parameter.
}
\details{
For an example see `browseVignettes("DTD")`.
Notice, there is an R and a C++ implementation of our optimizer.
Hence, there are two cross validation implementations,
calling either the R or C++ implementation:
\code{\link{DTD_cv_lambda_R}} and \code{\link{DTD_cv_lambda_cxx}}.
}
|
### Load stuff
# Package dependencies for the whole script (tidyverse for wrangling, mgcv for
# GAM/negative-binomial fits, furrr for parallel purrr, lubridate for dates).
library(transformr)
library(tidyverse)
library(mgcv)
library(broom)
library(furrr)
library(lubridate)
# Identity function, used as a no-op terminator at the end of pipe chains so
# intermediate steps can be commented in/out without breaking the chain.
pass <- function(x, ...) {x}
# Lower-bound size (mm) splitting "small" from "big" particles; used by
# calc_small_and_big (0.5 mm, i.e. the ~530 micron bin edge).
ParticleSizeCutoff <- 0.5
### Function to load in initial data and prepare a data frame.
## Define global parameter
# Flux parameterization: flux_fit = C_f * nparticles * lb^(alpha + gamma),
# with ag = alpha + gamma. Values fitted elsewhere (see .Rmd files cited).
C_f = 133 # SmoothsAndFlusRevisited.Rmd, calculated from trap flux
ag = 2.00 # SmoothsAndFlusRevisited
alpha = (ag + 1) / 2 # assuming spherical drag profile
gamma = alpha - 1
# *_global copies are the names referenced inside the calc_* functions below.
C_f_global <- C_f
alpha_global <- alpha
gamma_global <- gamma
ag_global <- ag
# Get the times of the CTD casts
# Read the cruise CTD profile log, keep station 016, build a POSIXct cast
# time from the date and time columns, and reduce to one row per cast with
# that cast's maximum recorded depth.
CTD_Unique <- read_csv("data/SKQ201617S_CTD_Profile.csv") %>%
filter(Station == "sta016") %>%
mutate(DateJ = mdy(`mon/dd/yyyy`), TimeJ = hms(`hh:mm`)) %>%
mutate(DateTimeJ = mdy_hms(paste(`mon/dd/yyyy`, `hh:mm`))) %>%
select(time = DateTimeJ, depth = `Pressure [db]`) %>%
group_by(time) %>% summarize(MaxDepth = max(depth))
## Times on the UVP are the time it was switched on, not the time the cast started. I will find the next time of the cast
# Map a single UVP switch-on time to the start time of the next CTD cast.
# Relies on the global CTD_Unique table built above.
# NOTE(review): if LTime falls after the last cast, min() of an empty set
# returns Inf with a warning -- confirm inputs always precede some cast.
Find_Next_Cast <- function(LTime){
NextTime <- min(CTD_Unique$time[CTD_Unique$time - LTime > 0]) # Find the next cast
NextTime
}
# Elementwise version for vectors of times.
Find_Next_Cast_2 <- Vectorize(Find_Next_Cast)
# Vectorized wrapper that restores the input's attributes (class, timezone),
# which the Vectorize() result drops -- see the explicit attributes() copy.
Find_Next_Cast_3 <- function(LTimeVec){
NextVec <- Find_Next_Cast_2(LTimeVec)
attributes(NextVec) <- attributes(LTimeVec)
NextVec
}
# Read the ETNP (station 016) UVP particle-size spectra, align each profile's
# time to the start of the next CTD cast, and return one row per
# (profile, time, depth, size bin) with raw counts, concentrations, bin
# bounds (mm), bin width, and size-normalized concentration.
bring_in_p2 <- function(){
# bring in metadata that specifies relevant files
uvpMeta <- read_tsv("data/uvpdata/export_detailed_20190304_23_14_Export_metadata_summary.tsv") %>% rename(time = `yyyy-mm-dd hh:mm`) %>% arrange(time)
# just take p2 data (station 016)
uvpMetaP2 <- uvpMeta %>% filter(Site == "016")
# bring in particle data: read every per-profile particle file and stack them
uvp_data_path <- "data/uvpdata"
particleData <- uvpMetaP2 %>% pull(`Particle filename`) %>%
map(~read_tsv(file.path(uvp_data_path, .), locale = locale(encoding = "latin1", tz="UTC"))) %>%
reduce(rbind)
# some initial processing: long format, one row per depth x size class
particleNumbers <- particleData %>%
select(profile = Profile, time = `yyyy-mm-dd hh:mm`, depth = `Depth [m]`, vol = `Sampled volume[L]`, `LPM (102-128 µm)[#/L]`:`LPM (>26 mm)[#/L]`) %>%
gather(key = "sizeclass", value = "nparticles", `LPM (102-128 µm)[#/L]`:`LPM (>26 mm)[#/L]`) %>%
# convert to central time, originals are in UTC
#mutate(time = lubridate::with_tz(time, tzone = "Mexico/General"))
# replace the UVP switch-on time with the start of the next CTD cast
mutate(time = Find_Next_Cast_3(time)) %>%
pass()
# Parse the size-bin labels into numeric lower/upper bounds expressed in mm
classData <- particleNumbers %>% select(sizeclass) %>% unique() %>%
mutate(lb0 = as.numeric(str_extract(sizeclass,"(?<=\\().*(?=-)")),
ub0 = str_extract(sizeclass, "(?<=-).*(?=\\s)") %>% as.numeric(),
#unit0 = str_extract(sizeclass, "(?<=\\s).(?=m)"),
ismm = str_detect(sizeclass, "mm"),
lb1 = if_else(ismm, lb0, lb0 / 1000),
ub1 = if_else(ismm, ub0, ub0 / 1000),
lb2 = if_else(str_detect(sizeclass, "\\>26"),26, lb1),
ub2 = if_else(str_detect(sizeclass, "\\>26"),32, ub1) # arbitrary upper edge for open-ended ">26 mm" bin
) %>%
select(sizeclass, lb = lb2, ub = ub2)
# Attach bounds; compute total counts, bin width, size-normalized counts
particleNumbers01 <- left_join(particleNumbers, classData, by = "sizeclass") %>%
mutate(TotalParticles = nparticles * vol) %>%
mutate(binsize = ub - lb) %>%
mutate(n_nparticles = nparticles / binsize)
particleNumbers001 <- particleNumbers01 %>% select(profile, time, depth, vol, sizeclass, lb, ub, binsize, TotalParticles, nparticles, n_nparticles)
particleNumbers001
}
### Import P16 station 100
# Read the P16N station 100 UVP particle file and return the same tidy
# per-size-bin table that bring_in_p2() produces (counts, bin bounds in mm,
# bin width, and size-normalized concentration).
bring_in_p16_s100 <- function(){
require(tidyverse) # NOTE(review): prefer library(); require() only warns on failure
## initial read in
s100data00 <- read_tsv("data/p16uvp/export_detailed_20190908_00_44_PAR_p16n_100.tsv", locale = locale(encoding = "latin1"))
## just the data we need, in long format (one row per depth x size class)
s100data01 <- s100data00 %>%
select(profile = Profile, time = `yyyy-mm-dd hh:mm`, depth = `Depth [m]`, vol = `Sampled volume [L]`, `LPM (102-128 µm) [# l-1]`:`LPM (>26 mm) [# l-1]`) %>%
gather(key = "sizeclass", value = "nparticles", `LPM (102-128 µm) [# l-1]`:`LPM (>26 mm) [# l-1]`)
## Convert bin names to numeric bounds
renDf00 <- s100data01 %>% select(sizeclass) %>% unique()
renDf01 <- renDf00 %>%
mutate(
## bounds, ignoring microns or mm designation
lb0 = str_extract(sizeclass, "(?<=\\().*(?=-)") %>% str_extract( ".*(?=-)") %>% as.numeric(),
ub0 = str_extract(sizeclass, "(?<=-).*(?=\\s)") %>% str_extract(".*(?=\\s)") %>% str_extract(".*(?=\\s)") %>% as.numeric(),
## test for mm
ismm = str_detect(sizeclass, "mm"),
## express everything in mm (micron-labelled bins divided by 1000)
lb1 = if_else(ismm, lb0, lb0 / 1000),
ub1 = if_else(ismm, ub0, ub0 / 1000),
## deal with biggest size bin, which is open-ended (">26 mm")
lb2 = if_else(str_detect(sizeclass, "\\>26"),26, lb1),
ub2 = if_else(str_detect(sizeclass, "\\>26"),32, ub1) # arbitrary
) %>%
select(sizeclass, lb = lb2, ub = ub2)
## join the bounds back onto the data
s100data02 <- s100data01 %>% left_join(renDf01, by = "sizeclass")
## merge the particle-free bin into the next-smaller bin: drop the bin whose
## lb is 0.128 and absorb its width by extending the bin below (ub 0.128 -> 0.161)
s100data03 <- s100data02 %>%
filter(lb != 0.128) %>% # removing small bin with no particles
mutate(ub = if_else(ub == 0.128, 0.161, ub))
## Preliminary math: raw counts, bin width, size-normalized concentration
s100data04 <- s100data03 %>%
mutate(
TotalParticles = nparticles * vol,
binsize = ub - lb,
n_nparticles = nparticles/binsize
)
s100data04
}
## Stack two project data frames row-wise, labelling each row's origin in a
## `project` column (defaults: "ETNP" and "P16").
combine_projects <- function(proj1, proj2, name1 = "ETNP", name2 = "P16") {
  # Tag each input with its project name, then stack the pair.
  tagged <- list(
    mutate(proj1, project = name1),
    mutate(proj2, project = name2)
  )
  bind_rows(tagged)
}
## I tend to work with two-element lists: the per-size data and a per-depth
## summary. This builds the first such pair from the per-size data frame.
make_twin_df_list <- function(EachSize) {
  # summarize() with no arguments collapses to one row per group, so this
  # yields the unique (project, profile, time, depth) combinations.
  depth_summary <- EachSize %>%
    group_by(project, profile, time, depth) %>%
    summarize()
  list(EachSize, depth_summary)
}
## A function that takes a variable x that can either be a list of two elements, "DepthSummary" and "EachSize"
## and saves them to the parent environment. Alternatively, if DepthSummary is specified, EachSize is saved to the parent environment, and DepthSummary
## is passed forward
## I stopped using this because the passing data directly to the parent thing, while concise, I think may be confusing to some readers.
## The second approach is more conventional.
# NOTE(review): this mutates the caller's environment as a side effect; kept
# for reference only -- new code uses parse_jac_input2() below instead.
parse_jac_input <- function(x, DepthSummary = NULL){
# Environment of the caller; the assignments below write into it directly.
p <- parent.frame()
if(!is.null(DepthSummary)){
p$EachSize = x
p$DepthSummary = DepthSummary
}else{
if(length(x) == 2){
p$EachSize = x[[1]]
p$DepthSummary = x[[2]]
}else{
stop("Either DepthSummary must be specified, or else the first variable must be a two element list containing EachSize, and DepthSummary elements")}
}
}
# As above, but with no side effects: returns a two-element list of EachSize
# and DepthSummary.
#
# x            either the EachSize data frame (when DepthSummary is given) or
#              an already-assembled two-element list.
# DepthSummary the per-depth summary, or NULL (default) when `x` is the pair.
#
# Fix: DepthSummary previously had no default, so the single-argument call
# parse_jac_input2(pair) failed at is.null(DepthSummary) with
# "argument ... missing, with no default". Adding `= NULL` is
# backward-compatible with every existing call site.
parse_jac_input2 <- function(x, DepthSummary = NULL){
  if (!is.null(DepthSummary)) {
    # Two separate pieces supplied: package them under the standard names.
    return(list(ES = x, DS = DepthSummary))
  }
  if (length(x) == 2) {
    # Already a two-element pair: pass it through unchanged.
    return(x)
  }
  stop("Either DepthSummary must be specified, or else the first variable must be a two element list containing EachSize, and DepthSummary elements")
}
## Not used directly; a skeleton showing the common shape shared by the
## calc_* twin functions.
function_template <- function(x, DepthSummary = NULL){
  # Normalize the input into the (EachSize, DepthSummary) pair.
  parsed <- parse_jac_input2(x, DepthSummary)
  EachSize <- parsed[[1]]
  DepthSummary <- parsed[[2]]
  # Code does stuff here
  list(ES = EachSize, DS = DepthSummary)
}
### Calculate biovolume, flux, speed, both for size classes and in aggregate
# Adds per-size-bin biovolume, settling speed, and flux columns to EachSize,
# then builds depth-level totals. Returns list(ES = per-size, DS = per-depth).
# Uses the globals alpha, gamma, C_f_global, ag_global defined at the top.
calc_particle_parameters <- function(x, DepthSummary = NULL){
# Allow passing in either a two element list of EachSize and DepthSummary, or passing in as two variables.
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
## Particle fractal dimension estimates
# alpha = 1.7 # Klips et al. 1994
# gamma = alpha - 1 # assume spherical drag profile as in Guidi
# ag_fit = 0.26 # Fitted alpha + gamma parameter; hardcoded but copied from NormalizeUVP_Flux.Rmd
# C_f_fit = 3.98 # Fitted C_f parameter
# alpha = 0.52 # Alldredge
# gamma = 0.26 # Alldredge & Gotschalk
# C_f_fit = 10.51 # Normalize_UVP_Flux.Rmd, nonlinear for now
# ag_fit = alpha + gamma # Zerod out for now; I'd like to clean this all up soon.
# NOTE(review): biovolume/speed use the bare globals alpha/gamma while
# flux_fit uses the *_global copies; these hold the same values as assigned
# above, but confirm the mix is intentional.
EachSize2 <- EachSize %>%
mutate(
biovolume = nparticles * lb ^ alpha,
speed = lb ^ gamma,
flux = biovolume * speed,
flux_fit = nparticles * C_f_global * lb ^ ag_global
)
# Depth-level totals; tot_speed is the biovolume-weighted mean speed since
# tot_flux/tot_biovolume = sum(biovolume * speed) / sum(biovolume).
DepthSummary2 <- EachSize2 %>%
group_by(project, profile, time, depth) %>%
summarize(
vol = first(vol),
tot_TotParticles = sum(TotalParticles),
tot_nparticles = sum(nparticles),
tot_nnparticles = sum(n_nparticles),
tot_biovolume = sum(biovolume),
tot_flux = sum(flux),
tot_flux_fit = sum(flux_fit),
tot_speed = tot_flux/tot_biovolume
)
list(ES = EachSize2, DS = DepthSummary2)
}
## Separate parameters for small (< 530 micron) and big (> 530 micron)
## particles, split at the global ParticleSizeCutoff (0.5 mm lower bound).
# Adds small_*/big_* depth-level aggregate columns to DepthSummary; EachSize
# is returned unchanged. Expects EachSize to already carry the biovolume and
# flux columns produced by calc_particle_parameters().
calc_small_and_big <- function(x, DepthSummary = NULL){
# Allow passing in either a two element list of EachSize and DepthSummary, or passing in as two variables.
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
# Partition size bins by their lower bound relative to the cutoff
small <- EachSize %>%
filter(lb < ParticleSizeCutoff)
big <- EachSize %>%
filter(lb >= ParticleSizeCutoff)
# Depth-level aggregates for the small fraction
small2 <- small %>%
group_by(project, profile, time, depth) %>%
summarise(
small_TotParticles = sum(TotalParticles),
small_nparticles = sum(nparticles),
small_nnparticles = sum(n_nparticles),
small_biovolume = sum(biovolume),
small_flux = sum(flux),
small_flux_fit = sum(flux_fit),
small_speed = small_flux/small_biovolume
)
# Depth-level aggregates for the big fraction
big2 <- big %>%
group_by(project, profile, time, depth) %>%
summarise(
big_TotParticles = sum(TotalParticles),
big_nparticles = sum(nparticles),
big_nnparticles = sum(n_nparticles),
big_biovolume = sum(biovolume),
big_flux = sum(flux),
big_flux_fit = sum(flux_fit),
big_speed = big_flux/big_biovolume
)
# Join both sets of aggregates back onto the existing depth summary
DepthSummary2 <- left_join(DepthSummary, small2, by = c("project","profile", "time", "depth")) %>%
left_join(big2, by = c("project","profile", "time", "depth"))
list(ES = EachSize, DS = DepthSummary2)
}
## Size-spectrum model fitters shared by the calc_psd* functions below. Each
## models particle counts per size bin against log(size), with an offset of
## log(binsize * vol) so coefficients describe the normalized spectrum.
## Poisson GLM version.
fit_model = function(df) glm(TotalParticles ~ log(lb), offset = log(binsize * vol), data = df, family = "poisson")
## Negative binomial GLM; doesn't work (kept for reference).
fit_nb = function(df) MASS::glm.nb(TotalParticles ~ log(lb) + offset(log(vol * binsize)), data = df)
## Error-tolerant wrapper: returns list(result, error) instead of stopping.
safe_fit_nb <- safely(fit_nb)
# mgcv::gam handles the negative binomial family robustly. When tidying a gam
# fit one also needs to add the call `parametric = TRUE` to the tidy function.
fit_nb_2 <- function(df) gam(TotalParticles ~ log(lb), offset = log(binsize * vol), data = df, family = "nb")
### Calculate particle size distribution, intercept and slope
## Takes EachSize, a data frame of particle size specific stuff, and Depth Summary, which is depth specific stuff
## Returns a list with the above.
calc_psd <- function(x, DepthSummary = NULL){
# Fit the particle size distribution (fit_nb_2, negative binomial GAM)
# separately for every cast/depth, and add the intercept (icp) and slope (psd)
# of log-count vs log-size to the depth summary.
# Accepts either a two-element list of (EachSize, DepthSummary) or the two data frames.
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
psdCalc01 <- EachSize %>%
group_by(project, profile, time, depth) %>%
nest() %>%
# One model per depth; `parametric = TRUE` is needed to extract the
# parametric coefficients from a gam fit with broom::tidy.
mutate(model = map(data, fit_nb_2)) %>%
mutate(tidied = map(model, tidy, parametric = TRUE)) %>%
select(-data, -model) %>%
unnest(tidied) %>%
select(project, profile:estimate) %>%
# Reshape to one row per depth with `(Intercept)` and `log(lb)` columns.
spread(key = "term", value = "estimate") %>%
rename(icp = `(Intercept)`, psd = `log(lb)`)
DepthSummary2 <- left_join(DepthSummary, psdCalc01, by = c("project","profile", "time", "depth"))
list(ES = EachSize, DS = DepthSummary2)
}
# Get the particle size distribution of small particles
calc_small_psd <- function(x, DepthSummary = NULL){
# As calc_psd, but fit only size classes strictly below ParticleSizeCutoff;
# adds small_icp / small_psd columns to the depth summary.
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
#fit_model = function(df) glm(TotalParticles ~ log(lb), offset = log(binsize * vol), data = df, family = "poisson")
psdCalc01 <- EachSize %>%
filter(lb < ParticleSizeCutoff) %>%
group_by(project, profile, time, depth) %>%
nest() %>%
# One negative-binomial GAM per depth (see calc_psd for details).
mutate(model = map(data, fit_nb_2)) %>%
mutate(tidied = map(model, tidy, parametric = TRUE)) %>%
select(-data, -model) %>%
unnest(tidied) %>%
select(project, profile:estimate) %>%
spread(key = "term", value = "estimate") %>%
rename(small_icp = `(Intercept)`, small_psd = `log(lb)`)
DepthSummary2 <- left_join(DepthSummary, psdCalc01, by = c("project","profile", "time", "depth"))
list(ES = EachSize, DS = DepthSummary2)
}
# Get the particle size distribution of large particles
calc_big_psd <- function(x, DepthSummary = NULL){
# As calc_psd, but fit only size classes at or above ParticleSizeCutoff;
# adds big_icp / big_psd columns to the depth summary.
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
#fit_model = function(df) glm(TotalParticles ~ log(lb), offset = log(binsize * vol), data = df, family = "poisson")
psdCalc01 <- EachSize %>%
filter(lb >= ParticleSizeCutoff) %>%
group_by(project, profile, time, depth) %>%
nest() %>%
# One negative-binomial GAM per depth (see calc_psd for details).
mutate(model = map(data, fit_nb_2)) %>%
mutate(tidied = map(model, tidy, parametric = TRUE)) %>%
select(-data, -model) %>%
unnest(tidied) %>%
select(project, profile:estimate) %>%
spread(key = "term", value = "estimate") %>%
rename(big_icp = `(Intercept)`, big_psd = `log(lb)`)
DepthSummary2 <- left_join(DepthSummary, psdCalc01, by = c("project","profile", "time", "depth"))
list(ES = EachSize, DS = DepthSummary2)
}
## Old Way
calc_psd_gam <- function(x, DepthSummary = NULL){
# Old approach: smooth psd and icp over depth with a single GAM fitted to ALL
# casts pooled together ("slushes everything together" per the original note);
# superseded by calc_psd_gam_multiprofile, which fits each cast separately.
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
psd_gam_model <- gam(psd ~ s(depth), data = DepthSummary)
intercept_gam_model <- gam(icp ~ s(depth), data = DepthSummary)
psd_pred = predict(psd_gam_model, DepthSummary, se.fit = TRUE)
icp_pred = predict(intercept_gam_model, DepthSummary, se.fit = TRUE)
# Bind fits (psd_gam/icp_gam) and standard errors (psd_seg/icp_seg).
DepthSummary2 <- bind_cols(DepthSummary,
psd_gam= psd_pred$fit, psd_seg = psd_pred$se.fit,
icp_gam = icp_pred$fit, icp_seg = icp_pred$se.fit)
list(ES = EachSize, DS = DepthSummary2)
}
# Treat each cast seperately when calculating smooded particle size distributions
calc_psd_gam_multiprofile <- function(x, DepthSummary = NULL){
# Smooth psd and icp over depth with a GAM fitted separately for each cast
# (profile). Adds psd_gam/icp_gam (fits) and psd_seg/icp_seg (standard errors)
# to the depth summary.
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
# Nest one data frame per cast, then fit and predict within each.
DSN <- DepthSummary %>% group_by(project, profile, time) %>%
nest()
DSN$psd_gam_model <- map(DSN$data, ~ gam(psd ~ s(depth), data = .))
DSN$intercept_gam_model <- map(DSN$data, ~ gam(icp ~ s(depth), data = .))
DSN$psd_pred = map2(DSN$psd_gam_model, DSN$data, ~predict(.x, .y, se.fit = TRUE))
DSN$icp_pred = map2(DSN$intercept_gam_model, DSN$data, ~predict(.x, .y, se.fit = TRUE))
# Bind each cast's fits and standard errors back onto its depth rows.
DSN$DepthSummary2 <- pmap(
.l = list(DSN$data, DSN$psd_pred, DSN$icp_pred),
.f = function(DepthSummary, psd_pred, icp_pred){
bind_cols(DepthSummary,
psd_gam= psd_pred$fit, psd_seg = psd_pred$se.fit,
icp_gam = icp_pred$fit, icp_seg = icp_pred$se.fit)
}
)
DS2 <- DSN %>%
select(project, profile, time, DepthSummary2) %>% unnest(cols = c(DepthSummary2))
# The per-profile models are collected here but, per the commented-out return
# below, are not currently returned -- `mods` is effectively unused.
mods = DSN$psd_gam_model
names(mods) <- DSN$profile
#list(out = list(ES = EachSize, DS = DS2), psd_gam_mod = mods)
list(ES = EachSize, DS = DS2)
}
# predict total particles for each size class from the gam
pred_tp_gam <- function(x, DepthSummary = NULL){
  # Predict the total particle count of every size class from the GAM-smoothed
  # spectrum parameters (icp_gam + psd_gam * log(lb)), scaled back up by the
  # sampled volume and the bin lower bound; stored as GamPredictTP on EachSize.
  x2 <- parse_jac_input2(x, DepthSummary)
  EachSize = x2[[1]]
  DepthSummary = x2[[2]]
  EachSize2 <- DepthSummary %>% select(project, profile, time, depth, icp_gam, psd_gam) %>%
    right_join(EachSize, by = c("project","profile", "time", "depth")) %>%
    mutate(GamPredictTP = vol * lb * (exp(icp_gam + log(lb) * psd_gam))) %>%
    # BUG FIX: was select(-icp_gam, psd_gam); mixing a negative with a bare
    # positive selection keeps (and merely relocates) psd_gam, so the temporary
    # column leaked into the output. Both helper columns should be dropped.
    select(-icp_gam, -psd_gam)
  list(ES = EachSize2, DS = DepthSummary)
}
### Predicted quantiles of particle numbers from gam
## Helper functions
## Poisson GLM refit used by quantilater on the resampled (rp) counts.
fit_model_secondary = function(df) glm(rp ~ log(lb), offset = log(binsize * vol), data = df, family = "poisson")
## quietly() captures warnings/messages instead of printing them.
pos_fit_model_secondary = quietly(fit_model_secondary)
quantilater <- function(df, niter = 10, q1 = 0.025, q2 = 0.975){
# Parametric-bootstrap quantiles of the size-spectrum slope: draw Poisson
# counts around each GAM-predicted total (niter draws), refit the spectrum to
# every draw, and summarize the distribution of the log(lb) slope.
# NOTE(review): the q1/q2 arguments are never used -- the quantiles below are
# hard-coded at 0.05 and 0.95. Confirm which pair was intended.
t2Test00 <- df
# One Poisson draw data frame per size class, then one nested data set per iteration.
t2Test01 <- t2Test00 %>% mutate(poisDraw = map(GamPredictTP, ~data.frame(iter = 1:niter, rp = rpois(niter, .)))) %>%
unnest(poisDraw)
t2Test02 <- t2Test01 %>% group_by(iter) %>% nest()
# Refit the spectrum to each draw; quietly() keeps fit warnings out of the console.
t2Test03 <- t2Test02 %>% mutate(modelAndWarnings = map(data, pos_fit_model_secondary)) %>%
mutate(model = map(modelAndWarnings, ~.[[1]]))
t2Test04 <- t2Test03 %>% mutate(tidied = map(model, tidy))
t2Test05 <- t2Test04 %>% select(iter,tidied)%>% unnest(tidied) %>% select(iter, estimate, term) %>% spread(value = "estimate", key = "term")
t2Test06 <- t2Test05 %>% ungroup() %>% summarise(qpt05 = quantile(`log(lb)`, probs = 0.05), qpt95 = quantile(`log(lb)`, probs = .95))
t2Test06
}
## I don't do this analyis in the paper, but it was for deciding whether bins with zero particles are likely from some
## non power law particle size distribution function or not.
tp_quantiles <- function(x, DepthSummary = NULL, niter = 10){
# Bootstrap quantiles of the spectrum slope for every cast/depth (see
# quantilater); attaches the qpt05/qpt95 columns to the depth summary.
# Requires GamPredictTP on EachSize (added by pred_tp_gam).
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
t2Calc01 <- EachSize %>% select(project, profile, time, depth, binsize, lb, vol, GamPredictTP) %>%
# NOTE(review): vol2 duplicates vol and does not appear to be used downstream -- confirm.
mutate(vol2 = vol) %>%
group_by(project, profile, time, depth) %>%
nest(data = c(binsize, lb, vol, GamPredictTP))
#t2Calc02 <- t2Calc01 %>% mutate(quantiles = future_map(data, quantilater, niter = niter))
# the above doesn't work, trying an alternative: run future_map (parallel map)
# over the list column directly, then attach the results as a new column.
future_map(t2Calc01[["data"]][], quantilater, niter = niter) -> moo
t2Calc02 <- t2Calc01
t2Calc02$quantiles = moo
t2Calc03 <- t2Calc02 %>% select(-data) %>% unnest(quantiles)
DepthSummary2 <- DepthSummary %>% left_join(t2Calc03, by = c("project","profile", "time", "depth"))
return(list(ES = EachSize, DS = DepthSummary2))
}
### Binning
## Daniele's scheme
#unique(c(0:20:100, 100:25:200, 200:50:1000, 1000:100:2000, 2000:200:5000))
# Depth-bin edges (m): resolution coarsens with depth, from 20 m steps near the
# surface to 200 m steps below 2000 m. unique() removes the duplicated edges
# where the sub-sequences meet (100, 200, 1000, 2000).
BianchiBins <- unique(c(
  seq(from = 0, to = 100, by = 20),
  seq(from = 100, to = 200, by = 25),
  seq(from = 200, to = 1000, by = 50),
  seq(from = 1000, to = 2000, by = 100),
  seq(from = 2000, to = 5600, by = 200)
))
## Go from highly resoved, to binned data
bin_depths <- function(x, DepthSummary = NULL, bins = BianchiBins){
  # Collapse high-resolution depths into bins (default: BianchiBins). Raw
  # counts and sampled volumes are summed within each (bin, size class) and
  # the concentrations recalculated, so binning is volume-weighted.
  # Depths outside range(bins) fall into an NA bin (cut() behavior).
  x2 <- parse_jac_input2(x, DepthSummary)
  EachSize = x2[[1]]
  DepthSummary = x2[[2]]
  # Bin edges -> midpoints used as the new depth values.
  # (The original `bins[1:length(bins)-1]` only worked by accident:
  # `1:length(bins)-1` is 0:(n-1), and R silently drops the 0 index.)
  dlb <- bins[-length(bins)]
  dub <- bins[-1]
  mids = (dlb + dub)/2
  EachSize2 <- EachSize %>% mutate(DepthBin = cut(depth, bins, labels = mids))
  # Sum raw counts and volume per depth bin and size class; drop the per-depth
  # concentration columns, which are no longer valid after pooling.
  EachSize3 <- EachSize2 %>%
    select(-nparticles, -n_nparticles, -depth) %>%
    group_by(project, profile, time, DepthBin, lb, ub, binsize) %>%
    summarize(vol = sum(vol), TotalParticles = sum(TotalParticles)) %>%
    ungroup()
  # Recalculate nparticles and n_nparticles from the binned totals.
  EachSize4 <- EachSize3 %>%
    mutate(nparticles = TotalParticles/vol,
           n_nparticles = nparticles/binsize) %>%
    mutate(depth = as.numeric(as.character(DepthBin))) %>%
    select(-DepthBin)
  # Rebuild the depth-summary skeleton on the binned depths.
  DepthSummary2 <- DepthSummary %>%
    mutate(DepthBin = cut(depth, bins, labels = mids)) %>%
    group_by(project, profile, time, DepthBin) %>%
    summarize() %>%
    ungroup() %>%
    mutate(depth = as.numeric(as.character(DepthBin))) %>%
    select(-DepthBin)
  return(list(ES = EachSize4, DS = DepthSummary2))
}
# average profiles by summing TotalParticles and volume and then recalculating
sum_profiles <- function(x, DepthSummary = NULL){
  # Average casts together by summing TotalParticles and sampled volume across
  # profiles (per project/depth/size class) and recalculating concentrations.
  x2 <- parse_jac_input2(x, DepthSummary)
  EachSize = x2[[1]]
  DepthSummary = x2[[2]]
  # Combine the profiles, dropping the per-profile concentration columns.
  EachSize3 <- EachSize %>%
    select(-nparticles, -n_nparticles) %>%
    group_by(project, depth, lb) %>%
    summarize(vol = sum(vol), TotalParticles = sum(TotalParticles), ub = first(ub), binsize = first(binsize)) %>%
    ungroup()
  # Recalculate nparticles and n_nparticles from the combined totals.
  EachSize4 <- EachSize3 %>%
    mutate(nparticles = TotalParticles/vol,
           n_nparticles = nparticles/binsize)
  # Collapse the depth summary to match, labelling the merged cast "multiple".
  # BUG FIX: this summary was previously computed and then discarded (the
  # function returned the original, per-profile DepthSummary).
  # NOTE(review): the grouping keeps `time`, which EachSize4 no longer
  # carries -- confirm whether time should also be dropped here.
  DepthSummary2 <- DepthSummary %>%
    group_by(project, time, depth) %>%
    summarize() %>%
    ungroup() %>%
    mutate(profile = "multiple")
  return(list(ES = EachSize4, DS = DepthSummary2))
}
# combine_timesteps <- function(x, DepthSummary = NULL){
# # Load in data, flexably
# x2 <- parse_jac_input2(x, DepthSummary)
# EachSize = x2[[1]]
# DepthSummary = x2[[2]]
#
# # consistency with other expressions
# EachSize2 <- EachSize
#
# EachSize3 <- EachSize2 %>%
# select(-nparticles, -n_nparticles) %>%
# group_by(project, profile, time, DepthBin, lb, ub, binsize) %>%
# summarize(vol = sum(vol), TotalParticles = sum(TotalParticles)) %>%
# ungroup()
#
# }
## 24 November 2020
# my_double_gam <- function(df){
# gam(TotalParticles ~s(log(lb), log(depth)), offset = log(vol * binsize), family = nb(), data = df)
# }
#
# safe_double_gam <- safely(my_double_gam)
## The following functions are never used as far as I can tell, but if I delete them, they'll probably turn
## out to be required somewhere cryptic and break everything, so here they stay.
## I'm not sure what they do
expand_with_gam <- function(df, mod){
  # Predict on the link scale with standard errors, form +/- 2 SE bounds, and
  # back-transform (exp) to the response scale; bind the predictions onto df.
  preds <- predict(mod, type = "link", se.fit = TRUE) %>%
    as.data.frame() %>%
    mutate(lower = fit - 2 * se.fit,
           upper = fit + 2 * se.fit) %>%
    mutate(resp_fit = exp(fit),
           resp_lower = exp(lower),
           resp_upper = exp(upper))
  bind_cols(df, preds)
}
gam_size_ggplot <- function(df){
# Size spectrum for one depth: fitted values (open circles) with error bars
# from resp_lower/resp_upper, over the observed normalized concentrations
# (filled points); log-log axes. Expects the columns added by expand_with_gam.
ggplot(df, aes(x = lb)) + geom_point(aes(y = resp_fit), shape = 1) +
geom_errorbar(aes(ymin = resp_lower, ymax = resp_upper)) +
geom_point(aes(y = n_nparticles)) + scale_x_log10() + scale_y_log10()
}
gam_size_ggplot_2d <- function(df){
# Fitted concentration (resp_fit) vs depth, one connected path per size class,
# colored by log size; depth increases downward. Vertical guides at x = 1 and
# x = 5; horizontal error bars from resp_lower/resp_upper.
df %>% ggplot(aes(x = resp_fit, y = depth, col = log(lb), group = lb)) +
scale_y_reverse() + geom_point() + scale_x_log10(limits = c(10^-8, NA)) +
scale_color_viridis_c() + geom_path() + geom_vline(xintercept = 1) + geom_vline(xintercept = 5) +
geom_errorbar(aes(xmin = resp_lower, xmax = resp_upper), width = 10, alpha = 0.5) + theme_bw()
}
nnp_size_ggplot_2d <- function(df){
# As gam_size_ggplot_2d, but plots the observed normalized concentrations
# (n_nparticles) rather than the fitted values, and without error bars.
df %>% ggplot(aes(x = n_nparticles, y = depth, col = log(lb), group = lb)) +
scale_y_reverse() + geom_point() + scale_x_log10(limits = c(10^-8, NA)) +
scale_color_viridis_c() + geom_path() + geom_vline(xintercept = 1) + geom_vline(xintercept = 5) +
theme_bw()
}
##Here's a commented out function. I think a newer version is defined below.
# double_gam_smooth <- function(x, DepthSummary = NULL){
# # Input from twin please.
# #Allow passing in either a two elemet list of Eachsize and DepthSummary, or passing in as two variables.
#
# x2 <- parse_jac_input2(x, DepthSummary)
# EachSize = x2[[1]]
# DepthSummary = x2[[2]]
#
# withGamFit <- EachSize %>% group_by(project) %>% nest() %>%
# mutate(mod = map(data, safe_double_gam),
# modOnly = map(mod, ~.[[1]]),
# pred = map2(modOnly, data, safely(predict), se.fit = TRUE),
# predOnly = map(pred, ~.[[1]]),
# data01 = map2(data, predOnly,
# ~bind_cols(.x, link = .y$fit, lse = .y$se.fit))) %>%
# select(project, data01) %>%
# unnest(data01) %>%
# mutate(link_lower = link - lse,
# link_upper = link + lse,
# nnp_smooth = exp(link),
# nnp_lower = exp(link_lower),
# nnp_upper = exp(link_upper),
# np_smooth = nnp_smooth * binsize,
# tp_smooth = np_smooth * vol,
# flux_smooth = np_smooth * (C_f_global * lb ^ ag_global)
# )
#
# TotalStuff <- withGamFit %>% group_by(project, profile, time, depth) %>%
# summarize(smooth_TotParticles = sum(tp_smooth),
# smooth_nparticles = sum(np_smooth),
# smooth_nnparticles = sum(nnp_smooth),
# smooth_flux_fit = sum(flux_smooth)
# )
#
# DepthSummary_B <- left_join(DepthSummary, TotalStuff, by = c("project", "profile", "time", "depth"))
#
# return(list(ES = withGamFit, DS = DepthSummary_B))
#
# }
## Functions for smoothing data, using size and depth informaiton together
## originally coded in SmoothsAndFluxRevisited
## I loose at DRY coding, but also if I try to fix this, it will break everything and I'll spend a week fixing things.
## Joint smooth of counts over size and depth, with a separate surface per cast
## (by = factor(profile)); negative binomial family with an exposure offset of
## log(vol * binsize) so the smooth describes the normalized spectrum.
my_double_gam <- function(df){
gam(TotalParticles ~s(log(lb), log(depth), by = factor(profile)), offset = log(vol * binsize), family = nb(), data = df)
}
## Error-tolerant wrapper: returns list(result, error) instead of stopping.
safe_double_gam <- safely(my_double_gam)
double_gam_smooth <- function(x, DepthSummary = NULL){
# Smooth particle counts jointly over size and depth with the 2-D GAM
# (my_double_gam, one smooth per cast), then derive smoothed concentrations
# (nnp/np), counts (tp) and fluxes per size class, plus depth-level totals.
# Accepts either a two-element list of (EachSize, DepthSummary) or the two data frames.
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
withGamFit <- EachSize %>% group_by(project) %>% nest() %>%
mutate(mod = map(data, safe_double_gam),
modOnly = map(mod, ~.[[1]]),
pred = map2(modOnly, data, safely(predict), se.fit = TRUE),
# NOTE(review): predOnly is computed but data01 reads from `pred` via
# $result instead -- predOnly appears to be unused.
predOnly = map(pred, ~.[[1]]),
data01 = map2(data, pred,
~bind_cols(.x, link = .y$result$fit, lse = .y$result$se.fit))) %>%
select(project, data01) %>%
unnest(data01) %>%
# Back-transform from the link scale; bounds are +/- 1 standard error.
mutate(link_lower = link - lse,
link_upper = link + lse,
nnp_smooth = exp(link),
nnp_lower = exp(link_lower),
nnp_upper = exp(link_upper),
np_smooth = nnp_smooth * binsize,
tp_smooth = np_smooth * vol,
flux_smooth = np_smooth * (C_f_global * lb ^ ag_global)
)
# Depth-level totals of the smoothed quantities.
TotalStuff <- withGamFit %>% group_by(project, profile, time, depth) %>%
summarize(smooth_TotParticles = sum(tp_smooth),
smooth_nparticles = sum(np_smooth),
smooth_nnparticles = sum(nnp_smooth),
smooth_flux_fit = sum(flux_smooth)
)
DepthSummary_B <- left_join(DepthSummary, TotalStuff, by = c("project", "profile", "time", "depth"))
return(list(ES = withGamFit, DS = DepthSummary_B))
}
## Sometimes I want to run things on just one profile, especially when building functions that I will later
## run over every profile. This filters out just one profile (aka cast)
filter_profile <- function(x, DepthSummary = NULL, profile = "stn_043"){
  # Keep just one cast and strip the identifying columns, so single-profile
  # functions can be developed before mapping them over every cast.
  # Copy the argument before it collides with the `profile` COLUMN in filter().
  target <- profile
  parts <- parse_jac_input2(x, DepthSummary)
  es <- parts[[1]] %>% ungroup()
  ds <- parts[[2]] %>% ungroup()
  es <- es %>%
    filter(profile == target) %>%
    select(-c(project, profile, time))
  ds <- ds %>%
    filter(profile == target) %>%
    select(-c(project, profile, time))
  return(list(ES = es, DS = ds))
}
## This calls the eularian prism model and calculates a bunch of relevant parameters
diagnose_disaggregation_one_profile <- function(x, DepthSummary = NULL){
# Run the eulerian prism model down one cast and compute disaggregation
# diagnostics. Requires the smoothed columns from double_gam_smooth
# (np_smooth, nnp_smooth, flux_smooth / smooth_flux_fit) and a single profile
# (e.g. via filter_profile). Cm, Cw, alpha, gamma, optFun and
# remin_smooth_shuffle are defined elsewhere in this project.
## Preamble: unpack input and build the size grid the model needs.
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
# Size-class lower bounds plus per-class mass (m_vec) and speed (w_vec) vectors.
lb_vec <- sort(unique(EachSize$lb))
m_vec = Cm * lb_vec ^ alpha;
w_vec = Cw * lb_vec ^ gamma;
# Lower edge of the smallest class, extrapolated half a bin width down.
llb_01 <- little_lb <- lb_vec[1] - (lb_vec[2] - lb_vec[1])/2
# One nested spectrum (size classes with smoothed quantities) per depth.
specData <- EachSize %>% select(depth, lb, np_smooth, nnp_smooth, flux_smooth) %>%
nest(spec_meta = c(lb, np_smooth, nnp_smooth, flux_smooth))
# For each depth, carry the previous depth's spectrum and flux so the model can
# step downward; DFP is the flux ratio between adjacent depths.
preparedData <- DepthSummary %>% left_join(specData, by = c("depth")) %>%
arrange(depth) %>%
mutate(spec_only = map(spec_meta, ~pull(., np_smooth)),
spec_prev = lag(spec_only),
flux_prev = lag(smooth_flux_fit),
DF = smooth_flux_fit - flux_prev,
#DFP = 1 - DF/flux_prev, # I was using this for a while.
DFP = smooth_flux_fit/flux_prev,
depth_prev = lag(depth),
DZ = depth - depth_prev,
)
# The shallowest depth has no previous spectrum; set it aside and re-attach later.
minDepth = min(preparedData$depth)
saveFirstDepth = preparedData %>% filter(depth == minDepth) %>% select(depth, spec_meta) %>% unnest(spec_meta)
# For each depth step: choose the effective flux ratio via optFun, then predict
# the next spectrum from the previous one with remin_smooth_shuffle.
modelRun <- preparedData %>%
.[-1,] %>%
# fix flux leak here
mutate(use_this_DFP = map2_dbl(spec_prev, DFP, optFun, lbv = lb_vec, mv = m_vec, wv = w_vec, llb = llb_01, alpha = alpha_global, gamma = gamma_global)) %>%
mutate(spec_pred = map2(spec_prev, use_this_DFP, remin_smooth_shuffle, lbv = lb_vec, mv = m_vec, wv = w_vec, llb = llb_01, alpha = alpha_global, gamma = gamma_global))
#modelRunFixLine1 <- bind_rows(preparedData[1,], modelRun)
# Fold the previous and predicted spectra into the nested per-depth tables.
modelConcise <- modelRun %>%
mutate(spec_meta = map2(spec_meta, spec_prev, ~tibble(.x, np_prev = .y))) %>%
mutate(spec_meta = map2(spec_meta, spec_pred, ~tibble(.x, np_pred = .y))) %>%
select(depth, depth_prev, DZ, DF, DFP, use_this_DFP, spec_meta)
modelUnnest <- modelConcise %>%
#select(depth, spec_meta) %>%
unnest(spec_meta) %>%
ungroup()
modelUnnestWithFirstDepth <- bind_rows(saveFirstDepth, modelUnnest)
# Convert previous/predicted concentrations to fluxes via the fitted flux law.
modelPostCalc <- modelUnnestWithFirstDepth %>%
mutate(
flux_prev = np_prev * (C_f_global * lb ^ ag_global),
flux_pred = np_pred * (C_f_global * lb ^ ag_global)
)
# Depth totals over all size classes...
Tot <- modelPostCalc %>%
group_by(depth) %>%
summarize(depth_prev = first(depth_prev), DZ = first(DZ) ,DF = first(DF), DFP = first(DFP), use_this_DFP = first(use_this_DFP),
Flux = sum(flux_smooth),
Flux_Prev = sum(flux_prev),
Flux_Pred = sum(flux_pred))
# ...and separately for small and big classes.
# NOTE(review): "small" here is lb <= ParticleSizeCutoff, whereas
# calc_small_and_big uses lb < ParticleSizeCutoff -- confirm where the
# boundary class should fall.
Small <- modelPostCalc %>%
filter(lb <= ParticleSizeCutoff) %>%
group_by(depth) %>%
summarize(DF = first(DF), DFP = first(DFP),
Flux = sum(flux_smooth),
Flux_Prev = sum(flux_prev),
Flux_Pred = sum(flux_pred))
Big <- modelPostCalc %>%
filter(lb > ParticleSizeCutoff) %>%
group_by(depth) %>%
summarize(DF = first(DF), DFP = first(DFP),
Flux = sum(flux_smooth),
Flux_Prev = sum(flux_prev),
Flux_Pred = sum(flux_pred))
# Diagnostics: osps = smoothed small flux minus predicted small flux,
# obpb = predicted big flux minus smoothed big flux, ospsDZ = osps per meter.
All <- Tot %>%
left_join(Small, by = "depth", suffix = c("", "_Small")) %>%
left_join(Big, by = "depth", suffix = c("", "_Big")) %>%
mutate(osps = Flux_Small - Flux_Pred_Small,
obpb = Flux_Pred_Big - Flux_Big,
ospsDZ = osps/DZ
)
DepthSummary_B <- DepthSummary %>%
left_join(All, by = "depth") %>% rename(Flux_Smooth = Flux)
# Per-size-class previous/predicted fluxes back onto EachSize.
modelReduced <- modelPostCalc %>% select(
depth, lb, flux_prev, flux_pred
)
EachSize_B <- EachSize %>%
left_join(modelReduced, by = c("depth", "lb"))
return(list(ES = EachSize_B, DS = DepthSummary_B))
}
## The eulerian prism code sometimes errors; purrr::safely wraps it so a
## failing cast yields list(result = NULL, error = <condition>) instead of
## aborting the whole multi-profile run.
diagnose_disaggregation_one_profile_safe <- safely(diagnose_disaggregation_one_profile)
## Run diagnose_disaggregation_one_profile_safe over every profile and keep track of data
diagnose_disaggregation<- function(x, DepthSummary = NULL){
# Run the disaggregation diagnostic over every cast: nest EachSize and
# DepthSummary by cast, run the safe single-profile version on each pair, and
# unnest the results back into flat tables.
# NOTE(review): errors are captured into ESDS_Err but never reported; and a
# NULL result would make the ~.[[1]] extraction below fail -- this code
# presumably assumes every cast's model run succeeds. Confirm.
## Preamble
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
# One nested row per cast, for each of the two tables.
ESN <- EachSize %>%
group_by(project, profile, time) %>%
nest() %>%
rename(ES = data)
DSN <- DepthSummary %>%
group_by(project, profile, time) %>%
nest() %>%
rename(DS = data)
metaNest <- left_join(ESN, DSN, by = c("project", "profile", "time"))
# Pair each cast's tables into the two-element list the model function expects.
metaNest <- metaNest %>% mutate(ESDS = map2(ES, DS, ~list(.x, .y)))
metaNest <- metaNest %>%
mutate(ESDS_Mod_Safe = map(ESDS, diagnose_disaggregation_one_profile_safe))
# Split safely() output into result/error, then the result into ES/DS parts.
metaNest01 <- metaNest %>% mutate(ESDS_Mod = map(ESDS_Mod_Safe, ~.[[1]]),
ESDS_Err = map(ESDS_Mod_Safe, ~.[[2]]),
ES01 = map(ESDS_Mod, ~.[[1]]),
DS01 = map(ESDS_Mod, ~.[[2]]))
EachSize01 <- metaNest01 %>% select(project, profile, time, ES01) %>% unnest(ES01)
DepthSummary01 <- metaNest01 %>% select(project, profile, time, DS01) %>% unnest(DS01)
return(list(ES = EachSize01, DS = DepthSummary01))
}
## Functions about fitting flux
fit_flux_es <- function(C_f, ag, ES){
  # Add the candidate flux-law prediction per size class:
  # flux2 = C_f * nparticles * lb^ag.
  ES %>% mutate(flux2 = C_f * nparticles * lb ^ ag)
}
fit_flux_ds <- function(C_f, ag, ES, DS){
  # Sum the per-size-class fitted flux (flux2, added by fit_flux_es) to one
  # total per cast/depth and attach it to the depth summary.
  # C_f and ag are unused here; they are kept so the signature parallels
  # fit_flux_es and fit_flux (callers pass them positionally).
  DS2 <- ES %>%
    group_by(project, profile, time, depth) %>%
    summarize(tot_flux2 = sum(flux2))
  # Return the joined table explicitly: the original ended on an assignment,
  # which returns its value invisibly.
  left_join(DS, DS2, by = c("project", "profile", "time", "depth"))
}
fit_flux <- function(C_f, ag, ES, DS){
  # Apply the candidate flux law to the size classes, then roll the per-class
  # fluxes up to depth totals on the depth summary.
  es_with_flux <- fit_flux_es(C_f, ag, ES)
  ds_with_flux <- fit_flux_ds(C_f, ag, es_with_flux, DS)
  return(list(ES = es_with_flux, DS = ds_with_flux))
}
RMSE <- function(mod){
  # Root-mean-square error of a fitted model's residuals.
  # mod: any object with a numeric `residuals` component (e.g. an lm/glm fit).
  rss <- c(crossprod(mod$residuals))  # residual sum of squares
  mse <- rss / length(mod$residuals)
  # Return the value explicitly: the original ended on an assignment, which
  # returns invisibly (the result would not auto-print at the console).
  sqrt(mse)
}
flux_check <- function(DS){
  # Log-space RMSE between the reference flux (tn_flux) and the candidate
  # fitted flux (tot_flux2) on the depth summary; used as the objective when
  # optimizing the flux-law parameters.
  #log_diff = log10(DS$tn_flux) - log10(DS$tot_flux2)
  log_diff <- log(DS$tn_flux) - log(DS$tot_flux2)  # renamed: `diff` shadowed base::diff
  squares <- log_diff^2
  rss <- sum(squares)
  # BUG FIX: the original divided by length(rss), which is always 1, so it
  # returned sqrt(RSS) rather than the root *mean* squared error. (For a fixed
  # number of depths this is a monotone rescaling, so optimization results are
  # unchanged, but the reported value was not an RMSE.)
  mse <- rss / length(log_diff)
  sqrt(mse)
}
fit_check_flux <- function(C_f, ag, ES, DS){
  # Fit the flux law with the candidate parameters, then score the fit
  # (log-space RMSE against the reference flux in the depth summary).
  fitted <- fit_flux(C_f, ag, ES, DS)
  flux_check(fitted$DS)
}
fc_wrap <- function(x, ES, DS){
  # Adapter for optimizers that pass parameters as one vector: x = c(C_f, ag).
  fit_check_flux(C_f = x[1], ag = x[2], ES = ES, DS = DS)
}
## Recode time variable
# add_time_data <- function(x, DepthSummary = NULL){
# require(chron)
# require(lubridate)
# x2 <- parse_jac_input2(x, DepthSummary)
# EachSize = x2[[1]]
# DepthSummary = x2[[2]]
#
# timeDf <- tibble(time = unique(DepthSummary$time)) %>%
# mutate(tod <- times(strftime(time,"%H:%M:%S")))
#
# # Recode into blocks of time
#
# # create breaks
# breaks <- hour(hm("21:00", "5:00", "9:00", "18:00", "20:59"))
# # labels for the breaks
# labels <- c("Night", "Morning", "Afternoon", "Evening")
#
# timeDf <- timeDf %>%
# mutate(timeBlock = cut(x=hour(time), breaks = breaks, labels = labels, include.lowest=TRUE)
# )
#
# # hours from noon
#
# hour(timeDf$tod)
#
#
#
# return(list(ES = EachSize, DS = DepthSummary))
# } | /UVP_2017_library.R | permissive | cramjaco/POMZ-ETNP-UVP-2017 | R | false | false | 35,282 | r | ### Load stuff
library(transformr)
library(tidyverse)
library(mgcv)
library(broom)
library(furrr)
library(lubridate)
# Identity helper: returns its first argument unchanged and ignores the rest.
# Used as an inert terminator at the end of pipe chains so trailing %>% steps
# can be commented out without breaking the chain.
pass <- function(x, ...) x
## Size-bin lower bound separating "small" from "big" particles.
## NOTE(review): comments elsewhere in this file say 530 micron; the value here
## is 0.5 -- confirm which is intended.
ParticleSizeCutoff <- 0.5
### Function to load in initial data and prepare a data frame.
## Define global parameters of the flux law (flux_fit = nparticles * C_f * lb^ag).
C_f = 133 # SmoothsAndFlusRevisited.Rmd, calculated from trap flux
ag = 2.00 # SmoothsAndFlusRevisited
alpha = (ag + 1) / 2 # assuming spherical drag profile
gamma = alpha - 1
## *_global aliases are the names referenced inside the functions below.
C_f_global <- C_f
alpha_global <- alpha
gamma_global <- gamma
ag_global <- ag
# Get the times of the CTD casts
CTD_Unique <- read_csv("data/SKQ201617S_CTD_Profile.csv") %>%
filter(Station == "sta016") %>%
# DateJ/TimeJ are parsed but only the combined DateTimeJ is used below.
mutate(DateJ = mdy(`mon/dd/yyyy`), TimeJ = hms(`hh:mm`)) %>%
mutate(DateTimeJ = mdy_hms(paste(`mon/dd/yyyy`, `hh:mm`))) %>%
select(time = DateTimeJ, depth = `Pressure [db]`) %>%
# One row per cast: its start time and the maximum depth reached.
group_by(time) %>% summarize(MaxDepth = max(depth))
## Times on the UVP are the time it was switched on, not the time the cast started. I will find the next time of the cast
Find_Next_Cast <- function(LTime){
# Return the earliest CTD cast start time strictly after LTime (reads the
# file-level CTD_Unique table).
# NOTE(review): if no later cast exists, min() over an empty set returns Inf
# with a warning -- confirm this cannot happen for UVP switch-on times.
NextTime <- min(CTD_Unique$time[CTD_Unique$time - LTime > 0]) # Find the next cast
NextTime
}
# Vectorized version: applies Find_Next_Cast element-wise.
Find_Next_Cast_2 <- Vectorize(Find_Next_Cast)
# Vectorize() drops the date-time attributes from the result; copy them back
# from the input so the output is still a date-time vector.
Find_Next_Cast_3 <- function(LTimeVec){
NextVec <- Find_Next_Cast_2(LTimeVec)
attributes(NextVec) <- attributes(LTimeVec)
NextVec
}
bring_in_p2 <- function(){
# Read the ETNP (site 016) UVP particle files listed in the cruise metadata,
# reshape to long format (one row per depth x size class), convert the size
# class labels to numeric lower/upper bounds, and compute raw counts and
# normalized concentrations.
# bring in metadata that specifies relevant files
uvpMeta <- read_tsv("data/uvpdata/export_detailed_20190304_23_14_Export_metadata_summary.tsv") %>% rename(time = `yyyy-mm-dd hh:mm`) %>% arrange(time)
# just take p2 data
uvpMetaP2 <- uvpMeta %>% filter(Site == "016")
# bring in particle data: read every listed file and stack them
uvp_data_path <- "data/uvpdata"
particleData <- uvpMetaP2 %>% pull(`Particle filename`) %>%
map(~read_tsv(file.path(uvp_data_path, .), locale = locale(encoding = "latin1", tz="UTC"))) %>%
reduce(rbind)
# some initial processing: select and rename, then reshape long over size classes
particleNumbers <- particleData %>%
select(profile = Profile, time = `yyyy-mm-dd hh:mm`, depth = `Depth [m]`, vol = `Sampled volume[L]`, `LPM (102-128 µm)[#/L]`:`LPM (>26 mm)[#/L]`) %>%
gather(key = "sizeclass", value = "nparticles", `LPM (102-128 µm)[#/L]`:`LPM (>26 mm)[#/L]`) %>%
# timezone conversion (commented out); instead, recode each UVP switch-on
# time to the start time of the following CTD cast.
#mutate(time = lubridate::with_tz(time, tzone = "Mexico/General"))
mutate(time = Find_Next_Cast_3(time)) %>%
pass()
# Parse the size-class labels into numeric lower/upper bounds.
classData <- particleNumbers %>% select(sizeclass) %>% unique() %>%
mutate(lb0 = as.numeric(str_extract(sizeclass,"(?<=\\().*(?=-)")),
ub0 = str_extract(sizeclass, "(?<=-).*(?=\\s)") %>% as.numeric(),
#unit0 = str_extract(sizeclass, "(?<=\\s).(?=m)"),
ismm = str_detect(sizeclass, "mm"),
# micron-labelled bins are divided by 1000 so all bounds share one unit
lb1 = if_else(ismm, lb0, lb0 / 1000),
ub1 = if_else(ismm, ub0, ub0 / 1000),
# open-ended largest bin (">26 mm"): upper bound 32 is arbitrary
lb2 = if_else(str_detect(sizeclass, "\\>26"),26, lb1),
ub2 = if_else(str_detect(sizeclass, "\\>26"),32, ub1) # arbitrary
) %>%
select(sizeclass, lb = lb2, ub = ub2)
# Attach the numeric bounds and derive counts/normalized concentrations.
particleNumbers01 <- left_join(particleNumbers, classData, by = "sizeclass") %>%
mutate(TotalParticles = nparticles * vol) %>%
mutate(binsize = ub - lb) %>%
mutate(n_nparticles = nparticles / binsize)
particleNumbers001 <- particleNumbers01 %>% select(profile, time, depth, vol, sizeclass, lb, ub, binsize, TotalParticles, nparticles, n_nparticles)
particleNumbers001
}
### Import P16 station 100
bring_in_p16_s100 <- function(){
# Read UVP particle data for P16 station 100, reshape long, convert the size
# class labels to numeric bounds, merge the empty smallest bin into its
# neighbor, and compute counts and normalized concentrations.
# NOTE(review): tidyverse is already attached at the top of the file; this
# require() is redundant.
require(tidyverse)
## initial read in
s100data00 <- read_tsv("data/p16uvp/export_detailed_20190908_00_44_PAR_p16n_100.tsv", locale = locale(encoding = "latin1"))
## just the data we need, reshaped long over size classes
s100data01 <- s100data00 %>%
select(profile = Profile, time = `yyyy-mm-dd hh:mm`, depth = `Depth [m]`, vol = `Sampled volume [L]`, `LPM (102-128 µm) [# l-1]`:`LPM (>26 mm) [# l-1]`) %>%
gather(key = "sizeclass", value = "nparticles", `LPM (102-128 µm) [# l-1]`:`LPM (>26 mm) [# l-1]`)
## Convert bin names to numeric bounds
renDf00 <- s100data01 %>% select(sizeclass) %>% unique()
renDf01 <- renDf00 %>%
mutate(
##bounds, ignoring microns or mm designation
lb0 = str_extract(sizeclass, "(?<=\\().*(?=-)") %>% str_extract( ".*(?=-)") %>% as.numeric(),
ub0 = str_extract(sizeclass, "(?<=-).*(?=\\s)") %>% str_extract(".*(?=\\s)") %>% str_extract(".*(?=\\s)") %>% as.numeric(),
## test for mm
ismm = str_detect(sizeclass, "mm"),
## put everything on one unit scale (micron-labelled bins divided by 1000)
lb1 = if_else(ismm, lb0, lb0 / 1000),
ub1 = if_else(ismm, ub0, ub0 / 1000),
## deal with the open-ended biggest size bin (">26 mm")
lb2 = if_else(str_detect(sizeclass, "\\>26"),26, lb1),
ub2 = if_else(str_detect(sizeclass, "\\>26"),32, ub1) # arbitrary
) %>%
select(sizeclass, lb = lb2, ub = ub2)
## join the numeric bounds back onto the long data
s100data02 <- s100data01 %>% left_join(renDf01, by = "sizeclass")
## merge the small bin with no particles into the bin one size smaller
s100data03 <- s100data02 %>%
filter(lb != 0.128) %>% # removing small bin with no particles
mutate(ub = if_else(ub == 0.128, 0.161, ub))
## Preliminary math: raw counts and bin-width-normalized concentrations
s100data04 <- s100data03 %>%
mutate(
TotalParticles = nparticles * vol,
binsize = ub - lb,
n_nparticles = nparticles/binsize
)
s100data04
}
combine_projects <- function(proj1, proj2, name1 = "ETNP", name2 = "P16"){
  # Tag each project's data frame with a label column, then stack them.
  bind_rows(
    proj1 %>% mutate(project = name1),
    proj2 %>% mutate(project = name2)
  )
}
## I tend to work with lists of two element. One is the data of each size, and one is the depth summary.
## The following makes the early instance of that list, on whihc I work.
make_twin_df_list <- function(EachSize){
  # Build the standard two-element list used throughout this file: the
  # per-size-class data, plus a depth-level summary skeleton with one row per
  # project/profile/time/depth.
  depth_skeleton <- EachSize %>%
    group_by(project, profile, time, depth) %>%
    summarize()
  list(EachSize, depth_skeleton)
}
## A function that takes a variable x that can either be a list of two elements, "DepthSummary" and "EachSize"
## and saves them to the parent environment. Alternatively, if DepthSummary is specified, EachSize is saved to the parent environment, and DepthSummary
## is passed forward
## I stopped using this because the passing data directly to the parent thing, while concise, I think may be confusing to some readers.
## The second approach is more conventional.
parse_jac_input <- function(x, DepthSummary = NULL){
# Deprecated input parser: writes EachSize and DepthSummary directly into the
# CALLER's environment via parent.frame() instead of returning them. Kept for
# reference only; parse_jac_input2 (which returns a list) is used instead
# because side-effect assignment into the caller is confusing to read.
p <- parent.frame()
if(!is.null(DepthSummary)){
p$EachSize = x
p$DepthSummary = DepthSummary
}else{
if(length(x) == 2){
p$EachSize = x[[1]]
p$DepthSummary = x[[2]]
}else{
stop("Either DepthSummary must be specified, or else the first variable must be a two element list containing EachSize, and DepthSummary elements")}
}
}
# As above, but this just sends out a two element list of EachSize and DepthSummary
parse_jac_input2 <- function(x, DepthSummary = NULL){
  # Normalize the two calling conventions used throughout this file into a
  # two-element list: EachSize first, DepthSummary second. Callers may pass
  # either (EachSize, DepthSummary) as two arguments, or a single two-element
  # list as x. A default of NULL is added so the one-argument form works even
  # when DepthSummary is omitted entirely (previously it had no default).
  if (!is.null(DepthSummary)) {
    return(list(ES = x, DS = DepthSummary))
  }
  if (length(x) == 2) {
    # Name the elements for consistency with the branch above; callers that
    # index positionally ([[1]]/[[2]]) are unaffected.
    # NOTE(review): length(x) == 2 would also accept a single two-column data
    # frame -- confirm callers never pass one here.
    return(list(ES = x[[1]], DS = x[[2]]))
  }
  stop("Either DepthSummary must be specified, or else the first variable must be a two element list containing EachSize, and DepthSummary elements")
}
## I dont' actually use this for anything,but these twin functions all have common elements, so here's a template
function_template <- function(x, DepthSummary = NULL){
  # Normalize the input into the (EachSize, DepthSummary) pair; parse_jac_input2
  # accepts either a two-element list or the two data frames as separate args.
  parsed <- parse_jac_input2(x, DepthSummary)
  EachSize <- parsed[[1]]
  DepthSummary <- parsed[[2]]
  # Code does stuff here
  list(ES = EachSize, DS = DepthSummary)
}
### Calculate biovolume, flux, speed, both for size classes and in aggregate
calc_particle_parameters <- function(x, DepthSummary = NULL){
# Accepts either a two-element list of (EachSize, DepthSummary) or the two data frames as separate arguments.
# Returns list(ES, DS): ES gains per-size-class biovolume, speed, flux, and
# fitted-flux columns; DS gains the size-integrated (summed) totals per depth.
# Relies on file-level globals: alpha, gamma (size scaling exponents) and
# C_f_global, ag_global (fitted flux-law parameters).
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
## Particle fractal dimension estimates: history of parameter values tried.
## The values actually used are set once near the top of the file.
# alpha = 1.7 # Klips et al. 1994
# gamma = alpha - 1 # assume spherical drag profile as in Guidi
# ag_fit = 0.26 # Fitted alpha + gamma parameter; hardcoded but copied from NormalizeUVP_Flux.Rmd
# C_f_fit = 3.98 # Fitted C_f parameter
# alpha = 0.52 # Alldredge
# gamma = 0.26 # Alldredge & Gotschalk
# C_f_fit = 10.51 # Normalize_UVP_Flux.Rmd, nonlinear for now
# ag_fit = alpha + gamma # Zerod out for now; I'd like to clean this all up soon.
# Per-size-class quantities; lb is the size bin's lower bound.
EachSize2 <- EachSize %>%
mutate(
biovolume = nparticles * lb ^ alpha,
speed = lb ^ gamma,
flux = biovolume * speed,
flux_fit = nparticles * C_f_global * lb ^ ag_global
)
# Depth-level totals: sum the per-size-class columns within each cast/depth.
DepthSummary2 <- EachSize2 %>%
group_by(project, profile, time, depth) %>%
summarize(
vol = first(vol),
tot_TotParticles = sum(TotalParticles),
tot_nparticles = sum(nparticles),
tot_nnparticles = sum(n_nparticles),
tot_biovolume = sum(biovolume),
tot_flux = sum(flux),
tot_flux_fit = sum(flux_fit),
tot_speed = tot_flux/tot_biovolume
)
list(ES = EachSize2, DS = DepthSummary2)
}
## Seperate parameters for small < 530 micron and big > 530 micron particles
calc_small_and_big <- function(x, DepthSummary = NULL){
# Accepts either a two-element list of (EachSize, DepthSummary) or the two data frames as separate arguments.
# Splits the size classes at ParticleSizeCutoff (file-level global, 0.5 mm;
# NOTE(review): the header comment above says 530 micron -- confirm which is intended)
# and attaches small_*/big_* depth sums to the depth summary.
# Requires the columns added by calc_particle_parameters (biovolume, flux, flux_fit).
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
# "small" = strictly below the cutoff; "big" = at or above it.
small <- EachSize %>%
filter(lb < ParticleSizeCutoff)
big <- EachSize %>%
filter(lb >= ParticleSizeCutoff)
# Sum each group's quantities within every cast/depth.
small2 <- small %>%
group_by(project, profile, time, depth) %>%
summarise(
small_TotParticles = sum(TotalParticles),
small_nparticles = sum(nparticles),
small_nnparticles = sum(n_nparticles),
small_biovolume = sum(biovolume),
small_flux = sum(flux),
small_flux_fit = sum(flux_fit),
small_speed = small_flux/small_biovolume
)
big2 <- big %>%
group_by(project, profile, time, depth) %>%
summarise(
big_TotParticles = sum(TotalParticles),
big_nparticles = sum(nparticles),
big_nnparticles = sum(n_nparticles),
big_biovolume = sum(biovolume),
big_flux = sum(flux),
big_flux_fit = sum(flux_fit),
big_speed = big_flux/big_biovolume
)
# Attach both summaries to the depth summary.
DepthSummary2 <- left_join(DepthSummary, small2, by = c("project","profile", "time", "depth")) %>%
left_join(big2, by = c("project","profile", "time", "depth"))
list(ES = EachSize, DS = DepthSummary2)
}
## Model-fitting helpers used by the calc_*psd functions below.
## Poisson size-spectrum model: particle counts vs log size, with an offset so
## the coefficients are on the normalized-abundance scale.
fit_model = function(df) glm(TotalParticles ~ log(lb), offset = log(binsize * vol), data = df, family = "poisson")
## Negative-binomial version via MASS; noted by the author as not working.
fit_nb = function(df) MASS::glm.nb(TotalParticles ~ log(lb) + offset(log(vol * binsize)), data = df)
# Error-capturing wrapper (purrr::safely) so one failed fit does not abort a map.
safe_fit_nb <- safely(fit_nb)
# gam-based fit, used because gam can handle the negative binomial. When
# tidying its output, `parametric = TRUE` must be passed to broom::tidy.
fit_nb_2 <- function(df) gam(TotalParticles ~ log(lb), offset = log(binsize * vol), data = df, family = "nb")
### Calculate the particle size distribution: fit a size-spectrum model per
### cast/depth and record its intercept (icp) and slope (psd).
## Takes EachSize, a data frame of particle-size-specific quantities, and
## DepthSummary, depth-specific quantities (or a two-element list of both).
## Returns list(ES = unchanged EachSize, DS = DepthSummary with icp/psd added).
calc_psd <- function(x, DepthSummary = NULL){
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
# Fit the negative-binomial spectrum model (fit_nb_2, defined above) to each
# (project, profile, time, depth) group, then keep only the two parametric
# coefficients: the intercept and the log(lb) slope.
psdCalc01 <- EachSize %>%
group_by(project, profile, time, depth) %>%
nest() %>%
mutate(model = map(data, fit_nb_2)) %>%
# parametric = TRUE is required when tidying a gam's parametric terms
mutate(tidied = map(model, tidy, parametric = TRUE)) %>%
select(-data, -model) %>%
unnest(tidied) %>%
select(project, profile:estimate) %>%
spread(key = "term", value = "estimate") %>%
rename(icp = `(Intercept)`, psd = `log(lb)`)
DepthSummary2 <- left_join(DepthSummary, psdCalc01, by = c("project","profile", "time", "depth"))
list(ES = EachSize, DS = DepthSummary2)
}
# Particle size distribution (intercept + slope) fitted to the small
# (< ParticleSizeCutoff) size classes only; results are joined onto
# DepthSummary as small_icp / small_psd.
calc_small_psd <- function(x, DepthSummary = NULL){
  parsed <- parse_jac_input2(x, DepthSummary)
  EachSize <- parsed[[1]]
  DepthSummary <- parsed[[2]]
  # Fit the negative-binomial size-spectrum model per cast/depth, then keep
  # only the two parametric coefficients.
  small_fit <- EachSize %>%
    filter(lb < ParticleSizeCutoff) %>%
    group_by(project, profile, time, depth) %>%
    nest() %>%
    mutate(
      model = map(data, fit_nb_2),
      tidied = map(model, function(m) tidy(m, parametric = TRUE))
    ) %>%
    select(-data, -model) %>%
    unnest(tidied) %>%
    select(project, profile:estimate) %>%
    spread(key = "term", value = "estimate") %>%
    rename(small_icp = `(Intercept)`, small_psd = `log(lb)`)
  DepthSummary2 <- left_join(DepthSummary, small_fit, by = c("project","profile", "time", "depth"))
  list(ES = EachSize, DS = DepthSummary2)
}
# Particle size distribution (intercept + slope) fitted to the big
# (>= ParticleSizeCutoff) size classes only; results are joined onto
# DepthSummary as big_icp / big_psd.
calc_big_psd <- function(x, DepthSummary = NULL){
  parsed <- parse_jac_input2(x, DepthSummary)
  EachSize <- parsed[[1]]
  DepthSummary <- parsed[[2]]
  # Fit the negative-binomial size-spectrum model per cast/depth, then keep
  # only the two parametric coefficients.
  big_fit <- EachSize %>%
    filter(lb >= ParticleSizeCutoff) %>%
    group_by(project, profile, time, depth) %>%
    nest() %>%
    mutate(
      model = map(data, fit_nb_2),
      tidied = map(model, function(m) tidy(m, parametric = TRUE))
    ) %>%
    select(-data, -model) %>%
    unnest(tidied) %>%
    select(project, profile:estimate) %>%
    spread(key = "term", value = "estimate") %>%
    rename(big_icp = `(Intercept)`, big_psd = `log(lb)`)
  DepthSummary2 <- left_join(DepthSummary, big_fit, by = c("project","profile", "time", "depth"))
  list(ES = EachSize, DS = DepthSummary2)
}
## Old Way: smooth psd and icp over depth with a single gam fitted to ALL
## profiles pooled together (see calc_psd_gam_multiprofile for the per-cast
## version). Adds psd_gam/psd_seg and icp_gam/icp_seg (fit and standard error)
## to DepthSummary.
calc_psd_gam <- function(x, DepthSummary = NULL){
  parsed <- parse_jac_input2(x, DepthSummary)
  EachSize <- parsed[[1]]
  DepthSummary <- parsed[[2]]
  # Depth smooths for the slope (psd) and intercept (icp), predicted back onto
  # the observed depths with standard errors.
  mod_psd <- gam(psd ~ s(depth), data = DepthSummary)
  mod_icp <- gam(icp ~ s(depth), data = DepthSummary)
  pred_psd <- predict(mod_psd, DepthSummary, se.fit = TRUE)
  pred_icp <- predict(mod_icp, DepthSummary, se.fit = TRUE)
  DepthSummary2 <- bind_cols(
    DepthSummary,
    psd_gam = pred_psd$fit, psd_seg = pred_psd$se.fit,
    icp_gam = pred_icp$fit, icp_seg = pred_icp$se.fit
  )
  list(ES = EachSize, DS = DepthSummary2)
}
# Treat each cast separately when calculating smoothed particle size
# distributions: nest DepthSummary per (project, profile, time), fit a gam of
# psd ~ s(depth) and icp ~ s(depth) within each cast, and bind the predictions
# (fit and standard error) back onto the depth rows.
calc_psd_gam_multiprofile <- function(x, DepthSummary = NULL){
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
DSN <- DepthSummary %>% group_by(project, profile, time) %>%
nest()
# One pair of depth-smooth gams per cast.
DSN$psd_gam_model <- map(DSN$data, ~ gam(psd ~ s(depth), data = .))
DSN$intercept_gam_model <- map(DSN$data, ~ gam(icp ~ s(depth), data = .))
DSN$psd_pred = map2(DSN$psd_gam_model, DSN$data, ~predict(.x, .y, se.fit = TRUE))
DSN$icp_pred = map2(DSN$intercept_gam_model, DSN$data, ~predict(.x, .y, se.fit = TRUE))
# Bind the per-cast predictions onto each cast's depth rows.
DSN$DepthSummary2 <- pmap(
.l = list(DSN$data, DSN$psd_pred, DSN$icp_pred),
.f = function(DepthSummary, psd_pred, icp_pred){
bind_cols(DepthSummary,
psd_gam= psd_pred$fit, psd_seg = psd_pred$se.fit,
icp_gam = icp_pred$fit, icp_seg = icp_pred$se.fit)
}
)
DS2 <- DSN %>%
select(project, profile, time, DepthSummary2) %>% unnest(cols = c(DepthSummary2))
# NOTE(review): `mods` below is assembled but never returned (see the
# commented-out return line) -- dead code kept for a possible future API.
mods = DSN$psd_gam_model
names(mods) <- DSN$profile
#list(out = list(ES = EachSize, DS = DS2), psd_gam_mod = mods)
list(ES = EachSize, DS = DS2)
}
# Predict total particles for each size class from the per-depth gam fits.
# Requires icp_gam/psd_gam to already be on DepthSummary (e.g. via
# calc_psd_gam or calc_psd_gam_multiprofile). Adds GamPredictTP to EachSize.
pred_tp_gam <- function(x, DepthSummary = NULL){
  x2 <- parse_jac_input2(x, DepthSummary)
  EachSize = x2[[1]]
  DepthSummary = x2[[2]]
  # Carry the smoothed spectrum parameters onto every size class, predict the
  # expected particle count, then drop the helper columns again.
  EachSize2 <- DepthSummary %>% select(project, profile, time, depth, icp_gam, psd_gam) %>%
    right_join(EachSize, by = c("project","profile", "time", "depth")) %>%
    mutate(GamPredictTP = vol * lb * (exp(icp_gam + log(lb) * psd_gam))) %>%
    # Bug fix: the original `select(-icp_gam, psd_gam)` removed icp_gam but
    # left psd_gam in the result; both helper columns should be dropped.
    select(-icp_gam, -psd_gam)
  list(ES = EachSize2, DS = DepthSummary)
}
### Predicted quantiles of particle numbers from the gam.
## Helper functions: Poisson size-spectrum refit used on the simulated draws.
fit_model_secondary = function(df) glm(rp ~ log(lb), offset = log(binsize * vol), data = df, family = "poisson")
# quietly() captures warnings/messages from the refit instead of printing them.
pos_fit_model_secondary = quietly(fit_model_secondary)
## Monte-Carlo quantiles of the size-spectrum slope: draw Poisson counts from
## the gam-predicted totals (GamPredictTP) `niter` times, refit the Poisson
## size spectrum to each draw, and return quantiles of the fitted log(lb)
## slope as a one-row tibble with columns qpt05 / qpt95.
## Bug fix: q1 and q2 were previously accepted but ignored (probs were
## hardcoded to 0.05/0.95); they are now used, with defaults changed to
## 0.05/0.95 so existing callers get identical results. The output column
## names stay qpt05/qpt95 regardless of q1/q2, since callers depend on them.
quantilater <- function(df, niter = 10, q1 = 0.05, q2 = 0.95){
  t2Test00 <- df
  # One Poisson draw per iteration for every size class.
  t2Test01 <- t2Test00 %>% mutate(poisDraw = map(GamPredictTP, ~data.frame(iter = 1:niter, rp = rpois(niter, .)))) %>%
    unnest(poisDraw)
  t2Test02 <- t2Test01 %>% group_by(iter) %>% nest()
  # Refit the Poisson spectrum to each simulated draw; warnings are captured
  # (not printed) by the quietly() wrapper.
  t2Test03 <- t2Test02 %>% mutate(modelAndWarnings = map(data, pos_fit_model_secondary)) %>%
    mutate(model = map(modelAndWarnings, ~.[[1]]))
  t2Test04 <- t2Test03 %>% mutate(tidied = map(model, tidy))
  t2Test05 <- t2Test04 %>% select(iter,tidied)%>% unnest(tidied) %>% select(iter, estimate, term) %>% spread(value = "estimate", key = "term")
  # Quantiles of the fitted slope across iterations.
  t2Test06 <- t2Test05 %>% ungroup() %>% summarise(qpt05 = quantile(`log(lb)`, probs = q1), qpt95 = quantile(`log(lb)`, probs = q2))
  t2Test06
}
## I don't do this analysis in the paper, but it was for deciding whether bins
## with zero particles are likely to come from a non-power-law particle size
## distribution or not. Runs quantilater() per (project, profile, time, depth)
## and joins the slope quantiles onto DepthSummary.
tp_quantiles <- function(x, DepthSummary = NULL, niter = 10){
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
t2Calc01 <- EachSize %>% select(project, profile, time, depth, binsize, lb, vol, GamPredictTP) %>%
mutate(vol2 = vol) %>%
group_by(project, profile, time, depth) %>%
nest(data = c(binsize, lb, vol, GamPredictTP))
#t2Calc02 <- t2Calc01 %>% mutate(quantiles = future_map(data, quantilater, niter = niter))
# the above doesn't work, trying an alternative:
# run quantilater over the nested tables in parallel (note the right-arrow
# assignment into `moo`) and attach the result as a list column by hand.
future_map(t2Calc01[["data"]][], quantilater, niter = niter) -> moo
t2Calc02 <- t2Calc01
t2Calc02$quantiles = moo
t2Calc03 <- t2Calc02 %>% select(-data) %>% unnest(quantiles)
DepthSummary2 <- DepthSummary %>% left_join(t2Calc03, by = c("project","profile", "time", "depth"))
return(list(ES = EachSize, DS = DepthSummary2))
}
### Binning
## Daniele's depth-bin edges, Matlab-style spec (note the last run extends to
## 5600 m, not 5000 as originally written):
## unique(c(0:20:100, 100:25:200, 200:50:1000, 1000:100:2000, 2000:200:5600))
## unique() removes the duplicated edges where consecutive runs meet.
BianchiBins <- c(
seq(from = 0, to = 100, by = 20),
seq(from = 100, to = 200, by = 25),
seq(from = 200, to = 1000, by = 50),
seq(from = 1000, to = 2000, by = 100),
seq(from = 2000, to = 5600, by = 200)
) %>% unique
## Go from highly resolved depths to binned depths: assign every observation
## to a depth bin, pool volumes and particle counts within each bin, and
## recalculate the per-volume quantities. Bin midpoints become the new depths.
bin_depths <- function(x, DepthSummary = NULL, bins = BianchiBins){
  # Accept either a two-element list (EachSize, DepthSummary) or two arguments.
  x2 <- parse_jac_input2(x, DepthSummary)
  EachSize = x2[[1]]
  DepthSummary = x2[[2]]
  # Bin edges -> midpoints, used as the bin labels.
  # Bug fix: the original `bins[1:length(bins)-1]` parses as
  # bins[(1:length(bins)) - 1] and only worked because the 0 index is
  # silently dropped; the negative-index form says what is meant.
  dlb <- bins[-length(bins)] # lower edges
  dub <- bins[-1]            # upper edges
  mids = (dlb + dub)/2
  EachSize2 <- EachSize %>% mutate(DepthBin = cut(depth, bins, labels = mids))
  # Pool raw counts and sampled volume within each depth bin; nparticles and
  # n_nparticles are invalidated by the pooling so they are dropped here.
  EachSize3 <- EachSize2 %>%
    select(-nparticles, -n_nparticles, -depth) %>%
    group_by(project, profile, time, DepthBin, lb, ub, binsize) %>%
    summarize(vol = sum(vol), TotalParticles = sum(TotalParticles)) %>%
    ungroup()
  # Recalculate nparticles and n_nparticles from the pooled totals, and turn
  # the factor bin label back into a numeric depth (the bin midpoint).
  # (The dangling `pass` pipeline terminators of the original are removed.)
  EachSize4 <- EachSize3 %>%
    mutate(nparticles = TotalParticles/vol,
           n_nparticles = nparticles/binsize) %>%
    mutate(depth = as.numeric(as.character(DepthBin))) %>%
    select(-DepthBin)
  # Collapse DepthSummary to one row per (project, profile, time, midpoint) so
  # it lines up with the binned EachSize.
  DepthSummary2 <- DepthSummary %>%
    mutate(DepthBin = cut(depth, bins, labels = mids)) %>%
    group_by(project, profile, time, DepthBin) %>%
    summarize() %>%
    ungroup() %>%
    mutate(depth = as.numeric(as.character(DepthBin))) %>%
    select(-DepthBin)
  return(list(ES = EachSize4, DS = DepthSummary2))
}
# Average profiles by summing TotalParticles and volume across casts and then
# recalculating the per-volume quantities. The combined cast is labelled
# profile = "multiple".
sum_profiles <- function(x, DepthSummary = NULL){
  # Accept either a two-element list (EachSize, DepthSummary) or two arguments.
  x2 <- parse_jac_input2(x, DepthSummary)
  EachSize = x2[[1]]
  DepthSummary = x2[[2]]
  # Combine the profiles, pooling raw counts and volume; nparticles and
  # n_nparticles are invalidated by the pooling so they are dropped here.
  EachSize3 <- EachSize %>%
    select(-nparticles, -n_nparticles) %>%
    group_by(project, depth, lb) %>%
    summarize(vol = sum(vol), TotalParticles = sum(TotalParticles), ub = first(ub), binsize = first(binsize)) %>%
    ungroup()
  # Recalculate nparticles and n_nparticles from the pooled totals.
  # (The dangling `pass` pipeline terminator of the original is removed.)
  EachSize4 <- EachSize3 %>%
    mutate(nparticles = TotalParticles/vol,
           n_nparticles = nparticles/binsize)
  # Collapse DepthSummary to match the combined casts.
  # NOTE(review): this groups by time while EachSize3 does not -- confirm
  # whether time should also be collapsed above.
  DepthSummary2 <- DepthSummary %>%
    group_by(project, time, depth) %>%
    summarize() %>%
    ungroup() %>%
    mutate(profile = "multiple")
  # Bug fix: the original returned the *unsummarized* DepthSummary, silently
  # discarding DepthSummary2.
  return(list(ES = EachSize4, DS = DepthSummary2))
}
# combine_timesteps <- function(x, DepthSummary = NULL){
# # Load in data, flexably
# x2 <- parse_jac_input2(x, DepthSummary)
# EachSize = x2[[1]]
# DepthSummary = x2[[2]]
#
# # consistency with other expressions
# EachSize2 <- EachSize
#
# EachSize3 <- EachSize2 %>%
# select(-nparticles, -n_nparticles) %>%
# group_by(project, profile, time, DepthBin, lb, ub, binsize) %>%
# summarize(vol = sum(vol), TotalParticles = sum(TotalParticles)) %>%
# ungroup()
#
# }
## 24 November 2020
# my_double_gam <- function(df){
# gam(TotalParticles ~s(log(lb), log(depth)), offset = log(vol * binsize), family = nb(), data = df)
# }
#
# safe_double_gam <- safely(my_double_gam)
## The following functions are never used as far as I can tell, but if I delete them, they'll probably turn
## out to be required somewhere cryptic and break everything, so here they stay.
## I'm not sure what they do
## Attach link-scale gam predictions (fit, se, +/- 2 se bounds) and their
## exp-transformed response-scale counterparts to the rows of `df`.
expand_with_gam <- function(df, mod){
  preds <- predict(mod, type = "link", se.fit = TRUE) %>%
    as.data.frame %>%
    mutate(lower = fit - 2 * se.fit, upper = fit + 2 * se.fit) %>%
    mutate(resp_fit = exp(fit), resp_lower = exp(lower), resp_upper = exp(upper))
  bind_cols(df, preds)
}
# Log-log size spectrum plot: fitted values (open circles) with error bars,
# overlaid on the observed normalized abundances (filled points).
# Expects the columns produced by expand_with_gam().
gam_size_ggplot <- function(df){
ggplot(df, aes(x = lb)) + geom_point(aes(y = resp_fit), shape = 1) +
geom_errorbar(aes(ymin = resp_lower, ymax = resp_upper)) +
geom_point(aes(y = n_nparticles)) + scale_x_log10() + scale_y_log10()
}
# Fitted abundance vs depth, one path per size class (coloured by log size),
# with horizontal error bars from the gam standard errors.
# NOTE(review): the vertical reference lines at x = 1 and x = 5 look like
# domain-specific thresholds -- confirm their meaning before reuse.
gam_size_ggplot_2d <- function(df){
df %>% ggplot(aes(x = resp_fit, y = depth, col = log(lb), group = lb)) +
scale_y_reverse() + geom_point() + scale_x_log10(limits = c(10^-8, NA)) +
scale_color_viridis_c() + geom_path() + geom_vline(xintercept = 1) + geom_vline(xintercept = 5) +
geom_errorbar(aes(xmin = resp_lower, xmax = resp_upper), width = 10, alpha = 0.5) + theme_bw()
}
# Same layout as gam_size_ggplot_2d, but plotting the observed normalized
# abundances (n_nparticles) instead of the gam fit, without error bars.
nnp_size_ggplot_2d <- function(df){
df %>% ggplot(aes(x = n_nparticles, y = depth, col = log(lb), group = lb)) +
scale_y_reverse() + geom_point() + scale_x_log10(limits = c(10^-8, NA)) +
scale_color_viridis_c() + geom_path() + geom_vline(xintercept = 1) + geom_vline(xintercept = 5) +
theme_bw()
}
##Here's a commented out function. I think a newer version is defined below.
# double_gam_smooth <- function(x, DepthSummary = NULL){
# # Input from twin please.
# #Allow passing in either a two elemet list of Eachsize and DepthSummary, or passing in as two variables.
#
# x2 <- parse_jac_input2(x, DepthSummary)
# EachSize = x2[[1]]
# DepthSummary = x2[[2]]
#
# withGamFit <- EachSize %>% group_by(project) %>% nest() %>%
# mutate(mod = map(data, safe_double_gam),
# modOnly = map(mod, ~.[[1]]),
# pred = map2(modOnly, data, safely(predict), se.fit = TRUE),
# predOnly = map(pred, ~.[[1]]),
# data01 = map2(data, predOnly,
# ~bind_cols(.x, link = .y$fit, lse = .y$se.fit))) %>%
# select(project, data01) %>%
# unnest(data01) %>%
# mutate(link_lower = link - lse,
# link_upper = link + lse,
# nnp_smooth = exp(link),
# nnp_lower = exp(link_lower),
# nnp_upper = exp(link_upper),
# np_smooth = nnp_smooth * binsize,
# tp_smooth = np_smooth * vol,
# flux_smooth = np_smooth * (C_f_global * lb ^ ag_global)
# )
#
# TotalStuff <- withGamFit %>% group_by(project, profile, time, depth) %>%
# summarize(smooth_TotParticles = sum(tp_smooth),
# smooth_nparticles = sum(np_smooth),
# smooth_nnparticles = sum(nnp_smooth),
# smooth_flux_fit = sum(flux_smooth)
# )
#
# DepthSummary_B <- left_join(DepthSummary, TotalStuff, by = c("project", "profile", "time", "depth"))
#
# return(list(ES = withGamFit, DS = DepthSummary_B))
#
# }
## Functions for smoothing data, using size and depth information together.
## Originally coded in SmoothsAndFluxRevisited.
## I lose at DRY coding, but if I try to fix this it will break everything and
## I'll spend a week fixing things.
# Negative-binomial gam of raw counts over a joint (log size, log depth)
# smooth, one surface per profile; the offset puts coefficients on the
# normalized-abundance scale.
my_double_gam <- function(df){
gam(TotalParticles ~s(log(lb), log(depth), by = factor(profile)), offset = log(vol * binsize), family = nb(), data = df)
}
# Error-capturing wrapper so one failed fit does not abort a map over projects.
safe_double_gam <- safely(my_double_gam)
## Smooth the size spectra jointly over size and depth with my_double_gam (one
## fit per project), then derive smoothed abundances and fluxes per size class
## (nnp_smooth, np_smooth, tp_smooth, flux_smooth) and their depth totals.
## Uses globals C_f_global / ag_global for the flux conversion.
double_gam_smooth <- function(x, DepthSummary = NULL){
# Input from twin please.
# Allow passing in either a two-element list of EachSize and DepthSummary, or
# passing the two tables as separate arguments.
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
# Fit and predict per project; both steps run under safely() so one broken
# project does not kill the pipeline. Predictions are pulled from the safely
# result (`.y$result`); note `predOnly` is computed but unused.
withGamFit <- EachSize %>% group_by(project) %>% nest() %>%
mutate(mod = map(data, safe_double_gam),
modOnly = map(mod, ~.[[1]]),
pred = map2(modOnly, data, safely(predict), se.fit = TRUE),
predOnly = map(pred, ~.[[1]]),
data01 = map2(data, pred,
~bind_cols(.x, link = .y$result$fit, lse = .y$result$se.fit))) %>%
select(project, data01) %>%
unnest(data01) %>%
# Back-transform from the link scale: nnp is normalized abundance, np is per
# volume, tp is a raw count, and flux uses the global fitted flux law.
mutate(link_lower = link - lse,
link_upper = link + lse,
nnp_smooth = exp(link),
nnp_lower = exp(link_lower),
nnp_upper = exp(link_upper),
np_smooth = nnp_smooth * binsize,
tp_smooth = np_smooth * vol,
flux_smooth = np_smooth * (C_f_global * lb ^ ag_global)
)
# Depth-level totals of the smoothed quantities.
TotalStuff <- withGamFit %>% group_by(project, profile, time, depth) %>%
summarize(smooth_TotParticles = sum(tp_smooth),
smooth_nparticles = sum(np_smooth),
smooth_nnparticles = sum(nnp_smooth),
smooth_flux_fit = sum(flux_smooth)
)
DepthSummary_B <- left_join(DepthSummary, TotalStuff, by = c("project", "profile", "time", "depth"))
return(list(ES = withGamFit, DS = DepthSummary_B))
}
## Sometimes I want to run things on just one profile, especially when building
## functions that will later run over every profile. This keeps a single
## profile (aka cast) and drops the now-constant identifier columns.
filter_profile <- function(x, DepthSummary = NULL, profile = "stn_043"){
  # Stash the argument under another name so that inside filter() the bare
  # `profile` refers to the data column, not the function argument.
  wanted <- profile
  parsed <- parse_jac_input2(x, DepthSummary)
  es <- parsed[[1]] %>%
    ungroup() %>%
    filter(profile == wanted) %>%
    select(-c(project, profile, time))
  ds <- parsed[[2]] %>%
    ungroup() %>%
    filter(profile == wanted) %>%
    select(-c(project, profile, time))
  list(ES = es, DS = ds)
}
## This calls the Eulerian prism (remineralization/disaggregation) model for a
## single profile and calculates flux-budget diagnostics from it.
## Depends on globals defined elsewhere: Cm, Cw, alpha, gamma, alpha_global,
## gamma_global, C_f_global, ag_global, ParticleSizeCutoff, and on the
## functions optFun() and remin_smooth_shuffle().
## Expects EachSize to already carry the smoothed columns np_smooth,
## nnp_smooth and flux_smooth (see double_gam_smooth), and assumes the pair
## describes a single profile (identifier columns already removed, e.g. via
## filter_profile).
diagnose_disaggregation_one_profile <- function(x, DepthSummary = NULL){
## Preamble
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
# Size-class grid plus per-class mass (m_vec) and sinking speed (w_vec).
lb_vec <- sort(unique(EachSize$lb))
m_vec = Cm * lb_vec ^ alpha;
w_vec = Cw * lb_vec ^ gamma;
# Lower edge of the smallest size class, extrapolated half a bin width down.
llb_01 <- little_lb <- lb_vec[1] - (lb_vec[2] - lb_vec[1])/2
# Nest the per-size spectrum under each depth so it can be lagged as a unit.
specData <- EachSize %>% select(depth, lb, np_smooth, nnp_smooth, flux_smooth) %>%
nest(spec_meta = c(lb, np_smooth, nnp_smooth, flux_smooth))
# For each depth, attach the previous depth's spectrum and flux, the flux
# change DF, the flux ratio DFP, and the depth step DZ.
preparedData <- DepthSummary %>% left_join(specData, by = c("depth")) %>%
arrange(depth) %>%
mutate(spec_only = map(spec_meta, ~pull(., np_smooth)),
spec_prev = lag(spec_only),
flux_prev = lag(smooth_flux_fit),
DF = smooth_flux_fit - flux_prev,
#DFP = 1 - DF/flux_prev, # I was using this for a while.
DFP = smooth_flux_fit/flux_prev,
depth_prev = lag(depth),
DZ = depth - depth_prev,
)
# The shallowest depth has no "previous" spectrum; set it aside and add it
# back after the model run.
minDepth = min(preparedData$depth)
saveFirstDepth = preparedData %>% filter(depth == minDepth) %>% select(depth, spec_meta) %>% unnest(spec_meta)
# Run the prism model one depth step at a time (row 1 dropped: no lag).
modelRun <- preparedData %>%
.[-1,] %>%
# fix flux leak here
mutate(use_this_DFP = map2_dbl(spec_prev, DFP, optFun, lbv = lb_vec, mv = m_vec, wv = w_vec, llb = llb_01, alpha = alpha_global, gamma = gamma_global)) %>%
mutate(spec_pred = map2(spec_prev, use_this_DFP, remin_smooth_shuffle, lbv = lb_vec, mv = m_vec, wv = w_vec, llb = llb_01, alpha = alpha_global, gamma = gamma_global))
#modelRunFixLine1 <- bind_rows(preparedData[1,], modelRun)
# Fold the previous and predicted spectra into the nested spec_meta tables.
modelConcise <- modelRun %>%
mutate(spec_meta = map2(spec_meta, spec_prev, ~tibble(.x, np_prev = .y))) %>%
mutate(spec_meta = map2(spec_meta, spec_pred, ~tibble(.x, np_pred = .y))) %>%
select(depth, depth_prev, DZ, DF, DFP, use_this_DFP, spec_meta)
modelUnnest <- modelConcise %>%
#select(depth, spec_meta) %>%
unnest(spec_meta) %>%
ungroup()
modelUnnestWithFirstDepth <- bind_rows(saveFirstDepth, modelUnnest)
# Convert previous/predicted abundances to fluxes with the global flux law.
modelPostCalc <- modelUnnestWithFirstDepth %>%
mutate(
flux_prev = np_prev * (C_f_global * lb ^ ag_global),
flux_pred = np_pred * (C_f_global * lb ^ ag_global)
)
# Depth-level flux budgets: all sizes, then the small and big fractions.
# NOTE(review): "small" is lb <= cutoff here but lb < cutoff in
# calc_small_and_big -- confirm which boundary is intended.
Tot <- modelPostCalc %>%
group_by(depth) %>%
summarize(depth_prev = first(depth_prev), DZ = first(DZ) ,DF = first(DF), DFP = first(DFP), use_this_DFP = first(use_this_DFP),
Flux = sum(flux_smooth),
Flux_Prev = sum(flux_prev),
Flux_Pred = sum(flux_pred))
Small <- modelPostCalc %>%
filter(lb <= ParticleSizeCutoff) %>%
group_by(depth) %>%
summarize(DF = first(DF), DFP = first(DFP),
Flux = sum(flux_smooth),
Flux_Prev = sum(flux_prev),
Flux_Pred = sum(flux_pred))
Big <- modelPostCalc %>%
filter(lb > ParticleSizeCutoff) %>%
group_by(depth) %>%
summarize(DF = first(DF), DFP = first(DFP),
Flux = sum(flux_smooth),
Flux_Prev = sum(flux_prev),
Flux_Pred = sum(flux_pred))
# osps / obpb compare observed vs model-predicted flux in each size fraction.
# NOTE(review): interpretation inferred from the names (observed small minus
# predicted small; predicted big minus observed big) -- confirm against the
# accompanying analysis.
All <- Tot %>%
left_join(Small, by = "depth", suffix = c("", "_Small")) %>%
left_join(Big, by = "depth", suffix = c("", "_Big")) %>%
mutate(osps = Flux_Small - Flux_Pred_Small,
obpb = Flux_Pred_Big - Flux_Big,
ospsDZ = osps/DZ
)
DepthSummary_B <- DepthSummary %>%
left_join(All, by = "depth") %>% rename(Flux_Smooth = Flux)
modelReduced <- modelPostCalc %>% select(
depth, lb, flux_prev, flux_pred
)
EachSize_B <- EachSize %>%
left_join(modelReduced, by = c("depth", "lb"))
return(list(ES = EachSize_B, DS = DepthSummary_B))
}
## Sometimes the Eulerian prism code breaks; in that case I want to keep
## running, so the per-profile function is wrapped with purrr::safely, which
## returns list(result, error) instead of throwing.
diagnose_disaggregation_one_profile_safe <- safely(diagnose_disaggregation_one_profile)
## Run diagnose_disaggregation_one_profile_safe over every profile and keep
## track of the data: nest both tables per (project, profile, time), run the
## model per cast, and unnest the successful results back into flat tables.
## NOTE(review): profiles whose model run errored produce NULL results; the
## per-cast errors (ESDS_Err) are discarded here rather than reported.
diagnose_disaggregation<- function(x, DepthSummary = NULL){
## Preamble
x2 <- parse_jac_input2(x, DepthSummary)
EachSize = x2[[1]]
DepthSummary = x2[[2]]
# Nest each table per cast, then pair them up row-wise.
ESN <- EachSize %>%
group_by(project, profile, time) %>%
nest() %>%
rename(ES = data)
DSN <- DepthSummary %>%
group_by(project, profile, time) %>%
nest() %>%
rename(DS = data)
metaNest <- left_join(ESN, DSN, by = c("project", "profile", "time"))
metaNest <- metaNest %>% mutate(ESDS = map2(ES, DS, ~list(.x, .y)))
# Run the (safely-wrapped) prism model per cast.
metaNest <- metaNest %>%
mutate(ESDS_Mod_Safe = map(ESDS, diagnose_disaggregation_one_profile_safe))
# Split the safely() results into value/error, then the value into ES/DS.
metaNest01 <- metaNest %>% mutate(ESDS_Mod = map(ESDS_Mod_Safe, ~.[[1]]),
ESDS_Err = map(ESDS_Mod_Safe, ~.[[2]]),
ES01 = map(ESDS_Mod, ~.[[1]]),
DS01 = map(ESDS_Mod, ~.[[2]]))
EachSize01 <- metaNest01 %>% select(project, profile, time, ES01) %>% unnest(ES01)
DepthSummary01 <- metaNest01 %>% select(project, profile, time, DS01) %>% unnest(DS01)
return(list(ES = EachSize01, DS = DepthSummary01))
}
## Functions about fitting flux.
## Add a candidate flux column (flux2 = C_f * nparticles * lb^ag) to the
## per-size-class table for a given parameter pair.
fit_flux_es <- function(C_f, ag, ES){
  ES %>% mutate(flux2 = C_f * nparticles * lb ^ ag)
}
## Sum the candidate flux (flux2, added by fit_flux_es) per depth and join it
## onto the depth summary as tot_flux2.
## C_f and ag are unused here; they are kept so the signature parallels
## fit_flux_es()/fit_flux() and existing callers keep working.
fit_flux_ds <- function(C_f, ag, ES, DS){
  totals <- ES %>%
    group_by(project, profile, time, depth) %>%
    summarize(tot_flux2 = sum(flux2))
  # Return explicitly: the original ended in an assignment, whose value is
  # returned invisibly and so vanished when autoprinting was expected.
  left_join(DS, totals, by = c("project", "profile", "time", "depth"))
}
## Convenience wrapper: compute flux2 per size class, then its per-depth total.
## Returns list(ES = table with flux2, DS = summary with tot_flux2).
fit_flux <- function(C_f, ag, ES, DS){
  es_with_flux <- fit_flux_es(C_f, ag, ES)
  list(ES = es_with_flux, DS = fit_flux_ds(C_f, ag, es_with_flux, DS))
}
## Root mean squared error of a fitted model's residuals.
## @param mod A fitted model object (or list) with a `residuals` component.
## @return The RMSE as a single numeric value.
RMSE <- function(mod){
  rss <- c(crossprod(mod$residuals)) # residual sum of squares
  mse <- rss / length(mod$residuals)
  # Return explicitly: the original ended in an assignment, whose value is
  # only returned invisibly.
  sqrt(mse)
}
## Log-space RMSE between the observed flux (tn_flux) and the candidate flux
## (tot_flux2, see fit_flux) on a depth summary table.
flux_check <- function(DS){
  #diff = log10(DS$tn_flux) - log10(DS$tot_flux2)
  log_diff <- log(DS$tn_flux) - log(DS$tot_flux2)
  squares <- log_diff ^ 2
  rss <- sum(squares)
  # Bug fix: the original divided by length(rss), which is always 1 because
  # rss is a scalar, so the function returned sqrt(RSS) rather than the RMSE.
  mse <- rss / length(squares)
  sqrt(mse)
}
## Fit the candidate flux with parameters (C_f, ag) and score it against the
## observed flux; lower is better.
fit_check_flux <- function(C_f, ag, ES, DS){
  fitted <- fit_flux(C_f, ag, ES, DS)
  flux_check(fitted$DS)
}
## Optimizer-friendly wrapper around fit_check_flux: the parameter vector is
## x = c(C_f, ag), as expected by e.g. optim().
fc_wrap <- function(x, ES, DS){
  fit_check_flux(x[1], x[2], ES, DS)
}
## Recode time variable
# add_time_data <- function(x, DepthSummary = NULL){
# require(chron)
# require(lubridate)
# x2 <- parse_jac_input2(x, DepthSummary)
# EachSize = x2[[1]]
# DepthSummary = x2[[2]]
#
# timeDf <- tibble(time = unique(DepthSummary$time)) %>%
# mutate(tod <- times(strftime(time,"%H:%M:%S")))
#
# # Recode into blocks of time
#
# # create breaks
# breaks <- hour(hm("21:00", "5:00", "9:00", "18:00", "20:59"))
# # labels for the breaks
# labels <- c("Night", "Morning", "Afternoon", "Evening")
#
# timeDf <- timeDf %>%
# mutate(timeBlock = cut(x=hour(time), breaks = breaks, labels = labels, include.lowest=TRUE)
# )
#
# # hours from noon
#
# hour(timeDf$tod)
#
#
#
# return(list(ES = EachSize, DS = DepthSummary))
# } |
############################################################
# Settings file for the Virus and Tx app.
# It contains additional information that helps the app machinery
# process the model properly.
############################################################
appsettings <- list(
  # Title of app, displayed at the top of the analyze tab
  apptitle = "Antiviral treatment model",
  # Name of the underlying simulation function(s)
  simfunction = 'simulate_virusandtx_ode',
  # Underlying mbmodel file; NULL when none exists
  mbmodelfile = NULL,
  # Number of plots to produce for output
  nplots = 1,
  # Type of model that will be run; set to NULL here if the model type
  # is instead provided as UI input
  modeltype = "_ode_",
  # Additional input elements for this app that are shown on the UI
  otherinputs = shiny::tagList(
    shiny::selectInput("steadystate", "Start at steady state",c("yes" = TRUE, 'no' = FALSE), selected = FALSE),
    shiny::selectInput("plotscale", "log-scale for plot",c("none" = "none", 'x-axis' = "x", 'y-axis' = "y", 'both axes' = "both")),
    shiny::selectInput("plotengine", "plot engine",c("ggplot" = "ggplot", "plotly" = "plotly"))
  ) #end taglist
)
| /inst/appinformation/virusandtx_settings.R | no_license | mohrosidi/DSAIRM | R | false | false | 1,283 | r | ############################################################
#This is a file for the Virus and Tx App
#it contains additional information that helps properly process it
############################################################
appsettings = list()
#Title of app, to be displayed on top of analyze tab
appsettings$apptitle = "Antiviral treatment model"
#name of underlying simulation function(s)
appsettings$simfunction = 'simulate_virusandtx_ode'
#name of underlying mbmodel - if exists
#if not exists, set to NULL
appsettings$mbmodelfile = NULL
#number of plots to produce for output
appsettings$nplots = 1
#specify the type of model that will be run
#if model type is provided as UI input, it should be set to NULL here
#otherwise it needs to be provided
appsettings$modeltype = "_ode_"
#additional input elements for app that are shown on UI
appsettings$otherinputs = shiny::tagList(
shiny::selectInput("steadystate", "Start at steady state",c("yes" = TRUE, 'no' = FALSE), selected = FALSE),
shiny::selectInput("plotscale", "log-scale for plot",c("none" = "none", 'x-axis' = "x", 'y-axis' = "y", 'both axes' = "both")),
shiny::selectInput("plotengine", "plot engine",c("ggplot" = "ggplot", "plotly" = "plotly"))
) #end taglist
|
#====================================================================
# COMPUTATIONAL STATISTICS - LAB 1
#====================================================================
library(ggplot2)
library(tidyverse)
library(matlib)
#--------------------------------------------------------------------
# QUESTION 1 - BE CAREFUL WHEN COMPARING
#--------------------------------------------------------------------
# Code snippet nr 1: exact `==` comparison of floating-point results.
x1 <- 1/3
x2 <- 1/4
if (x1-x2 == 1/12){
print("Subtraction is correct")
}else{
print("Subtraction is wrong")
}
# Notes: Gives "Subtraction is wrong".
# Code snippet nr 2: same comparison, but with dyadic rationals.
x1 <- 1
x2 <- 1/2
if (x1-x2 == 1/2){
print("Subtraction is correct")
}else{
print("Subtraction is wrong")
}
# Notes: "Subtraction is correct".
# 1. Check the results of the snippets. Comment what is going on.
# Comments: both snippets should print "Subtraction is correct" by exact
# arithmetic. The difference is floating-point representation: 1/3 and 1/12
# have no exact binary representation, so 1/3 - 1/4 and 1/12 round to
# slightly different doubles and `==` fails. In snippet 2 all values (1, 1/2)
# are exactly representable in binary, so the comparison succeeds.
# Because comparing floats with `==` is unreliable, use a tolerance instead.
# Code snippet nr 1 with improvement:
# all.equal() tests near-equality (within a tolerance); isTRUE() is needed
# because all.equal() returns a character description when values differ.
x1 <- 1/3
x2 <- 1/4
if (isTRUE(all.equal(x1-x2,1/12))){
print("Subtraction is correct")
}else{
print("Subtraction is wrong")
}
#--------------------------------------------------------------------
# QUESTION 2 - DERIVATIVE
#--------------------------------------------------------------------
f <- function(x) x
epsilon <- 1e-15
x <- 100
f(x + epsilon) - f(x)
# Forward-difference approximation of f'(x) with step size e.
# Bug fix: the original ignored its `e` argument and read the global
# `epsilon` instead; the step is now the parameter that callers pass
# (all existing calls passed epsilon, so results are unchanged).
derivative <- function(f, x, e){
  (f(x + e) - f(x)) / e
}
derivative(f, 0.999, epsilon) #1.11
derivative(f, 1, epsilon) #1.11
derivative(f, 100000, epsilon) #0
# What values did you obtain? What are the true values? Explain the reasons
# behind the discovered differences.
# Since f(x) = x, f'(x) = 1 everywhere, so the true value is 1 at every point.
# However:
#   x = 1      -> about 1.11
#   x = 100000 -> 0
# This is due to the limitation of double-precision storage in R.
# In both cases we divide by a tiny number, epsilon. For x near 1 the rounded
# difference f(x+eps) - f(x) is not exactly eps, giving ~1.11. For large x the
# tiny epsilon is completely absorbed when added to x (catastrophic loss of
# precision), so the numerator is exactly 0 and the derivative evaluates to 0.
#--------------------------------------------------------------------
# QUESTION 3 - VARIANCE
#--------------------------------------------------------------------
# One-pass ("textbook") variance formula. It is numerically unstable when
# mean(x) is large relative to sd(x) -- which is the point of this exercise.
myvar <- function(x){
  n <- length(x)
  (1/(n-1)) * ( sum(x^2) - (1/n)*(sum(x))^2)
}
# Generate random numbers with a huge mean to provoke cancellation
x <- rnorm(10000, mean = 10^8 , sd = 1)
# Difference between myvar() and var() for each growing subset of x.
# Bug fix: the loop previously filled `Y` while the data frame used `y`
# (R is case-sensitive, so data.frame() would fail); the name is now
# consistently lowercase.
y <- c()
for (i in seq_along(x)){
  y[i] <- myvar(x[1:i]) - var(x[1:i])
}
# Create data frame
df <- data.frame(i = seq_along(x), y)
# Plot the results
ggplot(df, aes(x = i, y = y)) +
  geom_point()
# Second, numerically stable implementation (two-pass, centered).
# NOTE(review): this divides by n (population variance) while var() divides
# by n-1, so the plotted difference also reflects that factor -- confirm
# whether that is intended for the comparison.
myvar2 <- function(x){
  mean((x-mean(x))^2)
}
# Difference between myvar2() and var() for each subset
y2 <- c()
for (i in seq_along(x)){
  y2[i] <- myvar2(x[1:i]) - var(x[1:i])
}
# Create data frame
df2 <- data.frame(i = seq_along(x), y2)
# Plot the results
ggplot(df2, aes(x = i, y = y2)) +
  geom_point()
#--------------------------------------------------------------------
# QUESTION 4 - LINEAR ALGEBRA
#--------------------------------------------------------------------
# Read the data
tecator <- read.csv(file = "tecator.csv")
y <- tecator$Protein
X <- tecator %>%
  select(-c(Sample, Protein)) %>%
  as.matrix()
# Normal equations A beta = b with A = X'X and b = X'y.
# Bug fix: b was computed as t(X) * y, which is an elementwise product with
# recycling (wrong shape and wrong values); the normal equations require the
# matrix product t(X) %*% y.
A <- t(X) %*% X
b <- t(X) %*% y
# Solving the linear system
solve(A, b) # Fails: the unscaled design matrix is (numerically) singular
# Check the condition number
kappa(A) # a very high condition number signals an ill-conditioned system
# Improve the conditioning by scaling the predictors and the response
X <- tecator %>%
  select(-c(Sample, Protein)) %>%
  as.matrix() %>%
  scale()
y <- as.vector(scale(tecator$Protein))
# Solving the scaled linear system
A <- t(X) %*% X
b <- t(X) %*% y
solve(A, b) # works
det(A)
# Check the condition number
kappa(A) # still large, but the scaled system is now solvable
| /Lab1/lab1_sj.R | no_license | hradave/Computational-Statistics | R | false | false | 4,607 | r | #====================================================================
# COMPUTATIONAL STATISTICS - LAB 1
#====================================================================
library(ggplot2)
library(tidyverse)
library(matlib)
#--------------------------------------------------------------------
# QUESTION 1 - BE CAREFUL WHEN COMPARING
#--------------------------------------------------------------------
# Code snippet nr 1.
x1 <- 1/3
x2 <- 1/4
if (x1-x2 == 1/12){
print("Subtraction is correct")
}else{
print("Subtraction is wrong")
}
# Notes: Gives "Subtraction is wrong".
# Code snippet nr 2.
x1 <- 1
x2 <- 1/2
if (x1-x2 == 1/2){
print("Subtraction is correct")
}else{
print("Subtraction is wrong")
}
# Notes: "Subtraction is correct".
# 1. Check the results of the snippets. Comment what is going on.
# Comments: Nr 1 and nr 2 should both give Subtraction is correct according to mathematical arithmetics.
# It can be difficult to compare floats. Nr 1 is a rational number and can only be expressed as a
# fraction of two integers to be precise, and it can only approximately be expressed in decimal form.
# Since there is a problem when comparing floats, we should consider other solutions to the problem.
# Code snippet nr 1 with improvement
# Use isTRUE(), which handles the case when not true in `if` expressions.
# all.equal(), tests if two objects are nearly equal, that is testing near equality.
x1 <- 1/3
x2 <- 1/4
if (isTRUE(all.equal(x1-x2,1/12))){
print("Subtraction is correct")
}else{
print("Subtraction is wrong")
}
#--------------------------------------------------------------------
# QUESTION 2 - DERIVATIVE
#--------------------------------------------------------------------
f <- function(x) x
epsilon <- 1e-15
x<- 100
f(x+epsilon) - f(x)
# Forward-difference approximation of the derivative of f at x.
#
# f: function of one numeric argument
# x: point at which to differentiate
# e: step size of the forward difference
#
# BUG FIX: the original ignored the `e` argument and always divided by the
# global `epsilon`, so the step size supplied by the caller had no effect.
derivative <- function(f, x, e) {
  (f(x + e) - f(x)) / e
}
derivative(f, 0.999, epsilon) #1.11
derivative(f, 1, epsilon) #1.11
derivative(f,100000, epsilon) #0
# What values did you obtain? What are the true values? Explain the reasons behind the
# discovered differences.
# The derivative of a function at a particular point, will not affect this since
# f(x)=x, f'(x)=1, so evaluating a point a such that f'(a), will still equal 1.
# True values are 1.
# However, the computed values are:
# x = 1      -> approximately 1.11
# x = 100000 -> 0
# This is a floating-point precision effect. With epsilon = 1e-15 the step is
# close to machine epsilon, so f(x + epsilon) - f(x) suffers catastrophic
# cancellation: around x = 1 the rounded difference divided by epsilon gives
# roughly 1.11 instead of 1, and for large x the step is absorbed entirely
# (x + epsilon == x in double precision), so the numerator — not the
# denominator — becomes 0 and the computed derivative is 0.
#--------------------------------------------------------------------
# QUESTION 3 - VARIANCE
#--------------------------------------------------------------------
# Variance function
# Sample variance via the "sum of squares" shortcut formula:
#   Var(x) = (sum(x^2) - (sum x)^2 / n) / (n - 1)
# (Numerically unstable for data with a large mean — that instability is
# exactly what the exercise investigates.)
myvar <- function(x) {
  n_obs <- length(x)
  total <- sum(x)
  sum_sq <- sum(x * x)
  (sum_sq - total^2 / n_obs) / (n_obs - 1)
}
# Generate random numbers with a large mean, which makes the shortcut
# formula in myvar() numerically unstable (catastrophic cancellation).
x <- rnorm(10000, mean = 10^8, sd = 1)
# Difference between the two variance functions for each growing subset x[1:i].
# BUG FIX: the loop filled `Y` but the data frame and plot referenced an
# undefined lowercase `y`; a single name is now used throughout. The result
# vector is also preallocated instead of grown inside the loop.
y <- numeric(length(x))
for (i in seq_along(x)) {
  y[i] <- myvar(x[1:i]) - var(x[1:i])
}
# Create data frame
df <- data.frame(i = seq_along(x), y = y)
# Plot the results
ggplot(df, aes(x = i, y = y)) +
  geom_point()
# Second implementation of the variance: the mean squared deviation from the
# sample mean (the biased 1/n population variance — intentionally NOT the
# 1/(n-1) sample variance).
myvar2 <- function(x) {
  centered <- x - mean(x)
  sum(centered^2) / length(x)
}
# Difference between the two variance functions, for each subset
y2 <- c()
for (i in seq_along(x)){
y2[i] <- myvar2(x[1:i]) - var(x[1:i])
}
# Create data frame
df2 <- data.frame(i=seq_along(x), y2)
# Plot the results
ggplot(df2, aes(x=i, y=y2)) +
geom_point()
#--------------------------------------------------------------------
# QUESTION 4 - LINEAR ALGEBRA
#--------------------------------------------------------------------
# Read the data
tecator <- read.csv(file = "tecator.csv")
y <- tecator$Protein
X <- tecator %>%
  select(-c(Sample, Protein)) %>%
  as.matrix()
# Build the normal equations A b_hat = b for least squares:
#   A = X'X (p x p), b = X'y (p x 1)
# BUG FIX: t(X)*y is ELEMENTWISE multiplication with recycling (a p x n
# matrix), not the matrix product X'y required by the normal equations.
A <- t(X) %*% X
b <- t(X) %*% y
# Solving the linear system
solve(A, b) # Results in an error: the design matrix is computationally singular
# Check the condition number
kappa(A) # a high condition number indicates the linear system is ill-conditioned
# Improve the conditioning by centering and scaling the data
X <- tecator %>%
  select(-c(Sample, Protein)) %>%
  as.matrix() %>%
  scale()
y <- as.vector(scale(tecator$Protein))
# Solving the linear system on the scaled data
A <- t(X) %*% X
b <- t(X) %*% y
solve(A, b) # works
det(A)
# Check the condition number again
kappa(A) # still large, but the system is now numerically solvable
|
# Auto-extracted example from the factoextra 'multishapes' Rd page:
# scatter-plot of the first two columns, coloured by the cluster label
# stored in column 3.
library(factoextra)
### Name: multishapes
### Title: A dataset containing clusters of multiple shapes
### Aliases: multishapes
### ** Examples
## No test:
data(multishapes)
plot(multishapes[,1], multishapes[, 2],
    col = multishapes[, 3], pch = 19, cex = 0.8)
## End(No test)
| /data/genthat_extracted_code/factoextra/examples/multishapes.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 287 | r | library(factoextra)
# Auto-extracted example from the factoextra 'multishapes' Rd page (duplicate
# copy): scatter-plot of the first two columns, coloured by the cluster label
# stored in column 3.
### Name: multishapes
### Title: A dataset containing clusters of multiple shapes
### Aliases: multishapes
### ** Examples
## No test:
data(multishapes)
plot(multishapes[,1], multishapes[, 2],
    col = multishapes[, 3], pch = 19, cex = 0.8)
## End(No test)
|
# Create a mutated copy of a set of DNA training sequences and write it to a
# FASTA file.
#
# train            : character vector of DNA sequences to mutate
# retentionPercent : fraction (0-1) of the training sequences to retain
# mutationPercent  : fraction (0-1) of a sequence's length that determines how
#                    many copies of the chosen nucleotide are inserted
#
# Returns the name of the FASTA file that was written.
activateMutation <- function(train, retentionPercent, mutationPercent) {
  # Sample which sequences to keep
  retentionIndices <- sample(length(train), round(retentionPercent * length(train)))
  modtrain <- train[retentionIndices]
  # Mutate each retained sequence: pick a random position and replace the
  # nucleotide there with nchar(x) * mutationPercent copies of itself
  # (a homopolymer expansion). vapply() guarantees a character result.
  modifiedSequences <- vapply(as.character(modtrain), function(x) {
    splitDNA <- unname(unlist(strsplit(x, '')))
    splitPos <- sample(length(splitDNA), 1)
    nucleotide <- splitDNA[splitPos]
    # rep() truncates a non-integer count towards zero; a count of 0 deletes
    # the nucleotide at splitPos entirely.
    mutation <- rep(nucleotide, nchar(x) * mutationPercent)
    # Rebuild the sequence: prefix (before splitPos) + mutation + suffix
    # (after splitPos). seq_len() handles the splitPos == 1 boundary safely;
    # the original relied on R silently dropping the 0 index produced by the
    # 1:splitPos-1 operator-precedence trap.
    prefix <- splitDNA[seq_len(splitPos - 1)]
    suffix <- if (splitPos < length(splitDNA)) {
      splitDNA[(splitPos + 1):length(splitDNA)]
    } else {
      character(0)
    }
    paste0(c(prefix, mutation, suffix), collapse = '')
  }, character(1))
  # Write the mutated sequences as a FASTA file
  FILE = 'mutatedTestData.fa'
  sink(FILE)
  on.exit(sink(), add = TRUE) # restore output even if writing fails
  for (i in seq_along(modifiedSequences)) {
    cat(paste0(c('>TestSequence', i), collapse = ''), '\n')
    cat(modifiedSequences[[i]], '\n')
  }
  return(FILE)
}
# Create a mutated copy of a set of DNA training sequences and write it to a
# FASTA file. (Duplicate copy of activateMutation from the earlier file.)
#
# train            : character vector of DNA sequences to mutate
# retentionPercent : fraction (0-1) of the training sequences to retain
# mutationPercent  : fraction (0-1) of a sequence's length that determines how
#                    many copies of the chosen nucleotide are inserted
#
# Returns the name of the FASTA file that was written.
activateMutation <- function(train, retentionPercent, mutationPercent) {
  # Sample which sequences to keep
  retentionIndices <- sample(length(train), round(retentionPercent * length(train)))
  modtrain <- train[retentionIndices]
  # Mutate each retained sequence: pick a random position and replace the
  # nucleotide there with nchar(x) * mutationPercent copies of itself
  # (a homopolymer expansion). vapply() guarantees a character result.
  modifiedSequences <- vapply(as.character(modtrain), function(x) {
    splitDNA <- unname(unlist(strsplit(x, '')))
    splitPos <- sample(length(splitDNA), 1)
    nucleotide <- splitDNA[splitPos]
    # rep() truncates a non-integer count towards zero; a count of 0 deletes
    # the nucleotide at splitPos entirely.
    mutation <- rep(nucleotide, nchar(x) * mutationPercent)
    # Rebuild the sequence: prefix (before splitPos) + mutation + suffix
    # (after splitPos). seq_len() handles the splitPos == 1 boundary safely;
    # the original relied on R silently dropping the 0 index produced by the
    # 1:splitPos-1 operator-precedence trap.
    prefix <- splitDNA[seq_len(splitPos - 1)]
    suffix <- if (splitPos < length(splitDNA)) {
      splitDNA[(splitPos + 1):length(splitDNA)]
    } else {
      character(0)
    }
    paste0(c(prefix, mutation, suffix), collapse = '')
  }, character(1))
  # Write the mutated sequences as a FASTA file
  FILE = 'mutatedTestData.fa'
  sink(FILE)
  on.exit(sink(), add = TRUE) # restore output even if writing fails
  for (i in seq_along(modifiedSequences)) {
    cat(paste0(c('>TestSequence', i), collapse = ''), '\n')
    cat(modifiedSequences[[i]], '\n')
  }
  return(FILE)
}
Sys.setenv(TZ='Etc/GMT+1') # issue#612@rstan
#job.args <- c("cov2", "ast_parsars_ptm", "snaut_parsars_ptm_20200907", "20200920", "20200920", "0", "39")
#job.args <- c("cov2", "ast_parsars_ptm", "snaut_parsars_phospho_20201005", "20201012", "20201012", "0", "39")
if (!exists('job.args')) {
job.args <- commandArgs(trailingOnly = TRUE)
}
project_id <- job.args[[1]]
message('Project ID=', project_id)
job_name <- as.character(job.args[[2]])
job_version <- job.args[[5]]
msfolder <- job.args[[3]]
data_version <- job.args[[4]]
fit_version <- job_version
job_id <- as.integer(job.args[[6]])
job_chunk <- as.integer(job.args[[7]])#job_chunk <- filter(mutate(modelobjs_df, chunk=row_number()), str_detect(ptmn_label, "^Phospho_C1orf198_S289_M1"))$chunk
message('Job ', job_name, '(id=', job_id, '_', job_chunk,
" data_version=", data_version, " fit_version=", fit_version, " running on ", Sys.info()["nodename"], ")")
#source("~/R/config.R")
source("/projects/R/config.R")
source(file.path(base_scripts_path, 'R/misc/setup_base_paths.R'))
source(file.path(base_scripts_path, 'R/misc/setup_project_paths.R'))
rdata_filepath <- file.path(scratch_path, paste0(project_id, '_msglm_data_', msfolder, '_', data_version, '.RData'))
message('Loading data from ', rdata_filepath)
load(rdata_filepath)
if (Sys.getenv('SLURM_CPUS_PER_TASK') != '') {
mcmc_nchains <- as.integer(Sys.getenv('SLURM_CPUS_PER_TASK')) # SLURM way
} else if (Sys.getenv('NSLOTS') != '') {
mcmc_nchains <- as.integer(Sys.getenv('NSLOTS')) # SGE way
} else {
mcmc_nchains <- 8
}
require(rlang)
require(dplyr)
require(msglm)
require(rstan)
require(tidyr)
require(stringr)
require(maxquantUtils)
modelobj <- "ptmn"
quantobj <- "pepmodstate"
source(file.path(project_scripts_path, 'setup_modelobj.R'))
sel_object_ids <- modelobjs_df[[modelobj_idcol]][[job_chunk]]
message(sel_object_ids, " ", modelobj, " ID(s): ",
paste0(sort(unique(dplyr::pull(modelobjs_df[modelobjs_df[[modelobj_idcol]] %in% sel_object_ids, ], object_label))), collapse=' '))
sel_ptm_type <- modelobjs_df$ptm_type[[job_chunk]]
msdata.df <- dplyr::filter(msdata$ptmn2pepmodstate, ptmn_id %in% sel_object_ids) %>%
dplyr::inner_join(dplyr::select(msdata$pepmodstate_intensities, pepmodstate_id, msrun,
intensity=intensity_norm, psm_qvalue)) %>% # !!! use SN-normalized intensity as the raw intensity
dplyr::inner_join(dplyr::select(msdata$ptmn_locprobs, ptmn_id, ptm_locprob, pepmodstate_id, msrun)) %>%
dplyr::inner_join(dplyr::select(msdata$msruns, msrun, condition, treatment, timepoint)) %>%
dplyr::filter((coalesce(psm_qvalue, 1) <= data_info$qvalue_max) &
(coalesce(ptm_locprob, 0) >= data_info$locprob_min)) %>% # filter valid PTM localization and identification probabilities
dplyr::mutate(object_id = ptmn_id)
message('Preparing MS GLM data...')
model_data <- list()
model_data$mschannels <- dplyr::select(msdata$msruns, dataset, condition, msrun, replicate) %>%
dplyr::filter(dataset == case_when(sel_ptm_type == "GlyGly" ~ "ubi",
sel_ptm_type == "Phospho" ~ "phospho",
TRUE ~ "unknown")) %>%
dplyr::inner_join(dplyr::select(total_msrun_shifts.df, msrun, total_msrun_shift)) %>%
dplyr::arrange(condition, replicate, msrun) %>% dplyr::distinct() %>%
dplyr::mutate(mschannel_ix = row_number(),
msrun_ix = as.integer(factor(msrun, levels=unique(msrun))),
msproto_ix = 1L,
zero_msrun_shift = 0L)
experiment_shift_col <- 'total_msrun_shift'
model_data$mschannels$model_mschannel_shift <- model_data$mschannels[[experiment_shift_col]]
model_data$conditions <- conditions.df %>%
mutate(condition_ix = row_number())
model_data$interactions <- tidyr::crossing(object_id = sel_object_ids,
condition_ix = model_data$conditions$condition_ix) %>%
dplyr::inner_join(dplyr::select(model_data$conditions, condition_ix, condition) %>% dplyr::distinct()) %>%
dplyr::left_join(msdata.df %>% dplyr::select(condition, object_id) %>% dplyr::distinct() %>%
dplyr::mutate(is_virtual = FALSE)) %>%
dplyr::mutate(is_virtual = is.na(is_virtual),
iaction_id = paste(condition, object_id))
model_data$interactions <- dplyr::arrange(model_data$interactions, condition_ix, object_id) %>%
dplyr::mutate(glm_iaction_ix = row_number(),
glm_object_ix = as.integer(factor(object_id)))
model_data$objects <- dplyr::transmute(model_data$interactions, glm_object_ix, object_id, is_underdefined=FALSE) %>%
dplyr::distinct() %>% dplyr::arrange(glm_object_ix) %>%
dplyr::inner_join(dplyr::select(modelobjs_df, ptm_id, ptmn_id, ptmn_label, nselptms, object_id, object_label, ptmn_label_no_ptm_type)) %>%
dplyr::inner_join(dplyr::select(filter(msdata$ptm2gene, ptm_is_reference), ptm_id, protein_ac, ptm_pos, ptm_AA_seq, ptm_type, contains("is_"))) %>%
dplyr::inner_join(dplyr::select(msdata$proteins, protein_ac, gene_name=genename, protein_name, contains("is_"))) %>%
#dplyr::select(-is_fit) %>%
dplyr::arrange(glm_object_ix)
# arrange pepmodstates by object, by profile cluster and by the number of quantitations
model_data$subobjects <- msdata.df %>%
dplyr::inner_join(msdata$ptmn2pepmodstate) %>%
dplyr::group_by(ptmn_id, pepmodstate_id) %>%
dplyr::summarise(n_quant = sum(!is.na(intensity)),
intensity_med = median(intensity, na.rm=TRUE)) %>% ungroup() %>%
dplyr::inner_join(dplyr::select(model_data$objects, ptmn_id, glm_object_ix)) %>%
# FIXME cluster per object
dplyr::inner_join(cluster_msprofiles(msdata.df, msdata$msrun_pepmodstate_stats, obj_col='pepmodstate_id', msrun_col='msrun')) %>%
dplyr::arrange(glm_object_ix, profile_cluster, desc(n_quant), desc(intensity_med)) %>%
dplyr::group_by(ptmn_id, glm_object_ix, profile_cluster) %>%
dplyr::mutate(subobject_group_ix = row_number() %/% 20, # put objects within cluster into groups of 20
subobject_local_ix = row_number() %% 20) %>%
dplyr::ungroup() %>%
# take the first group of 10 objects from each cluster, then continue with the second group etc
dplyr::arrange(glm_object_ix, subobject_group_ix, profile_cluster, subobject_local_ix) %>%
dplyr::mutate(glm_subobject_ix = row_number()) %>%
dplyr::filter(glm_subobject_ix <= 20) # remove less abundant subobjects of rich objects
# entries for an interaction in all replicate experiments
model_data$observations <- dplyr::inner_join(model_data$interactions, model_data$mschannels) %>%
dplyr::arrange(glm_object_ix, glm_iaction_ix, mschannel_ix) %>%
dplyr::mutate(glm_observation_ix = seq_len(n()),
observation_id = paste(msrun, object_id))
model_data$msdata <- dplyr::inner_join(model_data$observations, model_data$subobjects) %>%
dplyr::left_join(msdata.df) %>%
dplyr::arrange(glm_observation_ix, glm_subobject_ix) %>%
dplyr::mutate(glm_subobservation_ix = seq_len(n()))
model_data$msdata <- mutate(model_data$msdata,
qdata_ix = if_else(!is.na(intensity), cumsum(!is.na(intensity)), NA_integer_),
mdata_ix = if_else(is.na(intensity), cumsum(is.na(intensity)), NA_integer_))
model_data <- prepare_effects(model_data, underdefined_iactions=FALSE)
dims_info <- msglm.prepare_dims_info(model_data, object_cols=c('object_id', modelobj_idcol, "object_label", "ptmn_label_no_ptm_type",
"ptm_type", "ptm_AA_seq", "ptm_pos", "nselptms",
"protein_ac", "gene_name", "protein_name",
"is_viral", "is_contaminant"#, "is_decoy"
))
# remove unneeded data to free some memory
msdata <- NULL
gc()
msglm.stan_data <- stan.prepare_data(mscalib, model_data,
global_labu_shift = global_labu_shift,
obj_labu_min_scale = 1,
iact_repl_shift_df = 2,
suo_fdr=0.001, reliable_obs_fdr = 0.01, specific_iaction_fdr = 1,
empty_observation_sigmoid_scale = data_info$empty_observation_sigmoid_scale)
message('Running STAN in NUTS mode...')
options(mc.cores=mcmc_nchains)
msglm.stan_fit <- stan.sampling(msglm.stan_data, adapt_delta=0.9, max_treedepth=11L,
iter=4000L, chains=mcmc_nchains)
min.iteration <- as.integer(1.25 * msglm.stan_fit@sim$warmup)
msglm_results <- process.stan_fit(msglm.stan_fit, dims_info)
res_prefix <- paste0(project_id, "_", msfolder, "_msglm", modelobj_suffix)
if (!dir.exists(file.path(scratch_path, res_prefix))) {
dir.create(file.path(scratch_path, res_prefix))
}
rfit_filepath <- file.path(scratch_path, res_prefix, paste0(res_prefix, '_', fit_version, '_', job_chunk, '.RData'))
message('Saving STAN results to ', rfit_filepath, '...')
results_info <- list(project_id = project_id, msfolder = msfolder,
data_version = data_version, fit_version = fit_version,
job_name = job_name, job_chunk = job_chunk, modelobj = modelobj, quantobj = quantobj)
save(data_info, results_info,
model_data, msglm.stan_data, msglm_results,
dims_info, file = rfit_filepath)
message('Done.')
on.exit(unlink(tempdir(), force = TRUE), add=TRUE)
| /msglm_fit_chunk_parsars_ptm.R | no_license | innatelab/cov2 | R | false | false | 9,559 | r | Sys.setenv(TZ='Etc/GMT+1') # issue#612@rstan
#job.args <- c("cov2", "ast_parsars_ptm", "snaut_parsars_ptm_20200907", "20200920", "20200920", "0", "39")
#job.args <- c("cov2", "ast_parsars_ptm", "snaut_parsars_phospho_20201005", "20201012", "20201012", "0", "39")
if (!exists('job.args')) {
job.args <- commandArgs(trailingOnly = TRUE)
}
project_id <- job.args[[1]]
message('Project ID=', project_id)
job_name <- as.character(job.args[[2]])
job_version <- job.args[[5]]
msfolder <- job.args[[3]]
data_version <- job.args[[4]]
fit_version <- job_version
job_id <- as.integer(job.args[[6]])
job_chunk <- as.integer(job.args[[7]])#job_chunk <- filter(mutate(modelobjs_df, chunk=row_number()), str_detect(ptmn_label, "^Phospho_C1orf198_S289_M1"))$chunk
message('Job ', job_name, '(id=', job_id, '_', job_chunk,
" data_version=", data_version, " fit_version=", fit_version, " running on ", Sys.info()["nodename"], ")")
#source("~/R/config.R")
source("/projects/R/config.R")
source(file.path(base_scripts_path, 'R/misc/setup_base_paths.R'))
source(file.path(base_scripts_path, 'R/misc/setup_project_paths.R'))
rdata_filepath <- file.path(scratch_path, paste0(project_id, '_msglm_data_', msfolder, '_', data_version, '.RData'))
message('Loading data from ', rdata_filepath)
load(rdata_filepath)
if (Sys.getenv('SLURM_CPUS_PER_TASK') != '') {
mcmc_nchains <- as.integer(Sys.getenv('SLURM_CPUS_PER_TASK')) # SLURM way
} else if (Sys.getenv('NSLOTS') != '') {
mcmc_nchains <- as.integer(Sys.getenv('NSLOTS')) # SGE way
} else {
mcmc_nchains <- 8
}
require(rlang)
require(dplyr)
require(msglm)
require(rstan)
require(tidyr)
require(stringr)
require(maxquantUtils)
modelobj <- "ptmn"
quantobj <- "pepmodstate"
source(file.path(project_scripts_path, 'setup_modelobj.R'))
sel_object_ids <- modelobjs_df[[modelobj_idcol]][[job_chunk]]
message(sel_object_ids, " ", modelobj, " ID(s): ",
paste0(sort(unique(dplyr::pull(modelobjs_df[modelobjs_df[[modelobj_idcol]] %in% sel_object_ids, ], object_label))), collapse=' '))
sel_ptm_type <- modelobjs_df$ptm_type[[job_chunk]]
msdata.df <- dplyr::filter(msdata$ptmn2pepmodstate, ptmn_id %in% sel_object_ids) %>%
dplyr::inner_join(dplyr::select(msdata$pepmodstate_intensities, pepmodstate_id, msrun,
intensity=intensity_norm, psm_qvalue)) %>% # !!! use SN-normalized intensity as the raw intensity
dplyr::inner_join(dplyr::select(msdata$ptmn_locprobs, ptmn_id, ptm_locprob, pepmodstate_id, msrun)) %>%
dplyr::inner_join(dplyr::select(msdata$msruns, msrun, condition, treatment, timepoint)) %>%
dplyr::filter((coalesce(psm_qvalue, 1) <= data_info$qvalue_max) &
(coalesce(ptm_locprob, 0) >= data_info$locprob_min)) %>% # filter valid PTM localization and identification probabilities
dplyr::mutate(object_id = ptmn_id)
message('Preparing MS GLM data...')
model_data <- list()
model_data$mschannels <- dplyr::select(msdata$msruns, dataset, condition, msrun, replicate) %>%
dplyr::filter(dataset == case_when(sel_ptm_type == "GlyGly" ~ "ubi",
sel_ptm_type == "Phospho" ~ "phospho",
TRUE ~ "unknown")) %>%
dplyr::inner_join(dplyr::select(total_msrun_shifts.df, msrun, total_msrun_shift)) %>%
dplyr::arrange(condition, replicate, msrun) %>% dplyr::distinct() %>%
dplyr::mutate(mschannel_ix = row_number(),
msrun_ix = as.integer(factor(msrun, levels=unique(msrun))),
msproto_ix = 1L,
zero_msrun_shift = 0L)
experiment_shift_col <- 'total_msrun_shift'
model_data$mschannels$model_mschannel_shift <- model_data$mschannels[[experiment_shift_col]]
model_data$conditions <- conditions.df %>%
mutate(condition_ix = row_number())
model_data$interactions <- tidyr::crossing(object_id = sel_object_ids,
condition_ix = model_data$conditions$condition_ix) %>%
dplyr::inner_join(dplyr::select(model_data$conditions, condition_ix, condition) %>% dplyr::distinct()) %>%
dplyr::left_join(msdata.df %>% dplyr::select(condition, object_id) %>% dplyr::distinct() %>%
dplyr::mutate(is_virtual = FALSE)) %>%
dplyr::mutate(is_virtual = is.na(is_virtual),
iaction_id = paste(condition, object_id))
model_data$interactions <- dplyr::arrange(model_data$interactions, condition_ix, object_id) %>%
dplyr::mutate(glm_iaction_ix = row_number(),
glm_object_ix = as.integer(factor(object_id)))
model_data$objects <- dplyr::transmute(model_data$interactions, glm_object_ix, object_id, is_underdefined=FALSE) %>%
dplyr::distinct() %>% dplyr::arrange(glm_object_ix) %>%
dplyr::inner_join(dplyr::select(modelobjs_df, ptm_id, ptmn_id, ptmn_label, nselptms, object_id, object_label, ptmn_label_no_ptm_type)) %>%
dplyr::inner_join(dplyr::select(filter(msdata$ptm2gene, ptm_is_reference), ptm_id, protein_ac, ptm_pos, ptm_AA_seq, ptm_type, contains("is_"))) %>%
dplyr::inner_join(dplyr::select(msdata$proteins, protein_ac, gene_name=genename, protein_name, contains("is_"))) %>%
#dplyr::select(-is_fit) %>%
dplyr::arrange(glm_object_ix)
# arrange pepmodstates by object, by profile cluster and by the number of quantitations
model_data$subobjects <- msdata.df %>%
dplyr::inner_join(msdata$ptmn2pepmodstate) %>%
dplyr::group_by(ptmn_id, pepmodstate_id) %>%
dplyr::summarise(n_quant = sum(!is.na(intensity)),
intensity_med = median(intensity, na.rm=TRUE)) %>% ungroup() %>%
dplyr::inner_join(dplyr::select(model_data$objects, ptmn_id, glm_object_ix)) %>%
# FIXME cluster per object
dplyr::inner_join(cluster_msprofiles(msdata.df, msdata$msrun_pepmodstate_stats, obj_col='pepmodstate_id', msrun_col='msrun')) %>%
dplyr::arrange(glm_object_ix, profile_cluster, desc(n_quant), desc(intensity_med)) %>%
dplyr::group_by(ptmn_id, glm_object_ix, profile_cluster) %>%
dplyr::mutate(subobject_group_ix = row_number() %/% 20, # put objects within cluster into groups of 20
subobject_local_ix = row_number() %% 20) %>%
dplyr::ungroup() %>%
# take the first group of 10 objects from each cluster, then continue with the second group etc
dplyr::arrange(glm_object_ix, subobject_group_ix, profile_cluster, subobject_local_ix) %>%
dplyr::mutate(glm_subobject_ix = row_number()) %>%
dplyr::filter(glm_subobject_ix <= 20) # remove less abundant subobjects of rich objects
# entries for an interaction in all replicate experiments
model_data$observations <- dplyr::inner_join(model_data$interactions, model_data$mschannels) %>%
dplyr::arrange(glm_object_ix, glm_iaction_ix, mschannel_ix) %>%
dplyr::mutate(glm_observation_ix = seq_len(n()),
observation_id = paste(msrun, object_id))
model_data$msdata <- dplyr::inner_join(model_data$observations, model_data$subobjects) %>%
dplyr::left_join(msdata.df) %>%
dplyr::arrange(glm_observation_ix, glm_subobject_ix) %>%
dplyr::mutate(glm_subobservation_ix = seq_len(n()))
model_data$msdata <- mutate(model_data$msdata,
qdata_ix = if_else(!is.na(intensity), cumsum(!is.na(intensity)), NA_integer_),
mdata_ix = if_else(is.na(intensity), cumsum(is.na(intensity)), NA_integer_))
model_data <- prepare_effects(model_data, underdefined_iactions=FALSE)
dims_info <- msglm.prepare_dims_info(model_data, object_cols=c('object_id', modelobj_idcol, "object_label", "ptmn_label_no_ptm_type",
"ptm_type", "ptm_AA_seq", "ptm_pos", "nselptms",
"protein_ac", "gene_name", "protein_name",
"is_viral", "is_contaminant"#, "is_decoy"
))
# remove unneeded data to free some memory
msdata <- NULL
gc()
msglm.stan_data <- stan.prepare_data(mscalib, model_data,
global_labu_shift = global_labu_shift,
obj_labu_min_scale = 1,
iact_repl_shift_df = 2,
suo_fdr=0.001, reliable_obs_fdr = 0.01, specific_iaction_fdr = 1,
empty_observation_sigmoid_scale = data_info$empty_observation_sigmoid_scale)
message('Running STAN in NUTS mode...')
options(mc.cores=mcmc_nchains)
msglm.stan_fit <- stan.sampling(msglm.stan_data, adapt_delta=0.9, max_treedepth=11L,
iter=4000L, chains=mcmc_nchains)
min.iteration <- as.integer(1.25 * msglm.stan_fit@sim$warmup)
msglm_results <- process.stan_fit(msglm.stan_fit, dims_info)
res_prefix <- paste0(project_id, "_", msfolder, "_msglm", modelobj_suffix)
if (!dir.exists(file.path(scratch_path, res_prefix))) {
dir.create(file.path(scratch_path, res_prefix))
}
rfit_filepath <- file.path(scratch_path, res_prefix, paste0(res_prefix, '_', fit_version, '_', job_chunk, '.RData'))
message('Saving STAN results to ', rfit_filepath, '...')
results_info <- list(project_id = project_id, msfolder = msfolder,
data_version = data_version, fit_version = fit_version,
job_name = job_name, job_chunk = job_chunk, modelobj = modelobj, quantobj = quantobj)
save(data_info, results_info,
model_data, msglm.stan_data, msglm_results,
dims_info, file = rfit_filepath)
message('Done.')
on.exit(unlink(tempdir(), force = TRUE), add=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{SiSetProcessingOptions}
\alias{SiSetProcessingOptions}
\title{Sets processing options for each spectrum type.}
\usage{
SiSetProcessingOptions(option, value, specType)
}
\arguments{
\item{option}{Option to set (see below for a list of valid option strings).}
\item{value}{Value to set the for the given option.}
\item{specType}{Spectrum type index. -1 is a wildcard for all spectrum types.}
}
\description{
\code{SiSetProcessingOptions} sets processing options for each spectrum type.
}
\details{
Options:
\tabular{llc}{
Name \tab Description \tab Default value \cr
\code{MassCalibMode} \tab Mass calibration mode in use (see \code{\link{MassCalibrate}}). \tab 0 \cr
\code{MassCalibParamn} \tab Mass calibration parameters n =
1..number of calibration parameters for the given mass calibration mode. \tab \code{c(1000, 0)} \cr
\code{FullScale} \tab Full digitizing range of the ADC in the same units as
the spectra to be analyzed (typically mV). \tab 500 \cr
\code{NbrBits} \tab ADC resolution (8 for AP240, 14 for ADQ114 etc.). \tab 8 \cr
\code{SampleInterval} \tab Sample interval in ns. \tab 1 \cr
\code{PreampGain} \tab Gain of external preamp. \tab 1 \cr
\code{PreSamples} \tab Number of samples before a threshold crosser taken into account. \tab 0 \cr
\code{PostSamples} \tab Number of samples after a negative threshold crosser taken into account. \tab 0 \cr
\code{BaselineAndThresholdFromData} \tab If >0 the baseline and threshold
values will be determined based on \code{NbrStdDevs} for every processed
spectrum. If >1.5 baseline noise is determined from a fit to a histogram of
all data (instead of from the standard deviation of all data). This makes the
noise determination more robust when real peaks are present in the spectrum. \tab 0 \cr
\code{NbrStdDevs} \tab Number of standard deviations of baseline noise that
defines the threshold. Only relevant if \code{BaselineAndThresholdFromData>0}. \tab 6 \cr
\code{Baseline} \tab Baseline value used for calculation of intensities. Has
no meaning if \code{BaselineAndThresholdFromData>0}. \tab 5 \cr
\code{Threshold} \tab Threshold value used for calculation of intensities.
Has no meaning if \code{BaselineAndThresholdFromData!=0}. \tab 8 \cr
\code{NegativeSignal} \tab Indicates peak polarity with respect to the baseline. \tab 0 (=FALSE) \cr
\code{BaselineAndThresholdInCodes} \tab Indicates whether the values in \code{Baseline}
and \code{Threshold} are interpreted as ADC codes or mV. \tab 1 (=TRUE)
}
}
| /man/SiSetProcessingOptions.Rd | no_license | pasturm/TofDaqR | R | false | true | 2,571 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{SiSetProcessingOptions}
\alias{SiSetProcessingOptions}
\title{Sets processing options for each spectrum type.}
\usage{
SiSetProcessingOptions(option, value, specType)
}
\arguments{
\item{option}{Option to set (see below for a list of valid option strings).}
\item{value}{Value to set the for the given option.}
\item{specType}{Spectrum type index. -1 is a wildcard for all spectrum types.}
}
\description{
\code{SiSetProcessingOptions} sets processing options for each spectrum type.
}
\details{
Options:
\tabular{llc}{
Name \tab Description \tab Default value \cr
\code{MassCalibMode} \tab Mass calibration mode in use (see \code{\link{MassCalibrate}}). \tab 0 \cr
\code{MassCalibParamn} \tab Mass calibration parameters n =
1..number of calibration parameters for the given mass calibration mode. \tab \code{c(1000, 0)} \cr
\code{FullScale} \tab Full digitizing range of the ADC in the same units as
the spectra to be analyzed (typically mV). \tab 500 \cr
\code{NbrBits} \tab ADC resolution (8 for AP240, 14 for ADQ114 etc.). \tab 8 \cr
\code{SampleInterval} \tab Sample interval in ns. \tab 1 \cr
\code{PreampGain} \tab Gain of external preamp. \tab 1 \cr
\code{PreSamples} \tab Number of samples before a threshold crosser taken into account. \tab 0 \cr
\code{PostSamples} \tab Number of samples after a negative threshold crosser taken into account. \tab 0 \cr
\code{BaselineAndThresholdFromData} \tab If >0 the baseline and threshold
values will be determined based on \code{NbrStdDevs} for every processed
spectrum. If >1.5 baseline noise is determined from a fit to a histogram of
all data (instead of from the standard deviation of all data). This makes the
noise determination more robust when real peaks are present in the spectrum. \tab 0 \cr
\code{NbrStdDevs} \tab Number of standard deviations of baseline noise that
defines the threshold. Only relevant if \code{BaselineAndThresholdFromData>0}. \tab 6 \cr
\code{Baseline} \tab Baseline value used for calculation of intensities. Has
no meaning if \code{BaselineAndThresholdFromData>0}. \tab 5 \cr
\code{Threshold} \tab Threshold value used for calculation of intensities.
Has no meaning if \code{BaselineAndThresholdFromData!=0}. \tab 8 \cr
\code{NegativeSignal} \tab Indicates peak polarity with respect to the baseline. \tab 0 (=FALSE) \cr
\code{BaselineAndThresholdInCodes} \tab Indicates whether the values in \code{Baseline}
and \code{Threshold} are interpreted as ADC codes or mV. \tab 1 (=TRUE)
}
}
|
# Entry point for R CMD check: run the dataIrony package's testthat suite.
library(testthat)
library(dataIrony)
test_check("dataIrony")
| /tests/testthat.R | no_license | hojsgaard/dataIrony | R | false | false | 62 | r | library(testthat)
library(dataIrony)
test_check("dataIrony")
|
#* @apiTitle BSSEnsembleR
#* @apiDescription a plumber back-end for real-time ensemble modelling
# ---- GENERICS ------ #
# TRUE when x is a single usable string: non-NULL, non-NA, atomic,
# of character type, and not the empty string "".
isValidString <- function(x) {
  invalid <- is.null(x) || is.na(x) || !is.atomic(x) ||
    identical(x, "") || !is.character(x)
  !invalid
}
# Generate a 24-character MongoDB-style ObjectId in hexadecimal:
# 4-byte timestamp + 3-byte machine id + 2-byte pid + 3-byte counter.
#
# BUG FIX: the original concatenated unpadded hex strings, so whenever a
# component needed fewer digits than its byte width (e.g. a small process id),
# the resulting id was shorter than 24 characters and rejected by MongoDB as
# an invalid $oid. Each component is now zero-padded to its fixed width and
# the pid is masked to 16 bits.
OBID <- function() {
  ei <- formatC(as.integer(Sys.time()), width = 8, format = "x", flag = "0")      # 4-byte epoch seconds
  mi <- formatC(6666666L, width = 6, format = "x", flag = "0")                    # 3-byte machine id (static)
  pi <- formatC(Sys.getpid() %% 65536L, width = 4, format = "x", flag = "0")      # 2-byte process id
  ci <- formatC(sample(1048576:16777215, 1), width = 6, format = "x", flag = "0") # 3-byte counter
  paste0(ei, mi, pi, ci)
}
# Evaluate an assertion expression and turn failures into descriptive errors.
#
# exp : expression expected to evaluate to TRUE (evaluated lazily inside tryCatch)
# msg : human-readable description of the asserted condition
#
# If exp evaluates to FALSE an "Asserted that <msg>" error is raised; if
# evaluating exp itself errors, the captured error text is included in the
# failure message. Returns nothing on success.
# FIXES: reassignable T/F shorthands replaced by TRUE/FALSE; misspelled
# "asssert" in the failure message corrected.
assim <- function(exp, msg) {
  a <- tryCatch(exp, error = function(e) as.character(e))
  if (!identical(a, TRUE)) {
    if (identical(a, FALSE)) {
      stop(paste0("Asserted that ", msg))
    } else {
      stop(paste0("Fail to assert: ", msg, ", cause: ", as.character(a)))
    }
  }
}
# TRUE when x carries class "numeric" or "integer".
classNumber <- function(x) {
  inherits(x, c("numeric", "integer"))
}
# ----- FILTERS ------ #
# Plumber filter: adds a permissive CORS header to every response so the API
# can be called from browser front-ends on other origins, then forwards the
# request to the next handler in the chain.
#* @filter cors
cors <- function(res) { #Done
  res$setHeader("Access-Control-Allow-Origin", "*")
  plumber::forward()
} #Done
# Plumber filter: validates the auth token carried in the request body before
# any protected endpoint runs. On success the request is forwarded; on failure
# a 404 response with an error payload is returned.
#
# BUG FIX: the original signature was function(req), but the failure branch
# assigned res$status, so every invalid token crashed with "object 'res' not
# found" instead of returning the error response. Plumber injects `res` by
# name, so adding the parameter is backward compatible.
#* @filter tokenizer
tokenizer <- function(req, res) {
  body <- jsonlite::fromJSON(req$postBody)
  assertion <- getTokenValidation(body)
  if (assertion$Valid) {
    plumber::forward()
  } else {
    out <- list(error = assertion$Message)
    res$status <- 404
    return(out)
  }
}
# ----- QUERIES ---- #
# Build a JSON MongoDB query matching an ObjectId stored in `field`
# (defaults to the document id field "_id").
queryByID <- function(obid, field = "_id") {
  query <- setNames(list(list("$oid" = unbox(obid))), field)
  jsonlite::toJSON(query)
}
# Build a JSON MongoDB query matching a scalar value in the given field.
queryByField <- function(obj, field) {
  query <- setNames(list(unbox(obj)), field)
  jsonlite::toJSON(query)
}
# Convenience wrapper: JSON query matching a document by its "username" field.
queryByUsername <- function(username) {
  queryByField(username, "username")
}
# ----- GETTERS ---- #
# Look up a user document by username in the global `users` Mongo collection.
# Returns a data frame with zero or one rows.
getUserByUsername <- function(username) {
  .GlobalEnv$users$find(queryByUsername(username), "{}")
}
# Look up a user document by its ObjectId in the global `users` Mongo
# collection. Returns a data frame with zero or one rows.
getUserByID <- function(userid) {
  .GlobalEnv$users$find(queryByID(userid), "{}")
}
# Fetch only the `file` ObjectId referenced by the document with the given id.
getFileIDByObjectID <- function(col, obid) {
  projection <- '{"file":1,"_id":0}'
  col$find(queryByID(obid), projection)$file
}
getFileGridFS <- function(grid,fileID){
t <- tempfile()
out <- grid$read(paste0("id:", fileID),t, progress = FALSE)
return(t)
} #Done
# -- HELPERS -- #
# Create a user document in the global `users` collection. Only the bcrypt
# hash of the password is persisted, never the plaintext.
# Returns a list with the generated id, the username and the stored hash.
createNewUser <- function(username, password) {
  id <- OBID()
  hash <- bcrypt::hashpw(password)
  doc <- list("_id" = list("$oid" = jsonlite::unbox(id)),
              "username" = username,
              "hash" = hash)
  .GlobalEnv$users$insert(jsonlite::toJSON(doc))
  list("_id" = id, "username" = username, "hash" = hash)
}
# TRUE when exactly one user row was found, its stored hash is a valid
# string, and the supplied password matches that bcrypt hash.
authorizeUser <- function(user, password) {
  if (nrow(user) != 1) {
    return(FALSE)
  }
  stored_hash <- user$hash[[1]]
  isValidString(stored_hash) && bcrypt::checkpw(password, stored_hash)
}
# TRUE when exactly one user row was found and the supplied token is a
# bcrypt hash of the user's stored password hash.
authorizeToken <- function(user, token) {
  if (nrow(user) != 1) {
    return(FALSE)
  }
  bcrypt::checkpw(user$hash[[1]], token)
}
# Insert a document linking a user ObjectId to a file ObjectId into the given
# collection. Returns the generated id of the new link document.
registerUserFile <- function(col, userid, fileid) {
  obid <- OBID()
  doc <- list("_id"  = list("$oid" = unbox(obid)),
              "user" = list("$oid" = unbox(userid)),
              "file" = list("$oid" = unbox(fileid)))
  col$insert(jsonlite::toJSON(doc))
  obid
}
# -- VALIDATIONs -- #
# Validate a registration request body.
#
# Checks, in order: username/password/confirmation are valid strings, the two
# passwords match, the invitation key matches the server-side key, and the
# username is not already taken.
#
# Returns list(Valid = TRUE/FALSE, Message = ...) where Message is a character
# description of the first failed check ("" on success).
#
# FIXES: the error handler now returns as.character(e) — a plain string, as
# the sibling validators do — instead of the raw condition object, which did
# not serialize cleanly into the JSON error response; the redundant "== T"
# comparisons and reassignable T/F shorthands are removed; the "passord" typo
# in the user-facing message is corrected.
getRegistrationValidation <- function(body) {
  tryCatch({
    assim({isValidString(body$username)}, "username is not valid.")
    assim({isValidString(body$password)}, "password is not valid")
    assim({isValidString(body$validation)}, "password confirmation is not valid.")
    assim({body$password == body$validation}, "passwords don't match.")
    assim({body$invitation == .GlobalEnv$BSSEInvitation}, "invitation key don't match.")
    assim({length(getUserByUsername(body$username)) == 0}, "username already exists.")
    out <- list(Valid = TRUE, Message = '')
    return(out)
  }, error = function(e) {
    out <- list(Valid = FALSE, Message = as.character(e))
    return(out)
  })
}
# Validate a login request body: username and password must be valid strings
# and must match an existing user (bcrypt check via authorizeUser()).
#
# Returns list(Valid = TRUE/FALSE, Message = ...) where Message describes the
# first failed check ("" on success).
# FIX: reassignable T/F shorthands replaced by TRUE/FALSE for consistency
# with the other validators.
getLoginValidation <- function(body) {
  tryCatch({
    assim({isValidString(body$username)}, 'username is invalid.')
    assim({isValidString(body$password)}, 'password is invalid.')
    user <- getUserByUsername(body$username)
    assim({authorizeUser(user, body$password)}, 'username does not exist or password is wrong.')
    out <- list(Valid = TRUE, Message = '')
    return(out)
  }, error = function(e) {
    out <- list(Valid = FALSE, Message = as.character(e))
    return(out)
  })
}
# Validate an auth token: the body must carry a userid and a token, and the
# token must be a bcrypt hash of the stored user's password hash
# (see authorizeToken()).
#
# Returns list(Valid = TRUE/FALSE, Message = ...) where Message describes the
# first failed check ("" on success).
# FIX: reassignable T/F shorthands replaced by TRUE/FALSE for consistency
# with the other validators.
getTokenValidation <- function(body) {
  tryCatch({
    assim({isValidString(body$userid)}, 'userid is missing, token is invalid.')
    assim({isValidString(body$token)}, 'token is invalid.')
    user <- getUserByID(body$userid)
    assim({authorizeToken(user, body$token)}, 'token is invalid.')
    out <- list(Valid = TRUE, Message = '')
    return(out)
  }, error = function(e) {
    out <- list(Valid = FALSE, Message = as.character(e))
    return(out)
  })
}
# Validate an uploaded dataset file (an .RData file expected to contain
# objects named X and Y).
# NOTE: load() injects X and Y directly into this function's environment;
# every check below depends on exactly those two objects existing in the file.
#
# Enforced layout:
#   X: >2 columns; column 1 integer, column 2 factor, remaining columns
#      numeric/integer predictors
#   Y: a single numeric/integer output column with the same number of rows as X
#
# Returns list(Valid = TRUE/FALSE, Message = ...) with a description of the
# first failed check ("" on success).
# NOTE(review): "Firts" typo in one message string below — runtime strings are
# intentionally left untouched in this documentation-only pass.
getDatasetValidation <- function(file){
  tryCatch({
    load(file)
    X<-as.data.frame(X)
    Y<-as.data.frame(Y)
    #X Validation
    assim({ncol(X)>2},paste0('X has insufficient number of predictors inputs:',as.character(ncol(X))))
    assim({nrow(X)>0},paste0('X has insufficient number of observations:',as.character(nrow(X))))
    assim({is.integer(X[,1])},paste0('Firts column of X is class ',class(X[,1]),', and not integer class.'))
    assim({is.factor(X[,2])},paste0('Second column of X is class ',class(X[,2]),', and not factor class.'))
    assim({all(sapply(X[,3:ncol(X)], classNumber))},'All supplied predictors inputs, except for column one and two, should be of integer or numeric class.')
    #Y validation
    assim({ncol(Y)>0},paste0('Y has insufficient number of predictors outputs:',as.character(ncol(Y))))
    assim({nrow(Y)>0},paste0('Y has insufficient number of observations:',as.character(nrow(Y))))
    assim({classNumber(Y[,1])},'The Supplied predictor output should be of integer or numeric class.')
    #mutual validation (X and Y must align row-wise with shared complete cases)
    assim({nrow(X)==nrow(Y)},paste0('X number of observations (',as.character(nrow(X)),') differs from Y (',as.character(nrow(Y)),').'))
    assim({sum((complete.cases(X) & complete.cases(Y)))>0},'X and Y have independent number of NA or null observations.')
    out <- list(Valid = T, Message = '')
    return(out)
  }, error = function(e) {
    out <- list(Valid = F, Message = as.character(e))
    return(out)
  })
} #Done
# -- AUTHENTICATION -- #
#* Allow user to validate in server creating a user document (passwords should not be stored in the database)
#* @preempt tokenizer
#* @post /register
function(req, res) {
  # Parse the JSON request body and run the registration validation chain.
  body <- jsonlite::fromJSON(req$postBody)
  assertion <- getRegistrationValidation(body)
  if (assertion$Valid) {
    newuser <- createNewUser(body$username, body$password)
    # The bearer token returned to the client is a bcrypt hash of the
    # stored credential hash; the plaintext password is never returned.
    out <- list(userid = newuser$'_id' ,token = bcrypt::hashpw(newuser$'hash'))
    res$status <- 202
    return(out)
  } else{
    out <- list(error = assertion$Message)
    # NOTE(review): 404 is used for every validation failure; 400 would be
    # more conventional -- confirm clients do not depend on 404 first.
    res$status <- 404
    return(out)
  }
} #Done Verified
#* Initial login validation
#* @preempt tokenizer
#* @post /login
function(req, res) {
  # Parse the JSON request body and validate the credentials.
  body <- jsonlite::fromJSON(req$postBody)
  assertion <- getLoginValidation(body)
  if (assertion$Valid) {
    user <- getUserByUsername(body$username)
    # Token = bcrypt hash of the stored credential hash; later verified by
    # authorizeToken() via bcrypt::checkpw().
    out <-
      list(userid = user$"_id",
           token = bcrypt::hashpw(user$hash[[1]]))
    res$status <- 202
    return(out)
  } else{
    out <- list(error = assertion$Message)
    res$status <- 404
    return(out)
  }
} #Done Verified
# -------------------------------------------------- DATASET ---------------------------------------------------------- #
# -- Available -- #
#* Get list of available datasets for a user
#* @post /datasets/available
function(req,res){
  # Return the ids of all dataset registration documents owned by the
  # user given in the JSON body.
  body<-jsonlite::fromJSON(req$postBody)
  query<-queryByID(body$userid, field="user")
  fields<-'{"_id":1}'
  return(.GlobalEnv$datasets$find(query,fields)$'_id')
} #Done Verified
# -- Load -- #
#* Loads dataset file in BSSEmsembler
#* @preempt tokenizer
#* @param userid
#* @param token
#* @post /datasets/load
function(req,userid,token){
  # Authenticate with the userid/token parameters, then stream the
  # multipart upload into GridFS and register the file for this user.
  val<-getTokenValidation(list('userid'=userid,'token'=token))
  if(val$Valid){
    fileid <- MultipartDataset2GridFS(req)
    obid<-registerUserFile(.GlobalEnv$datasets,userid,fileid)
    # Returns the id of the new registration document.
    return(obid)
  }else{
    stop(val$Message)
  }
} #Done
# Parse a multipart upload, check that it is a well-formed .RData dataset,
# and store it in GridFS. Returns the GridFS file id.
MultipartDataset2GridFS <- function(req){
  form <- Rook::Multipart$parse(req)
  # Anchored pattern with an escaped dot: the previous pattern ".RData"
  # treated '.' as a regex wildcard and matched anywhere in the name,
  # accepting e.g. "myRDataFile.txt".
  assim({grepl("\\.RData$", form$file$filename)},"Input file is not a valid .RData file.")
  val<-getDatasetValidation(form$file$tempfile)
  if(val$Valid){
    upload <-.GlobalEnv$gridFS$write(form$file$tempfile,form$file$filename)
    return(upload$id)
  }else{
    stop(val$Message)
  }
}
#-- Delete -- #
#* Gets dataset information in BSSEmsembler
#* @post /datasets/delete
function(req){
  body <- jsonlite::fromJSON(req$postBody)
  # Remove the dataset registration document (at most one).
  .GlobalEnv$datasets$remove(queryByID(body$datasetid), just_one = TRUE)
  # NOTE(review): this passes the registration document id to GridFS; the
  # actual stored file id lives in that document's `file` field (see
  # getFileIDByObjectID) -- confirm which id gridFS$remove expects.
  .GlobalEnv$gridFS$remove(body$datasetid)
}
#-- Info -- #
#* Gets dataset information in BSSEmsembler
#* @post /datasets/info
function(req){
  # Fix: the original signature was function(datasetid) while the body
  # reads `req$postBody`; `req` must be a parameter for plumber to inject
  # the request object, otherwise this endpoint always errors.
  body <- jsonlite::fromJSON(req$postBody)
  # Resolve registration id -> GridFS file id, fetch a temp copy, then
  # assemble metadata, summaries, validation and plots for the dataset.
  fileid <- getFileIDByObjectID(.GlobalEnv$datasets,body$datasetid)
  file <- getFileGridFS(.GlobalEnv$gridFS, fileid)
  met<-getFileMetaInfo(fileid)
  sum<-getDatasetSummary(file)
  val<-getDatasetValidation(file)
  pls<-getDatasetPlots(file)
  # Remove the temporary copy downloaded from GridFS.
  unlink(file)
  return(list('Meta'=met,'Summary'=sum,'Validation'=val,'Plots'=pls))
}
# Build HTML summaries for the X and Y tables stored in an .RData dataset
# file. Each step is best-effort via tryDo(); pieces that fail stay NULL.
getDatasetSummary <- function(file){
  XSummary <- NULL
  XBatchSummary <- NULL
  YSummary <- NULL
  YBatchSummary <- NULL
  tryDo({load(file)})
  tryDo(X<-as.data.frame(X))
  tryDo(Y<-as.data.frame(Y))
  tryDo({XSummary<-getHtmlSummary(X)})
  tryDo({XBatchSummary<-getHtmlBatchSummary(X,X[,2])})
  tryDo({YSummary<-getHtmlSummary(Y)})
  # Fix: this previously assigned to YSummary again, clobbering the plain
  # Y summary and leaving YBatchSummary always NULL.
  tryDo({YBatchSummary<-getHtmlBatchSummary(Y,X[,2])})
  lst<-list(XSummary,XBatchSummary,YSummary,YBatchSummary)
  names(lst)<-c('XSummary','XBatchSummary','YSummary','YBatchSummary')
  return(lst)
}
# Render a data frame summary (summarytools::dfSummary) as an HTML string.
getHtmlSummary <- function(df) {
  sum_tbl <- summarytools::dfSummary(df, round.digits = 3)
  rendered <- summarytools::view(sum_tbl, method = 'render', transpose = T, style = "rmarkdown")
  htmltools::renderTags(rendered)$html
}
# Render descriptive statistics (summarytools::descr) as an HTML string.
getHtmlDescriptive <- function(df) {
  desc_tbl <- summarytools::descr(df)
  rendered <- summarytools::view(desc_tbl, method = 'render', transpose = T, style = "rmarkdown")
  html <- htmltools::renderTags(rendered)$html
  html
}
# Split `df` by the grouping vector `cla` and render a descriptive HTML
# table for each group; returns a named list of HTML strings.
getHtmlBatchSummary <-function(df,cla){
  lapply(split(df,cla),getHtmlDescriptive)
}
# Fetch the GridFS metadata document for the given file id.
getFileMetaInfo<-function(fileid){
  .GlobalEnv$gridFS$find(queryByID(fileid),'{}')
}
# Placeholder: dataset plots are not implemented yet; returns NULL.
getDatasetPlots<-function(file){
  # TODO: generate diagnostic plots for the dataset file.
}
| /inst/plumber.R | no_license | AndreGuerra123/BSSEmsembleR | R | false | false | 10,451 | r | #* @apiTitle BSSEnsembleR
#* @apiDescription a plumber back-end for real-time ensemble modelling
# ---- GENERICS ------ #
# TRUE only for a single, non-NULL, non-NA, non-empty character value.
isValidString <- function(x) {
  # Short-circuit scalar checks; any failing predicate marks x invalid.
  invalid <- is.null(x) || is.na(x) || !is.atomic(x) || identical(x, "") || !is.character(x)
  !invalid
} #Done
# Generate a 24-hex-character MongoDB-style ObjectId:
# 4-byte timestamp + 3-byte machine id + 2-byte process id + 3-byte counter.
# Fix: as.hexmode() output is not zero-padded, so e.g. a small PID produced
# ids shorter than 24 characters (rejected as "$oid"); PIDs above 0xFFFF
# produced ids that were too long. Pad each field and mask the PID.
OBID <- function(){ #Done
  ei <- sprintf("%08x", as.integer(Sys.time()))          # 4-byte timestamp
  mi <- sprintf("%06x", 6666666L)                        # 3-byte fixed machine id
  pid <- sprintf("%04x", bitwAnd(Sys.getpid(), 0xFFFFL)) # 2-byte process id (low 16 bits)
  ci <- sprintf("%06x", sample(1048576:16777215, 1))     # 3-byte random counter
  return(paste0(ei, mi, pid, ci))
}
# Assertion helper: evaluate `exp`; if it is not exactly TRUE, stop with a
# message built from `msg`. Errors raised while evaluating `exp` are
# captured and reported as the cause instead of propagating directly.
assim <- function(exp, msg) {
  outcome <- tryCatch(exp, error = function(e) as.character(e))
  if (identical(outcome, TRUE)) {
    return(invisible(NULL))
  }
  if (identical(outcome, FALSE)) {
    stop(paste0("Asserted that ", msg))
  }
  stop(paste0("Fail to asssert: ", msg, ", cause: ", as.character(outcome)))
}
# TRUE when x has S3 class "numeric" or "integer".
classNumber <- function(x) {
  # inherits() accepts a vector of candidate classes.
  inherits(x, c("numeric", "integer"))
}
# ----- FILTERS ------ #
#* @filter cors
cors <- function(res) { #Done
  # Allow cross-origin requests from any origin, then continue down the
  # plumber filter chain.
  res$setHeader("Access-Control-Allow-Origin", "*")
  plumber::forward()
} #Done
#* @filter tokenizer
# Authentication filter: validate the token carried in the JSON body
# before forwarding the request to the endpoint.
# Fix: `res` was referenced in the failure branch but missing from the
# signature, so every failed validation errored instead of returning 404.
tokenizer <-function(req, res){
  body<-jsonlite::fromJSON(req$postBody)
  assertion<-getTokenValidation(body)
  if(assertion$Valid){
    plumber::forward()
  }else{
    out <- list(error = assertion$Message)
    res$status <- 404
    return(out)
  }
}
# ----- QUERIES ---- #
# Build the JSON query {field: {"$oid": obid}}; `field` defaults to "_id".
queryByID <- function(obid,field='_id'){
  # Namespace unbox() explicitly (consistent with jsonlite::toJSON below).
  q<-list(list("$oid" = jsonlite::unbox(obid)))
  names(q)<-field
  return(jsonlite::toJSON(q))
} # Done
# Build the JSON query {field: obj} with obj serialized as a JSON scalar.
queryByField<-function(obj,field){
  # Namespace unbox() explicitly (consistent with jsonlite::toJSON below).
  q<-list(jsonlite::unbox(obj))
  names(q)<-field
  return(jsonlite::toJSON(q))
} #Done Verified
# Build the JSON query matching a document by its username field.
queryByUsername <- function(username) {
  queryByField(username, "username")
} #Done Verified
# ----- GETTERS ---- #
# Find user documents matching `username` in the global users collection.
getUserByUsername<-function(username){
  out<-.GlobalEnv$users$find(queryByUsername(username),'{}')
  return(out)
} #Done Verified
# Find the user document whose _id matches `userid`.
getUserByID<-function(userid){
  out<-.GlobalEnv$users$find(queryByID(userid),'{}')
  return(out)
} #Done Verified
# Resolve a registration document id to the GridFS file id it references
# (projects only the `file` field of the matching document).
getFileIDByObjectID<- function(col,obid){
  col$find(queryByID(obid),'{"file":1,"_id":0}')$file
} #Done
# Download a GridFS file to a temporary path and return that path.
# Callers are responsible for unlink()ing the file when done.
getFileGridFS <- function(grid, fileID){
  t <- tempfile()
  # The read result was previously captured in an unused local (`out`).
  grid$read(paste0("id:", fileID), t, progress = FALSE)
  return(t)
} #Done
# -- HELPERS -- #
# Create a user document and return its fields. Only a bcrypt hash of the
# password is stored -- never the plaintext.
createNewUser<-function(username,password){
  id<-OBID()
  hash<-bcrypt::hashpw(password)
  .GlobalEnv$users$insert(jsonlite::toJSON(list("_id"=list("$oid" = jsonlite::unbox(id)),"username"=username,"hash"=hash)))
  out<-list("_id"=id,"username"=username,"hash"=hash)
  return(out)
} #Done #Verified
# TRUE when exactly one user row was found, it carries a usable hash, and
# `password` matches that stored bcrypt hash.
authorizeUser<-function(user,password){
  nrow(user) == 1 && isValidString(user$hash[[1]]) && bcrypt::checkpw(password, user$hash[[1]])
} #Done #Verified
authorizeToken<-function(user,token){
nrow(user) == 1 && bcrypt::checkpw(user$hash[[1]],token)
}#Done verified
registerUserFile <- function(col,userid,fileid){#Done
obid <- OBID()
q<-list(list("$oid" = unbox(obid)),list("$oid" = unbox(userid)),list("$oid" = unbox(fileid)))
names(q)<-c("_id","user","file")
data<-jsonlite::toJSON(q)
col$insert(data)
return(obid)
}
# -- VALIDATIONs -- #
# Validate a registration request body (username, password, confirmation,
# invitation key, uniqueness). Returns list(Valid = TRUE/FALSE, Message).
getRegistrationValidation <- function(body) {
  tryCatch({
    assim({isValidString(body$username)}, "username is not valid.")
    assim({isValidString(body$password)}, "password is not valid.")
    assim({isValidString(body$validation)}, "password confirmation is not valid.")
    assim({body$password == body$validation}, "passwords don't match.")
    assim({body$invitation == .GlobalEnv$BSSEInvitation}, "invitation key don't match.")
    assim({length(getUserByUsername(body$username)) == 0}, "username already exists.")
    list(Valid = TRUE, Message = '')
  }, error = function(e) {
    # Coerce the condition to character so it serializes cleanly to JSON,
    # consistent with the other validation helpers in this file.
    list(Valid = FALSE, Message = as.character(e))
  })
} #Done Verified
getLoginValidation <- function(body) {
tryCatch({
assim({isValidString(body$username)},'username is invalid.')
assim({isValidString(body$password)},'password is invalid.')
user<-getUserByUsername(body$username);
assim({authorizeUser(user,body$password)},'username does not exist or password is wrong.')
out <- list(Valid = T, Message = '')
return(out)
}, error = function(e) {
out <- list(Valid = F, Message = as.character(e))
return(out)
})
} #Done Verified
getTokenValidation<-function(body){
tryCatch({
assim({isValidString(body$userid)},'userid is missing, token is invalid.')
assim({isValidString(body$token)},'token is invalid.')
user <- getUserByID(body$userid)
assim({authorizeToken(user,body$token)},'token is invalid.')
out <- list(Valid = T, Message = '')
return(out)
}, error = function(e) {
out <- list(Valid = F, Message = as.character(e))
return(out)
})
} # Done verified
# Validate the contents of an .RData dataset file. load() is expected to
# bring X (predictors: col 1 integer, col 2 factor, rest numeric) and
# Y (numeric output) into scope; row counts must match.
# Returns list(Valid = TRUE/FALSE, Message = <error text>).
getDatasetValidation <- function(file){
  tryCatch({
    load(file)
    X<-as.data.frame(X)
    Y<-as.data.frame(Y)
    #X Validation
    assim({ncol(X)>2},paste0('X has insufficient number of predictors inputs:',as.character(ncol(X))))
    assim({nrow(X)>0},paste0('X has insufficient number of observations:',as.character(nrow(X))))
    # Fixed typo in the user-facing message ("Firts" -> "First").
    assim({is.integer(X[,1])},paste0('First column of X is class ',class(X[,1]),', and not integer class.'))
    assim({is.factor(X[,2])},paste0('Second column of X is class ',class(X[,2]),', and not factor class.'))
    assim({all(sapply(X[,3:ncol(X)], classNumber))},'All supplied predictors inputs, except for column one and two, should be of integer or numeric class.')
    #Y validation
    assim({ncol(Y)>0},paste0('Y has insufficient number of predictors outputs:',as.character(ncol(Y))))
    assim({nrow(Y)>0},paste0('Y has insufficient number of observations:',as.character(nrow(Y))))
    assim({classNumber(Y[,1])},'The Supplied predictor output should be of integer or numeric class.')
    #mutual validation
    assim({nrow(X)==nrow(Y)},paste0('X number of observations (',as.character(nrow(X)),') differs from Y (',as.character(nrow(Y)),').'))
    assim({sum((complete.cases(X) & complete.cases(Y)))>0},'X and Y have independent number of NA or null observations.')
    out <- list(Valid = T, Message = '')
    return(out)
  }, error = function(e) {
    out <- list(Valid = F, Message = as.character(e))
    return(out)
  })
} #Done
# -- AUTHENTICATION -- #
#* Allow user to validate in server creating a user document (passwords should not be stored in the database)
#* @preempt tokenizer
#* @post /register
function(req, res) {
body <- jsonlite::fromJSON(req$postBody)
assertion <- getRegistrationValidation(body)
if (assertion$Valid) {
newuser <- createNewUser(body$username, body$password)
out <- list(userid = newuser$'_id' ,token = bcrypt::hashpw(newuser$'hash'))
res$status <- 202
return(out)
} else{
out <- list(error = assertion$Message)
res$status <- 404
return(out)
}
} #Done Verified
#* Initial login validation
#* @preempt tokenizer
#* @post /login
function(req, res) {
body <- jsonlite::fromJSON(req$postBody)
assertion <- getLoginValidation(body)
if (assertion$Valid) {
user <- getUserByUsername(body$username)
out <-
list(userid = user$"_id",
token = bcrypt::hashpw(user$hash[[1]]))
res$status <- 202
return(out)
} else{
out <- list(error = assertion$Message)
res$status <- 404
return(out)
}
} #Done Verified
# -------------------------------------------------- DATASET ---------------------------------------------------------- #
# -- Available -- #
#* Get list of available datasets for a user
#* @post /datasets/available
function(req,res){
body<-jsonlite::fromJSON(req$postBody)
query<-queryByID(body$userid, field="user")
fields<-'{"_id":1}'
return(.GlobalEnv$datasets$find(query,fields)$'_id')
} #Done Verified
# -- Load -- #
#* Loads dataset file in BSSEmsembler
#* @preempt tokenizer
#* @param userid
#* @param token
#* @post /datasets/load
function(req,userid,token){
val<-getTokenValidation(list('userid'=userid,'token'=token))
if(val$Valid){
fileid <- MultipartDataset2GridFS(req)
obid<-registerUserFile(.GlobalEnv$datasets,userid,fileid)
return(obid)
}else{
stop(val$Message)
}
} #Done
# Parse a multipart upload, check that it is a well-formed .RData dataset,
# and store it in GridFS. Returns the GridFS file id.
MultipartDataset2GridFS <- function(req){
  form <- Rook::Multipart$parse(req)
  # Anchored pattern with an escaped dot: the previous pattern ".RData"
  # treated '.' as a regex wildcard and matched anywhere in the name,
  # accepting e.g. "myRDataFile.txt".
  assim({grepl("\\.RData$", form$file$filename)},"Input file is not a valid .RData file.")
  val<-getDatasetValidation(form$file$tempfile)
  if(val$Valid){
    upload <-.GlobalEnv$gridFS$write(form$file$tempfile,form$file$filename)
    return(upload$id)
  }else{
    stop(val$Message)
  }
}
#-- Delete -- #
#* Gets dataset information in BSSEmsembler
#* @post /datasets/delete
function(req){
body <- jsonlite::fromJSON(req$postBody)
.GlobalEnv$datasets$remove(queryByID(body$datasetid), just_one = TRUE)
.GlobalEnv$gridFS$remove(body$datasetid)
}
#-- Info -- #
#* Gets dataset information in BSSEmsembler
#* @post /datasets/info
function(req){
  # Fix: the original signature was function(datasetid) while the body
  # reads `req$postBody`; `req` must be a parameter for plumber to inject
  # the request object, otherwise this endpoint always errors.
  body <- jsonlite::fromJSON(req$postBody)
  # Resolve registration id -> GridFS file id, fetch a temp copy, then
  # assemble metadata, summaries, validation and plots for the dataset.
  fileid <- getFileIDByObjectID(.GlobalEnv$datasets,body$datasetid)
  file <- getFileGridFS(.GlobalEnv$gridFS, fileid)
  met<-getFileMetaInfo(fileid)
  sum<-getDatasetSummary(file)
  val<-getDatasetValidation(file)
  pls<-getDatasetPlots(file)
  # Remove the temporary copy downloaded from GridFS.
  unlink(file)
  return(list('Meta'=met,'Summary'=sum,'Validation'=val,'Plots'=pls))
}
# Build HTML summaries for the X and Y tables stored in an .RData dataset
# file. Each step is best-effort via tryDo(); pieces that fail stay NULL.
getDatasetSummary <- function(file){
  XSummary <- NULL
  XBatchSummary <- NULL
  YSummary <- NULL
  YBatchSummary <- NULL
  tryDo({load(file)})
  tryDo(X<-as.data.frame(X))
  tryDo(Y<-as.data.frame(Y))
  tryDo({XSummary<-getHtmlSummary(X)})
  tryDo({XBatchSummary<-getHtmlBatchSummary(X,X[,2])})
  tryDo({YSummary<-getHtmlSummary(Y)})
  # Fix: this previously assigned to YSummary again, clobbering the plain
  # Y summary and leaving YBatchSummary always NULL.
  tryDo({YBatchSummary<-getHtmlBatchSummary(Y,X[,2])})
  lst<-list(XSummary,XBatchSummary,YSummary,YBatchSummary)
  names(lst)<-c('XSummary','XBatchSummary','YSummary','YBatchSummary')
  return(lst)
}
getHtmlSummary <- function(df){
st<- summarytools::dfSummary(df, round.digits = 3)
stv<- summarytools::view(st,method='render',transpose =T,style="rmarkdown")
html<- htmltools::renderTags(stv)$html
return(html)
}
getHtmlDescriptive <-function(df){
st<- summarytools::descr(df)
stv<- summarytools::view(st,method='render',transpose =T,style="rmarkdown")
return( htmltools::renderTags(stv)$html)
}
getHtmlBatchSummary <-function(df,cla){
lapply(split(df,cla),getHtmlDescriptive)
}
getFileMetaInfo<-function(fileid){
.GlobalEnv$gridFS$find(queryByID(fileid),'{}')
}
getDatasetPlots<-function(file){
}
|
# Load packages
library(jiebaR)
library(jiebaRD)
library(wordcloud2)
library(text2vec)
library(stringr)
# Read the document
# NOTE(review): machine-specific working directory -- adjust before running.
setwd("C:/Users/jeff6/Desktop/Github/alan/Week2-4")
data <- readChar("CUG.txt",720000)
str(data)
# Start word segmentation (jiebaR)
wk = worker()
seg_MK=wk[data]
# Note: the segmentation result must be converted to a list here
tokens=list(seg_MK)
class(tokens)
# Build the vocabulary (aggregate the segmented words of the text)
it = itoken(tokens, progressbar = FALSE)
vocab = create_vocabulary(it)
# Drop single-character words
vocab=vocab[which(nchar(vocab$term)>1),]
# Drop words that appear fewer than 5 times
vocab = prune_vocabulary(vocab, term_count_min = 5L)
# Inspect the most frequent words
tail(vocab,20)
vectorizer = vocab_vectorizer(vocab)
# Consider a context window of 5 words on each side
tcm = create_tcm(it, vectorizer,skip_grams_window = 5L)
# Train word vectors; dimension is 40 below (the original note said 4)
# GloVe algorithm: https://nlp.stanford.edu/projects/glove/
glove = GlobalVectors$new(word_vectors_size = 40, vocabulary = vocab, x_max = 10)
wv_main = glove$fit_transform(tcm, n_iter = 10, convergence_tol = 0.01)
dim(wv_main)
wv_context = glove$components
dim(wv_context)
wv_main[1,1]
t(wv_context)[1,1]
# Final embedding = main vectors + transposed context vectors
word_vectors = wv_main + t(wv_context)
# Build the "Tang Seng (唐僧) + disciple (徒弟)" vector
relation = word_vectors["唐僧", , drop = FALSE] +
word_vectors["徒弟", , drop = FALSE]
# Compute cosine similarity and inspect the most similar words
cos_sim = sim2(x = word_vectors, y = relation, method = "cosine", norm = "l2")
head(sort(cos_sim[,1], decreasing = TRUE), 20)
| /practice/資料向量分析/西遊記分析(網路).R | no_license | jeff6578/alan | R | false | false | 1,430 | r | #載入套件
library(jiebaR)
library(jiebaRD)
library(wordcloud2)
library(text2vec)
library(stringr)
#讀文檔
setwd("C:/Users/jeff6/Desktop/Github/alan/Week2-4")
data <- readChar("CUG.txt",720000)
str(data)
#开始分词
wk = worker()
seg_MK=wk[data]
#注意:这里要转成list格式
tokens=list(seg_MK)
class(tokens)
# 构造词库(对文中分词结果进行汇总)
it = itoken(tokens, progressbar = FALSE)
vocab = create_vocabulary(it)
#删除一个字的词
vocab=vocab[which(nchar(vocab$term)>1),]
#删除出现小于5次的词
vocab = prune_vocabulary(vocab, term_count_min = 5L)
#查看最高频的词
tail(vocab,20)
vectorizer = vocab_vectorizer(vocab)
# 考虑词的前后5个词
tcm = create_tcm(it, vectorizer,skip_grams_window = 5L)
#设置词向量是4维的
#glove算法:https://nlp.stanford.edu/projects/glove/
glove = GlobalVectors$new(word_vectors_size = 40, vocabulary = vocab, x_max = 10)
wv_main = glove$fit_transform(tcm, n_iter = 10, convergence_tol = 0.01)
dim(wv_main)
wv_context = glove$components
dim(wv_context)
wv_main[1,1]
t(wv_context)[1,1]
word_vectors = wv_main + t(wv_context)
#构造“唐僧+徒弟”向量
relation = word_vectors["唐僧", , drop = FALSE] +
word_vectors["徒弟", , drop = FALSE]
#计算相关性,查看相关性最高的词
cos_sim = sim2(x = word_vectors, y = relation, method = "cosine", norm = "l2")
head(sort(cos_sim[,1], decreasing = TRUE), 20)
|
############################
# Week 3: General practices
############################
# Construct a 5x6 matrix
X <- matrix(rnorm(30), nrow=5, ncol=6)
# Sum the values of each ROW with `apply()` (MARGIN = 1 means rows;
# use apply(X, 2, sum) for column sums)
apply(X, 1, sum)
X
# Create a list of matrices
# NOTE(review): matrix(8:10, 3, 2) recycles 8:10 to fill 6 cells.
MyList <- list(A = matrix(1:9, 3, 3), B = matrix(4:15, 3, 2), C = matrix(8:10, 3, 2))
# Extract the 2nd column from each matrix in `MyList` with the selection
# operator `[` via `lapply()`
lapply(MyList,"[", , 2)
# Extract the 1st row from each matrix in `MyList`
lapply(MyList,"[", 1, )
# Extract the single element at row 1, column 2 of each matrix
lapply(MyList, "[", 1, 2)
# Create a new list
x <- list(a = 1:4 , b = rnorm(10), c = rnorm(20, 1), d = rnorm(100, 5))
lapply(x, mean)
x <- 1:4
# Each element of x is passed to runif() as its `n` argument
lapply(x, runif)
y <- list(a = matrix(1:4, 2, 2), b = matrix(1:6, 3, 2))
lapply(y,"[", 1, )
lapply(y,"[", 1, 2)
# Named helper: take the first row of a matrix
f <- function(elt)
{
  elt[1, ]
}
lapply(y, f)
# sapply() simplifies the result to a vector when possible
sapply(y, mean)
lapply(y, mean)
# Use split and lapply and sapply
x <- c(rnorm(10), runif(10), rnorm(10, 1))
f <- gl(3, 10)
split(x, f)
lapply(split(x,f), mean)
sapply(split(x,f), mean)
# Splitting a Data Frame
library(datasets)
head(airquality)
s <- split(airquality, airquality$Month)
str(s)
lapply(s, function(x) {
  colMeans(x[, c("Ozone", "Solar.R", "Wind")])
})
sapply(s, function(x) {
  colMeans(x[, c("Ozone", "Solar.R", "Wind")],
           na.rm = TRUE)
})
library(swirl)
swirl()
# tapply() is an interesting function to split our data into groups based on
# the value of some variable
| /week_3/general_practice.R | no_license | gusahu/rprogramming_coursera | R | false | false | 1,785 | r | ############################
# Week 3: General practices
############################
# Construct a 5x6 matrix
X <- matrix(rnorm(30), nrow=5, ncol=6)
# Sum the values of each column with `apply()`
apply(X, 1, sum)
X
# Create a list of matrices
MyList <- list(A = matrix(1:9, 3, 3), B = matrix(4:15, 3, 2), C = matrix(8:10, 3, 2))
# Extract the 2nd column from `MyList` with the selection operator `[` with `lapply()`
lapply(MyList,"[", , 2)
# Extract the 1st row from `MyList`
lapply(MyList,"[", 1, )
# Extract all the elements og the third line of B
lapply(MyList, "[", 1, 2)
# Create a new list
x <- list(a = 1:4 , b = rnorm(10), c = rnorm(20, 1), d = rnorm(100, 5))
lapply(x, mean)
x <- 1:4
lapply(x, runif)
y <- list(a = matrix(1:4, 2, 2), b = matrix(1:6, 3, 2))
lapply(y,"[", 1, )
lapply(y,"[", 1, 2)
f <- function(elt)
{
elt[1, ]
}
lapply(y, f)
sapply(y, mean)
lapply(y, mean)
# Use split and lapply and sapply
x <- c(rnorm(10), runif(10), rnorm(10, 1))
f <- gl(3, 10)
split(x, f)
lapply(split(x,f), mean)
sapply(split(x,f), mean)
# Splitting a Data Frame
library(datasets)
head(airquality)
s <- split(airquality, airquality$Month)
str(s)
lapply(s, function(x) {
colMeans(x[, c("Ozone", "Solar.R", "Wind")])
})
sapply(s, function(x) {
colMeans(x[, c("Ozone", "Solar.R", "Wind")],
na.rm = TRUE)
})
library(swirl)
swirl()
# tapply() is an interesting function to split our data into groups based on
# the value of some variable
|
# Fit an elastic-net model (alpha = 0.3) for the kidney training set with
# 10-fold cross-validation and append the fitted path to a log file.
library(glmnet)
mydata = read.table("./TrainingSet/LassoBIC/kidney.csv",head=T,sep=",")
# Column 1 is the response; columns 4+ are the predictors.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the CV fold assignment is reproducible.
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.3,family="gaussian",standardize=FALSE)
# Redirect console output to the model log, print the fit, then restore.
sink('./Model/EN/Lasso/kidney/kidney_042.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Lasso/kidney/kidney_042.R | no_license | leon1003/QSMART | R | false | false | 351 | r | library(glmnet)
mydata = read.table("./TrainingSet/LassoBIC/kidney.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.3,family="gaussian",standardize=FALSE)
sink('./Model/EN/Lasso/kidney/kidney_042.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Set the working directory
# I have commented it as it is already set
# setwd("D:/Documents/Coursera/Exploratory Data Analysis/Week 1")
# Read the source file into a data frame
mydata <- read.table("household_power_consumption.txt", header=T, sep=";", stringsAsFactors=FALSE)
# NOTE(review): na.omit() runs BEFORE the as.numeric() coercions below, so
# rows whose measurements are placeholder strings only become NA later and
# are not dropped here -- confirm this is intended.
mydata <- na.omit(mydata)
# View the data frame once to check if it is properly loaded
# View(mydata)
# Set the class of the columns to be used in the plot appropriately
# lapply(mydata, class)
mydata$Date <- as.Date(mydata$Date, format="%d/%m/%Y")
mydata$Global_active_power <- as.numeric(mydata$Global_active_power)
mydata$Sub_metering_1 <- as.numeric(mydata$Sub_metering_1)
mydata$Sub_metering_2 <- as.numeric(mydata$Sub_metering_2)
mydata$Sub_metering_3 <- as.numeric(mydata$Sub_metering_3)
# We will get warning message if there are NA records in any of these columns
# Subset the data as requested in the assignment
mydata <- mydata[(mydata$Date=="2007-02-01") | (mydata$Date=="2007-02-02"),]
# Create one column that has both date and time as it will go into X axis
# NOTE(review): the trailing "%d/%m/%Y %H:%M:%S" is an unnamed extra
# argument to transform(), not a format for as.POSIXct() -- confirm.
mydata <- transform(mydata, timestamp=as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S")
# Set the destination file name and the expected size
png("plot3.png", width=480, height=480)
# Create the plot with appropriate labels
plot(mydata$timestamp,mydata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(mydata$timestamp,mydata$Sub_metering_2,col="red")
lines(mydata$timestamp,mydata$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), lwd=c(1,1))
dev.off()
dev.off() | /plot3.R | no_license | lakmepal/ExData_Plotting1 | R | false | false | 1,634 | r | # Set the working directory
# I have commented it as it is already set
# setwd("D:/Documents/Coursera/Exploratory Data Analysis/Week 1")
# Read the source file into a data frame
mydata <- read.table("household_power_consumption.txt", header=T, sep=";", stringsAsFactors=FALSE)
mydata <- na.omit(mydata)
# View the data frame once to check if it is properly loaded
# View(mydata)
# Set the class of the columns to be used in the plot appropriately
# lapply(mydata, class)
mydata$Date <- as.Date(mydata$Date, format="%d/%m/%Y")
mydata$Global_active_power <- as.numeric(mydata$Global_active_power)
mydata$Sub_metering_1 <- as.numeric(mydata$Sub_metering_1)
mydata$Sub_metering_2 <- as.numeric(mydata$Sub_metering_2)
mydata$Sub_metering_3 <- as.numeric(mydata$Sub_metering_3)
# We will get warning message if there are NA records in any of these columns
# Subset the data as requested in the assignment
mydata <- mydata[(mydata$Date=="2007-02-01") | (mydata$Date=="2007-02-02"),]
# Create one column that has both date and time as it will go into X axis
mydata <- transform(mydata, timestamp=as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S")
# Set the destination file name and the expected size
png("plot3.png", width=480, height=480)
# Create the plot with appropriate labels
plot(mydata$timestamp,mydata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(mydata$timestamp,mydata$Sub_metering_2,col="red")
lines(mydata$timestamp,mydata$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), lwd=c(1,1))
dev.off() |
testlist <- list(vec = NULL, kmax = 0L, data_vec = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
result <- do.call(binsegRcpp:::rcpp_binseg_normal,testlist)
str(result) | /binsegRcpp/inst/testfiles/rcpp_binseg_normal/libFuzzer_rcpp_binseg_normal/rcpp_binseg_normal_valgrind_files/1610580680-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 275 | r | testlist <- list(vec = NULL, kmax = 0L, data_vec = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
result <- do.call(binsegRcpp:::rcpp_binseg_normal,testlist)
str(result) |
# Build the setup object for a pathway analysis from raw PLINK files.
#
# Arguments (as used below):
#   formula    - model formula, parsed against `null` by data.parse().
#   null       - phenotype/covariate data frame for the null model.
#   pathway    - pathway definition (columns SNP, Gene, Chr), loaded and
#                expanded below.
#   family     - model family, validated by validate.family().
#   geno.files - table of PLINK file paths with $fam/$bim/$bed columns.
#   lambda     - validated by validate.lambda.rawData().
#   subset     - optional sample subset, applied by data.subset().
#   options    - optional option list, merged by options.setup().
#
# Returns a list holding deleted SNP/gene bookkeeping, the final pathway,
# per-chromosome score statistics (norm.stat), the model formula and data
# (yx) and, when options$keep.geno is TRUE, the raw genotype matrix.
# May also save the setup to options$path.setup.
rawData.plinkfiles.setup <- function(formula, null, pathway, family, geno.files, lambda, subset = NULL, options = NULL){
  start.time <- date()
  validate.family(family)
  validate.lambda.rawData(lambda)
  # merge and reset options
  options <- options.setup(options, family, lambda, NULL, NULL, NULL)
  # encoding the row numbers of phenotype data so that I can extract proper subset of genotype data
  null <- assign.subject.id(null)
  # subset of data
  null <- data.subset(null, subset, options)
  # load definition of pathway
  pathway <- load.pathway.definition(pathway, options)
  # Expand the pathway
  pathway <- expand_pathway(pathway, geno.files)
  # check if all genotype files can be found. Missing files will be given in warning messages
  geno.files <- validate.plinkfiles(geno.files)
  # sf maps each pathway SNP to the plink file (iplink) that carries it.
  sf <- map.SNPs.to.plinkfiles(geno.files, pathway)
  # deleted snps and their reason
  deleted.snps <- data.frame(SNP = NULL, reason = NULL, comment = NULL, stringsAsFactors = FALSE)
  deleted.genes <- data.frame(Gene = NULL, reason = NULL, comment = NULL, stringsAsFactors = FALSE)
  # Drop explicitly excluded SNPs (unless they are also explicitly selected).
  exc.snps <- intersect(pathway$SNP, options$excluded.snps)
  exc.snps <- setdiff(exc.snps, options$selected.snps)
  deleted.snps <- update.deleted.snps(deleted.snps, exc.snps, reason = "RM_BY_SNP_NAMES", comment = "")
  pathway <- update.pathway.definition(pathway, exc.snps)
  sf <- update.sf(sf, exc.snps)
  # Drop pathway SNPs that no plink file provides.
  exc.snps <- setdiff(pathway$SNP, sf$SNP)
  deleted.snps <- update.deleted.snps(deleted.snps, exc.snps, reason = "NO_RAW_GENO", comment = "")
  pathway <- update.pathway.definition(pathway, exc.snps)
  sf <- update.sf(sf, exc.snps)
  # Parse the formula against the phenotype data and keep complete cases.
  ini <- data.parse(formula, null, family)
  rm(null)
  gc()
  null <- ini$null
  resp.var <- ini$resp.var
  comp.id <- which(complete.cases(null))
  if(length(comp.id) < nrow(null)){
    msg <- paste0(nrow(null) - length(comp.id), " samples are excluded due to missing covariates. ", length(comp.id), " samples are used")
    message(msg)
    null <- null[comp.id, ]
  }
  null <- validate.covar(null, resp.var)
  # Rows with response 0 are treated as controls in the SNP filters below.
  control.id <- which(null[, resp.var] == 0)
  uni.chr <- unique(pathway$Chr)
  V <- list()
  score0 <- list()
  raw.geno <- NULL
  name <- NULL
  msg <- paste("Filtering SNPs:", date())
  if(options$print) message(msg)
  # Process one chromosome at a time to bound memory use (frequent rm/gc).
  for(i in 1:length(uni.chr)){
    cid <- which(pathway$Chr == uni.chr[i])
    if(length(cid) == 0){
      next
    }
    uni.gene <- unique(pathway$Gene[cid])
    raw.geno.chr <- NULL
    # Stage 1: read and filter genotypes gene by gene.
    for(g in uni.gene){
      gid <- which(pathway$Gene == g)
      if(length(gid) == 0){
        next
      }
      rs <- pathway$SNP[gid]
      tmp <- sf[sf$SNP %in% rs, ]
      iplink <- unique(tmp$iplink)
      geno <- NULL
      # A gene's SNPs may be spread over several plink files; cbind them.
      for(ip in iplink){
        fam <- geno.files$fam[ip]
        bim <- geno.files$bim[ip]
        bed <- geno.files$bed[ip]
        rg <- read.bed(bed, bim, fam, sel.snps = tmp$SNP, sel.subs = rownames(null))
        if(is.null(geno)){
          geno <- rg
        }else{
          geno <- cbind(geno, rg)
        }
        rm(rg)
        gc()
      }
      rm(tmp)
      gc()
      class(geno) <- 'data.frame'
      # SNP filtering based on options
      filtered.data <- filter.raw.geno(geno, pathway[gid, , drop = FALSE], options, control.id)
      filtered.markers <- filtered.data$deleted.snps
      gc()
      # update with valid/available SNPs
      exc.snps <- filtered.markers$SNP
      deleted.snps <- update.deleted.snps(deleted.snps, exc.snps,
                                          reason = filtered.markers$reason,
                                          comment = filtered.markers$comment)
      pathway <- update.pathway.definition(pathway, exc.snps)
      sf <- update.sf(sf, exc.snps)
      geno <- update.raw.geno(geno, exc.snps)
      # Accumulate the chromosome matrix, adding only columns not seen yet
      # (a SNP can belong to several overlapping genes).
      if(is.null(raw.geno.chr)){
        raw.geno.chr <- geno
      }else{
        id <- setdiff(colnames(geno), colnames(raw.geno.chr))
        if(length(id) == 0){
          next
        }
        raw.geno.chr <- cbind(raw.geno.chr, geno[, id, drop = FALSE])
      }
      rm(geno)
      gc()
    }
    # Re-evaluate the chromosome's SNP set after the gene-level filtering.
    cid <- which(pathway$Chr == uni.chr[i])
    if(length(cid) == 0){
      next
    }
    # Stage 2: SNP filtering on the whole chromosome, based on options.
    filtered.data <- filter.raw.geno(raw.geno.chr, pathway[cid, , drop = FALSE], options, control.id)
    filtered.markers <- filtered.data$deleted.snps
    filtered.genes <- filtered.data$deleted.genes
    gc()
    # update with valid/available SNPs
    exc.snps <- filtered.markers$SNP
    exc.genes <- filtered.genes$Gene
    deleted.snps <- update.deleted.snps(deleted.snps, exc.snps,
                                        reason = filtered.markers$reason,
                                        comment = filtered.markers$comment)
    deleted.genes <- update.deleted.genes(deleted.genes, exc.genes, filtered.genes$reason)
    pathway <- update.pathway.definition(pathway, exc.snps, exc.genes)
    sf <- update.sf(sf, exc.snps)
    raw.geno.chr <- update.raw.geno(raw.geno.chr, exc.snps)
    gc()
    # calculate normal covariance and mean
    stat <- generate.normal.statistics(resp.var, null, raw.geno.chr, pathway, family, lambda, options)
    if(options$keep.geno){
      if(is.null(raw.geno)){
        raw.geno <- raw.geno.chr
      }else{
        raw.geno <- cbind(raw.geno, raw.geno.chr)
      }
    }
    rm(raw.geno.chr)
    gc()
    # NOTE(review): V/score0 are indexed by the loop index i, while `name`
    # only grows for non-skipped chromosomes; if a chromosome is skipped
    # via `next`, names(V) <- name below may misalign -- confirm.
    V[[i]] <- stat$V[[1]]
    score0[[i]] <- stat$score0[[1]]
    name <- c(name, names(stat$V))
    rm(stat)
    gc()
  }
  names(V) <- name
  names(score0) <- name
  norm.stat <- list(V = V, score0 = score0)
  if(!options$keep.geno){
    rm(raw.geno)
    rm(null)
    gc()
    raw.geno <- NULL
    null <- NULL
  }
  # trim the information of deleted SNPs
  deleted.snps <- trim.deleted.snps(deleted.snps, options)
  msg <- paste0("Setup completed: ", date())
  if(options$print) message(msg)
  end.time <- date()
  setup.timing <- as.integer(difftime(strptime(end.time, "%c"), strptime(start.time, "%c"), units = "secs"))
  yx <- create.yx(resp.var, null)
  formula <- create.formula(resp.var, yx)
  setup <- list(deleted.snps = deleted.snps, deleted.genes = deleted.genes,
                options = options, pathway = pathway, norm.stat = norm.stat,
                formula = formula, yx = yx, raw.geno = raw.geno,
                setup.timing = setup.timing)
  if(options$save.setup){
    save(setup, file = options$path.setup)
    msg <- paste0("setup file has been saved at ", options$path.setup)
    message(msg)
  }
  setup
}
| /R/rawData.plinkfiles.setup.R | no_license | cran/ARTP2 | R | false | false | 6,639 | r |
# Set up the pathway association test from raw genotypes stored in PLINK
# files (bed/bim/fam). Fits the null model, filters SNPs chromosome by
# chromosome per the user options, and computes the score statistics and
# their covariance used by the downstream test.
#
# Args:
#   formula:    model formula of the null model (outcome ~ covariates).
#   null:       data frame holding the phenotype and covariates.
#   pathway:    pathway definition (SNP/Gene/Chr), or a source accepted by
#               load.pathway.definition().
#   family:     model family; validated by validate.family().
#   geno.files: table of PLINK file sets with columns bed/bim/fam, indexed
#               by 'iplink' in the SNP-to-file map 'sf'.
#   lambda:     inflation factor(s); validated by validate.lambda.rawData().
#   subset:     optional sample subset, applied via data.subset().
#   options:    optional user options; merged with defaults by options.setup().
#
# Returns a 'setup' list with the deleted SNP/gene logs, merged options, the
# trimmed pathway, the normal statistics (V = covariances, score0 = scores),
# the null-model formula and data (yx), optionally the raw genotypes, and the
# elapsed setup time in seconds. Optionally saved to options$path.setup.
rawData.plinkfiles.setup <- function(formula, null, pathway, family, geno.files, lambda, subset = NULL, options = NULL){
start.time <- date()
validate.family(family)
validate.lambda.rawData(lambda)
# merge and reset options
options <- options.setup(options, family, lambda, NULL, NULL, NULL)
# encoding the row numbers of phenotype data so that I can extract proper subset of genotype data
null <- assign.subject.id(null)
# subset of data
null <- data.subset(null, subset, options)
# load definition of pathway
pathway <- load.pathway.definition(pathway, options)
# Expand the pathway
pathway <- expand_pathway(pathway, geno.files)
# check if all genotype files can be found. Missing files will be given in warning messages
geno.files <- validate.plinkfiles(geno.files)
# map each pathway SNP to the PLINK file set ('iplink') that carries it
sf <- map.SNPs.to.plinkfiles(geno.files, pathway)
# deleted snps and their reason; both logs start empty and are appended to below
deleted.snps <- data.frame(SNP = NULL, reason = NULL, comment = NULL, stringsAsFactors = FALSE)
deleted.genes <- data.frame(Gene = NULL, reason = NULL, comment = NULL, stringsAsFactors = FALSE)
# drop user-excluded SNPs, unless explicitly re-selected via options$selected.snps
exc.snps <- intersect(pathway$SNP, options$excluded.snps)
exc.snps <- setdiff(exc.snps, options$selected.snps)
deleted.snps <- update.deleted.snps(deleted.snps, exc.snps, reason = "RM_BY_SNP_NAMES", comment = "")
pathway <- update.pathway.definition(pathway, exc.snps)
sf <- update.sf(sf, exc.snps)
# drop pathway SNPs that have no raw genotype in any PLINK file
exc.snps <- setdiff(pathway$SNP, sf$SNP)
deleted.snps <- update.deleted.snps(deleted.snps, exc.snps, reason = "NO_RAW_GENO", comment = "")
pathway <- update.pathway.definition(pathway, exc.snps)
sf <- update.sf(sf, exc.snps)
# parse the null model; 'null' is replaced by the parsed covariate data
ini <- data.parse(formula, null, family)
rm(null)
gc()
null <- ini$null
resp.var <- ini$resp.var
# keep only samples with complete covariates
comp.id <- which(complete.cases(null))
if(length(comp.id) < nrow(null)){
msg <- paste0(nrow(null) - length(comp.id), " samples are excluded due to missing covariates. ", length(comp.id), " samples are used")
message(msg)
null <- null[comp.id, ]
}
null <- validate.covar(null, resp.var)
# controls are samples with outcome == 0 (used by SNP filters, e.g. HWE)
control.id <- which(null[, resp.var] == 0)
uni.chr <- unique(pathway$Chr)
V <- list()
score0 <- list()
raw.geno <- NULL
name <- NULL
msg <- paste("Filtering SNPs:", date())
if(options$print) message(msg)
# process one chromosome at a time to bound memory; rm()/gc() calls below are
# deliberate for large genotype matrices
for(i in 1:length(uni.chr)){
cid <- which(pathway$Chr == uni.chr[i])
if(length(cid) == 0){
next
}
uni.gene <- unique(pathway$Gene[cid])
raw.geno.chr <- NULL
# pass 1: load and filter genotypes gene by gene
for(g in uni.gene){
gid <- which(pathway$Gene == g)
if(length(gid) == 0){
next
}
rs <- pathway$SNP[gid]
tmp <- sf[sf$SNP %in% rs, ]
iplink <- unique(tmp$iplink)
geno <- NULL
# a gene's SNPs may span several PLINK file sets; read and cbind them all
for(ip in iplink){
fam <- geno.files$fam[ip]
bim <- geno.files$bim[ip]
bed <- geno.files$bed[ip]
rg <- read.bed(bed, bim, fam, sel.snps = tmp$SNP, sel.subs = rownames(null))
if(is.null(geno)){
geno <- rg
}else{
geno <- cbind(geno, rg)
}
rm(rg)
gc()
}
rm(tmp)
gc()
class(geno) <- 'data.frame'
# SNP filtering based on options
filtered.data <- filter.raw.geno(geno, pathway[gid, , drop = FALSE], options, control.id)
filtered.markers <- filtered.data$deleted.snps
gc()
# update with valid/available SNPs
exc.snps <- filtered.markers$SNP
deleted.snps <- update.deleted.snps(deleted.snps, exc.snps,
reason = filtered.markers$reason,
comment = filtered.markers$comment)
pathway <- update.pathway.definition(pathway, exc.snps)
sf <- update.sf(sf, exc.snps)
geno <- update.raw.geno(geno, exc.snps)
# accumulate this gene's surviving SNPs into the chromosome matrix,
# skipping columns already present (SNPs shared across genes)
if(is.null(raw.geno.chr)){
raw.geno.chr <- geno
}else{
id <- setdiff(colnames(geno), colnames(raw.geno.chr))
if(length(id) == 0){
next
}
raw.geno.chr <- cbind(raw.geno.chr, geno[, id, drop = FALSE])
}
rm(geno)
gc()
}
# recompute the chromosome's rows; the pathway may have shrunk during pass 1
cid <- which(pathway$Chr == uni.chr[i])
if(length(cid) == 0){
next
}
# pass 2: SNP filtering based on options, now across the whole chromosome
filtered.data <- filter.raw.geno(raw.geno.chr, pathway[cid, , drop = FALSE], options, control.id)
filtered.markers <- filtered.data$deleted.snps
filtered.genes <- filtered.data$deleted.genes
gc()
# update with valid/available SNPs
exc.snps <- filtered.markers$SNP
exc.genes <- filtered.genes$Gene
deleted.snps <- update.deleted.snps(deleted.snps, exc.snps,
reason = filtered.markers$reason,
comment = filtered.markers$comment)
deleted.genes <- update.deleted.genes(deleted.genes, exc.genes, filtered.genes$reason)
pathway <- update.pathway.definition(pathway, exc.snps, exc.genes)
sf <- update.sf(sf, exc.snps)
raw.geno.chr <- update.raw.geno(raw.geno.chr, exc.snps)
gc()
# calculate normal covariance and mean
stat <- generate.normal.statistics(resp.var, null, raw.geno.chr, pathway, family, lambda, options)
# keep the raw genotype matrix only when requested (memory trade-off)
if(options$keep.geno){
if(is.null(raw.geno)){
raw.geno <- raw.geno.chr
}else{
raw.geno <- cbind(raw.geno, raw.geno.chr)
}
}
rm(raw.geno.chr)
gc()
V[[i]] <- stat$V[[1]]
score0[[i]] <- stat$score0[[1]]
name <- c(name, names(stat$V))
rm(stat)
gc()
}
names(V) <- name
names(score0) <- name
norm.stat <- list(V = V, score0 = score0)
if(!options$keep.geno){
rm(raw.geno)
rm(null)
gc()
raw.geno <- NULL
null <- NULL
}
# trim the information of deleted SNPs
deleted.snps <- trim.deleted.snps(deleted.snps, options)
msg <- paste0("Setup completed: ", date())
if(options$print) message(msg)
end.time <- date()
setup.timing <- as.integer(difftime(strptime(end.time, "%c"), strptime(start.time, "%c"), units = "secs"))
# NOTE(review): when options$keep.geno is FALSE, 'null' was set to NULL above,
# so create.yx() receives NULL here -- presumably handled inside create.yx();
# confirm against its implementation.
yx <- create.yx(resp.var, null)
formula <- create.formula(resp.var, yx)
setup <- list(deleted.snps = deleted.snps, deleted.genes = deleted.genes,
options = options, pathway = pathway, norm.stat = norm.stat,
formula = formula, yx = yx, raw.geno = raw.geno,
setup.timing = setup.timing)
if(options$save.setup){
save(setup, file = options$path.setup)
msg <- paste0("setup file has been saved at ", options$path.setup)
message(msg)
}
setup
}
|
library(tidyverse)
library(janitor)
library(readxl)
library(readstata13)
# demographics <- read_csv("N:/Assessment_Data Returns/TCAP_End-of-Course/2018-19/Demographic Files/fall_eoc_demographics_snapshot_20181208.csv")
# Spring 2020 demographics snapshot; joined to each assessment file below by
# system/school/student id.
demographics_2020_spring <- read_csv("N:/TNReady/2019-20/spring/demographics/student_demographics_20200707.csv")
# Standardize demographic columns: keep only 7-digit student keys, rename raw
# columns, recode race/ethnicity into indicator columns and a label, and build
# the Black/Hispanic/Native American (BHN) group flag.
demos_filtered <- demographics_2020_spring %>%
filter(str_length(student_key) == 7) %>%
transmute(
unique_student_id = student_key,
system = district_no,
school = school_no,
gender,
hispanic = if_else(ethnicity == 'H', 'Y', 'N'),
# codeab: 1 = economically disadvantaged, 2 = not; anything else -> NA
economically_disadvantaged = case_when(
codeab == 1 ~ 'Y',
codeab == 2 ~ 'N',
TRUE ~ NA_character_
),
reported_race = reportedrace,
title_1 = title1,
gifted = isgifted,
functionally_delayed = isfunctionallydelayed,
migrant = ismigrant,
el_arrived_year_1 = elrecentlyarrivedyearone,
el_arrived_year_2 = elrecentlyarrivedyeartwo,
el = isel,
t1234 = t1t2,
special_ed = specialeducation,
enrolled_50_pct_district = district50percent,
enrolled_50_pct_school = school50percent
) %>%
mutate(
# One Y/N indicator per numeric race code (4 = Hispanic/Latino has no
# separate indicator; it is covered by 'hispanic' above)
native_american = if_else(reported_race == 1, 'Y', 'N'),
asian = if_else(reported_race == 2, 'Y', 'N'),
black = if_else(reported_race == 3, 'Y', 'N'),
hawaiian_pi = if_else(reported_race == 5, 'Y', 'N'),
white = if_else(reported_race == 6, 'Y', 'N'),
# Recode numeric race code to its label; unrecognized codes -> 'Unknown'
reported_race = case_when(
reported_race == 1 ~ 'American Indian/Alaska Native',
reported_race == 2 ~ 'Asian',
reported_race == 3 ~ 'Black or African American',
reported_race == 4 ~ 'Hispanic/Latino',
reported_race == 5 ~ 'Native Hawaiian/Pac. Islander',
reported_race == 6 ~ 'White',
TRUE ~ 'Unknown'
),
# BHN = Black, Hispanic, or Native American; uses the recoded label above
bhn_group = if_else(!reported_race %in% c('American Indian/Alaska Native','Black or African American','Hispanic/Latino') | is.na(reported_race), 0, 1)
)
# Enrollment variables for alt and MSAA
# enrollment <- read_csv("N:/ORP_accountability/data/2018_final_accountability_files/enrollment.csv")
# EOCs
# ======================================= Fall EOC ===========================================================
# Set to blank if test code = TNSCIEBI, TNBRSCIEBI, TNSOCSUH, TNBRSOCSUH, or TNALTSCBI
# Fall 2019 EOC CDF: fixed-width file; column start/end positions below come
# from the vendor's file layout -- do not change them without the layout doc.
fall_eoc <- read_fwf("N:/Assessment_Data Returns/TCAP_End-of-Course/2019-2020/fall EOC 2019/2019_TN_Fall_2019_EOC_CDF_20200128.txt",
col_types = 'icicccciicccciiiiiciic',
fwf_cols(system = c(33, 37), system_name = c(38, 112), school = c(113, 116),
school_name = c(117, 191), last_name = c(192, 241), first_name = c(242, 291),
middle_initial = c(292, 292), unique_student_id = c(302, 310), grade = c(346, 347), # tested grade
content_area_code = c(350, 352), attempted = c(429, 429), modified_format = c(430, 431),
school_type = c(546, 548),teacher_of_record_tln = c(441, 460), reason_not_tested = c(543, 543),
ri_status = c(544, 544),
raw_score = c(733, 735), scale_score= c(742, 745), performance_level = c(746, 760),
scale_score_lb_ci = c(764,766), scale_score_ub_ci = c(761,763), item_response_array=c(781,910)))
# Clean the fall EOC records, attach demographics, and drop out-of-scope rows.
fall_eoc_total <- fall_eoc %>%
mutate(
# Districts 961/963 report their own number as the school number; recode to
# school 5 -- TODO confirm this mapping against the accountability rules
school = case_when(
system == 961L & school == 961L ~ 5L,
system == 963L & school == 963L ~ 5L,
TRUE ~ school
),
# RI status 6 (failed attemptedness) is cleared when the student was absent
ri_status = if_else(ri_status == 6L & reason_not_tested == 1L, 0L, ri_status)
) %>%
left_join(demos_filtered, by = c( "system", "school", "unique_student_id")) %>%
select(system:attempted, gender, reported_race, bhn_group, hispanic, native_american:white, economically_disadvantaged, title_1, gifted, functionally_delayed,
migrant, el, el_arrived_year_1:special_ed, modified_format, enrolled_50_pct_district, enrolled_50_pct_school,
teacher_of_record_tln:item_response_array) %>%
filter(
system <= 986, # Private School districts
school != 981, # Homeschool
grade %in% 1:12 | is.na(grade) # Grade 13
) %>%
# Grades 1-2 are treated as unknown grade
mutate(grade = if_else(grade %in% 1:2, NA_integer_, grade)) %>%
# Students not found in the demographics file default to non-BHN
replace_na(list(bhn_group = 0)) %>% # race = 'Unknown',
select(-(hispanic:white)) %>%
mutate(
test= 'EOC',
semester = 'Fall'
)
# =============================================== TCAP 3-8 ========================================================================
# grade_3_8_TCAP <- read_fwf("N:\\Assessment_Data Returns\\TCAP_Grades 3-8\\2018-19\\2018-2019 TN 2019 Spring 3-8 CDF Final Scores-20190730_updated2019-08-01.Txt",
# col_types = 'icicccciicccciiiiiciici',
# #n_max = 993639,
# fwf_cols(system = c(7, 11), system_name = c(12, 86), school = c(87, 90),
# school_name = c(91, 165), last_name = c(166, 200), first_name = c(201, 235),
# middle_initial = c(236, 236), unique_student_id = c(245, 253), grade = c(276, 277),
# content_area_code = c(278, 280), attempted = c(378, 378), modified_format = c(379, 380),
# school_type = c(597, 597),teacher_of_record_tln = c(390, 409), reason_not_tested = c(592, 593), ri_status = c(594, 595),
# raw_score = c(702, 704), scale_score= c(708, 711), performance_level = c(712, 726),
# scale_score_lb_ci = c(730,732), scale_score_ub_ci = c(727,729), item_response_array=c(747,876),
# enrolled_grade = c(274, 275)))
#
# grade_3_8_total <- grade_3_8_TCAP %>%
# # mutate(
# # grade = case_when(
# # grade =='K' ~ '0',
# # grade == 'T7' ~ '07',
# # grade == 'T8' ~ '08',
# # TRUE ~ grade
# # ),
# # grade = as.numeric(grade)
# # ) %>%
# mutate(
# school = case_when(
# system == 961L & school == 961L ~ 5L,
# system == 963L & school == 963L ~ 5L,
# TRUE ~ school
# ),
# ri_status = if_else(ri_status == 6L & reason_not_tested == 1L, 0L, ri_status)
# ) %>%
# left_join(demos_filtered, by = c("system", "school", "unique_student_id")) %>%
# select(system:attempted, gender, reported_race, bhn_group, hispanic, native_american:white, economically_disadvantaged, title_1, gifted, functionally_delayed,
# migrant, el, el_arrived_year_1:special_ed, modified_format, enrolled_50_pct_district, enrolled_50_pct_school,
# teacher_of_record_tln:item_response_array) %>%
# replace_na(list(bhn_group = 0)) %>% # race = 'Unknown',
# mutate(grade = if_else(grade %in% 1:2, NA_integer_, grade)) %>%
# filter(
# system <= 986, # Private School districts
# school != 981, # Homeschool
# grade %in% 1:12 | is.na(grade) # Grade 13
# ) %>%
# select(-(hispanic:white)) %>%
# mutate(
# test= 'TNReady',
# semester = 'Spring'
# )
# =================================== Spring EOC ==================================================
# spring_eoc <- read_fwf("N:\\Assessment_Data Returns\\TCAP_End-of-Course\\2018-19\\Spring EOC 2019\\2018-2019 TN 2019 Spring EOC CDF Final Scores-20190629.txt",
# col_types = 'icicccciicccciiiiiciic',
# fwf_cols(system = c(7, 11), system_name = c(12, 86), school = c(87, 90),
# school_name = c(91, 165), last_name = c(166, 200), first_name = c(201, 235),
# middle_initial = c(236, 236), unique_student_id = c(245, 253), grade = c(274, 275),
# content_area_code = c(278, 280), attempted = c(378, 378), modified_format = c(379, 380),
# school_type = c(597, 597),teacher_of_record_tln = c(390, 409), reason_not_tested = c(592, 593), ri_status = c(594, 595),
# raw_score = c(702, 704), scale_score= c(708, 711), performance_level = c(712, 726),
# scale_score_lb_ci = c(730,732), scale_score_ub_ci = c(727,729), item_response_array=c(747,876)))
#
#
# spring_eoc_total <- spring_eoc %>%
# mutate(
# school = case_when(
# system == 961L & school == 961L ~ 5L,
# system == 963L & school == 963L ~ 5L,
# TRUE ~ school
# ),
# ri_status = if_else(ri_status == 6L & reason_not_tested == 1L, 0L, ri_status)
# ) %>%
# left_join(demos_filtered, by = c("system", "school", "unique_student_id")) %>%
# select(system:attempted, gender, reported_race, bhn_group, hispanic, native_american:white, economically_disadvantaged, title_1, gifted, functionally_delayed,
# migrant, el, el_arrived_year_1:special_ed, modified_format, enrolled_50_pct_district, enrolled_50_pct_school,
# teacher_of_record_tln:item_response_array) %>%
# replace_na(list( bhn_group = 0)) %>% # race = 'Unknown',
# filter(
# system <= 986, # Private School districts
# school != 981, # Homeschool
# grade %in% 1:12 | is.na(grade) # Grade 13
# ) %>%
# mutate(grade = if_else(grade %in% 1:2, NA_integer_, grade)) %>%
# select(-(hispanic:white)) %>%
# mutate(
# test= 'EOC',
# semester = 'Spring'
# )
# spring_eoc <- read_csv('N:/Assessment_Data Returns/TCAP_End-of-Course/2018-19/Spring EOC 2019/2018-2019 TN 2019 Spring EOC CDF Final Scores-20190613.csv')
# ================================= ALT Social Studies =====================================
# alt_science_ss <- read_csv("N:/ORP_accountability/data/2019_cdf/2019_alt_ss_cdf.csv") %>%
# mutate(
# test = "Alt-Social Studies",
# semester = "Spring",
# special_ed = 1L,
# performance_level = case_when(
# performance_level == "Level 3" ~ "Mastered",
# performance_level == "Level 2" ~ "On Track",
# performance_level == "Level 1" ~ "Approaching"
# )
# ) %>%
# mutate(
# economically_disadvantaged = if_else(economically_disadvantaged == 1, 'Y', 'N'),
# bhn_group = if_else(reported_race %in% c("Black or African American", "Hispanic/Latino", "American Indian/Alaska Native"), 1, 0),
# ri_status = if_else(ri_status == 6L & reason_not_tested == 1L, 0L, as.integer(ri_status))
# ) %>%
# filter(!(system == 750 & school == 0))
#
# =================================== Total TCAP/EOC ================================================
# Combine all assessment files (only fall EOC in 2019-20 -- spring files are
# commented out above), derive not-tested / RI indicator columns, and map the
# content area codes to subject names.
total_cdf <- bind_rows(fall_eoc_total) %>% # , spring_eoc_total, grade_3_8_total, alt_science_ss
filter(content_area_code != 'E3') %>%
# Drop one specific student/subject record -- TODO confirm why this record is
# hand-excluded; no reason is recorded here
filter(!(unique_student_id == 4244992 & content_area_code == 'ENG')) %>%
mutate(
performance_level = if_else(performance_level == "On track", "On Track", performance_level),
# One 0/1 indicator per reason_not_tested code (1-5, 7)
absent = if_else(reason_not_tested == 1, 1,0),
not_enrolled = if_else(reason_not_tested == 2, 1,0),
not_scheduled = if_else(reason_not_tested == 3, 1 ,0),
medically_exempt = if_else(reason_not_tested == 4, 1,0),
residential_facility = if_else(reason_not_tested == 5, 1,0),
did_not_submit = if_else(reason_not_tested == 7, 1,0),
# One 0/1 indicator per ri_status (reportable irregularity) code (1-6)
breach_adult = if_else(ri_status == 1, 1,0),
breach_student = if_else(ri_status == 2, 1,0),
irregular_admin = if_else(ri_status == 3, 1,0),
incorrect_grade_subject = if_else(ri_status == 4, 1,0),
refused_to_test = if_else(ri_status == 5, 1,0),
failed_attemptedness = if_else(ri_status == 6, 1,0),
original_subject = case_when(
content_area_code == "EN" | content_area_code == "ENG" ~ "ELA",
content_area_code == "MA" | content_area_code == "MAT" ~ "Math",
# content_area_code == "SCI" ~ "Science",
# NOTE(review): SCI is mapped to "Social Studies" here while the
# SCI ~ "Science" branch above is commented out -- confirm intentional
content_area_code == "SS" | content_area_code == "SOC" | content_area_code == "SCI" ~ "Social Studies",
content_area_code == "A1" ~ "Algebra I",
content_area_code == "A2" ~ "Algebra II",
content_area_code == "B1" ~ "Biology I",
content_area_code == "C1" ~ "Chemistry",
content_area_code == "E1" ~ "English I",
content_area_code == "E2" ~ "English II",
content_area_code == "E3" ~ "English III",
content_area_code == "G1" ~ "Geometry",
content_area_code == "M1" ~ "Integrated Math I",
content_area_code == "M2" ~ "Integrated Math II",
content_area_code == "M3" ~ "Integrated Math III",
content_area_code == "U1" ~ "US History",
TRUE ~ NA_character_
)
)
# EOC subject groupings used when converting subjects for grades 3-8
math_eoc <- c("Algebra I", "Algebra II", "Geometry", "Integrated Math I", "Integrated Math II", "Integrated Math III")
english_eoc <- c("English I", "English II")
science_eoc <- c("Biology I", "Chemistry")
# Integrated Math districts, used when reassigning MSAA high-school subjects:
# a district is an Integrated Math district when, among its Algebra I (A1) and
# Integrated Math I (M1) records, M1 is (one of) the most frequent code(s).
int_math_systems <- total_cdf %>%
  filter(content_area_code %in% c("A1", "M1")) %>%
  count(system, content_area_code) %>%
  group_by(system) %>%
  filter(n == max(n), content_area_code == "M1") %>%
  ungroup() %>%
  pull(system) %>%
  as.integer()
# ========================================= MSAA ========================================
# # MSAA
# msaa <- read_csv("N:\\ORP_accountability\\data\\2019_cdf\\2019_msaa_cdf.csv") %>%
# mutate(
# school = case_when(
# system == 961 & school == 961 ~ 5,
# system == 963 & school == 963 ~ 5,
# TRUE ~ school
# )
# ) %>%
# filter(!reporting_status %in% c("WDR", "NLE")) %>%
# # rename(race = reported_race) %>%
# mutate(
# test = "MSAA",
# semester = "Spring",
# special_ed = 1,# = 1
# performance_level = if_else(reporting_status != "TES", NA_character_, performance_level),
# # absent = 0,
# # enrolled = 1,
# tested = if_else(reporting_status == "DNT", 0, 1)
# ) %>%
# # mutate_at(c("refused_to_test", "residential_facility"), function(x) x = 0) %>%
# # mutate_at(c("functionally_delayed"), function(x) x = 0) %>%
# left_join(demos_filtered %>% select(system, school, unique_student_id, bhn_group), by = c("system", "school", "unique_student_id")) %>%
# replace_na(list(bhn_group = 0)) # %>%reported_race = 'Unknown',
# # select( -reporting_status)
#
# ================================================ Student Level =====================================
# Build the raw student-level file: recode demographic flags to 0/1, keep the
# accountability columns, drop excluded records, and apply the testing-flag
# hierarchy (enrolled / tested / performance_level) plus subject conversions.
student_level <- bind_rows(total_cdf %>% mutate(economically_disadvantaged=if_else(economically_disadvantaged=='Y', 1, 0))) %>% # , alt_science_ss , msaa
mutate(
enrolled = 1,
tested = 1, # MSAA already has a tested field if_else(test != "MSAA", 1, tested)
valid_test = NA_integer_, # initialize valid tests and assign it later
# economically_disadvantaged = if_else(economically_disadvantaged == 'Y', 1, 0),
el = if_else(el == 1, 1, 0),
el_recently_arrived = if_else(el_arrived_year_1 == 1 | el_arrived_year_2 == 1, 1, 0),
t1234 = if_else(t1234 %in% 1:4, 1, 0), # Transitional given a 0 or 1 instead of 0-4
special_ed = if_else(special_ed == 1, 1, 0),
functionally_delayed = if_else(functionally_delayed == 1, 1,0),
# homebound = homebound == "Y",
original_performance_level = performance_level,
subject = original_subject,
# reporting_status only exists on MSAA records (commented out above); kept
# as NA here so the case_when below still references a valid column
reporting_status = NA
) %>%
select(system, school, test, original_subject, subject,
original_performance_level, performance_level, scale_score,
enrolled, tested, valid_test, state_student_id = unique_student_id,
last_name, first_name, grade, gender, reported_race, bhn_group, gifted, functionally_delayed, special_ed,
economically_disadvantaged, migrant, el, t1234, el_recently_arrived,
enrolled_50_pct_district, enrolled_50_pct_school, absent, not_enrolled, not_scheduled,
breach_adult, breach_student, irregular_admin, incorrect_grade_subject,
refused_to_test, failed_attemptedness, residential_facility, did_not_submit,
semester, ri_status, medically_exempt, teacher_of_record_tln, reporting_status) %>%
# Drop excluded records
filter(!is.na(system),
grade != 13 | is.na(grade),
!(school %in% c(981,982) | system >= 990)#, # 981 is homeschool residential_facility != 1 | is.na(residential_facility),
# Drop medically exempt?
) %>%
# Apply testing flag hierarchy
# Absent (reason_not_tested 1) students have a missing proficiency and are not tested
# EL Recently Arrived students with missing proficiency are not considered tested
# EL Recently Arrived students performance level are converted to missing
# Proficiency modified to missing if refused to test or failed attemptedness
# Any record with an RI status of 0 or 3 (Irregular Administration) is enrolled and tested, but do not have performance levels
# Any record with an RI status other than 0 or 3 is neither enrolled nor tested
mutate(
enrolled = case_when(
breach_adult == 1 | breach_student == 1 | irregular_admin==1 | incorrect_grade_subject == 1 | refused_to_test == 1 | failed_attemptedness == 1 ~ 0,
not_enrolled == 1 | not_scheduled == 1 ~ 0,
TRUE ~ 1
),
tested = case_when(
test == "MSAA" & reporting_status == "DNT" ~ 0,
breach_adult == 1 | breach_student ==1 | irregular_admin == 1 | incorrect_grade_subject == 1| refused_to_test == 1 | failed_attemptedness == 1 ~ 0,
absent == 1 | not_enrolled == 1 | not_scheduled == 1 ~ 0,
el_recently_arrived == 1L & is.na(original_performance_level) ~ 0,
TRUE ~ 1
),
performance_level = case_when(
# Invalid performance level for values below, used to denote valid tests
breach_adult == 1 | breach_student == 1 | irregular_admin==1 | incorrect_grade_subject == 1 | refused_to_test == 1 | failed_attemptedness == 1 ~ NA_character_,
not_enrolled == 1 | not_scheduled == 1 | absent == 1 | medically_exempt == 1 | residential_facility == 1 | did_not_submit == 1~ NA_character_,
el_recently_arrived == 1 ~ NA_character_,
TRUE ~ performance_level
),
# Modify subject for MSAA tests in grades >= 9 (6.8)
subject = case_when(
original_subject == "Math" & test == "MSAA" & grade >= 9 & system %in% int_math_systems ~ "Integrated Math I",
original_subject == "Math" & test == "MSAA" & grade >= 9 & !(system %in% int_math_systems) ~ "Algebra I",
original_subject == "ELA" & test == "MSAA" & grade >= 9 ~ "English II",
TRUE ~ subject
),
# Convert subjects per accountability rules
subject = case_when(
grade %in% 3:8 & original_subject %in% math_eoc ~ "Math",
grade %in% 3:8 & original_subject %in% english_eoc ~ "ELA",
grade %in% 3:8 & original_subject == "US History" ~ "Social Studies",
TRUE ~ subject
)
) %>%
select(-reporting_status)
# Alternative, CTE, and Adult HS schools; student-level records from these
# system/school pairs are dropped via an anti-join below.
alt_cte_adult <- read_csv("N:/ORP_accountability/data/2020_tdoe_provided_files/cte_alt_adult_schools.csv") %>%
  mutate(
    system = as.numeric(DISTRICT_NUMBER),
    school = as.numeric(SCHOOL_NUMBER),
    cte_alt_adult = 1
  ) %>%
  select(system, school, cte_alt_adult)
# acct_system_school <- read_csv("N:\\ORP_accountability\\data\\2019_chronic_absenteeism\\student_chronic_absenteeism_Jul11.csv") %>%
# distinct() %>%
# group_by(student_id, system, school) %>%
# mutate(
# isp_days = sum(isp_days)
# ) %>%
# ungroup() %>%
# mutate(
# enrolled_pct = round(isp_days/instructional_calendar_days * 100 + 1e-10, 2)
# ) %>%
# filter(enrolled_pct >= 50) %>%
# anti_join(alt_cte_adult, by = c('system', 'school')) %>%
#
# #
# # group_by(system, student_id) %>%
# # mutate(
# # max_pct = max(enrolled_pct, na.rm = TRUE),
# # max_days = max(isp_days, na.rm = TRUE),
# # myorder = 1:n()
# # ) %>%
# # ungroup() %>%
# # mutate(
# # acct_system = case_when(
# # isp_days == max_days & max_pct >= 50 ~ system,
# # TRUE ~ NA_real_
# # )
# # ) %>%
# group_by(student_id) %>%
# mutate(
# max_pct_school = max(enrolled_pct, na.rm = TRUE),
# max_days_school = max(isp_days, na.rm = TRUE),
# myorder_school = 1:n()
# ) %>%
# ungroup() %>%
# filter(max_days_school == isp_days) %>%
# mutate(
# acct_system = case_when(
# isp_days == max_days_school & max_pct_school >= 50 ~ system,
# TRUE ~ NA_real_
# ),
# acct_school = case_when(
# isp_days == max_days_school & max_pct_school >= 50 ~ school,
# TRUE ~ NA_real_
# )
# ) %>%
# filter(!is.na(acct_system)) %>%
# # filter(enrolled_pct == max_pct_school) %>%
# # mutate(
# # acct_system = case_when(
# # isp_days == max_days & max_pct >= 50 ~ system,
# # isp_days == max_days & max_pct < 50 ~ system,
# # TRUE ~ NA_real_
# # # enrolled_pct == max_pct & max_pct >= 50 ~ system,
# # # max_pct < 50 & enrolled_pct == max_pct ~ system,
# # # TRUE ~ NA_real_
# # ),
# # acct_school = case_when(
# # isp_days == max_days & max_pct >= 50 ~ school,
# # isp_days == max_days & max_pct < 50 ~ school,
# # TRUE ~ NA_real_
# # # enrolled_pct == max_pct & max_pct >= 50 ~ school,
# # # max_pct < 50 & enrolled_pct == max_pct ~ school,
# # # TRUE ~ NA_real_
# # )
# # ) %>%
# # filter(!is.na(acct_system)) %>%
# group_by(student_id) %>%
# mutate(
# max_days = max(isp_days, na.rm = TRUE),
# min_days = min(isp_days, na.rm = TRUE),
# max_system = max(system, na.rm = TRUE),
# min_system = min(system, na.rm = TRUE),
# max_school = max(school, na.rm = TRUE),
# min_school = min(school, na.rm = TRUE),
# max_count = n()
# ) %>%
# ungroup() %>%
# filter(!(max_days == min_days & max_count > 1 & max_system != min_system), !(max_days == min_days & max_count > 1 & max_school != min_school)) %>%
# group_by(student_id, acct_system) %>%
# mutate(min_count = min(myorder_school, na.rm = TRUE)) %>%
# ungroup() %>%
# filter(myorder_school == min_count) %>%
# mutate(
# acct_school = case_when(
# acct_system == 961 & acct_school == 961 ~ 5,
# acct_system == 963 & acct_school == 963 ~ 5,
# TRUE ~ school
# )
# ) %>%
# select(unique_student_id = student_id, acct_system, acct_school)
# write_csv(acct_system_school %>% rename(state_student_id = unique_student_id), "N:/ORP_accountability/data/2019_final_accountability_files/enrollment_AM.csv")
# Deduplicate the student-level file. Each stage computes a priority column,
# keeps the max-priority rows per group, and drops the helpers. The
# 'temp == -Inf' guard keeps rows in groups where every priority is NA
# (max(..., na.rm = TRUE) of an all-NA group is -Inf), so such records are
# not silently dropped.
dedup <- student_level %>%
anti_join(alt_cte_adult, by = c("system", "school")) %>%
mutate(
# For students with multiple records across test types, MSAA has priority, then EOC, then 3-8
test_priority = case_when(
test %in% c("MSAA", "Alt-Social Studies") ~ 3,
test == "EOC" ~ 2,
test == "TNReady" ~ 1
)
) %>%
group_by(state_student_id, subject) %>%
mutate(temp = max(test_priority, na.rm = TRUE)) %>%
filter(test_priority == temp | temp == -Inf) %>%
select(-test_priority, -temp) %>%
ungroup() %>%
# For students with multiple records within the same test, take highest proficiency level
mutate(
prof_priority = case_when(
performance_level %in% c("Below", "Below Basic") ~ 1,
performance_level %in% c("Approaching", "Basic") ~ 2,
performance_level %in% c("On Track", "Proficient") ~ 3,
performance_level %in% c("Mastered", "Advanced") ~ 4
)
) %>%
group_by(state_student_id, original_subject, test) %>%
mutate(temp = max(prof_priority, na.rm = TRUE)) %>%
filter(prof_priority == temp | temp == -Inf) %>% # | (is.na(state_student_id) & test == "Alt-Social Studies")) %>%
select(-prof_priority, -temp) %>%
ungroup() %>%
# For students with multiple records within the same performance level, take highest scale score
group_by(state_student_id, original_subject, test, performance_level) %>%
mutate(temp = max(scale_score, na.rm = TRUE)) %>%
filter(scale_score == temp | temp == -Inf) %>%
select(-temp) %>%
ungroup() %>%
# For students with multiple test records with the same proficiency across administrations, take the most recent
mutate(
semester_priority = case_when(
test %in% c("MSAA", "Alt-Social Studies", "Achievement") | (test == "EOC" & semester == "Spring") ~ 2,
test == "EOC" & semester == "Fall" ~ 1
)
) %>%
group_by(state_student_id, subject, test) %>%
mutate(temp = max(semester_priority, na.rm = TRUE)) %>%
filter(semester_priority == temp | temp == -Inf | (is.na(state_student_id) & test == "Alt-Social Studies")) %>%
select(-semester_priority, -temp) %>%
ungroup() %>%
# Deduplicate by missing demographic, grade
# demographic: prefer records with a known reported race
mutate(
demo_priority = case_when(
reported_race %in% c("American Indian/Alaska Native", "Asian", "Black or African American", "Native Hawaiian/Pac. Islander",
"Hispanic/Latino", "White") ~ 2,
reported_race == 'Unknown' | is.na(reported_race) ~ 1
)
) %>%
group_by(state_student_id, original_subject, test, performance_level) %>%
mutate(temp = max(demo_priority, na.rm = TRUE)) %>%
filter(demo_priority == temp | temp == -Inf) %>%
select(-demo_priority, -temp) %>%
ungroup() %>%
# grade: prefer records with a non-missing grade
mutate(
grade_priority = case_when(
!is.na(grade) ~ 2,
is.na(grade) ~ 1
)
) %>%
group_by(state_student_id, original_subject, test, performance_level) %>%
mutate(temp = max(grade_priority, na.rm = TRUE)) %>%
filter(grade_priority == temp | temp == -Inf) %>%
select(-grade_priority, -temp) %>%
ungroup() %>%
# Valid test if there is a performance level
mutate(valid_test = as.numeric(!is.na(performance_level)))
# System/school name lookup, joined onto the deduplicated records below.
school_names <- read_csv("N:\\ORP_accountability\\data\\2020_final_accountability_files\\names.csv") # %>%
# bind_rows(
# tribble(
# ~system, ~system_name, ~school, ~school_name,
# 970, "Department of Children's Services", 25, "Gateway to Independence",
# 970, "Department of Children's Services", 45, "Wilder Youth Development Center",
# 970, "Department of Children's Services", 65, "Mountain View Youth Development Center",
# 970, "Department of Children's Services", 140, "DCS Affiliated Schools"
# )
# )
# read in WIDA ACCESS file
# wida_current <- read_csv("N:/ORP_accountability/data/2019_ELPA/wida_growth_standard_student.csv")
# add percentiles
# Final student-level output: attach school names, apply name overrides, order
# the columns, and add within-(test, subject[, grade]) scale-score percentiles.
# Fix: `rank(..., ties = "max")` relied on partial argument matching for
# `ties.method`; spelled out as `ties.method = "max"` (same behavior).
output <- dedup %>%
  # Grade 3-4 science is excluded from the student-level file
  filter(!(original_subject == "Science" & grade %in% c("3", "4"))) %>%
  left_join(school_names, by = c("system", "school")) %>%
  mutate(
    system_name = case_when(
      system == 970 & !is.na(system_name) ~ "Department Of Children's Services Education Division",
      TRUE ~ system_name
    ),
    # Hand overrides for schools whose names are wrong/missing in the lookup
    school_name = case_when(
      system == 792 & school == 8228 ~ "Southern Avenue Charter School Of Academic Excellence Creative Arts",
      system == 330 & school == 58 ~ "Dupont Elementary",
      system == 330 & school == 8002 ~ "Ivy Academy, Inc.",
      TRUE ~ school_name
    )
  ) %>%
  select(system, system_name, school, school_name, test, original_subject, subject, semester,
    original_performance_level, performance_level, scale_score, enrolled, tested, valid_test,
    state_student_id, last_name, first_name, grade, gender, reported_race, bhn_group, teacher_of_record_tln,
    functionally_delayed, special_ed, economically_disadvantaged, gifted, migrant, el, t1234, el_recently_arrived,
    enrolled_50_pct_district, enrolled_50_pct_school, absent, refused_to_test, residential_facility) %>%
  # mutate(
  #   el = if_else(state_student_id %in% wida_current$student_id, 1, el) # If student appears in WIDA file, assign el to 1
  # ) %>%
  group_by(test, original_subject, grade) %>%
  # Percentiles by grade and original subject for 3-8 (TNReady). rank() with
  # na.last = TRUE (default) places NAs last, so non-NA ranks are unaffected;
  # NA scores are then masked out by the if_else condition.
  mutate(
    rank = if_else(!is.na(scale_score), rank(scale_score, ties.method = "max"), NA_integer_),
    denom = sum(!is.na(scale_score)),
    percentile = if_else(test == "TNReady", round(100 * rank/denom + 1e-10, 1), NA_real_)
  ) %>%
  ungroup() %>%
  group_by(test, original_subject) %>%
  # Percentiles by original subject for EOC (no grade in the grouping)
  mutate(
    rank = if_else(!is.na(scale_score), rank(scale_score, ties.method = "max"), NA_integer_),
    denom = sum(!is.na(scale_score)),
    percentile = if_else(test == 'EOC', round(100 * rank/denom + 1e-10, 1), percentile)
  ) %>%
  ungroup() %>%
  select(-rank, -denom) %>%
  # left_join(acct_system_school %>% rename(state_student_id = unique_student_id), by = c('state_student_id')) %>%
  # mutate(
  #   acct_system = if_else(is.na(acct_system), system, acct_system),
  #   acct_school = if_else(is.na(acct_school), school, acct_school)
  # ) %>%
  arrange(system, school, state_student_id)
# Write out student level
write_csv(output, 'N:/ORP_accountability/projects/2020_student_level_file/2020_student_level_file.csv', na = '')
# compare student level files
# QA check against last year's file: diff_df is the symmetric difference of
# the two files (rows in one but not the other), ignoring percentile and the
# teacher-of-record TLN column that only exists in the new output.
alex_comp <- read_csv("N:\\ORP_accountability\\projects\\2019_student_level_file\\2019_student_level_file.csv")
diff_df <- setdiff(alex_comp %>% select(-percentile), output %>% select(-teacher_of_record_tln, -percentile)) %>% # , -percentile %>% select(-percentile) %>% select(-acct_system, -acct_school)
bind_rows(setdiff(output %>% select(-teacher_of_record_tln, -percentile), alex_comp %>% select(-percentile))) %>% # %>% select(-percentile) , -percentile %>% select(-acct_system, -acct_school)
arrange(system, school, state_student_id, original_subject)
# Checking Completeness of data
# spring_eoc_2018 <- read_fwf("N:\\Assessment_Data Returns\\TCAP_End-of-Course\\2017-18\\Spring\\2017-2018 TN Spring EOC CDF Final Scores - 20180702 .txt",
# col_types = 'icicccciicccciiiiiciic',
# fwf_cols(system = c(7, 11), system_name = c(12, 86), school = c(87, 90),
# school_name = c(91, 165), last_name = c(166, 200), first_name = c(201, 235),
# middle_initial = c(236, 236), unique_student_id = c(245, 253), grade = c(254, 255),
# content_area_code = c(258, 260), attempted = c(331, 331), modified_format = c(355, 356),
# school_type = c(573, 573),teacher_of_record_tln = c(369, 388), reason_not_tested = c(567, 568), ri_status = c(569, 570),
# raw_score = c(702, 704), scale_score= c(708, 711), performance_level = c(712, 726),
# scale_score_lb_ci = c(730,732), scale_score_ub_ci = c(727,729), item_response_array=c(747,876)))
#
# stats_2018 <- spring_eoc_2018 %>%
# filter(content_area_code %in% c('ENG', 'MAT', 'A1', 'A2', 'E1', 'E2', 'G1', 'M1', 'M2', 'M3')) %>%
# mutate(missing_score = if_else(is.na(scale_score), 1, 0) ) %>%
# summarise(
# max_score = max(scale_score, na.rm = TRUE),
# min_score = min(scale_score, na.rm = TRUE),
# mean_score = round(mean(scale_score, na.rm = TRUE), 1),
# missing_score = sum(missing_score, na.rm= TRUE),
# n_tests = n(),
# n_systems = n_distinct(system),
# n_schools = n_distinct(school)
# )
#
# stats_2019 <- spring_eoc %>%
# mutate(missing_score = if_else(is.na(scale_score), 1, 0) ) %>%
# summarise(
# max_score = max(scale_score, na.rm = TRUE),
# min_score = min(scale_score, na.rm = TRUE),
# mean_score = round(mean(scale_score, na.rm = TRUE), 1),
# missing_score = sum(missing_score, na.rm= TRUE),
# n_tests = n(),
# n_systems = n_distinct(system),
# n_schools = n_distinct(school)
# )
#
# spring_eoc_2018 %>%
# filter(!school %in% spring_eoc$school) %>%
# distinct(school_name) %>%
# View()
#
# spring_eoc_2018 %>%
# filter(school_name == "Highland Oaks Middle")
#
# spring_eoc %>%
# filter(system == 190) %>%
# distinct(school_name) %>%
# View()
#
# spring_eoc %>%
# filter(school_name == 'HOME SCHOOL') %>%
# View()
| /2020/cdf_to_student_level.R | no_license | amarsee/acct-am | R | false | false | 31,735 | r | library(tidyverse)
library(janitor)
library(readxl)
library(readstata13)
# demographics <- read_csv("N:/Assessment_Data Returns/TCAP_End-of-Course/2018-19/Demographic Files/fall_eoc_demographics_snapshot_20181208.csv")
demographics_2020_spring <- read_csv("N:/TNReady/2019-20/spring/demographics/student_demographics_20200707.csv")
demos_filtered <- demographics_2020_spring %>%
filter(str_length(student_key) == 7) %>%
transmute(
unique_student_id = student_key,
system = district_no,
school = school_no,
gender,
hispanic = if_else(ethnicity == 'H', 'Y', 'N'),
economically_disadvantaged = case_when(
codeab == 1 ~ 'Y',
codeab == 2 ~ 'N',
TRUE ~ NA_character_
),
reported_race = reportedrace,
title_1 = title1,
gifted = isgifted,
functionally_delayed = isfunctionallydelayed,
migrant = ismigrant,
el_arrived_year_1 = elrecentlyarrivedyearone,
el_arrived_year_2 = elrecentlyarrivedyeartwo,
el = isel,
t1234 = t1t2,
special_ed = specialeducation,
enrolled_50_pct_district = district50percent,
enrolled_50_pct_school = school50percent
) %>%
mutate(
native_american = if_else(reported_race == 1, 'Y', 'N'),
asian = if_else(reported_race == 2, 'Y', 'N'),
black = if_else(reported_race == 3, 'Y', 'N'),
hawaiian_pi = if_else(reported_race == 5, 'Y', 'N'),
white = if_else(reported_race == 6, 'Y', 'N'),
reported_race = case_when(
reported_race == 1 ~ 'American Indian/Alaska Native',
reported_race == 2 ~ 'Asian',
reported_race == 3 ~ 'Black or African American',
reported_race == 4 ~ 'Hispanic/Latino',
reported_race == 5 ~ 'Native Hawaiian/Pac. Islander',
reported_race == 6 ~ 'White',
TRUE ~ 'Unknown'
),
bhn_group = if_else(!reported_race %in% c('American Indian/Alaska Native','Black or African American','Hispanic/Latino') | is.na(reported_race), 0, 1)
)
# Enrollment variables for alt and MSAA
# enrollment <- read_csv("N:/ORP_accountability/data/2018_final_accountability_files/enrollment.csv")
# EOCs
# ======================================= Fall EOC ===========================================================
# Set to blank if test code = TNSCIEBI, TNBRSCIEBI, TNSOCSUH, TNBRSOCSUH, or TNALTSCBI
# Fall EOC CDF: fixed-width vendor file; column positions follow the 2019-20
# fall EOC layout and must not be changed without a new layout document.
fall_eoc <- read_fwf("N:/Assessment_Data Returns/TCAP_End-of-Course/2019-2020/fall EOC 2019/2019_TN_Fall_2019_EOC_CDF_20200128.txt",
  col_types = 'icicccciicccciiiiiciic',
  fwf_cols(system = c(33, 37), system_name = c(38, 112), school = c(113, 116),
    school_name = c(117, 191), last_name = c(192, 241), first_name = c(242, 291),
    middle_initial = c(292, 292), unique_student_id = c(302, 310), grade = c(346, 347), # tested grade
    content_area_code = c(350, 352), attempted = c(429, 429), modified_format = c(430, 431),
    school_type = c(546, 548),teacher_of_record_tln = c(441, 460), reason_not_tested = c(543, 543),
    ri_status = c(544, 544),
    raw_score = c(733, 735), scale_score= c(742, 745), performance_level = c(746, 760),
    scale_score_lb_ci = c(764,766), scale_score_ub_ci = c(761,763), item_response_array=c(781,910)))
# Clean fall EOC records, attach demographics, and apply standard exclusions.
fall_eoc_total <- fall_eoc %>%
  mutate(
    # Districts 961/963 report their single school under the district number;
    # remap to school 5 so joins against demographics/names files line up.
    school = case_when(
      system == 961L & school == 961L ~ 5L,
      system == 963L & school == 963L ~ 5L,
      TRUE ~ school
    ),
    # Clear the attemptedness RI flag (6) when the student was absent (reason 1)
    ri_status = if_else(ri_status == 6L & reason_not_tested == 1L, 0L, ri_status)
  ) %>%
  left_join(demos_filtered, by = c( "system", "school", "unique_student_id")) %>%
  select(system:attempted, gender, reported_race, bhn_group, hispanic, native_american:white, economically_disadvantaged, title_1, gifted, functionally_delayed,
    migrant, el, el_arrived_year_1:special_ed, modified_format, enrolled_50_pct_district, enrolled_50_pct_school,
    teacher_of_record_tln:item_response_array) %>%
  filter(
    system <= 986, # Private School districts
    school != 981, # Homeschool
    grade %in% 1:12 | is.na(grade) # Grade 13
  ) %>%
  # Grades 1-2 are treated as missing tested grades
  mutate(grade = if_else(grade %in% 1:2, NA_integer_, grade)) %>%
  # Records without a demographics match default to non-BHN
  replace_na(list(bhn_group = 0)) %>% # race = 'Unknown',
  select(-(hispanic:white)) %>%
  mutate(
    test= 'EOC',
    semester = 'Fall'
  )
# =============================================== TCAP 3-8 ========================================================================
# grade_3_8_TCAP <- read_fwf("N:\\Assessment_Data Returns\\TCAP_Grades 3-8\\2018-19\\2018-2019 TN 2019 Spring 3-8 CDF Final Scores-20190730_updated2019-08-01.Txt",
# col_types = 'icicccciicccciiiiiciici',
# #n_max = 993639,
# fwf_cols(system = c(7, 11), system_name = c(12, 86), school = c(87, 90),
# school_name = c(91, 165), last_name = c(166, 200), first_name = c(201, 235),
# middle_initial = c(236, 236), unique_student_id = c(245, 253), grade = c(276, 277),
# content_area_code = c(278, 280), attempted = c(378, 378), modified_format = c(379, 380),
# school_type = c(597, 597),teacher_of_record_tln = c(390, 409), reason_not_tested = c(592, 593), ri_status = c(594, 595),
# raw_score = c(702, 704), scale_score= c(708, 711), performance_level = c(712, 726),
# scale_score_lb_ci = c(730,732), scale_score_ub_ci = c(727,729), item_response_array=c(747,876),
# enrolled_grade = c(274, 275)))
#
# grade_3_8_total <- grade_3_8_TCAP %>%
# # mutate(
# # grade = case_when(
# # grade =='K' ~ '0',
# # grade == 'T7' ~ '07',
# # grade == 'T8' ~ '08',
# # TRUE ~ grade
# # ),
# # grade = as.numeric(grade)
# # ) %>%
# mutate(
# school = case_when(
# system == 961L & school == 961L ~ 5L,
# system == 963L & school == 963L ~ 5L,
# TRUE ~ school
# ),
# ri_status = if_else(ri_status == 6L & reason_not_tested == 1L, 0L, ri_status)
# ) %>%
# left_join(demos_filtered, by = c("system", "school", "unique_student_id")) %>%
# select(system:attempted, gender, reported_race, bhn_group, hispanic, native_american:white, economically_disadvantaged, title_1, gifted, functionally_delayed,
# migrant, el, el_arrived_year_1:special_ed, modified_format, enrolled_50_pct_district, enrolled_50_pct_school,
# teacher_of_record_tln:item_response_array) %>%
# replace_na(list(bhn_group = 0)) %>% # race = 'Unknown',
# mutate(grade = if_else(grade %in% 1:2, NA_integer_, grade)) %>%
# filter(
# system <= 986, # Private School districts
# school != 981, # Homeschool
# grade %in% 1:12 | is.na(grade) # Grade 13
# ) %>%
# select(-(hispanic:white)) %>%
# mutate(
# test= 'TNReady',
# semester = 'Spring'
# )
# =================================== Spring EOC ==================================================
# spring_eoc <- read_fwf("N:\\Assessment_Data Returns\\TCAP_End-of-Course\\2018-19\\Spring EOC 2019\\2018-2019 TN 2019 Spring EOC CDF Final Scores-20190629.txt",
# col_types = 'icicccciicccciiiiiciic',
# fwf_cols(system = c(7, 11), system_name = c(12, 86), school = c(87, 90),
# school_name = c(91, 165), last_name = c(166, 200), first_name = c(201, 235),
# middle_initial = c(236, 236), unique_student_id = c(245, 253), grade = c(274, 275),
# content_area_code = c(278, 280), attempted = c(378, 378), modified_format = c(379, 380),
# school_type = c(597, 597),teacher_of_record_tln = c(390, 409), reason_not_tested = c(592, 593), ri_status = c(594, 595),
# raw_score = c(702, 704), scale_score= c(708, 711), performance_level = c(712, 726),
# scale_score_lb_ci = c(730,732), scale_score_ub_ci = c(727,729), item_response_array=c(747,876)))
#
#
# spring_eoc_total <- spring_eoc %>%
# mutate(
# school = case_when(
# system == 961L & school == 961L ~ 5L,
# system == 963L & school == 963L ~ 5L,
# TRUE ~ school
# ),
# ri_status = if_else(ri_status == 6L & reason_not_tested == 1L, 0L, ri_status)
# ) %>%
# left_join(demos_filtered, by = c("system", "school", "unique_student_id")) %>%
# select(system:attempted, gender, reported_race, bhn_group, hispanic, native_american:white, economically_disadvantaged, title_1, gifted, functionally_delayed,
# migrant, el, el_arrived_year_1:special_ed, modified_format, enrolled_50_pct_district, enrolled_50_pct_school,
# teacher_of_record_tln:item_response_array) %>%
# replace_na(list( bhn_group = 0)) %>% # race = 'Unknown',
# filter(
# system <= 986, # Private School districts
# school != 981, # Homeschool
# grade %in% 1:12 | is.na(grade) # Grade 13
# ) %>%
# mutate(grade = if_else(grade %in% 1:2, NA_integer_, grade)) %>%
# select(-(hispanic:white)) %>%
# mutate(
# test= 'EOC',
# semester = 'Spring'
# )
# spring_eoc <- read_csv('N:/Assessment_Data Returns/TCAP_End-of-Course/2018-19/Spring EOC 2019/2018-2019 TN 2019 Spring EOC CDF Final Scores-20190613.csv')
# ================================= ALT Social Studies =====================================
# alt_science_ss <- read_csv("N:/ORP_accountability/data/2019_cdf/2019_alt_ss_cdf.csv") %>%
# mutate(
# test = "Alt-Social Studies",
# semester = "Spring",
# special_ed = 1L,
# performance_level = case_when(
# performance_level == "Level 3" ~ "Mastered",
# performance_level == "Level 2" ~ "On Track",
# performance_level == "Level 1" ~ "Approaching"
# )
# ) %>%
# mutate(
# economically_disadvantaged = if_else(economically_disadvantaged == 1, 'Y', 'N'),
# bhn_group = if_else(reported_race %in% c("Black or African American", "Hispanic/Latino", "American Indian/Alaska Native"), 1, 0),
# ri_status = if_else(ri_status == 6L & reason_not_tested == 1L, 0L, as.integer(ri_status))
# ) %>%
# filter(!(system == 750 & school == 0))
#
# =================================== Total TCAP/EOC ================================================
# Stack all CDFs in scope (only fall EOC for this run) and derive indicator
# columns from the reason_not_tested / ri_status codes plus a readable subject.
total_cdf <- bind_rows(fall_eoc_total) %>% # , spring_eoc_total, grade_3_8_total, alt_science_ss
  filter(content_area_code != 'E3') %>%
  # Hand exclusion of a single duplicate ENG record
  filter(!(unique_student_id == 4244992 & content_area_code == 'ENG')) %>%
  mutate(
    # Standardize performance level casing
    performance_level = if_else(performance_level == "On track", "On Track", performance_level),
    # reason_not_tested codes -> 0/1 indicator columns
    absent = if_else(reason_not_tested == 1, 1,0),
    not_enrolled = if_else(reason_not_tested == 2, 1,0),
    not_scheduled = if_else(reason_not_tested == 3, 1 ,0),
    medically_exempt = if_else(reason_not_tested == 4, 1,0),
    residential_facility = if_else(reason_not_tested == 5, 1,0),
    did_not_submit = if_else(reason_not_tested == 7, 1,0),
    # ri_status (reporting irregularity) codes -> 0/1 indicator columns
    breach_adult = if_else(ri_status == 1, 1,0),
    breach_student = if_else(ri_status == 2, 1,0),
    irregular_admin = if_else(ri_status == 3, 1,0),
    incorrect_grade_subject = if_else(ri_status == 4, 1,0),
    refused_to_test = if_else(ri_status == 5, 1,0),
    failed_attemptedness = if_else(ri_status == 6, 1,0),
    # Map vendor content-area codes to readable subject names
    original_subject = case_when(
      content_area_code == "EN" | content_area_code == "ENG" ~ "ELA",
      content_area_code == "MA" | content_area_code == "MAT" ~ "Math",
      # content_area_code == "SCI" ~ "Science",
      # NOTE(review): "SCI" maps to Social Studies here while the Science
      # mapping above is commented out -- confirm this is intentional.
      content_area_code == "SS" | content_area_code == "SOC" | content_area_code == "SCI" ~ "Social Studies",
      content_area_code == "A1" ~ "Algebra I",
      content_area_code == "A2" ~ "Algebra II",
      content_area_code == "B1" ~ "Biology I",
      content_area_code == "C1" ~ "Chemistry",
      content_area_code == "E1" ~ "English I",
      content_area_code == "E2" ~ "English II",
      content_area_code == "E3" ~ "English III",
      content_area_code == "G1" ~ "Geometry",
      content_area_code == "M1" ~ "Integrated Math I",
      content_area_code == "M2" ~ "Integrated Math II",
      content_area_code == "M3" ~ "Integrated Math III",
      content_area_code == "U1" ~ "US History",
      TRUE ~ NA_character_
    )
  )
# EOC subjects grouped by content area, used when collapsing subjects
# for students tested in grades 3-8.
math_eoc <- c("Algebra I", "Algebra II", "Geometry", "Integrated Math I", "Integrated Math II", "Integrated Math III")
english_eoc <- c("English I", "English II")
science_eoc <- c("Biology I", "Chemistry")
# Integrated Math districts for reassigning MSAA subjects: a district is an
# Integrated Math district when its Integrated Math I (M1) test count is at
# least as large as its Algebra I (A1) count (ties count as Integrated Math).
int_math_systems <- total_cdf %>%
  filter(content_area_code %in% c("A1", "M1")) %>%
  count(system, content_area_code) %>%
  group_by(system) %>%
  filter(n == max(n)) %>%
  ungroup() %>%
  filter(content_area_code == "M1") %>%
  pull(system) %>%
  as.integer()
# ========================================= MSAA ========================================
# # MSAA
# msaa <- read_csv("N:\\ORP_accountability\\data\\2019_cdf\\2019_msaa_cdf.csv") %>%
# mutate(
# school = case_when(
# system == 961 & school == 961 ~ 5,
# system == 963 & school == 963 ~ 5,
# TRUE ~ school
# )
# ) %>%
# filter(!reporting_status %in% c("WDR", "NLE")) %>%
# # rename(race = reported_race) %>%
# mutate(
# test = "MSAA",
# semester = "Spring",
# special_ed = 1,# = 1
# performance_level = if_else(reporting_status != "TES", NA_character_, performance_level),
# # absent = 0,
# # enrolled = 1,
# tested = if_else(reporting_status == "DNT", 0, 1)
# ) %>%
# # mutate_at(c("refused_to_test", "residential_facility"), function(x) x = 0) %>%
# # mutate_at(c("functionally_delayed"), function(x) x = 0) %>%
# left_join(demos_filtered %>% select(system, school, unique_student_id, bhn_group), by = c("system", "school", "unique_student_id")) %>%
# replace_na(list(bhn_group = 0)) # %>%reported_race = 'Unknown',
# # select( -reporting_status)
#
# ================================================ Student Level =====================================
# Build student-level records: convert demographic fields to 0/1 indicators,
# drop excluded records, then apply the testing-flag hierarchy that decides
# enrolled / tested / performance_level and the accountability subject.
student_level <- bind_rows(total_cdf %>% mutate(economically_disadvantaged=if_else(economically_disadvantaged=='Y', 1, 0))) %>% # , alt_science_ss , msaa
  mutate(
    enrolled = 1,
    tested = 1, # MSAA already has a tested field if_else(test != "MSAA", 1, tested)
    valid_test = NA_integer_, # initialize valid tests and assign it later
    # economically_disadvantaged = if_else(economically_disadvantaged == 'Y', 1, 0),
    el = if_else(el == 1, 1, 0),
    el_recently_arrived = if_else(el_arrived_year_1 == 1 | el_arrived_year_2 == 1, 1, 0),
    t1234 = if_else(t1234 %in% 1:4, 1, 0), # Transitional given a 0 or 1 instead of 0-4
    special_ed = if_else(special_ed == 1, 1, 0),
    functionally_delayed = if_else(functionally_delayed == 1, 1,0),
    # homebound = homebound == "Y",
    original_performance_level = performance_level,
    subject = original_subject,
    # placeholder; only MSAA records carry a real reporting_status
    reporting_status = NA
  ) %>%
  select(system, school, test, original_subject, subject,
    original_performance_level, performance_level, scale_score,
    enrolled, tested, valid_test, state_student_id = unique_student_id,
    last_name, first_name, grade, gender, reported_race, bhn_group, gifted, functionally_delayed, special_ed,
    economically_disadvantaged, migrant, el, t1234, el_recently_arrived,
    enrolled_50_pct_district, enrolled_50_pct_school, absent, not_enrolled, not_scheduled,
    breach_adult, breach_student, irregular_admin, incorrect_grade_subject,
    refused_to_test, failed_attemptedness, residential_facility, did_not_submit,
    semester, ri_status, medically_exempt, teacher_of_record_tln, reporting_status) %>%
  # Drop excluded records
  filter(!is.na(system),
    grade != 13 | is.na(grade),
    !(school %in% c(981,982) | system >= 990)#, # 981 is homeschool residential_facility != 1 | is.na(residential_facility),
    # Drop medically exempt?
  ) %>%
  # Apply testing flag hierarchy
  # Absent (reason_not_tested 1) students have a missing proficiency and are not tested
  # EL Recently Arrived students with missing proficiency are not considered tested
  # EL Recently Arrived students performance level are converted to missing
  # Proficiency modified to missing if refused to test or failed attemptedness
  # Any record with an RI status of 0 or 3 (Irregular Administration) is enrolled and tested, but do not have performance levels
  # Any record with an RI status other than 0 or 3 is neither enrolled nor tested
  # NOTE: case_when() returns the FIRST matching condition, so the ordering
  # of clauses below encodes the hierarchy described above.
  mutate(
    enrolled = case_when(
      breach_adult == 1 | breach_student == 1 | irregular_admin==1 | incorrect_grade_subject == 1 | refused_to_test == 1 | failed_attemptedness == 1 ~ 0,
      not_enrolled == 1 | not_scheduled == 1 ~ 0,
      TRUE ~ 1
    ),
    tested = case_when(
      test == "MSAA" & reporting_status == "DNT" ~ 0,
      breach_adult == 1 | breach_student ==1 | irregular_admin == 1 | incorrect_grade_subject == 1| refused_to_test == 1 | failed_attemptedness == 1 ~ 0,
      absent == 1 | not_enrolled == 1 | not_scheduled == 1 ~ 0,
      el_recently_arrived == 1L & is.na(original_performance_level) ~ 0,
      TRUE ~ 1
    ),
    performance_level = case_when(
      # Invalid performance level for values below, used to denote valid tests
      breach_adult == 1 | breach_student == 1 | irregular_admin==1 | incorrect_grade_subject == 1 | refused_to_test == 1 | failed_attemptedness == 1 ~ NA_character_,
      not_enrolled == 1 | not_scheduled == 1 | absent == 1 | medically_exempt == 1 | residential_facility == 1 | did_not_submit == 1~ NA_character_,
      el_recently_arrived == 1 ~ NA_character_,
      TRUE ~ performance_level
    ),
    # Modify subject for MSAA tests in grades >= 9 (6.8)
    subject = case_when(
      original_subject == "Math" & test == "MSAA" & grade >= 9 & system %in% int_math_systems ~ "Integrated Math I",
      original_subject == "Math" & test == "MSAA" & grade >= 9 & !(system %in% int_math_systems) ~ "Algebra I",
      original_subject == "ELA" & test == "MSAA" & grade >= 9 ~ "English II",
      TRUE ~ subject
    ),
    # Convert subjects per accountability rules
    subject = case_when(
      grade %in% 3:8 & original_subject %in% math_eoc ~ "Math",
      grade %in% 3:8 & original_subject %in% english_eoc ~ "ELA",
      grade %in% 3:8 & original_subject == "US History" ~ "Social Studies",
      TRUE ~ subject
    )
  ) %>%
  # Drop the reporting_status placeholder (only meaningful for MSAA)
  select(-reporting_status)
# Records from Alternative, CTE, Adult HS are dropped from student level;
# this crosswalk of their district/school numbers is anti_join-ed against
# student_level in the dedup step below.
alt_cte_adult <- read_csv("N:/ORP_accountability/data/2020_tdoe_provided_files/cte_alt_adult_schools.csv") %>%
  transmute(system = as.numeric(DISTRICT_NUMBER), school = as.numeric(SCHOOL_NUMBER), cte_alt_adult = 1)
# acct_system_school <- read_csv("N:\\ORP_accountability\\data\\2019_chronic_absenteeism\\student_chronic_absenteeism_Jul11.csv") %>%
# distinct() %>%
# group_by(student_id, system, school) %>%
# mutate(
# isp_days = sum(isp_days)
# ) %>%
# ungroup() %>%
# mutate(
# enrolled_pct = round(isp_days/instructional_calendar_days * 100 + 1e-10, 2)
# ) %>%
# filter(enrolled_pct >= 50) %>%
# anti_join(alt_cte_adult, by = c('system', 'school')) %>%
#
# #
# # group_by(system, student_id) %>%
# # mutate(
# # max_pct = max(enrolled_pct, na.rm = TRUE),
# # max_days = max(isp_days, na.rm = TRUE),
# # myorder = 1:n()
# # ) %>%
# # ungroup() %>%
# # mutate(
# # acct_system = case_when(
# # isp_days == max_days & max_pct >= 50 ~ system,
# # TRUE ~ NA_real_
# # )
# # ) %>%
# group_by(student_id) %>%
# mutate(
# max_pct_school = max(enrolled_pct, na.rm = TRUE),
# max_days_school = max(isp_days, na.rm = TRUE),
# myorder_school = 1:n()
# ) %>%
# ungroup() %>%
# filter(max_days_school == isp_days) %>%
# mutate(
# acct_system = case_when(
# isp_days == max_days_school & max_pct_school >= 50 ~ system,
# TRUE ~ NA_real_
# ),
# acct_school = case_when(
# isp_days == max_days_school & max_pct_school >= 50 ~ school,
# TRUE ~ NA_real_
# )
# ) %>%
# filter(!is.na(acct_system)) %>%
# # filter(enrolled_pct == max_pct_school) %>%
# # mutate(
# # acct_system = case_when(
# # isp_days == max_days & max_pct >= 50 ~ system,
# # isp_days == max_days & max_pct < 50 ~ system,
# # TRUE ~ NA_real_
# # # enrolled_pct == max_pct & max_pct >= 50 ~ system,
# # # max_pct < 50 & enrolled_pct == max_pct ~ system,
# # # TRUE ~ NA_real_
# # ),
# # acct_school = case_when(
# # isp_days == max_days & max_pct >= 50 ~ school,
# # isp_days == max_days & max_pct < 50 ~ school,
# # TRUE ~ NA_real_
# # # enrolled_pct == max_pct & max_pct >= 50 ~ school,
# # # max_pct < 50 & enrolled_pct == max_pct ~ school,
# # # TRUE ~ NA_real_
# # )
# # ) %>%
# # filter(!is.na(acct_system)) %>%
# group_by(student_id) %>%
# mutate(
# max_days = max(isp_days, na.rm = TRUE),
# min_days = min(isp_days, na.rm = TRUE),
# max_system = max(system, na.rm = TRUE),
# min_system = min(system, na.rm = TRUE),
# max_school = max(school, na.rm = TRUE),
# min_school = min(school, na.rm = TRUE),
# max_count = n()
# ) %>%
# ungroup() %>%
# filter(!(max_days == min_days & max_count > 1 & max_system != min_system), !(max_days == min_days & max_count > 1 & max_school != min_school)) %>%
# group_by(student_id, acct_system) %>%
# mutate(min_count = min(myorder_school, na.rm = TRUE)) %>%
# ungroup() %>%
# filter(myorder_school == min_count) %>%
# mutate(
# acct_school = case_when(
# acct_system == 961 & acct_school == 961 ~ 5,
# acct_system == 963 & acct_school == 963 ~ 5,
# TRUE ~ school
# )
# ) %>%
# select(unique_student_id = student_id, acct_system, acct_school)
# write_csv(acct_system_school %>% rename(state_student_id = unique_student_id), "N:/ORP_accountability/data/2019_final_accountability_files/enrollment_AM.csv")
# Deduplicate to one record per student x subject using a priority ladder:
# test type, then proficiency, then scale score, then administration recency,
# then completeness of demographics and grade.
# NOTE: in each step, max(..., na.rm = TRUE) over a group whose values are
# all NA returns -Inf (with a warning); the `| temp == -Inf` condition keeps
# such groups instead of silently dropping every row in them.
dedup <- student_level %>%
  anti_join(alt_cte_adult, by = c("system", "school")) %>%
  mutate(
    # For students with multiple records across test types, MSAA has priority, then EOC, then 3-8
    test_priority = case_when(
      test %in% c("MSAA", "Alt-Social Studies") ~ 3,
      test == "EOC" ~ 2,
      test == "TNReady" ~ 1
    )
  ) %>%
  group_by(state_student_id, subject) %>%
  mutate(temp = max(test_priority, na.rm = TRUE)) %>%
  filter(test_priority == temp | temp == -Inf) %>%
  select(-test_priority, -temp) %>%
  ungroup() %>%
  # For students with multiple records within the same test, take highest proficiency level
  mutate(
    prof_priority = case_when(
      performance_level %in% c("Below", "Below Basic") ~ 1,
      performance_level %in% c("Approaching", "Basic") ~ 2,
      performance_level %in% c("On Track", "Proficient") ~ 3,
      performance_level %in% c("Mastered", "Advanced") ~ 4
    )
  ) %>%
  group_by(state_student_id, original_subject, test) %>%
  mutate(temp = max(prof_priority, na.rm = TRUE)) %>%
  filter(prof_priority == temp | temp == -Inf) %>% # | (is.na(state_student_id) & test == "Alt-Social Studies")) %>%
  select(-prof_priority, -temp) %>%
  ungroup() %>%
  # For students with multiple records within the same performance level, take highest scale score
  group_by(state_student_id, original_subject, test, performance_level) %>%
  mutate(temp = max(scale_score, na.rm = TRUE)) %>%
  filter(scale_score == temp | temp == -Inf) %>%
  select(-temp) %>%
  ungroup() %>%
  # For students with multiple test records with the same proficiency across administrations, take the most recent
  mutate(
    semester_priority = case_when(
      test %in% c("MSAA", "Alt-Social Studies", "Achievement") | (test == "EOC" & semester == "Spring") ~ 2,
      test == "EOC" & semester == "Fall" ~ 1
    )
  ) %>%
  group_by(state_student_id, subject, test) %>%
  mutate(temp = max(semester_priority, na.rm = TRUE)) %>%
  filter(semester_priority == temp | temp == -Inf | (is.na(state_student_id) & test == "Alt-Social Studies")) %>%
  select(-semester_priority, -temp) %>%
  ungroup() %>%
  # Deduplicate by missing demographic, grade
  # demographic: prefer the record whose reported race is known
  mutate(
    demo_priority = case_when(
      reported_race %in% c("American Indian/Alaska Native", "Asian", "Black or African American", "Native Hawaiian/Pac. Islander",
        "Hispanic/Latino", "White") ~ 2,
      reported_race == 'Unknown' | is.na(reported_race) ~ 1
    )
  ) %>%
  group_by(state_student_id, original_subject, test, performance_level) %>%
  mutate(temp = max(demo_priority, na.rm = TRUE)) %>%
  filter(demo_priority == temp | temp == -Inf) %>%
  select(-demo_priority, -temp) %>%
  ungroup() %>%
  # grade: prefer the record whose tested grade is non-missing
  mutate(
    grade_priority = case_when(
      !is.na(grade) ~ 2,
      is.na(grade) ~ 1
    )
  ) %>%
  group_by(state_student_id, original_subject, test, performance_level) %>%
  mutate(temp = max(grade_priority, na.rm = TRUE)) %>%
  filter(grade_priority == temp | temp == -Inf) %>%
  select(-grade_priority, -temp) %>%
  ungroup() %>%
  # Valid test if there is a performance level
  mutate(valid_test = as.numeric(!is.na(performance_level)))
# District and school names used to label the final output file
school_names <- read_csv("N:\\ORP_accountability\\data\\2020_final_accountability_files\\names.csv") # %>%
# bind_rows(
# tribble(
# ~system, ~system_name, ~school, ~school_name,
# 970, "Department of Children's Services", 25, "Gateway to Independence",
# 970, "Department of Children's Services", 45, "Wilder Youth Development Center",
# 970, "Department of Children's Services", 65, "Mountain View Youth Development Center",
# 970, "Department of Children's Services", 140, "DCS Affiliated Schools"
# )
# )
# read in WIDA ACCESS file
# wida_current <- read_csv("N:/ORP_accountability/data/2019_ELPA/wida_growth_standard_student.csv")
# add percentiles
# Final student-level output: drop grade 3-4 science, attach district/school
# names with hand corrections, and compute statewide percentiles by
# test/subject (and grade for 3-8).
output <- dedup %>%
  filter(!(original_subject == "Science" & grade %in% c("3", "4"))) %>%
  left_join(school_names, by = c("system", "school")) %>%
  mutate(
    system_name = case_when(
      system == 970 & !is.na(system_name) ~ "Department Of Children's Services Education Division",
      TRUE ~ system_name
    ),
    # Hand corrections for school names not current in the names file
    school_name = case_when(
      system == 792 & school == 8228 ~ "Southern Avenue Charter School Of Academic Excellence Creative Arts",
      system == 330 & school == 58 ~ "Dupont Elementary",
      system == 330 & school == 8002 ~ "Ivy Academy, Inc.",
      TRUE ~ school_name
    )
  ) %>%
  select(system, system_name, school, school_name, test, original_subject, subject, semester,
    original_performance_level, performance_level, scale_score, enrolled, tested, valid_test,
    state_student_id, last_name, first_name, grade, gender, reported_race, bhn_group, teacher_of_record_tln,
    functionally_delayed, special_ed, economically_disadvantaged, gifted, migrant, el, t1234, el_recently_arrived,
    enrolled_50_pct_district, enrolled_50_pct_school, absent, refused_to_test, residential_facility) %>%
  # mutate(
  #   el = if_else(state_student_id %in% wida_current$student_id, 1, el) # If student appears in WIDA file, assign el to 1
  # ) %>%
  group_by(test, original_subject, grade) %>%
  # Percentiles by grade and original subject for 3-8.
  # rank()'s argument is spelled out as ties.method (the original relied on
  # fragile partial matching via `ties = "max"`). With ties.method = "max"
  # rank() returns an integer vector, matching NA_integer_ in the if_else();
  # NA scores are ranked last by default, so non-missing scores keep their
  # correct ranks and the if_else() masks the NA rows anyway.
  mutate(
    rank = if_else(!is.na(scale_score), rank(scale_score, ties.method = "max"), NA_integer_),
    denom = sum(!is.na(scale_score)),
    percentile = if_else(test == "TNReady", round(100 * rank/denom + 1e-10, 1), NA_real_)
  ) %>%
  ungroup() %>%
  group_by(test, original_subject) %>%
  # Percentiles by original subject for EOC
  mutate(
    rank = if_else(!is.na(scale_score), rank(scale_score, ties.method = "max"), NA_integer_),
    denom = sum(!is.na(scale_score)),
    percentile = if_else(test == 'EOC', round(100 * rank/denom + 1e-10, 1), percentile)
  ) %>%
  ungroup() %>%
  select(-rank, -denom) %>%
  # left_join(acct_system_school %>% rename(state_student_id = unique_student_id), by = c('state_student_id')) %>%
  # mutate(
  #   acct_system = if_else(is.na(acct_system), system, acct_system),
  #   acct_school = if_else(is.na(acct_school), school, acct_school)
  # ) %>%
  arrange(system, school, state_student_id)
# Write out student level
write_csv(output, 'N:/ORP_accountability/projects/2020_student_level_file/2020_student_level_file.csv', na = '')
# Compare student level files: symmetric difference against the prior-year
# file (rows present in one file but not the other), ignoring percentile
# and teacher-of-record TLN.
alex_comp <- read_csv("N:\\ORP_accountability\\projects\\2019_student_level_file\\2019_student_level_file.csv")
diff_df <- setdiff(alex_comp %>% select(-percentile), output %>% select(-teacher_of_record_tln, -percentile)) %>% # , -percentile %>% select(-percentile) %>% select(-acct_system, -acct_school)
  bind_rows(setdiff(output %>% select(-teacher_of_record_tln, -percentile), alex_comp %>% select(-percentile))) %>% # %>% select(-percentile) , -percentile %>% select(-acct_system, -acct_school)
  arrange(system, school, state_student_id, original_subject)
# Checking Completeness of data
# spring_eoc_2018 <- read_fwf("N:\\Assessment_Data Returns\\TCAP_End-of-Course\\2017-18\\Spring\\2017-2018 TN Spring EOC CDF Final Scores - 20180702 .txt",
# col_types = 'icicccciicccciiiiiciic',
# fwf_cols(system = c(7, 11), system_name = c(12, 86), school = c(87, 90),
# school_name = c(91, 165), last_name = c(166, 200), first_name = c(201, 235),
# middle_initial = c(236, 236), unique_student_id = c(245, 253), grade = c(254, 255),
# content_area_code = c(258, 260), attempted = c(331, 331), modified_format = c(355, 356),
# school_type = c(573, 573),teacher_of_record_tln = c(369, 388), reason_not_tested = c(567, 568), ri_status = c(569, 570),
# raw_score = c(702, 704), scale_score= c(708, 711), performance_level = c(712, 726),
# scale_score_lb_ci = c(730,732), scale_score_ub_ci = c(727,729), item_response_array=c(747,876)))
#
# stats_2018 <- spring_eoc_2018 %>%
# filter(content_area_code %in% c('ENG', 'MAT', 'A1', 'A2', 'E1', 'E2', 'G1', 'M1', 'M2', 'M3')) %>%
# mutate(missing_score = if_else(is.na(scale_score), 1, 0) ) %>%
# summarise(
# max_score = max(scale_score, na.rm = TRUE),
# min_score = min(scale_score, na.rm = TRUE),
# mean_score = round(mean(scale_score, na.rm = TRUE), 1),
# missing_score = sum(missing_score, na.rm= TRUE),
# n_tests = n(),
# n_systems = n_distinct(system),
# n_schools = n_distinct(school)
# )
#
# stats_2019 <- spring_eoc %>%
# mutate(missing_score = if_else(is.na(scale_score), 1, 0) ) %>%
# summarise(
# max_score = max(scale_score, na.rm = TRUE),
# min_score = min(scale_score, na.rm = TRUE),
# mean_score = round(mean(scale_score, na.rm = TRUE), 1),
# missing_score = sum(missing_score, na.rm= TRUE),
# n_tests = n(),
# n_systems = n_distinct(system),
# n_schools = n_distinct(school)
# )
#
# spring_eoc_2018 %>%
# filter(!school %in% spring_eoc$school) %>%
# distinct(school_name) %>%
# View()
#
# spring_eoc_2018 %>%
# filter(school_name == "Highland Oaks Middle")
#
# spring_eoc %>%
# filter(system == 190) %>%
# distinct(school_name) %>%
# View()
#
# spring_eoc %>%
# filter(school_name == 'HOME SCHOOL') %>%
# View()
|
library(dashBootstrapComponents)
library(dashHtmlComponents)

# Example layout: a modal with no header close button (close_button = FALSE)
# that cannot be dismissed with the ESC key (keyboard = FALSE) or by clicking
# the backdrop (backdrop = "static"); only the footer button closes it.
modal <- htmlDiv(
  list(
    dbcButton("Open modal", id = "open-dismiss"),
    dbcModal(
      list(
        dbcModalHeader(
          dbcModalTitle("Dismissing"),
          close_button = FALSE
        ),
        dbcModalBody(
          paste(
            "This modal has no close button and can't be dismissed by",
            "pressing ESC. Try clicking on the backdrop or the below",
            "close button."
          )
        ),
        dbcModalFooter(dbcButton("Close", id = "close-dismiss"))
      ),
      id = "modal-dismiss",
      keyboard = FALSE,
      backdrop = "static"
    )
  )
)

# Toggle the modal whenever either button has been clicked.
# n_clicks is NULL until a button is first clicked; `NULL > 0` yields
# logical(0), so the original `if (n_open > 0 | n_close > 0)` errors on the
# initial callback. isTRUE() makes each comparison NULL-safe, and || is the
# correct scalar operator inside if().
app$callback(
  output("modal-dismiss", "is_open"),
  list(
    input("open-dismiss", "n_clicks"),
    input("close-dismiss", "n_clicks"),
    state("modal-dismiss", "is_open")
  ),
  function(n_open, n_close, is_open) {
    if (isTRUE(n_open > 0) || isTRUE(n_close > 0)) {
      return(!is_open)
    }
    return(is_open)
  }
)
| /docs/components_page/components/modal/dismiss.R | permissive | tcbegley/dash-bootstrap-components | R | false | false | 980 | r | library(dashBootstrapComponents)
library(dashHtmlComponents)
modal <- htmlDiv(
list(
dbcButton("Open modal", id = "open-dismiss"),
dbcModal(
list(
dbcModalHeader(
dbcModalTitle("Dismissing"),
close_button = FALSE
),
dbcModalBody(
paste(
"This modal has no close button and can't be dismissed by",
"pressing ESC. Try clicking on the backdrop or the below",
"close button."
)
),
dbcModalFooter(dbcButton("Close", id = "close-dismiss"))
),
id = "modal-dismiss",
keyboard = FALSE,
backdrop = "static"
)
)
)
app$callback(
output("modal-dismiss", "is_open"),
list(
input("open-dismiss", "n_clicks"),
input("close-dismiss", "n_clicks"),
state("modal-dismiss", "is_open")
),
function(n_open, n_close, is_open) {
if (n_open > 0 | n_close > 0) {
return(!is_open)
}
return(is_open)
}
)
|
#PAGE=314
a=64.5
b=74.5
c=10
n=100
ux=64
uy=-55
x=a+c*ux/n
y=b+c*uy/n
sx=13.966
sy=14.925
r=0.7686
a1=r*sy/sx
a1=round(a1,3)
a2=r*sx/sy
a2=round(a2,3)
cat('Y -',x,'=',a1,'( x -',x,')')
cat('Y -',y,'=',a2,'( x -',y,')')
| /Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH14/EX14.14.24/Ex14_14_24.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 241 | r | #PAGE=314
a=64.5
b=74.5
c=10
n=100
ux=64
uy=-55
x=a+c*ux/n
y=b+c*uy/n
sx=13.966
sy=14.925
r=0.7686
a1=r*sy/sx
a1=round(a1,3)
a2=r*sx/sy
a2=round(a2,3)
cat('Y -',x,'=',a1,'( x -',x,')')
cat('Y -',y,'=',a2,'( x -',y,')')
|
# Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://docs.intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.45.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ApiResponseSecurityZacksAnalystRatings Class
#'
#' @field analyst_ratings
#' @field security
#' @field next_page
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model of the Intrinio API response carrying Zacks analyst ratings
# for a single security (swagger-codegen generated style).  Provides
# conversion to/from JSON and flat list/data.frame representations.
ApiResponseSecurityZacksAnalystRatings <- R6::R6Class(
  'ApiResponseSecurityZacksAnalystRatings',
  public = list(
    # List of rating entries: ZacksAnalystRatingSummary objects after
    # setFromList(); raw parsed values after fromJSON().
    `analyst_ratings` = NA,
    # data.frame view of `analyst_ratings`, built by setFromList();
    # NULL until then.
    `analyst_ratings_data_frame` = NULL,
    # SecuritySummary object (or raw parsed value) for the security.
    `security` = NA,
    # Paging token for the next page of results; NA when absent.
    `next_page` = NA,
    # All fields are optional at construction; anything not supplied
    # keeps its default above.
    initialize = function(`analyst_ratings`, `security`, `next_page`){
      if (!missing(`analyst_ratings`)) {
        self$`analyst_ratings` <- `analyst_ratings`
      }
      if (!missing(`security`)) {
        self$`security` <- `security`
      }
      if (!missing(`next_page`)) {
        self$`next_page` <- `next_page`
      }
    },
    # Build a list ready for JSON serialisation.  Fields holding lists
    # of R6 objects are serialised element-wise via their own toJSON();
    # everything else goes straight through jsonlite::toJSON.
    toJSON = function() {
      ApiResponseSecurityZacksAnalystRatingsObject <- list()
      if (!is.null(self$`analyst_ratings`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`analyst_ratings`) && ((length(self$`analyst_ratings`) == 0) || ((length(self$`analyst_ratings`) != 0 && R6::is.R6(self$`analyst_ratings`[[1]]))))) {
          ApiResponseSecurityZacksAnalystRatingsObject[['analyst_ratings']] <- lapply(self$`analyst_ratings`, function(x) x$toJSON())
        } else {
          ApiResponseSecurityZacksAnalystRatingsObject[['analyst_ratings']] <- jsonlite::toJSON(self$`analyst_ratings`, auto_unbox = TRUE)
        }
      }
      if (!is.null(self$`security`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`security`) && ((length(self$`security`) == 0) || ((length(self$`security`) != 0 && R6::is.R6(self$`security`[[1]]))))) {
          ApiResponseSecurityZacksAnalystRatingsObject[['security']] <- lapply(self$`security`, function(x) x$toJSON())
        } else {
          ApiResponseSecurityZacksAnalystRatingsObject[['security']] <- jsonlite::toJSON(self$`security`, auto_unbox = TRUE)
        }
      }
      if (!is.null(self$`next_page`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`next_page`) && ((length(self$`next_page`) == 0) || ((length(self$`next_page`) != 0 && R6::is.R6(self$`next_page`[[1]]))))) {
          ApiResponseSecurityZacksAnalystRatingsObject[['next_page']] <- lapply(self$`next_page`, function(x) x$toJSON())
        } else {
          ApiResponseSecurityZacksAnalystRatingsObject[['next_page']] <- jsonlite::toJSON(self$`next_page`, auto_unbox = TRUE)
        }
      }
      ApiResponseSecurityZacksAnalystRatingsObject
    },
    # Populate fields from a JSON string, keeping the raw parsed values
    # (no conversion to typed R6 sub-objects; compare fromJSONString()).
    fromJSON = function(ApiResponseSecurityZacksAnalystRatingsJson) {
      ApiResponseSecurityZacksAnalystRatingsObject <- jsonlite::fromJSON(ApiResponseSecurityZacksAnalystRatingsJson)
      if (!is.null(ApiResponseSecurityZacksAnalystRatingsObject$`analyst_ratings`)) {
        self$`analyst_ratings` <- ApiResponseSecurityZacksAnalystRatingsObject$`analyst_ratings`
      }
      if (!is.null(ApiResponseSecurityZacksAnalystRatingsObject$`security`)) {
        self$`security` <- ApiResponseSecurityZacksAnalystRatingsObject$`security`
      }
      if (!is.null(ApiResponseSecurityZacksAnalystRatingsObject$`next_page`)) {
        self$`next_page` <- ApiResponseSecurityZacksAnalystRatingsObject$`next_page`
      }
    },
    # Serialise to a pretty-printed JSON string.
    toJSONString = function() {
      jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
    },
    # Parse a JSON string (without data.frame simplification) and
    # populate typed R6 sub-objects via setFromList().
    fromJSONString = function(ApiResponseSecurityZacksAnalystRatingsJson) {
      ApiResponseSecurityZacksAnalystRatingsObject <- jsonlite::fromJSON(ApiResponseSecurityZacksAnalystRatingsJson, simplifyDataFrame = FALSE)
      self$setFromList(ApiResponseSecurityZacksAnalystRatingsObject)
    },
    # Populate fields from a plain list: each rating becomes a
    # ZacksAnalystRatingSummary, the security becomes a SecuritySummary,
    # and a flat data.frame view of the ratings is cached.
    setFromList = function(listObject) {
      self$`analyst_ratings` <- lapply(listObject$`analyst_ratings`, function(x) {
        ZacksAnalystRatingSummaryObject <- ZacksAnalystRatingSummary$new()
        ZacksAnalystRatingSummaryObject$setFromList(x)
        return(ZacksAnalystRatingSummaryObject)
      })
      analyst_ratings_list <- lapply(self$`analyst_ratings`, function(x) {
        return(x$getAsList())
      })
      # One row per rating entry.
      self$`analyst_ratings_data_frame` <- do.call(rbind, lapply(analyst_ratings_list, data.frame))
      self$`security` <- SecuritySummary$new()
      self$`security`$setFromList(listObject$`security`)
      if (!is.null(listObject$`next_page`)) {
        self$`next_page` <- listObject$`next_page`
      }
      else {
        self$`next_page` <- NA
      }
    },
    # Flatten the object to a one-level list.  Security fields are
    # prefixed with "security_"; the ratings list is intentionally
    # omitted (see the commented-out block).
    getAsList = function() {
      listObject = list()
      # listObject[["analyst_ratings"]] <- lapply(self$`analyst_ratings`, function(o) {
      #  return(o$getAsList())
      # })
      security_list <- self$`security`$getAsList()
      for (x in names(security_list)) {
        listObject[[paste("security_",x, sep = "")]] <- self$`security`[[x]]
      }
      listObject[["next_page"]] <- self$`next_page`
      return(listObject)
    }
  )
)
| /R/ApiResponseSecurityZacksAnalystRatings.r | no_license | intrinio/r-sdk | R | false | false | 5,622 | r | # Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://docs.intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.45.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ApiResponseSecurityZacksAnalystRatings Class
#'
#' @field analyst_ratings
#' @field security
#' @field next_page
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model of the Intrinio API response carrying Zacks analyst ratings
# for a single security (swagger-codegen generated style).  Provides
# conversion to/from JSON and flat list/data.frame representations.
ApiResponseSecurityZacksAnalystRatings <- R6::R6Class(
  'ApiResponseSecurityZacksAnalystRatings',
  public = list(
    # List of rating entries: ZacksAnalystRatingSummary objects after
    # setFromList(); raw parsed values after fromJSON().
    `analyst_ratings` = NA,
    # data.frame view of `analyst_ratings`, built by setFromList();
    # NULL until then.
    `analyst_ratings_data_frame` = NULL,
    # SecuritySummary object (or raw parsed value) for the security.
    `security` = NA,
    # Paging token for the next page of results; NA when absent.
    `next_page` = NA,
    # All fields are optional at construction; anything not supplied
    # keeps its default above.
    initialize = function(`analyst_ratings`, `security`, `next_page`){
      if (!missing(`analyst_ratings`)) {
        self$`analyst_ratings` <- `analyst_ratings`
      }
      if (!missing(`security`)) {
        self$`security` <- `security`
      }
      if (!missing(`next_page`)) {
        self$`next_page` <- `next_page`
      }
    },
    # Build a list ready for JSON serialisation.  Fields holding lists
    # of R6 objects are serialised element-wise via their own toJSON();
    # everything else goes straight through jsonlite::toJSON.
    toJSON = function() {
      ApiResponseSecurityZacksAnalystRatingsObject <- list()
      if (!is.null(self$`analyst_ratings`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`analyst_ratings`) && ((length(self$`analyst_ratings`) == 0) || ((length(self$`analyst_ratings`) != 0 && R6::is.R6(self$`analyst_ratings`[[1]]))))) {
          ApiResponseSecurityZacksAnalystRatingsObject[['analyst_ratings']] <- lapply(self$`analyst_ratings`, function(x) x$toJSON())
        } else {
          ApiResponseSecurityZacksAnalystRatingsObject[['analyst_ratings']] <- jsonlite::toJSON(self$`analyst_ratings`, auto_unbox = TRUE)
        }
      }
      if (!is.null(self$`security`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`security`) && ((length(self$`security`) == 0) || ((length(self$`security`) != 0 && R6::is.R6(self$`security`[[1]]))))) {
          ApiResponseSecurityZacksAnalystRatingsObject[['security']] <- lapply(self$`security`, function(x) x$toJSON())
        } else {
          ApiResponseSecurityZacksAnalystRatingsObject[['security']] <- jsonlite::toJSON(self$`security`, auto_unbox = TRUE)
        }
      }
      if (!is.null(self$`next_page`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`next_page`) && ((length(self$`next_page`) == 0) || ((length(self$`next_page`) != 0 && R6::is.R6(self$`next_page`[[1]]))))) {
          ApiResponseSecurityZacksAnalystRatingsObject[['next_page']] <- lapply(self$`next_page`, function(x) x$toJSON())
        } else {
          ApiResponseSecurityZacksAnalystRatingsObject[['next_page']] <- jsonlite::toJSON(self$`next_page`, auto_unbox = TRUE)
        }
      }
      ApiResponseSecurityZacksAnalystRatingsObject
    },
    # Populate fields from a JSON string, keeping the raw parsed values
    # (no conversion to typed R6 sub-objects; compare fromJSONString()).
    fromJSON = function(ApiResponseSecurityZacksAnalystRatingsJson) {
      ApiResponseSecurityZacksAnalystRatingsObject <- jsonlite::fromJSON(ApiResponseSecurityZacksAnalystRatingsJson)
      if (!is.null(ApiResponseSecurityZacksAnalystRatingsObject$`analyst_ratings`)) {
        self$`analyst_ratings` <- ApiResponseSecurityZacksAnalystRatingsObject$`analyst_ratings`
      }
      if (!is.null(ApiResponseSecurityZacksAnalystRatingsObject$`security`)) {
        self$`security` <- ApiResponseSecurityZacksAnalystRatingsObject$`security`
      }
      if (!is.null(ApiResponseSecurityZacksAnalystRatingsObject$`next_page`)) {
        self$`next_page` <- ApiResponseSecurityZacksAnalystRatingsObject$`next_page`
      }
    },
    # Serialise to a pretty-printed JSON string.
    toJSONString = function() {
      jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
    },
    # Parse a JSON string (without data.frame simplification) and
    # populate typed R6 sub-objects via setFromList().
    fromJSONString = function(ApiResponseSecurityZacksAnalystRatingsJson) {
      ApiResponseSecurityZacksAnalystRatingsObject <- jsonlite::fromJSON(ApiResponseSecurityZacksAnalystRatingsJson, simplifyDataFrame = FALSE)
      self$setFromList(ApiResponseSecurityZacksAnalystRatingsObject)
    },
    # Populate fields from a plain list: each rating becomes a
    # ZacksAnalystRatingSummary, the security becomes a SecuritySummary,
    # and a flat data.frame view of the ratings is cached.
    setFromList = function(listObject) {
      self$`analyst_ratings` <- lapply(listObject$`analyst_ratings`, function(x) {
        ZacksAnalystRatingSummaryObject <- ZacksAnalystRatingSummary$new()
        ZacksAnalystRatingSummaryObject$setFromList(x)
        return(ZacksAnalystRatingSummaryObject)
      })
      analyst_ratings_list <- lapply(self$`analyst_ratings`, function(x) {
        return(x$getAsList())
      })
      # One row per rating entry.
      self$`analyst_ratings_data_frame` <- do.call(rbind, lapply(analyst_ratings_list, data.frame))
      self$`security` <- SecuritySummary$new()
      self$`security`$setFromList(listObject$`security`)
      if (!is.null(listObject$`next_page`)) {
        self$`next_page` <- listObject$`next_page`
      }
      else {
        self$`next_page` <- NA
      }
    },
    # Flatten the object to a one-level list.  Security fields are
    # prefixed with "security_"; the ratings list is intentionally
    # omitted (see the commented-out block).
    getAsList = function() {
      listObject = list()
      # listObject[["analyst_ratings"]] <- lapply(self$`analyst_ratings`, function(o) {
      #  return(o$getAsList())
      # })
      security_list <- self$`security`$getAsList()
      for (x in names(security_list)) {
        listObject[[paste("security_",x, sep = "")]] <- self$`security`[[x]]
      }
      listObject[["next_page"]] <- self$`next_page`
      return(listObject)
    }
  )
)
|
# --------------------------- #
# Parallel computing in R
# Example 0: Basics
# James Henderson
# --------------------------- #
# Compares serial loop / apply / parallel strategies for computing a
# per-row difference of group means on the YaleTNBC expression matrix.
## load libraries ##
library(parallel)
## load data ##
# load() restores the YaleTNBC object into the workspace; `foo` receives
# the character vector of names of the objects loaded.
foo = load('./YaleTNBC.Rdata')
# Column indices of the two sample groups, matched by name pattern.
AA = grep('AA',colnames(YaleTNBC))
EA = grep('EA',colnames(YaleTNBC))
#### Example 0: computing a difference in means ####
### Version 1 - for loop ###
# Row-by-row loop over the matrix; one group-mean difference per row.
t1 = system.time({
  fold_change1 = rep(NA,nrow(YaleTNBC))
  for(i in 1:nrow(YaleTNBC)){
    fold_change1[i] = mean(YaleTNBC[i,AA]) - mean(YaleTNBC[i,EA])
  }
})
### Version 2 - an apply function ###
t2 = system.time({
  fold_change2 = sapply(1:nrow(YaleTNBC),
    function(i){mean(YaleTNBC[i,AA])-mean(YaleTNBC[i,EA])}
  )
})
# Version 2a: above we use a functional, here we use an explicit named function.
fc_func = function(i) mean(YaleTNBC[i,AA]) - mean(YaleTNBC[i,EA])
t2a = system.time({
  fold_change2a = sapply(1:nrow(YaleTNBC),fc_func)
})
## version 3 - a different apply approach ##
# Row means of each column block computed once, then a single vector subtraction.
t3 = system.time({
  fold_change3 = apply(YaleTNBC[,AA],1,mean) - apply(YaleTNBC[,EA],1,mean)
})
## Compare timings of these options
rbind(t1,t2,t2a,t3)
## Version 4 - parallel approaches ##
# mclapply forks worker processes; its default worker count comes from
# the "mc.cores" option (see below).
t4 = system.time({
  fold_change4 = mclapply(1:nrow(YaleTNBC),fc_func)
})
## Results are returned as list
class(fold_change1)
class(fold_change4)
fold_change4 = unlist(fold_change4)
class(fold_change4)
# Check and increase number of cores used #
getOption("mc.cores", 2L)
detectCores()
## Version 5 - More cores ##
t5 = system.time({
  fold_change = mclapply(1:nrow(YaleTNBC),fc_func,mc.cores=4)
})
print(rbind(t1,t2,t3,t4,t5))
## An example of parallelism gone wrong ##
#help(mc.preschedule)
# With mc.preschedule = FALSE a separate job is forked per element, so
# per-fork overhead dominates for many tiny tasks.
t6 = system.time({
  fold_change = mclapply(1:nrow(YaleTNBC), fc_func, mc.cores=2, mc.preschedule = FALSE)
})
t6
| /Stats506/Examples/R/parallel/Example0.R | no_license | Irishikitty/STATS506 | R | false | false | 1,891 | r | # --------------------------- #
# Parallel computing in R
# Example 0: Basics
# James Henderson
# --------------------------- #
# Compares serial loop / apply / parallel strategies for computing a
# per-row difference of group means on the YaleTNBC expression matrix.
## load libraries ##
library(parallel)
## load data ##
# load() restores the YaleTNBC object into the workspace; `foo` receives
# the character vector of names of the objects loaded.
foo = load('./YaleTNBC.Rdata')
# Column indices of the two sample groups, matched by name pattern.
AA = grep('AA',colnames(YaleTNBC))
EA = grep('EA',colnames(YaleTNBC))
#### Example 0: computing a difference in means ####
### Version 1 - for loop ###
# Row-by-row loop over the matrix; one group-mean difference per row.
t1 = system.time({
  fold_change1 = rep(NA,nrow(YaleTNBC))
  for(i in 1:nrow(YaleTNBC)){
    fold_change1[i] = mean(YaleTNBC[i,AA]) - mean(YaleTNBC[i,EA])
  }
})
### Version 2 - an apply function ###
t2 = system.time({
  fold_change2 = sapply(1:nrow(YaleTNBC),
    function(i){mean(YaleTNBC[i,AA])-mean(YaleTNBC[i,EA])}
  )
})
# Version 2a: above we use a functional, here we use an explicit named function.
fc_func = function(i) mean(YaleTNBC[i,AA]) - mean(YaleTNBC[i,EA])
t2a = system.time({
  fold_change2a = sapply(1:nrow(YaleTNBC),fc_func)
})
## version 3 - a different apply approach ##
# Row means of each column block computed once, then a single vector subtraction.
t3 = system.time({
  fold_change3 = apply(YaleTNBC[,AA],1,mean) - apply(YaleTNBC[,EA],1,mean)
})
## Compare timings of these options
rbind(t1,t2,t2a,t3)
## Version 4 - parallel approaches ##
# mclapply forks worker processes; its default worker count comes from
# the "mc.cores" option (see below).
t4 = system.time({
  fold_change4 = mclapply(1:nrow(YaleTNBC),fc_func)
})
## Results are returned as list
class(fold_change1)
class(fold_change4)
fold_change4 = unlist(fold_change4)
class(fold_change4)
# Check and increase number of cores used #
getOption("mc.cores", 2L)
detectCores()
## Version 5 - More cores ##
t5 = system.time({
  fold_change = mclapply(1:nrow(YaleTNBC),fc_func,mc.cores=4)
})
print(rbind(t1,t2,t3,t4,t5))
## An example of parallelism gone wrong ##
#help(mc.preschedule)
# With mc.preschedule = FALSE a separate job is forked per element, so
# per-fork overhead dominates for many tiny tasks.
t6 = system.time({
  fold_change = mclapply(1:nrow(YaleTNBC), fc_func, mc.cores=2, mc.preschedule = FALSE)
})
t6
|
# ------------------------------------------------------------------
# Practical Machine Learning assignment: predict the "classe" label
# from the pml-training/pml-testing CSVs with a random forest, a
# reduced random forest, and an rpart decision tree, and compare
# out-of-sample error rates.
# ------------------------------------------------------------------
library(AppliedPredictiveModeling)
library(caret)
library(ggplot2)
library(rpart)
library(MASS)
library(Hmisc)
library(psych)
library(klaR)
library(rattle)
# Treat empty strings and "#DIV/0!" spreadsheet artifacts as NA on read.
tdata <- read.csv("pml-training.csv", na.strings = c("NA", "", "#DIV/0!"))
testdata <- read.csv("pml-testing.csv", na.strings = c("NA", "", "#DIV/0!"))
str(tdata)
##cleaning training data
# Drop near-zero-variance columns, columns that are >=95% NA, and the
# first six columns (row ids / metadata).
nzv_cols <- nearZeroVar(tdata)
if(length(nzv_cols) > 0) tdata <- tdata [, -nzv_cols]
trainingclean <- tdata[, colSums(is.na(tdata)) < nrow(tdata) * 0.95]
trainingclean <- trainingclean[,-(1:6)]
str(trainingclean)
##cleaning testing data
# Same cleaning applied independently to the held-out test file.
nzvtest_cols <- nearZeroVar(testdata)
if(length(nzvtest_cols) > 0) testdata <- testdata [, -nzvtest_cols]
testclean <- testdata[, colSums(is.na(testdata)) < nrow(testdata) * 0.95]
testclean <- testclean[,-(1:6)]
str(testclean)
# 70/30 train/validation split stratified on the outcome.
# NOTE(review): no set.seed() precedes this partition, so the split is
# not reproducible across runs -- confirm whether that is intended.
inTrain <- createDataPartition( y = trainingclean$classe, p = 0.7, list = FALSE)
training <- trainingclean[inTrain, ]
testing <- trainingclean[-inTrain,]
dim(training)
dim(testing)
## Random Forest
set.seed(82)
modrf <- train(classe~., data = training, method = "rf", prox = TRUE)
predrf <- predict(modrf, newdata = testing)
confusionMatrix(testing$classe, predrf)
varImp(modrf)
predict(modrf, newdata=testclean)
## Out of the sample error rate
1-sum(predrf == testing$classe) / length(predrf)
# Reduced model on a hand-picked subset of predictors -- presumably the
# top variables from varImp() above; verify against its output.
modrfred <- train(classe~roll_belt+pitch_forearm+yaw_belt+magnet_dumbbell_z+pitch_belt +
                    magnet_dumbbell_y+roll_forearm, data = training, method="rf", prox=TRUE)
summary(modrfred)
predrfred <- predict(modrfred, newdata = testing)
confusionMatrix(testing$classe, predrfred)
varImp(modrfred)
predict(modrfred, newdata=testclean)
## Out of sample error
1-sum(predrfred == testing$classe) / length(predrfred)
## rpart
# Single classification tree for comparison ("Decission" typo below is
# inside a plot-title string and is deliberately left untouched here).
set.seed(125)
moddt <- rpart ( classe ~ ., method = "class", data = training)
fancyRpartPlot(moddt, main = "Decission Tree")
preddt <- predict(moddt, newdata = testing, type = "class")
confusionMatrix(testing$classe, data = preddt)
1-sum(preddt == testing$classe) / length(preddt)
| /Machine learning assignment code.R | no_license | ikotermin/PML-Assignment | R | false | false | 2,044 | r | library(AppliedPredictiveModeling)
# ------------------------------------------------------------------
# Practical Machine Learning assignment: predict the "classe" label
# from the pml-training/pml-testing CSVs with a random forest, a
# reduced random forest, and an rpart decision tree, and compare
# out-of-sample error rates.
# ------------------------------------------------------------------
library(caret)
library(ggplot2)
library(rpart)
library(MASS)
library(Hmisc)
library(psych)
library(klaR)
library(rattle)
# Treat empty strings and "#DIV/0!" spreadsheet artifacts as NA on read.
tdata <- read.csv("pml-training.csv", na.strings = c("NA", "", "#DIV/0!"))
testdata <- read.csv("pml-testing.csv", na.strings = c("NA", "", "#DIV/0!"))
str(tdata)
##cleaning training data
# Drop near-zero-variance columns, columns that are >=95% NA, and the
# first six columns (row ids / metadata).
nzv_cols <- nearZeroVar(tdata)
if(length(nzv_cols) > 0) tdata <- tdata [, -nzv_cols]
trainingclean <- tdata[, colSums(is.na(tdata)) < nrow(tdata) * 0.95]
trainingclean <- trainingclean[,-(1:6)]
str(trainingclean)
##cleaning testing data
# Same cleaning applied independently to the held-out test file.
nzvtest_cols <- nearZeroVar(testdata)
if(length(nzvtest_cols) > 0) testdata <- testdata [, -nzvtest_cols]
testclean <- testdata[, colSums(is.na(testdata)) < nrow(testdata) * 0.95]
testclean <- testclean[,-(1:6)]
str(testclean)
# 70/30 train/validation split stratified on the outcome.
# NOTE(review): no set.seed() precedes this partition, so the split is
# not reproducible across runs -- confirm whether that is intended.
inTrain <- createDataPartition( y = trainingclean$classe, p = 0.7, list = FALSE)
training <- trainingclean[inTrain, ]
testing <- trainingclean[-inTrain,]
dim(training)
dim(testing)
## Random Forest
set.seed(82)
modrf <- train(classe~., data = training, method = "rf", prox = TRUE)
predrf <- predict(modrf, newdata = testing)
confusionMatrix(testing$classe, predrf)
varImp(modrf)
predict(modrf, newdata=testclean)
## Out of the sample error rate
1-sum(predrf == testing$classe) / length(predrf)
# Reduced model on a hand-picked subset of predictors -- presumably the
# top variables from varImp() above; verify against its output.
modrfred <- train(classe~roll_belt+pitch_forearm+yaw_belt+magnet_dumbbell_z+pitch_belt +
                    magnet_dumbbell_y+roll_forearm, data = training, method="rf", prox=TRUE)
summary(modrfred)
predrfred <- predict(modrfred, newdata = testing)
confusionMatrix(testing$classe, predrfred)
varImp(modrfred)
predict(modrfred, newdata=testclean)
## Out of sample error
1-sum(predrfred == testing$classe) / length(predrfred)
## rpart
# Single classification tree for comparison ("Decission" typo below is
# inside a plot-title string and is deliberately left untouched here).
set.seed(125)
moddt <- rpart ( classe ~ ., method = "class", data = training)
fancyRpartPlot(moddt, main = "Decission Tree")
preddt <- predict(moddt, newdata = testing, type = "class")
confusionMatrix(testing$classe, data = preddt)
1-sum(preddt == testing$classe) / length(preddt)
|
# Cross product of two direction vectors.
#
# A1, A2 are lists with Cartesian components x, y, z; inputs still in
# spherical form (non-numeric x) are first converted with tocartL().
# Returns the Cartesian components of A1 x A2 together with the
# azimuth/dip representation computed by TOSPHERE().
CROSSL <- function(A1, A2) {
    if (!is.numeric(A1$x)) A1 <- tocartL(A1)
    if (!is.numeric(A2$x)) A2 <- tocartL(A2)
    # Standard right-handed cross-product components.
    cx <- A1$y * A2$z - A1$z * A2$y
    cy <- A1$z * A2$x - A1$x * A2$z
    cz <- A1$x * A2$y - A1$y * A2$x
    sph <- TOSPHERE(cx, cy, cz)
    list(x = cx, y = cy, z = cz, az = sph$az, dip = sph$dip)
}
| /R/CROSSL.R | no_license | cran/RFOC | R | false | false | 274 | r | `CROSSL` <-
# (continuation of the `CROSSL` assignment begun on the previous line)
# Cross product of two direction vectors A1, A2 (lists with x/y/z
# components; spherical inputs are first converted via tocartL()).
# Returns the Cartesian components plus the azimuth/dip of the result
# as computed by TOSPHERE().
function(A1, A2)
{
    # Convert either input to Cartesian form if its x is non-numeric.
    if(!is.numeric(A1$x)) A1 = tocartL(A1)
    if(!is.numeric(A2$x)) A2 = tocartL(A2)
    # Right-handed cross-product components.
    x = A1$y*A2$z-A1$z*A2$y
    y = A1$z*A2$x-A1$x*A2$z
    z = A1$x*A2$y-A2$x*A1$y
    # Spherical (azimuth/dip) representation of the result vector.
    a = TOSPHERE(x, y, z)
    return(list(x=x, y=y, z=z, az=a$az, dip=a$dip))
}
|
# Fuzzer-generated regression test: call breakfast's internal
# setBitNumber with a fixed integer argument list.
testlist <- list(n = 1195706880L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) | /breakfast/inst/testfiles/setBitNumber/libFuzzer_setBitNumber/setBitNumber_valgrind_files/1609962399-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 98 | r | testlist <- list(n = 1195706880L)
# Invoke the fuzz target with the argument list built above.
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ngramComponents.R
\name{textformat}
\alias{textformat}
\title{Text Formatter}
\usage{
textformat(text, punct = FALSE)
}
\arguments{
\item{text}{character Vector of strings to clean.}
\item{punct}{logical Should punctuation be kept as tokens? Default is FALSE.}
}
\value{
character Vector of cleaned strings.
}
\description{
Internal helper that cleans and formats a character vector of text strings.
}
\keyword{internal}
| /man/textformat.Rd | no_license | kbenoit/doc2concrete | R | false | true | 452 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ngramComponents.R
\name{textformat}
\alias{textformat}
\title{Text Formatter}
\usage{
textformat(text, punct = FALSE)
}
\arguments{
\item{text}{character Vector of strings to clean.}
\item{punct}{logical Should punctuation be kept as tokens? Default is FALSE.}
}
\value{
character Vector of cleaned strings.
}
\description{
Internal helper that cleans and formats a character vector of text strings.
}
\keyword{internal}
|
# ------------------------------------------------------------------
# MSDS 6306 Live Session Unit 11: time series analysis of the DAX
# index (EuStockMarkets), ses()/holt() exponential-smoothing forecasts
# of Melbourne maximum temperatures, and an interactive dygraph of
# Ollivander vs. Gregorovitch wand sales.
# NOTE(review): rm(list = ls()) plus three machine-specific setwd()
# calls make this script non-portable; setwd() errors outright when a
# drive is absent, and only the last successful call takes effect.
# ------------------------------------------------------------------
rm(list = ls())
setwd("I:/SMU/MSDS 6306 Doing Data Science/Lecture Assignments/Live Session Unit 11 Assignment")
setwd("J:/SMU/MSDS 6306 Doing Data Science/Lecture Assignments/Live Session Unit 11 Assignment")
setwd("P:/SMU/MSDS 6306 Doing Data Science/Lecture Assignments/Live Session Unit 11 Assignment")
library(fpp)
library(fpp2)
library(dplyr)
library("dygraphs")
library("forecast")
library("xts")
# Read EuStockMarkets data 1991 - 1999
EUdata <- window(EuStockMarkets)
# Diagnostics
str(EUdata)
head(EUdata)
# Extract DAX data only
DAXdata <- EUdata[,1]
# Diagnostics
str(DAXdata)
head(DAXdata)
# Plot time series data
plot(DAXdata, ylab="Closing Price", xlab="Year", main="DAX Index (1991-1999)", col = "blue")
abline(v=1997, col="red")
# Decompose components using multiplicative methods
DAXdataComp <- decompose(DAXdata, type = c("multiplicative"))
plot(DAXdataComp, col="blue")
abline(v=1997, col="red")
# 2.a. Temperature data ==========================================================================
?maxtemp
# Maximum annual temperatures (degrees Celsius) for Moorabbin Airport, Melbourne. 1971-2016.
# Annual time series of class ts.
# Examples
autoplot(maxtemp)
# 2.b. =========================================================================================
maxtemp_1990_2016 <- window(maxtemp,start=1990,end=2016)
#utils::View(maxtemp)
# 2.c. ============================================================================================
# SES analysis
# exponential smoothing
# simple means start with first sample, h=5 to predict out 5 years
maxtemp_ses_1990_2016_5y <- ses(maxtemp_1990_2016, alpha=0.8,beta=0.2,h=5)
# Predicted Values
plot(maxtemp_ses_1990_2016_5y, ylab="Temperature", xlab="Year", fcol="white", type="o", col="blue", main="Maximum Temperature Forecasts from Simple Exponential Smoothing")
# Fitted Values
fit_maxtemp_ses_1990_2016_5y <- fitted(maxtemp_ses_1990_2016_5y)
lines(fit_maxtemp_ses_1990_2016_5y, col="red", type="o")
# Forecasted Values
lines(maxtemp_ses_1990_2016_5y$mean, col="green", type="o")
# plot datelime from 2016
abline(v=2016, col="red")
legend("topleft", lty=1, col=c("blue", "red", "green"), c("Predicted", "Fitted", "5 Year Forecast"), pch=1)
# AICc of this ses model: 148.3759
maxtemp_ses_1990_2016_5y$model
# 2.d. ==============================================================================
# Holt analysis
# Damped linear trend
# optimal means the initial values are optimized along with the smoothing parameters, h=5 to predict out 5 years
maxtemp_holt_1990_2016 <- holt(maxtemp_1990_2016, alpha=0.8, beta=0.2, damped=TRUE, initial='optimal',main="Maximum Temperature Forecasts from Holt's Damped Smoothing", h=5)
# Predicted Values
plot(maxtemp_holt_1990_2016, ylab="Temperature", xlab="Year", fcol="white", type="o", col="blue")
# Fitted Values
fit_holt_1990_2016 <- fitted(maxtemp_holt_1990_2016)
lines(fit_holt_1990_2016, col="red", type="o")
# The Forecasted Values
lines(maxtemp_holt_1990_2016$mean, col="green", type="o")
legend("topleft", lty=1, col=c("blue", "red", "green"), c("Predicted", "Fitted", "5 Year Forecast"), pch=1)
# AICc of this holt model: 157.9802
maxtemp_holt_1990_2016$model
# 2.e. ============================================================================================================
# AICc of this ses model: 148.3759
# AICc of this holt model: 157.9802
## Based on the Wikipedia entry on AICc, the one with the smaller value is the better fitting model so the ses model is the best of the two cases.
# 3.a ==================================================================================================
# Read in csv datasets for Ollivander and Gregorovitch
# Rename columns
Ollivander <- read.csv("./Unit11TimeSeries_Ollivander.csv", header=FALSE)
Ollivander <- dplyr::rename(Ollivander, "Year" = V1, "Wands" = V2)
Gregorovitch <- read.csv("./Unit11TimeSeries_Gregorovitch.csv", header=FALSE)
Gregorovitch <- dplyr::rename(Gregorovitch, "Year" = V1, "Wands" = V2)
# 3.b ==================================================================================================
# Convert "Year" to Date class variable for Ollivander and Gregorovitch
Ollivander$Year <- as.Date(Ollivander$Year,"%m/%d/%Y")
class(Ollivander$Year)
Gregorovitch$Year <- as.Date(Gregorovitch$Year,"%m/%d/%Y")
class(Gregorovitch$Year)
# 3.c ==================================================================================================
# Convert Ollivander and Gregorovitch data frames into xts (time series) objects
# NOTE(review): in both pairs below the as.xts() result is immediately
# overwritten by the xts() call, so the first assignment is dead code.
OTS <- as.xts(Ollivander, order.by = Ollivander$Year)
OTS <- xts(Ollivander$Wands, order.by=Ollivander$Year)
utils::View(OTS)
str(OTS)
class(OTS)
OTS
GTS <- as.xts(Gregorovitch, order.by = Gregorovitch$Year)
GTS <- xts(Gregorovitch$Wands, order.by=Gregorovitch$Year)
utils::View(GTS)
str(GTS)
class(GTS)
GTS
# 3.d ==================================================================================================
# Bind both xts (time series) objects
wandTS <-cbind(OTS, GTS)
colnames(wandTS) <- c("Ollivander", "Gregorovitch")
#wandTS <- merge(OTS, GTS, join = "right", fill = 9999)
#colnames(wandTS) <- c("Ollivander", "Gregorovitch")
#wandTS <- sapply(wandTS, as.numeric)
utils::View(wandTS)
str(wandTS)
#dygraphs
# Interactive chart: shaded region and events mark 1995-1999.
dygraph(wandTS, main="Ollivander vs. Gregorovitch Wand Sales", ylab="Wands", xlab="Year") %>%
  dyOptions(rightGap=20) %>%
  dyLegend(width=291) %>%
  dyAxis('y', rangePad=10) %>%
  dySeries(name = "Ollivander", label = "Ollivander", color = "red") %>%
  dySeries(name = "Gregorovitch", label = "Gregorovitch", color = "darkgreen") %>%
  dyRangeSelector(height=100, fillColor = "purple") %>%
  dyShading(from = "1995-1-1", to = "1999-1-1", color = "#87f89b") %>%
  dyHighlight(highlightCircleSize = 5,
              highlightSeriesBackgroundAlpha = 0.5,
              hideOnMouseOut = FALSE) %>%
  dyEvent("1995-1-1", "Voldemort Rises", labelLoc = "top") %>%
  dyEvent("1999-1-1", "Voldemort Defeated", labelLoc = "top")
# ================================================================================
# sapply(wandTS, as.numeric)
| /LiveSession11/LectureAssignment11.R | no_license | lp5510/Homework | R | false | false | 6,287 | r | rm(list = ls())
# ------------------------------------------------------------------
# MSDS 6306 Live Session Unit 11: time series analysis of the DAX
# index (EuStockMarkets), ses()/holt() exponential-smoothing forecasts
# of Melbourne maximum temperatures, and an interactive dygraph of
# Ollivander vs. Gregorovitch wand sales.
# NOTE(review): the three machine-specific setwd() calls make this
# script non-portable; setwd() errors outright when a drive is absent,
# and only the last successful call takes effect.
# ------------------------------------------------------------------
setwd("I:/SMU/MSDS 6306 Doing Data Science/Lecture Assignments/Live Session Unit 11 Assignment")
setwd("J:/SMU/MSDS 6306 Doing Data Science/Lecture Assignments/Live Session Unit 11 Assignment")
setwd("P:/SMU/MSDS 6306 Doing Data Science/Lecture Assignments/Live Session Unit 11 Assignment")
library(fpp)
library(fpp2)
library(dplyr)
library("dygraphs")
library("forecast")
library("xts")
# Read EuStockMarkets data 1991 - 1999
EUdata <- window(EuStockMarkets)
# Diagnostics
str(EUdata)
head(EUdata)
# Extract DAX data only
DAXdata <- EUdata[,1]
# Diagnostics
str(DAXdata)
head(DAXdata)
# Plot time series data
plot(DAXdata, ylab="Closing Price", xlab="Year", main="DAX Index (1991-1999)", col = "blue")
abline(v=1997, col="red")
# Decompose components using multiplicative methods
DAXdataComp <- decompose(DAXdata, type = c("multiplicative"))
plot(DAXdataComp, col="blue")
abline(v=1997, col="red")
# 2.a. Temperature data ==========================================================================
?maxtemp
# Maximum annual temperatures (degrees Celsius) for Moorabbin Airport, Melbourne. 1971-2016.
# Annual time series of class ts.
# Examples
autoplot(maxtemp)
# 2.b. =========================================================================================
maxtemp_1990_2016 <- window(maxtemp,start=1990,end=2016)
#utils::View(maxtemp)
# 2.c. ============================================================================================
# SES analysis
# exponential smoothing
# simple means start with first sample, h=5 to predict out 5 years
maxtemp_ses_1990_2016_5y <- ses(maxtemp_1990_2016, alpha=0.8,beta=0.2,h=5)
# Predicted Values
plot(maxtemp_ses_1990_2016_5y, ylab="Temperature", xlab="Year", fcol="white", type="o", col="blue", main="Maximum Temperature Forecasts from Simple Exponential Smoothing")
# Fitted Values
fit_maxtemp_ses_1990_2016_5y <- fitted(maxtemp_ses_1990_2016_5y)
lines(fit_maxtemp_ses_1990_2016_5y, col="red", type="o")
# Forecasted Values
lines(maxtemp_ses_1990_2016_5y$mean, col="green", type="o")
# plot datelime from 2016
abline(v=2016, col="red")
legend("topleft", lty=1, col=c("blue", "red", "green"), c("Predicted", "Fitted", "5 Year Forecast"), pch=1)
# AICc of this ses model: 148.3759
maxtemp_ses_1990_2016_5y$model
# 2.d. ==============================================================================
# Holt analysis
# Damped linear trend
# optimal means the initial values are optimized along with the smoothing parameters, h=5 to predict out 5 years
maxtemp_holt_1990_2016 <- holt(maxtemp_1990_2016, alpha=0.8, beta=0.2, damped=TRUE, initial='optimal',main="Maximum Temperature Forecasts from Holt's Damped Smoothing", h=5)
# Predicted Values
plot(maxtemp_holt_1990_2016, ylab="Temperature", xlab="Year", fcol="white", type="o", col="blue")
# Fitted Values
fit_holt_1990_2016 <- fitted(maxtemp_holt_1990_2016)
lines(fit_holt_1990_2016, col="red", type="o")
# The Forecasted Values
lines(maxtemp_holt_1990_2016$mean, col="green", type="o")
legend("topleft", lty=1, col=c("blue", "red", "green"), c("Predicted", "Fitted", "5 Year Forecast"), pch=1)
# AICc of this holt model: 157.9802
maxtemp_holt_1990_2016$model
# 2.e. ============================================================================================================
# AICc of this ses model: 148.3759
# AICc of this holt model: 157.9802
## Based on the Wikipedia entry on AICc, the one with the smaller value is the better fitting model so the ses model is the best of the two cases.
# 3.a ==================================================================================================
# Read in csv datasets for Ollivander and Gregorovitch
# Rename columns
Ollivander <- read.csv("./Unit11TimeSeries_Ollivander.csv", header=FALSE)
Ollivander <- dplyr::rename(Ollivander, "Year" = V1, "Wands" = V2)
Gregorovitch <- read.csv("./Unit11TimeSeries_Gregorovitch.csv", header=FALSE)
Gregorovitch <- dplyr::rename(Gregorovitch, "Year" = V1, "Wands" = V2)
# 3.b ==================================================================================================
# Convert "Year" to Date class variable for Ollivander and Gregorovitch
Ollivander$Year <- as.Date(Ollivander$Year,"%m/%d/%Y")
class(Ollivander$Year)
Gregorovitch$Year <- as.Date(Gregorovitch$Year,"%m/%d/%Y")
class(Gregorovitch$Year)
# 3.c ==================================================================================================
# Convert Ollivander and Gregorovitch data frames into xts (time series) objects
# NOTE(review): in both pairs below the as.xts() result is immediately
# overwritten by the xts() call, so the first assignment is dead code.
OTS <- as.xts(Ollivander, order.by = Ollivander$Year)
OTS <- xts(Ollivander$Wands, order.by=Ollivander$Year)
utils::View(OTS)
str(OTS)
class(OTS)
OTS
GTS <- as.xts(Gregorovitch, order.by = Gregorovitch$Year)
GTS <- xts(Gregorovitch$Wands, order.by=Gregorovitch$Year)
utils::View(GTS)
str(GTS)
class(GTS)
GTS
# 3.d ==================================================================================================
# Bind both xts (time series) objects
wandTS <-cbind(OTS, GTS)
colnames(wandTS) <- c("Ollivander", "Gregorovitch")
#wandTS <- merge(OTS, GTS, join = "right", fill = 9999)
#colnames(wandTS) <- c("Ollivander", "Gregorovitch")
#wandTS <- sapply(wandTS, as.numeric)
utils::View(wandTS)
str(wandTS)
#dygraphs
# Interactive chart: shaded region and events mark 1995-1999.
dygraph(wandTS, main="Ollivander vs. Gregorovitch Wand Sales", ylab="Wands", xlab="Year") %>%
  dyOptions(rightGap=20) %>%
  dyLegend(width=291) %>%
  dyAxis('y', rangePad=10) %>%
  dySeries(name = "Ollivander", label = "Ollivander", color = "red") %>%
  dySeries(name = "Gregorovitch", label = "Gregorovitch", color = "darkgreen") %>%
  dyRangeSelector(height=100, fillColor = "purple") %>%
  dyShading(from = "1995-1-1", to = "1999-1-1", color = "#87f89b") %>%
  dyHighlight(highlightCircleSize = 5,
              highlightSeriesBackgroundAlpha = 0.5,
              hideOnMouseOut = FALSE) %>%
  dyEvent("1995-1-1", "Voldemort Rises", labelLoc = "top") %>%
  dyEvent("1999-1-1", "Voldemort Defeated", labelLoc = "top")
# ================================================================================
# sapply(wandTS, as.numeric)
|
#' Return the nodes of a DAG in topological order (parents before children).
#'
#' @param dag Square numeric adjacency matrix; dag[i, j] > 0 means an edge
#'   i -> j (i is a parent of j).
#' @return Integer vector of node indices in a valid topological order.
topological_sort <- function(dag) {
  # Fast path: use the compiled Rfast implementation when it is installed.
  if (requireNamespace("Rfast", quietly = TRUE)) {
    return(Rfast::topological_sort(dag))
  }
  # Fallback: plain-R Kahn's algorithm (same contract; previously this
  # existed only as commented-out BNT-derived code, so the function failed
  # outright without Rfast).
  n <- dim(dag)[1]
  indeg <- colSums(dag)            # in-degree = number of parents per node
  zero_indeg <- which(indeg == 0)  # stack of nodes with no remaining parents
  ord <- integer(n)
  i <- 1L
  while (length(zero_indeg) > 0 && i <= n) {
    v <- zero_indeg[1]             # pop a parentless node
    zero_indeg <- zero_indeg[-1]
    ord[i] <- v
    i <- i + 1L
    for (m in which(dag[v, ] > 0)) {   # children of v
      indeg[m] <- indeg[m] - 1
      if (indeg[m] == 0) zero_indeg <- c(m, zero_indeg)  # push m
    }
  }
  ord
}
# topological_sort <- function(dag) {
# ## TOPOLOGICAL_SORT Return the nodes in topological order (parents before children).
# ## order = topological_sort(adj_mat)
# ## Base Code is from BNT
# n = dim(dag)[1];
# indeg = numeric(n)
# zero_indeg = NULL ## a stack of nodes with no parents
# ## for ( i in 1:n ) {
# ## indeg[i] = length( which(dag[, i] > 0) ) ## parents(A, i)
# ## if ( indeg[i] == 0 ) zero_indeg = c(i, zero_indeg)
# ## }
# indeg = Rfast::colsums(dag) ## parents(A, i)
# for ( i in 1:n ) if ( indeg[i] == 0 ) zero_indeg = c(i, zero_indeg)
# i = 1
# ord = numeric(n)
# while ( !is.null(zero_indeg) & i <= n ) {
# v = zero_indeg[1] ## pop v
# zero_indeg = zero_indeg[-1]
# ord[i] = v
# i = i + 1;
# cs = which(dag[v, ] > 0) ## children(A, v)
# if ( length(cs) > 0 ) {
# for ( j in 1:length(cs) ) {
# m = cs[j]
# indeg[m] = indeg[m] - 1
# if ( indeg[m] == 0 ) {
# zero_indeg = c(m, zero_indeg) ## push m
# }
# }
#
# } ## end if ( length(cs) > 0 )
#
# } ## end while ( !is.null(zero_indeg) & i <= n )
#
# ord
# }
| /R/topological_sort.R | no_license | cran/MXM | R | false | false | 1,304 | r | topological_sort <- function(dag) {
Rfast::topological_sort(dag)
}
# topological_sort <- function(dag) {
# ## TOPOLOGICAL_SORT Return the nodes in topological order (parents before children).
# ## order = topological_sort(adj_mat)
# ## Base Code is from BNT
# n = dim(dag)[1];
# indeg = numeric(n)
# zero_indeg = NULL ## a stack of nodes with no parents
# ## for ( i in 1:n ) {
# ## indeg[i] = length( which(dag[, i] > 0) ) ## parents(A, i)
# ## if ( indeg[i] == 0 ) zero_indeg = c(i, zero_indeg)
# ## }
# indeg = Rfast::colsums(dag) ## parents(A, i)
# for ( i in 1:n ) if ( indeg[i] == 0 ) zero_indeg = c(i, zero_indeg)
# i = 1
# ord = numeric(n)
# while ( !is.null(zero_indeg) & i <= n ) {
# v = zero_indeg[1] ## pop v
# zero_indeg = zero_indeg[-1]
# ord[i] = v
# i = i + 1;
# cs = which(dag[v, ] > 0) ## children(A, v)
# if ( length(cs) > 0 ) {
# for ( j in 1:length(cs) ) {
# m = cs[j]
# indeg[m] = indeg[m] - 1
# if ( indeg[m] == 0 ) {
# zero_indeg = c(m, zero_indeg) ## push m
# }
# }
#
# } ## end if ( length(cs) > 0 )
#
# } ## end while ( !is.null(zero_indeg) & i <= n )
#
# ord
# }
|
# Build Neural Network for Multi-Class classification.
# NOTE(review): the header mentions the neuralnet library, but the script
# actually uses nnet below — confirm which was intended.
rm(list=ls(all=TRUE))   # script-style hard reset of the workspace
# Set the working directory (machine-specific path; adjust before running)
setwd("~/Desktop/GNQ3/20160218_Batch25_CSE7405c_NeuralNetLab")
# Import "all_features.txt" into a data frame; whitespace-separated, no header
data<-read.table("all_features.txt", sep="", header=F)
# Inspect the structure and summary of the data
str(data)
summary(data)
# Separate target variable and independent variables.
# The first column is the target; columns 2..73 are the 72 features.
target_Variable = data[, 1]
independent_Variables = data[,2:73]
# Standardize the independent variables using decostand from the vegan package
library(vegan)
# "range" scales each column to [0, 1]
independent_Variables = decostand(independent_Variables, "range")
# Recombine target and independent variables
data = data.frame(independent_Variables, target = target_Variable)
rm(independent_Variables, target_Variable)
# Convert the class/target variable to a factor (required for classification)
data$target = as.factor(data$target)
# Per-class record counts: mean, min, max, and median across target levels
target_Var_Dist = data.frame(table(data$target))
mean(target_Var_Dist$Freq)
min(target_Var_Dist$Freq)
max(target_Var_Dist$Freq)
median(target_Var_Dist$Freq)
#-----------------------------------------------
# Baseline: simple 60/40 random train/test split (no stratification).
# NOTE(review): no set.seed() here, so this split is not reproducible.
trainID = sample(1:nrow(data),(nrow(data)*0.6))
train_Data = data[trainID,]
test_Data = data[-trainID,]
rm(trainID)
# Fit a single-hidden-layer network on the training data
library(nnet)
nn = nnet(target ~ ., data = train_Data,
          size = 8, rang = 0.1,
          decay = 5e-4, maxit = 100)
# Validate on the test data
pred = predict(nn, test_Data, type = "class")
sort(as.numeric(unique(pred)))   # which classes were actually predicted
conf_Matrix = table(pred, test_Data$target)
# Reorder rows numerically so the confusion matrix diagonal lines up
conf_Matrix = conf_Matrix[order(as.numeric(rownames(conf_Matrix))), ]
# Accuracy of the model (percent)
sum(diag(conf_Matrix))/sum(conf_Matrix)*100
#-----------------------------------------------
# Stratified, class-balanced split: take the median per-class count as the
# target number of training records for every class.
min_Num_Train_Records = as.integer(median(target_Var_Dist$Freq))
train_RowNames = c();
test_RowNames = c();
# Select train samples randomly per class, oversampling small classes so each
# class contributes a similar number of training records.
makeTrainCountsEqualForEachClass = 1;
for (i in target_Var_Dist$Var1){
  # Row names of all records belonging to class i
  subset_RowNames = rownames(data[which(data$target==i),])
  subset_Count = target_Var_Dist[target_Var_Dist$Var1==i, "Freq"]
  # 80/20 split within the class
  subset_Train_Count = round(subset_Count * 0.8, digits = 0)
  # NOTE(review): the same seed is reset for every class, so each class's
  # sample() draws from an identical random stream — confirm this is intended.
  set.seed(123);
  subset_RowNames_Train = sample(subset_RowNames, subset_Train_Count)
  subset_RowNames_Test = setdiff(subset_RowNames, subset_RowNames_Train)
  # Degenerate classes: if the class is too small to split, use all rows
  # for training (and, below, for testing as well).
  if (subset_Train_Count == 0){
    subset_RowNames_Train = subset_RowNames;
    subset_Train_Count = subset_Count;
  }
  if (length(subset_RowNames_Test) == 0){
    subset_RowNames_Test = subset_RowNames;
  }
  if (subset_Train_Count < min_Num_Train_Records &&
      makeTrainCountsEqualForEachClass == 1){
    cat("\nEnsuring all classes have similar training data sizes");
    # Not enough samples for this class: replicate the selected row names
    # until the class reaches roughly the median training size.
    num_Times_Repeat = round(min_Num_Train_Records/subset_Train_Count)
    subset_RowNames_Train = rep.int(subset_RowNames_Train, num_Times_Repeat);
    # NOTE(review): the TEST rows are replicated too, which duplicates test
    # records and inflates the apparent test size — confirm this is intended.
    subset_RowNames_Test = rep.int(subset_RowNames_Test, num_Times_Repeat);
  }
  cat("\nTrain data: ", length(subset_RowNames_Train), ", Test data:", length(subset_RowNames_Test))
  train_RowNames = c(train_RowNames, as.numeric(subset_RowNames_Train))
  test_RowNames = c(test_RowNames, as.numeric(subset_RowNames_Test))
}
# Drop the loop's temporaries. Two fixes versus the original:
#  - the original listed the fused name "subset_Train_Counttarget_Var_Dist"
#    (a missing comma merged two names), so rm() warned about a missing
#    object and target_Var_Dist was never removed;
#  - num_Times_Repeat only exists if the oversampling branch ever ran, so we
#    remove only the names that are actually present to avoid warnings.
tmp_vars <- c("i", "num_Times_Repeat", "subset_Count", "subset_Train_Count",
              "subset_RowNames", "subset_RowNames_Test", "subset_RowNames_Train",
              "target_Var_Dist", "makeTrainCountsEqualForEachClass")
rm(list = c(intersect(tmp_vars, ls()), "tmp_vars"))
# Shuffle so the training data is presented to the optimizer in random order.
train_RowNames = sample(train_RowNames);
length(train_RowNames)
# Expected training size if every class contributed exactly the median count
length(unique(data$target)) * min_Num_Train_Records
rm(min_Num_Train_Records)
# Build the balanced train & test sets from the collected row names
train_Data = data[train_RowNames,]
test_Data = data[test_RowNames,]
rm(train_RowNames, test_RowNames)
# Refit the network on the balanced training data (same hyperparameters as
# the baseline fit above)
library(nnet)
nn = nnet(target ~ ., data = train_Data, size = 8, rang = 0.1, decay = 5e-4, maxit = 100)
# Validate on the (balanced) test data
pred = predict(nn, test_Data, type = "class")
sort(as.numeric(unique(pred)))   # which classes were actually predicted
conf_Matrix = table(pred, test_Data$target)
# Reorder rows numerically so the diagonal lines up
conf_Matrix = conf_Matrix[order(as.numeric(rownames(conf_Matrix))), ]
# Accuracy of the model (percent)
sum(diag(conf_Matrix))/sum(conf_Matrix)*100
| /04_nnet_Multi-Class.R | no_license | nursnaaz/NeuralNetwork | R | false | false | 4,798 | r | # Build Neural Network for Multi-Class classification using neuralnet library.
rm(list=ls(all=TRUE))
# Set the working directory
setwd("~/Desktop/GNQ3/20160218_Batch25_CSE7405c_NeuralNetLab")
# Importing "all_features.txt" files's data into R dataframe using read.table function
data<-read.table("all_features.txt", sep="", header=F)
# Understand the structure the summary of the data using str and summary R commands
str(data)
summary(data)
# Separate Target Variable and Independent Variables.
# In this case First variable is target variable and remaining are independent variables
target_Variable = data[, 1]
independent_Variables = data[,2:73]
# Standardization the independent variables using decostand funcion in vegan R library
library(vegan)
# Note: To standardize the data using 'Range' method
independent_Variables = decostand(independent_Variables, "range")
# Recombine Target and independent variables
data = data.frame(independent_Variables, target = target_Variable)
rm(independent_Variables, target_Variable)
# Convert class/target variable as factor
data$target = as.factor(data$target)
# compute mean, min, max, and median number records for each level in the target attribute
target_Var_Dist = data.frame(table(data$target))
mean(target_Var_Dist$Freq)
min(target_Var_Dist$Freq)
max(target_Var_Dist$Freq)
median(target_Var_Dist$Freq)
#-----------------------------------------------
#Splitting as Train & Test sets
trainID = sample(1:nrow(data),(nrow(data)*0.6))
train_Data = data[trainID,]
test_Data = data[-trainID,]
rm(trainID)
#Executing NN on Train Data
library(nnet)
nn = nnet(target ~ ., data = train_Data,
size = 8, rang = 0.1,
decay = 5e-4, maxit = 100)
#Validating the results on test data
pred = predict(nn, test_Data, type = "class")
sort(as.numeric(unique(pred)))
conf_Matrix = table(pred, test_Data$target)
conf_Matrix = conf_Matrix[order(as.numeric(rownames(conf_Matrix))), ]
#Accuracy of model
sum(diag(conf_Matrix))/sum(conf_Matrix)*100
#-----------------------------------------------
# In this case median is more appropriate, so take median number of samples from each level of the target attribute
min_Num_Train_Records = as.integer(median(target_Var_Dist$Freq))
train_RowNames = c();
test_RowNames = c();
#Selecing train samples randomly for each class and also making sure to have equal sample in each class
makeTrainCountsEqualForEachClass = 1;
for (i in target_Var_Dist$Var1){
subset_RowNames = rownames(data[which(data$target==i),])
subset_Count = target_Var_Dist[target_Var_Dist$Var1==i, "Freq"]
subset_Train_Count = round(subset_Count * 0.8, digits = 0)
set.seed(123);
subset_RowNames_Train = sample(subset_RowNames, subset_Train_Count)
subset_RowNames_Test = setdiff(subset_RowNames, subset_RowNames_Train)
if (subset_Train_Count == 0){
subset_RowNames_Train = subset_RowNames;
subset_Train_Count = subset_Count;
}
if (length(subset_RowNames_Test) == 0){
subset_RowNames_Test = subset_RowNames;
}
if (subset_Train_Count < min_Num_Train_Records &&
makeTrainCountsEqualForEachClass == 1){
cat("\nEnsuring all classes have similar training data sizes");
# If you DO NOT have enough samples to be selected for training.
# repeat the selectedRowNamesTrain.
num_Times_Repeat = round(min_Num_Train_Records/subset_Train_Count)
subset_RowNames_Train = rep.int(subset_RowNames_Train, num_Times_Repeat);
subset_RowNames_Test = rep.int(subset_RowNames_Test, num_Times_Repeat);
}
cat("\nTrain data: ", length(subset_RowNames_Train), ", Test data:", length(subset_RowNames_Test))
train_RowNames = c(train_RowNames, as.numeric(subset_RowNames_Train))
test_RowNames = c(test_RowNames, as.numeric(subset_RowNames_Test))
}
rm(i, num_Times_Repeat, subset_Count, subset_Train_Count,
subset_RowNames, subset_RowNames_Test, subset_RowNames_Train,
subset_Train_Counttarget_Var_Dist, makeTrainCountsEqualForEachClass )
# Make sure all the training data is presented randomly.
train_RowNames = sample(train_RowNames);
length(train_RowNames)
length(unique(data$target)) * min_Num_Train_Records
rm(min_Num_Train_Records)
#Splitting as Train & Test sets
train_Data = data[train_RowNames,]
test_Data = data[test_RowNames,]
rm(train_RowNames, test_RowNames)
#Executing NN on Train Data
library(nnet)
nn = nnet(target ~ ., data = train_Data, size = 8, rang = 0.1, decay = 5e-4, maxit = 100)
#Validating the results on test data
pred = predict(nn, test_Data, type = "class")
sort(as.numeric(unique(pred)))
conf_Matrix = table(pred, test_Data$target)
conf_Matrix = conf_Matrix[order(as.numeric(rownames(conf_Matrix))), ]
#Accuracy of model
sum(diag(conf_Matrix))/sum(conf_Matrix)*100
|
# Assemble Nova Scotia COVID-19 SIR-style count data from a public Google
# Sheet and format it for downstream Stan models.
#
#   output : "raw_data" (sheet as read), "daily_data" (gap-filled daily
#            table), or "stan_data" (list formatted for the Stan models).
#   Npop   : population size used to derive the susceptible count.
#   Npreds : number of forecast time steps requested from the model.
#   interpolate_missing_data : if TRUE, linearly interpolate missing
#            cumulative counts between observed days.
#   ...    : extra named entries appended verbatim to the stan_data list.
data_nova_scotia = function( output="stan_data", Npop=971395, Npreds=5, interpolate_missing_data=FALSE, ... ) {
  # install.packages("googlesheets4")
  library(googlesheets4)
  # Pull the raw case counts from the shared Google Sheet
  gsdata = read_sheet( "https://docs.google.com/spreadsheets/d/1tgf2H9gDmRnGDGeQE-fC9IPhrmNxP8-JC7Nnnob_vuY/edit?usp=sharing" )
  gsdata$Iobs = gsdata$InfectedCurrently
  gsdata$Robs = gsdata$Recoveries + gsdata$Deaths # note: recovered = deaths+recoveries
  gsdata$Sobs = Npop - gsdata$Robs - gsdata$Iobs
  gsdata$Mobs = gsdata$Deaths # mortalities .. redundant here, but some models split recoveries from mortalities, so it is kept
  # Day number relative to the first observation (day 1 = first row)
  gsdata$dayno = lubridate::date( gsdata$Date)
  gsdata$dayno = gsdata$dayno - min(gsdata$dayno) + 1
  # Key event dates, taken from the ManagementMeasures free-text column
  time_start = min( gsdata$Date )
  time_distancing = min( gsdata$Date[ which( grepl("socialdistancing", gsdata$ManagementMeasures )) ] )
  time_relaxation = min( gsdata$Date[ which( grepl("open parks", gsdata$ManagementMeasures )) ] )
  if (output=="raw_data") return (gsdata)
  # Expand to one row per calendar day; days with no sheet entry start as NA
  daily = expand.grid( dayno=1:max(gsdata$dayno ))
  daily[, c("Iobs", "Sobs", "Robs", "Mobs")] = NA
  i = match( gsdata$dayno, daily$dayno )
  daily[i, c("Sobs", "Iobs", "Robs", "Mobs")] = gsdata[,c("Sobs", "Iobs", "Robs", "Mobs")]
  if (interpolate_missing_data) {
    # These are cumulative counts .. linear approximation where missing,
    # truncated to whole counts
    j = which( !is.finite(daily$Robs) )
    if (length(j) > 0) {
      o = approx( x=daily$dayno , y=daily$Robs, xout = daily$dayno, method="linear")
      daily$Robs[j] = trunc(o$y[j])
    }
    j = which( !is.finite(daily$Mobs) )
    if (length(j) > 0) {
      o = approx( x=daily$dayno , y=daily$Mobs, xout = daily$dayno, method="linear")
      daily$Mobs[j] = trunc(o$y[j])
    }
    j = which( !is.finite(daily$Sobs) )
    if (length(j) > 0) {
      o = approx( x=daily$dayno , y=daily$Sobs, xout = daily$dayno, method="linear")
      daily$Sobs[j] = trunc(o$y[j])
    }
    # Infected is derived from the other compartments so totals stay at Npop
    j = which( !is.finite(daily$Iobs) )
    if (length(j) > 0) daily$Iobs[j] = Npop - daily$Sobs[j] - daily$Robs[j] - daily$Mobs[j]
  }
  # Final check .. encode any remaining missing values as -1 (the Stan code's
  # missing-data sentinel)
  j = which( !is.finite(daily$Sobs) ); if (length(j) > 0) daily$Sobs[j] = -1
  j = which( !is.finite(daily$Iobs) ); if (length(j) > 0) daily$Iobs[j] = -1
  j = which( !is.finite(daily$Robs) ); if (length(j) > 0) daily$Robs[j] = -1
  j = which( !is.finite(daily$Mobs) ); if (length(j) > 0) daily$Mobs[j] = -1
  if (output=="daily_data") return (daily)
  # Package everything for Stan
  stan_data = list(
    Npop = Npop,
    Nobs = nrow( daily ),
    Npreds = Npreds, # here, only total number of predictions for output
    Sobs = daily$Sobs,
    Iobs = daily$Iobs,
    Robs = daily$Robs,
    Mobs = daily$Mobs,
    time = as.integer(daily$dayno),
    time_start = time_start,
    time_distancing = time_distancing,
    time_relaxation = time_relaxation,
    plotlabel = "Nova Scotia"
  )
  # Caller-supplied extras override/extend defaults below
  stan_data = c( stan_data, list(...) )
  if (!exists("modelname", stan_data)) stan_data$modelname="discrete_autoregressive_with_observation_error_structured_beta_mortality"
  stan_data$timestamp = max( lubridate::date( gsdata$Date) )
  if ( stan_data$modelname %in% c("continuous") ) {
    # used by ODE-based methods for rk4 integration
    if (!exists("time_pred", stan_data)) stan_data$time_pred = as.integer( c(daily$dayno, max(daily$dayno)+c(1:Npreds)) )
    if (!exists("t0", stan_data)) stan_data$t0 = -0.01
  }
  # Default priors for the discrete models (only set when not supplied)
  if (!exists("BETA_prior", stan_data)) stan_data$BETA_prior = 0.5
  if (!exists("GAMMA_prior", stan_data)) stan_data$GAMMA_prior = 1/28
  # NOTE(review): this tests for "EPSILON_prior" but assigns $me_prior —
  # confirm which name the Stan model actually reads.
  if (!exists("EPSILON_prior", stan_data)) stan_data$me_prior = 0.05 # % of positively Infected that die
  if (!exists("BNP", stan_data)) stan_data$BNP = 3 # number of days to use for beta averaging for projections
  if (output=="stan_data") return (stan_data)
} | /R/data_nova_scotia.R | permissive | ndewar/adapt | R | false | false | 3,875 | r | data_nova_scotia = function( output="stan_data", Npop=971395, Npreds=5, interpolate_missing_data=FALSE, ... ) {
# install.packages("googlesheets4")
library(googlesheets4)
gsdata = read_sheet( "https://docs.google.com/spreadsheets/d/1tgf2H9gDmRnGDGeQE-fC9IPhrmNxP8-JC7Nnnob_vuY/edit?usp=sharing" )
gsdata$Iobs = gsdata$InfectedCurrently
gsdata$Robs = gsdata$Recoveries + gsdata$Deaths # note: recovered = deaths+recoveries
gsdata$Sobs = Npop - gsdata$Robs - gsdata$Iobs
gsdata$Mobs = gsdata$Deaths # mortalities .. this is redundant here but in some models, recoveries is split apart from mortalities and so useful
gsdata$dayno = lubridate::date( gsdata$Date)
gsdata$dayno = gsdata$dayno - min(gsdata$dayno) + 1
time_start = min( gsdata$Date )
time_distancing = min( gsdata$Date[ which( grepl("socialdistancing", gsdata$ManagementMeasures )) ] )
time_relaxation = min( gsdata$Date[ which( grepl("open parks", gsdata$ManagementMeasures )) ] )
if (output=="raw_data") return (gsdata)
daily = expand.grid( dayno=1:max(gsdata$dayno ))
daily[, c("Iobs", "Sobs", "Robs", "Mobs")] = NA
i = match( gsdata$dayno, daily$dayno )
daily[i, c("Sobs", "Iobs", "Robs", "Mobs")] = gsdata[,c("Sobs", "Iobs", "Robs", "Mobs")]
if (interpolate_missing_data) {
# these are cummulative counts .. linear approximation where missing
j = which( !is.finite(daily$Robs) )
if (length(j) > 0) {
o = approx( x=daily$dayno , y=daily$Robs, xout = daily$dayno, method="linear")
daily$Robs[j] = trunc(o$y[j])
}
j = which( !is.finite(daily$Mobs) )
if (length(j) > 0) {
o = approx( x=daily$dayno , y=daily$Mobs, xout = daily$dayno, method="linear")
daily$Mobs[j] = trunc(o$y[j])
}
j = which( !is.finite(daily$Sobs) )
if (length(j) > 0) {
o = approx( x=daily$dayno , y=daily$Sobs, xout = daily$dayno, method="linear")
daily$Sobs[j] = trunc(o$y[j])
}
j = which( !is.finite(daily$Iobs) )
if (length(j) > 0) daily$Iobs[j] = Npop - daily$Sobs[j] - daily$Robs[j] - daily$Mobs[j]
}
# final check .. set missing values as -1
j = which( !is.finite(daily$Sobs) ); if (length(j) > 0) daily$Sobs[j] = -1
j = which( !is.finite(daily$Iobs) ); if (length(j) > 0) daily$Iobs[j] = -1
j = which( !is.finite(daily$Robs) ); if (length(j) > 0) daily$Robs[j] = -1
j = which( !is.finite(daily$Mobs) ); if (length(j) > 0) daily$Mobs[j] = -1
if (output=="daily_data") return (daily)
stan_data = list(
Npop = Npop,
Nobs = nrow( daily ),
Npreds = Npreds, # here, only total number of predictions for output
Sobs = daily$Sobs,
Iobs = daily$Iobs,
Robs = daily$Robs,
Mobs = daily$Mobs,
time = as.integer(daily$dayno),
time_start = time_start,
time_distancing = time_distancing,
time_relaxation = time_relaxation,
plotlabel = "Nova Scotia"
)
stan_data = c( stan_data, list(...) )
if (!exists("modelname", stan_data)) stan_data$modelname="discrete_autoregressive_with_observation_error_structured_beta_mortality"
stan_data$timestamp = max( lubridate::date( gsdata$Date) )
if ( stan_data$modelname %in% c("continuous") ) {
# used by ODE-based methods for rk4 integration
if (!exists("time_pred", stan_data)) stan_data$time_pred = as.integer( c(daily$dayno, max(daily$dayno)+c(1:Npreds)) )
if (!exists("t0", stan_data)) stan_data$t0 = -0.01
}
# add a few more flags for discrete_variable_encounter_rate and
if (!exists("BETA_prior", stan_data)) stan_data$BETA_prior = 0.5
if (!exists("GAMMA_prior", stan_data)) stan_data$GAMMA_prior = 1/28
if (!exists("EPSILON_prior", stan_data)) stan_data$me_prior = 0.05 # % of positively Infected that die
if (!exists("BNP", stan_data)) stan_data$BNP = 3 # number of days to use for beta averaging for projections
if (output=="stan_data") return (stan_data)
} |
context("Testing oligonucleotide generation")
test_that("Generating oligonucleotide of length 1", {
  # A length-1 oligo is just each of the four nucleotides.
  bases <- c("A", "C", "G", "T")
  expect_equal(generate_oligo(1), bases)
})
test_that("Generating oligonucleotide of length 2", {
  # All 16 ordered pairs, first base varying slowest: AA, AC, ..., TT.
  bases <- c("A", "C", "G", "T")
  expected <- as.vector(t(outer(bases, bases, paste0)))
  expect_equal(generate_oligo(2), expected)
})
| /tests/testthat/test-generate_oligo.r | no_license | KamilSJaron/sighunt | R | false | false | 368 | r | context("Testing oligonucleotide generation")
test_that("Generating oligonucleotide of length 1", {
expect_equal(generate_oligo(1), c("A", "C", "G", "T"))
})
test_that("Generating oligonucleotide of length 2", {
expect_equal(generate_oligo(2), c("AA", "AC", "AG", "AT", "CA", "CC", "CG",
"CT", "GA", "GC", "GG", "GT", "TA", "TC", "TG", "TT"))
})
|
# Plot the distribution of viability labels for the records of `data` that
# were flagged as duplicates and have a known label in DR7_viability.
#
#   DR7_viability : data frame with a Viability column and an id column
#                   named by `unique` (default "MGI id").
#   data          : list with $inputdata (a data frame) and $duplicates
#                   (row selector for the duplicated records).
#   unique        : name of the id column used to match records.
#                   NOTE: this parameter shadows base::unique by name, but
#                   the call unique(pM) below still resolves to the base
#                   function because R skips non-function bindings when
#                   looking up a name used in call position.
#   xlab          : x-axis label passed to plot().
#   ...           : further arguments forwarded to plot().
plot00 = function(DR7_viability,
                  data ,
                  unique = 'MGI id',
                  xlab = 'Viability',
                  ...) {
  # Viability labels of the removed duplicates that appear in DR7_viability
  pM = DR7_viability$Viability[DR7_viability[, unique] %in% data$inputdata[data$duplicates,][, unique]]
  plot(
    pM,
    xlab = xlab,
    ylab = 'Frequency of [removed] duplicates with known label',
    #main = 'DMDD',
    col = unique(pM),  # one color per distinct label (base::unique, see NOTE above)
    sub = paste0('Total = ', length(pM)),
    ...
  )
} | /Other projects/Viability/GeneViability/R/plot00.R | permissive | mpi2/impc_stats_pipeline | R | false | false | 457 | r | plot00 = function(DR7_viability,
data ,
unique = 'MGI id',
xlab = 'Viability',
...) {
pM = DR7_viability$Viability[DR7_viability[, unique] %in% data$inputdata[data$duplicates,][, unique]]
plot(
pM,
xlab = xlab,
ylab = 'Frequency of [removed] duplicates with known label',
#main = 'DMDD',
col = unique(pM),
sub = paste0('Total = ', length(pM)),
...
)
} |
# Dplyr example
# Basic data manipulation in R, following:
#   http://www3.nd.edu/~steve/computing_with_data/24_dplyr/dplyr.html
#   https://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html
library(dplyr)
library(hflights)
# NOTE(review): tbl_df() is deprecated in current dplyr — as_tibble() is the
# modern equivalent.
hflights_df <- tbl_df(hflights)
hflights_df
# filter: keep January AA flights, then January-or-February AA flights
f_df <- filter(hflights_df, Month ==1, UniqueCarrier == "AA")
f_df
f_df <- filter(hflights_df, Month ==1 | Month == 2, UniqueCarrier == "AA")
glimpse(f_df)
summary(f_df)
glimpse(f_df)
# slice selects rows by position (tibble equivalent of f_df[3:5, ])
slice(f_df,1:3)
f_df[3:5,]
slice(f_df,3:5)
arrange(f_df,desc(ArrTime))
names(f_df)
# Nested select -> arrange -> slice (reads inside-out)
slice(
  arrange(
    select(f_df,UniqueCarrier,FlightNum,Year:DayOfWeek)
    ,desc(FlightNum),DayOfWeek)
  ,1:3)
names(f_df)[order(names(f_df))]
# mutate adds a derived speed column, then show the 5 fastest flights
slice(arrange(select(mutate(f_df,FlightNum,speed= Distance/AirTime),FlightNum,Distance,AirTime,speed),desc(speed)),1:5)
names(f_df)[order(names(f_df))]
# Random sampling: fixed count vs. fraction of rows
sample_n(f_df,20)
sample_frac(f_df,0.05)
distinct(select(f_df,TailNum))
# Group by aircraft tail number for the per-plane summaries below
by_TailNum <- group_by(f_df,TailNum)
names(by_TailNum)
glimpse(by_TailNum)
filter(by_TailNum, TailNum == "N251AA")
# Per-tail-number flight count and mean distance. Fixed: the original spelled
# the argument "na.arm", which is not a prefix of mean()'s "na.rm" and so fell
# silently into ..., meaning NA distances were never removed.
summarise(by_TailNum,count = n(),distance = mean(Distance, na.rm = TRUE ))
# Roll up flight counts day -> month -> year. Each summarise() call peels
# off one grouping level, so the second call aggregates over days within a
# month, and the third over months within a year.
daily <- group_by(f_df, Year, Month, DayofMonth)
per_day <-summarise(daily, DailyFlightCount = n())
per_day
per_month <- summarise(per_day, MonthlyFlightCount = sum(DailyFlightCount))
per_month
per_year <- summarise(per_month,YearlyCount = sum(MonthlyFlightCount))
per_year
names(f_df)
# Same roll-up expressed as a single pipe chain
f_df %>%
  group_by(Year, Month, DayofMonth) %>%
  summarise(dailyFlightCount = n()) %>%
  summarise(monthlyFlightCount=sum(dailyFlightCount)) %>%
  summarise(YearlyFlightCount=sum(monthlyFlightCount) )
##########
# Second walkthrough, following:
# https://www.datacamp.com/courses/dplyr-data-manipulation-r-tutorial
rm(list=ls())   # script-style reset between tutorial sections
library(dplyr)
library(hflights)
str(hflights)
hflights <- tbl_df(hflights)
hflights
glimpse(hflights)
# Recode the CancellationCode letters into readable labels via a named
# lookup vector.
# https://campus.datacamp.com/courses/dplyr-data-manipulation-r-tutorial/chapter-one-introduction-to-dplyr-and-tbls?ex=5
unique(hflights$CancellationCode)
# NOTE(review): "FFA" is likely a typo for "FAA" — confirm before relying on
# the labels downstream.
CanCodelookup <- c("A" = "carrier", "B" = "weather", "C" = "FFA", "D" = "security", "E" = "not cancelled")
CanCodelookup
hflights$CancellationCode <- CanCodelookup[hflights$CancellationCode]
unique(hflights$CancellationCode)
glimpse(hflights)
select(hflights, ActualElapsedTime,AirTime, ArrDelay,DepDelay)
# select() helper examples on iris:
# http://www.rdocumentation.org/packages/dplyr/functions/select
iris <- tbl_df(iris)
iris
select(iris, Petal.Length )
select (iris, starts_with ("Petal"))
select (iris, ends_with ("Width"))
select (iris, contains("etal"))
select (iris, matches(".t.", ignore.case = TRUE))
petal_v <- c("Petal.Length","Petal.Width")
petal_v
select (iris, one_of(petal_v))       # select by a character vector of names
select(iris, -starts_with("Petal"))  # negative selection drops columns
# select()/rename() on an ad-hoc 10x10 random data frame
df <- as.data.frame(matrix(runif(100),nrow = 10))
df <- tbl_df(df)
df
glimpse(df)
select(df, V4:V9)
select(df, num_range("V",4:9))
select(df,Version_1=V1)   # select keeps only the renamed column ...
df
rename(df,Version_1 = V1) # ... while rename keeps all columns
glimpse(mtcars)
mtcars <-tbl_df(mtcars)
arrange(filter(mtcars, cyl ==6 | cyl == 4, hp >=80),desc(disp),mpg)
summarise(group_by(mtcars,gear),numofVechiles=n(),mean(mpg))
rm(list=ls())
######
# Weekend long-haul flights with short taxi times, as a pipe chain
hflights <- tbl_df(hflights)
glimpse(hflights)
hflights %>%
  filter(DayOfWeek %in% c(6,7)) %>%
  filter(Distance >1000) %>%
  filter((TaxiIn + TaxiOut) <15)
nrow(hflights)
###
| /Dplyr_Example.R | no_license | shekhar270779/R_Examples | R | false | false | 3,237 | r | #Dplyr Exmaple
#dplyr example
#Basic manipulation in R
#http://www3.nd.edu/~steve/computing_with_data/24_dplyr/dplyr.html
#https://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html
library(dplyr)
library(hflights)
hflights_df <- tbl_df(hflights)
hflights_df
#filter
f_df <- filter(hflights_df, Month ==1, UniqueCarrier == "AA")
f_df
f_df <- filter(hflights_df, Month ==1 | Month == 2, UniqueCarrier == "AA")
glimpse(f_df)
summary(f_df)
glimpse(f_df)
slice(f_df,1:3)
f_df[3:5,]
slice(f_df,3:5)
arrange(f_df,desc(ArrTime))
names(f_df)
slice(
arrange(
select(f_df,UniqueCarrier,FlightNum,Year:DayOfWeek)
,desc(FlightNum),DayOfWeek)
,1:3)
names(f_df)[order(names(f_df))]
slice(arrange(select(mutate(f_df,FlightNum,speed= Distance/AirTime),FlightNum,Distance,AirTime,speed),desc(speed)),1:5)
names(f_df)[order(names(f_df))]
sample_n(f_df,20)
sample_frac(f_df,0.05)
distinct(select(f_df,TailNum))
by_TailNum <- group_by(f_df,TailNum)
names(by_TailNum)
glimpse(by_TailNum)
filter(by_TailNum, TailNum == "N251AA")
summarise(by_TailNum,count = n(),distance = mean(Distance, na.arm = TRUE ))
daily <- group_by(f_df, Year, Month, DayofMonth)
per_day <-summarise(daily, DailyFlightCount = n())
per_day
per_month <- summarise(per_day, MonthlyFlightCount = sum(DailyFlightCount))
per_month
per_year <- summarise(per_month,YearlyCount = sum(MonthlyFlightCount))
per_year
names(f_df)
f_df %>%
group_by(Year, Month, DayofMonth) %>%
summarise(dailyFlightCount = n()) %>%
summarise(monthlyFlightCount=sum(dailyFlightCount)) %>%
summarise(YearlyFlightCount=sum(monthlyFlightCount) )
##########
#https://www.datacamp.com/courses/dplyr-data-manipulation-r-tutorial
rm(list=ls())
library(dplyr)
library(hflights)
str(hflights)
hflights <- tbl_df(hflights)
hflights
glimpse(hflights)
#https://campus.datacamp.com/courses/dplyr-data-manipulation-r-tutorial/chapter-one-introduction-to-dplyr-and-tbls?ex=5
unique(hflights$CancellationCode)
CanCodelookup <- c("A" = "carrier", "B" = "weather", "C" = "FFA", "D" = "security", "E" = "not cancelled")
CanCodelookup
hflights$CancellationCode <- CanCodelookup[hflights$CancellationCode]
unique(hflights$CancellationCode)
glimpse(hflights)
select(hflights, ActualElapsedTime,AirTime, ArrDelay,DepDelay)
#dplyr detailes examples
#http://www.rdocumentation.org/packages/dplyr/functions/select
iris <- tbl_df(iris)
iris
select(iris, Petal.Length )
select (iris, starts_with ("Petal"))
select (iris, ends_with ("Width"))
select (iris, contains("etal"))
select (iris, matches(".t.", ignore.case = TRUE))
petal_v <- c("Petal.Length","Petal.Width")
petal_v
select (iris, one_of(petal_v))
select(iris, -starts_with("Petal"))
df <- as.data.frame(matrix(runif(100),nrow = 10))
df <- tbl_df(df)
df
glimpse(df)
select(df, V4:V9)
select(df, num_range("V",4:9))
select(df,Version_1=V1)
df
rename(df,Version_1 = V1)
glimpse(mtcars)
mtcars <-tbl_df(mtcars)
arrange(filter(mtcars, cyl ==6 | cyl == 4, hp >=80),desc(disp),mpg)
summarise(group_by(mtcars,gear),numofVechiles=n(),mean(mpg))
rm(list=ls())
######
hflights <- tbl_df(hflights)
glimpse(hflights)
hflights %>%
filter(DayOfWeek %in% c(6,7)) %>%
filter(Distance >1000) %>%
filter((TaxiIn + TaxiOut) <15)
nrow(hflights)
###
|
# Multiple-cluster simulations, one (censored) covariate.
# Loads the FlowCAP_4 differential-abundance results, fits (or reloads) a
# Dirichlet-multinomial model of the cluster counts, and reads the Weibull
# parameters used to simulate the censored covariate.
suppressPackageStartupMessages({
  library(diffcyt)
  library(magrittr)
  library(purrr)
  library(SummarizedExperiment)
})
devtools::load_all("simulationStudyCensoredDiffcyt")
# Keep BLAS single-threaded; parallelism happens at the simulation level
RhpcBLASctl::blas_set_num_threads(1)
dir_save <-"./"
transform_fn <- "identity"
clustering <- ""            # "" = default meta20 clustering; "som100" = SOM 10x10
covariates_type <- "params_FlowCAP"
cat("clustering to use: \t",clustering,"\n")
if (clustering == "som100"){
  da_res <- readRDS("../FlowCAP_4/results/da_res1_som100_cc_complete.rds")
} else {
  da_res <- readRDS("../FlowCAP_4/results/da_res1_cc_complete.rds")
}
# Reuse a cached Dirichlet-multinomial fit if present; otherwise fit and cache
DM_fit_file_name <- ifelse(clustering=="som100","DM_fit_FlowCAP_som100.rds","DM_fit_FlowCAP.rds")
if (DM_fit_file_name %in% list.files(dir_save)){
  dirfit <- readRDS(paste0(dir_save,DM_fit_file_name))
} else {
  cat("fit dirichlet multinomial on FlowCAP_4 data\n")
  dirfit <- dirmult::dirmult(t(SummarizedExperiment::assay(da_res$d_counts)))
  saveRDS(dirfit, paste0(dir_save,DM_fit_file_name))
}
if (covariates_type == "params_FlowCAP"){
  # Parameters of the Weibull fit (covariate and censoring distributions).
  # shape_c1 and shape_c2 both come from the censoring fit's first estimate.
  weifit_ls <- readRDS(paste0(dir_save,"weibull_fits_FlowCAP.rds"))
  weibull_params <- list(shape_x = weifit_ls$weifit$estimate[1],
                         shape_c1 = weifit_ls$weifit_cens$estimate[1],
                         shape_c2 = weifit_ls$weifit_cens$estimate[1],
                         scale_x = weifit_ls$weifit$estimate[2])
} else {
  weibull_params <- NULL
}
# Derive the output-directory name from clustering + covariate settings
clustering <- ifelse(covariates_type=="",clustering,paste0(clustering,"_",covariates_type))
clu_name <- ifelse(clustering %in% c("","_"),"meta20",stringr::str_replace(clustering,"^_+",""))
clu_name <- ifelse(clustering == "_params_FlowCAP","meta20_params_FlowCAP",clu_name)
# Run the simulation study once per alpha scaling factor. Larger factors
# scale up the Dirichlet concentration parameters (less overdispersion).
for(alpha_factor in c(1,2,5,10)){
  # One output directory per clustering, with an af_<factor> subdirectory
  plot_dir_clustering <- paste0(dir_save,"/",clu_name,"/")
  if (!dir.exists(plot_dir_clustering)){
    dir.create(plot_dir_clustering)
  }
  plot_dir <- paste0(dir_save,"/",clu_name,"/af_",alpha_factor,"/")
  if (!dir.exists(plot_dir)){
    dir.create(plot_dir)
  }
  alphas <- dirfit$gamma * alpha_factor
  # Per-sample totals from the observed FlowCAP counts
  sizes <- apply(t(SummarizedExperiment::assay(da_res$d_counts)), 1, sum)
  cov_ls <- lapply(seq_along(sizes), function(k) cov_matrix_dirichlet_multinomial(alphas,sizes[k]))
  names(cov_ls) <- paste0("s_",round(sizes))
  # Save the simulation parameters alongside the results
  saveRDS(list(alphas=alphas,sizes=sizes,cov=cov_ls), paste0(dir_save,"/",clu_name,"/af_",alpha_factor,"/res_mclu_params_af_",alpha_factor,"_",clustering,"_params_FlowCAP.rds"))
  cat("start simulations for alpha_factor:\t",alpha_factor,"\n")
  out <- run_simulations_wrapper_multicluster(reps=10,
                                              nr_cores = 10,
                                              n_obs_c = c(50, 100, 200, 400),
                                              nr_diff = 6,
                                              mi_reps = 50,
                                              alpha = alphas,
                                              sizes = sizes,
                                              slope=c(list(rep(0.9,3))),
                                              group=NA,
                                              group_slope=NA,
                                              diff_cluster = list(c(7,5),c(20,4),c(11,13)),
                                              enforce_sum_alpha = FALSE,
                                              censoring_rates = c(0.3,0.5,0.7),
                                              weibull_params = weibull_params,
                                              imputation_types = c("km","rs","mrl","cc","pmm","km_exp"),
                                              formulas = list(glmer = list(formula(Y~Surv(X,I) + (1|R)))),
                                              verbose = TRUE, seed = 123,
                                              transform_fn=c(transform_fn))
  saveRDS(out, paste0(dir_save,"/",clu_name,"/af_",alpha_factor,"/res_mclu_output_af_",alpha_factor,"_",clu_name,"_complete_",transform_fn,".rds"))
}
| /Simulations_based_on_real_data/run_simulations_testDA_multicluster.R | no_license | retogerber/censcyt_paper_scripts | R | false | false | 3,993 | r | # multiple cluster simulations, one (censored) covariate
suppressPackageStartupMessages({
library(diffcyt)
library(magrittr)
library(purrr)
library(SummarizedExperiment)
})
devtools::load_all("simulationStudyCensoredDiffcyt")
RhpcBLASctl::blas_set_num_threads(1)
dir_save <-"./"
transform_fn <- "identity"
clustering <- ""
covariates_type <- "params_FlowCAP"
cat("clustering to use: \t",clustering,"\n")
if (clustering == "som100"){
da_res <- readRDS("../FlowCAP_4/results/da_res1_som100_cc_complete.rds")
} else {
da_res <- readRDS("../FlowCAP_4/results/da_res1_cc_complete.rds")
}
DM_fit_file_name <- ifelse(clustering=="som100","DM_fit_FlowCAP_som100.rds","DM_fit_FlowCAP.rds")
if (DM_fit_file_name %in% list.files(dir_save)){
dirfit <- readRDS(paste0(dir_save,DM_fit_file_name))
} else {
cat("fit dirichlet multinomial on FlowCAP_4 data\n")
dirfit <- dirmult::dirmult(t(SummarizedExperiment::assay(da_res$d_counts)))
saveRDS(dirfit, paste0(dir_save,DM_fit_file_name))
}
if (covariates_type == "params_FlowCAP"){
# parameters of weibull fit
weifit_ls <- readRDS(paste0(dir_save,"weibull_fits_FlowCAP.rds"))
weibull_params <- list(shape_x = weifit_ls$weifit$estimate[1],
shape_c1 = weifit_ls$weifit_cens$estimate[1],
shape_c2 = weifit_ls$weifit_cens$estimate[1],
scale_x = weifit_ls$weifit$estimate[2])
} else {
weibull_params <- NULL
}
clustering <- ifelse(covariates_type=="",clustering,paste0(clustering,"_",covariates_type))
clu_name <- ifelse(clustering %in% c("","_"),"meta20",stringr::str_replace(clustering,"^_+",""))
clu_name <- ifelse(clustering == "_params_FlowCAP","meta20_params_FlowCAP",clu_name)
# Sweep over concentration scaling factors; larger alpha_factor means less
# overdispersion in the simulated Dirichlet-multinomial cluster proportions.
for(alpha_factor in c(1,2,5,10)){
# Ensure the per-clustering and per-alpha-factor output directories exist.
plot_dir_clustering <- paste0(dir_save,"/",clu_name,"/")
if (!dir.exists(plot_dir_clustering)){
dir.create(plot_dir_clustering)
}
plot_dir <- paste0(dir_save,"/",clu_name,"/af_",alpha_factor,"/")
if (!dir.exists(plot_dir)){
dir.create(plot_dir)
}
# Scale the fitted Dirichlet-multinomial concentration parameters.
alphas <- dirfit$gamma * alpha_factor
# Total cell count per sample (column sums of the cluster-by-sample counts).
sizes <- apply(t(SummarizedExperiment::assay(da_res$d_counts)), 1, sum)
# DM covariance matrix for each sample size.
# NOTE(review): cov_matrix_dirichlet_multinomial() is a project helper
# (simulationStudyCensoredDiffcyt); its definition is not visible here.
cov_ls <- lapply(seq_along(sizes), function(k) cov_matrix_dirichlet_multinomial(alphas,sizes[k]))
names(cov_ls) <- paste0("s_",round(sizes))
# Persist the simulation parameters before running.
saveRDS(list(alphas=alphas,sizes=sizes,cov=cov_ls), paste0(dir_save,"/",clu_name,"/af_",alpha_factor,"/res_mclu_params_af_",alpha_factor,"_",clustering,"_params_FlowCAP.rds"))
cat("start simulations for alpha_factor:\t",alpha_factor,"\n")
# Run the multicluster differential-abundance simulations (project wrapper);
# arguments cover replication counts, sample sizes, censoring rates, and
# the imputation methods to compare.
out <- run_simulations_wrapper_multicluster(reps=10,
nr_cores = 10,
n_obs_c = c(50, 100, 200, 400),
nr_diff = 6,
mi_reps = 50,
alpha = alphas,
sizes = sizes,
slope=c(list(rep(0.9,3))),
group=NA,
group_slope=NA,
diff_cluster = list(c(7,5),c(20,4),c(11,13)),
enforce_sum_alpha = FALSE,
censoring_rates = c(0.3,0.5,0.7),
weibull_params = weibull_params,
imputation_types = c("km","rs","mrl","cc","pmm","km_exp"),
formulas = list(glmer = list(formula(Y~Surv(X,I) + (1|R)))),
verbose = TRUE, seed = 123,
transform_fn=c(transform_fn))
# Save the complete simulation output for this alpha_factor.
saveRDS(out, paste0(dir_save,"/",clu_name,"/af_",alpha_factor,"/res_mclu_output_af_",alpha_factor,"_",clu_name,"_complete_",transform_fn,".rds"))
}
|
\name{con.rss}
\alias{con.rss}
\title{Selecting ranked set sample with a concomitant variable}
\usage{
con.rss(X,Y,m,r=1,sets=FALSE,concomitant=FALSE)
}
\description{
The \code{\link{con.rss}} function samples from a target population by using ranked set sampling method. Ranking procedure of X is done by using concomitant variable Y.
}
\arguments{
\item{X}{A vector of interested variable from target population}
\item{Y}{A vector of concomitant variable from target population}
\item{m}{Size of units in each set}
\item{r}{Number of cycles. (Default by = 1)}
\item{sets}{logical; if TRUE, ranked set sample is given with ranked sets (see \code{\link{rankedsets}})}
\item{concomitant}{logical; if TRUE, ranked set sample of concomitant variable is given }
}
\details{
X and Y must be vectors and also they should be in same length.
}
\value{
\item{corr.coef}{the correlation coefficient between X and Y}
\item{var.of.interest}{the sets of X, which are ranked by Y}
\item{concomitant.var.}{the ranked sets of Y}
\item{sample.x}{the obtained ranked set sample of X}
\item{sample.y}{the obtained ranked set sample of Y}
}
\references{
McIntyre, G. A. (1952). A method for unbiased selective sampling, using ranked sets. Australian Journal of Agricultural Research, 3(4), 385-390.
Lynne Stokes, S. (1977). Ranked set sampling with concomitant variables. Communications in Statistics-Theory and Methods, 6(12), 1207-1211.
Chen, Z., Bai, Z., & Sinha, B. (2003). Ranked set sampling: theory and applications (Vol. 176). Springer Science & Business Media.
}
\seealso{
\code{\link{rss}}
}
\examples{
library("LearnBayes")
mu=c(1,12,2)
Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
x <- rmnorm(10000, mu, Sigma)
xx=as.numeric(x[,1])
xy=as.numeric(x[,3])
con.rss(xx, xy, m=3, r=4, sets=TRUE, concomitant=TRUE)
}
| /man/conrss.rd | no_license | cran/RSSampling | R | false | false | 1,891 | rd | \name{con.rss}
\alias{con.rss}
\title{Selecting ranked set sample with a concomitant variable}
\usage{
con.rss(X,Y,m,r=1,sets=FALSE,concomitant=FALSE)
}
\description{
The \code{\link{con.rss}} function samples from a target population by using ranked set sampling method. Ranking procedure of X is done by using concomitant variable Y.
}
\arguments{
\item{X}{A vector of interested variable from target population}
\item{Y}{A vector of concomitant variable from target population}
\item{m}{Size of units in each set}
\item{r}{Number of cycles. (Default by = 1)}
\item{sets}{logical; if TRUE, ranked set sample is given with ranked sets (see \code{\link{rankedsets}})}
\item{concomitant}{logical; if TRUE, ranked set sample of concomitant variable is given }
}
\details{
X and Y must be vectors and also they should be in same length.
}
\value{
\item{corr.coef}{the correlation coefficient between X and Y}
\item{var.of.interest}{the sets of X, which are ranked by Y}
\item{concomitant.var.}{the ranked sets of Y}
\item{sample.x}{the obtained ranked set sample of X}
\item{sample.y}{the obtained ranked set sample of Y}
}
\references{
McIntyre, G. A. (1952). A method for unbiased selective sampling, using ranked sets. Australian Journal of Agricultural Research, 3(4), 385-390.
Lynne Stokes, S. (1977). Ranked set sampling with concomitant variables. Communications in Statistics-Theory and Methods, 6(12), 1207-1211.
Chen, Z., Bai, Z., & Sinha, B. (2003). Ranked set sampling: theory and applications (Vol. 176). Springer Science & Business Media.
}
\seealso{
\code{\link{rss}}
}
\examples{
library("LearnBayes")
mu=c(1,12,2)
Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
x <- rmnorm(10000, mu, Sigma)
xx=as.numeric(x[,1])
xy=as.numeric(x[,3])
con.rss(xx, xy, m=3, r=4, sets=TRUE, concomitant=TRUE)
}
|
#========================================================================
#R - ASSIGNMENT 1
#=========================================================================
#Submitted by - Kaviya U C - D20018
#===========================================================================
# Feature Importance using T tests and Chi sq tests
#=============================================================================
setwd('C:/Users/KAVIYA/OneDrive/Desktop/Term 2/I2R')
fram <- read.csv("framingham.csv")
View(fram)
str(fram)
#Converting categorical variables to factor
fram$male = as.factor(fram$male)
fram$education = as.factor(fram$education)
fram$currentSmoker= as.factor(fram$currentSmoker)
fram$BPMeds=as.factor(fram$BPMeds)
fram$prevalentStroke=as.factor(fram$prevalentStroke)
fram$prevalentHyp = as.factor(fram$prevalentHyp)
fram$diabetes = as.factor(fram$diabetes)
#Target variable - Risk of Coronary heart disease in 10 years
fram$TenYearCHD = as.factor(fram$TenYearCHD)
str(fram)
#===================================================================
# T- test for numerical variables
#====================================================================
?t.test()
#Function for T - test
# Run a Welch two-sample t-test of each numeric/integer column named in
# `var` against the two-level factor `target`.
# Returns a data frame of p-values with the tested variable names as row
# names, instead of only opening an interactive View() window (View()
# errors in non-interactive sessions and the function returned nothing
# usable). Non-numeric columns in `var` are silently skipped.
t_test = function(data, var = names(data), target)
{
  # The target must be categorical (a factor).
  if (!is.factor(data[, target]))
  {
    stop("Enter a categorical variable as target")
  }
  p_value = c() # named vector of p-values, one entry per tested variable
  for (i in var)
  {
    if (is.numeric(data[, i]) || is.integer(data[, i]))
    {
      t = t.test(data[, i] ~ data[, target])
      p_value[i] = t$p.value
    }
  }
  # (Removed the unused, misspelled `signifcant` accumulator.)
  t_result = as.data.frame(p_value)
  t_result
}
#Function call
t_test(fram,var=c("age","cigsPerDay","totChol","sysBP","diaBP","BMI","heartRate","glucose"),"TenYearCHD")
#===========================================================================
#Chi - squared test for categorical variables
#=====================================================================
?chisq.test()
# Run a chi-squared test of independence between each factor column named
# in `var` and the factor `target`. P-values are obtained by Monte-Carlo
# simulation (simulate.p.value = TRUE), so they depend on the RNG state.
# Returns a data frame of p-values with the tested variable names as row
# names, instead of only opening an interactive View() window (View()
# errors in non-interactive sessions and the function returned nothing
# usable). Non-factor columns in `var` are silently skipped.
chisq_test = function(data, var = names(data), target)
{
  # The target must be categorical (a factor).
  if (!is.factor(data[, target]))
  {
    stop("Enter a categorical variable as target")
  }
  p_value = c() # named vector of p-values, one entry per tested variable
  for (i in var)
  {
    if (is.factor(data[, i]))
    {
      chi = chisq.test(data[, i], data[, target], simulate.p.value = TRUE)
      p_value[i] = chi$p.value
    }
  }
  chi_result = as.data.frame(p_value)
  chi_result
}
#Function call
chisq_test(fram,var=c("male","education","currentSmoker","BPMeds","prevalentStroke","prevalentHyp","diabetes"),"TenYearCHD")
| /R Assignment 1.R | no_license | Kaviya-U-C/R-Programming | R | false | false | 2,633 | r | #========================================================================
#R - ASSIGNMENT 1
#=========================================================================
#Submitted by - Kaviya U C - D20018
#===========================================================================
# Feature Importance using T tests and Chi sq tests
#=============================================================================
setwd('C:/Users/KAVIYA/OneDrive/Desktop/Term 2/I2R')
fram <- read.csv("framingham.csv")
View(fram)
str(fram)
#Converting categorical variables to factor
fram$male = as.factor(fram$male)
fram$education = as.factor(fram$education)
fram$currentSmoker= as.factor(fram$currentSmoker)
fram$BPMeds=as.factor(fram$BPMeds)
fram$prevalentStroke=as.factor(fram$prevalentStroke)
fram$prevalentHyp = as.factor(fram$prevalentHyp)
fram$diabetes = as.factor(fram$diabetes)
#Target variable - Risk of Coronary heart disease in 10 years
fram$TenYearCHD = as.factor(fram$TenYearCHD)
str(fram)
#===================================================================
# T- test for numerical variables
#====================================================================
?t.test()
#Function for T - test
# Run a Welch two-sample t-test of each numeric/integer column named in
# `var` against the two-level factor `target`; shows the p-values with
# View(). NOTE(review): View() is interactive-only and the function has no
# usable return value; `signifcant` below is an unused, misspelled variable.
t_test = function(data,var=names(data),target)
{
#Check if target variable is a factor
if(!is.factor(data[,target]))
{
stop("Enter a categorical variable as target")
}
p_value=c()#Initialize a vector
signifcant = c()
# Skip non-numeric columns; test each numeric one against the target.
for(i in var)
{
if(is.numeric(data[,i]) || (is.integer(data[,i])))
{
t= t.test(data[,i] ~ data[,target])
p_value[i]= t$p.value
}
}
# Collect the named p-values into a data frame and display it.
t_result= as.data.frame(p_value)
View(t_result)
}
#Function call
t_test(fram,var=c("age","cigsPerDay","totChol","sysBP","diaBP","BMI","heartRate","glucose"),"TenYearCHD")
#===========================================================================
#Chi - squared test for categorical variables
#=====================================================================
?chisq.test()
# Chi-squared test of independence between each factor column in `var`
# and the factor `target`; p-values via Monte-Carlo simulation. Shows the
# result with View(). NOTE(review): View() is interactive-only and the
# function has no usable return value.
chisq_test = function(data,var=names(data),target)
{
if(!is.factor(data[,target]))
{
stop("Enter a categorical variable as target")
}
p_value=c()#Initialize a vector
# Skip non-factor columns; test each factor one against the target.
for(i in var)
{
if(is.factor(data[,i]))
{
chi=chisq.test(data[,i], data[,target],simulate.p.value=TRUE)
p_value[i]= chi$p.value
}
}
# Collect the named p-values into a data frame and display it.
chi_result = as.data.frame(p_value)
View(chi_result)
}
#Function call
chisq_test(fram,var=c("male","education","currentSmoker","BPMeds","prevalentStroke","prevalentHyp","diabetes"),"TenYearCHD")
|
##########################
# backward stepwise selection
##########################
# regsubsets() comes from the leaps package; load it so this section is
# self-contained when run on its own (it was missing before).
library(leaps)
### in the regsubsets function we must set the method parameter to "backward"
fit <- regsubsets(burglPerPop ~ ., crim2, nvmax = 16, method = "backward")
summ <- summary(fit)
summ
### get the adjusted R squared and pick the model size that maximizes it
summ$adjr2
best_size <- which.max(summ$adjr2)
best_size
# Coefficients of the best model by adjusted R squared. (Was hard-coded to
# 12, which silently goes stale if the data or candidate set change.)
coef(fit, best_size)
######################################
| /code/ml-r-files/lect006-stepwise-backward.R | no_license | zhou100/ML_prediction | R | false | false | 414 | r |
##########################
# backward stepwise selection
##########################
### in the regsubsets function we must set the method parameter to "backward"
fit <- regsubsets(burglPerPop~., crim2, nvmax = 16, method="backward")
summ <- summary(fit)
summ
### get the adjusted R squared
summ$adjr2
which.max(summ$adjr2)
coef(fit, 12)
######################################
|
## Caching the inverse of a vector
## makeCacheMatrix creates a vector which is really a list which sets and gets the value of a vector including seting and getting
##the value of the invers
# Create a special "matrix" wrapper that can cache its inverse. Returns a
# list of four closures that share state (x, s) via the enclosing
# environment.
makeCacheMatrix <- function(x = matrix()) {
# Cached inverse; NULL means "not computed yet".
s<-NULL
# Replace the stored matrix and invalidate any cached inverse.
set<-function(y){
x<<-y
s<<-NULL
}
# Return the stored matrix.
get<-function()x
# Store a computed inverse in the cache.
setsolve<-function(solve) s<<-solve
# Return the cached inverse (NULL if not yet computed).
getsolve<-function()s
# Expose the four accessors.
list(set=set,get=get,
setsolve=setsolve,
getsolve=getsolve)
}
## The next function clculates teh inverse of the vector created in the function above after checking to see if the inverse
## has already been calculated.
# Return the inverse of the special "matrix" object `x` created by
# makeCacheMatrix(). On a cache hit a message is emitted and the stored
# inverse is returned; on a miss the inverse is computed with solve(),
# cached on `x`, and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    inv <- solve(x$get(), ...)
    x$setsolve(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | mrozga/ProgrammingAssignment2 | R | false | false | 766 | r | ## Caching the inverse of a vector
## makeCacheMatrix creates a vector which is really a list which sets and gets the value of a vector including seting and getting
##the value of the invers
# Create a special "matrix" wrapper that can cache its inverse. Returns a
# list of four closures that share state (x, s) via the enclosing
# environment.
makeCacheMatrix <- function(x = matrix()) {
# Cached inverse; NULL means "not computed yet".
s<-NULL
# Replace the stored matrix and invalidate any cached inverse.
set<-function(y){
x<<-y
s<<-NULL
}
# Return the stored matrix.
get<-function()x
# Store a computed inverse in the cache.
setsolve<-function(solve) s<<-solve
# Return the cached inverse (NULL if not yet computed).
getsolve<-function()s
# Expose the four accessors.
list(set=set,get=get,
setsolve=setsolve,
getsolve=getsolve)
}
## The next function clculates teh inverse of the vector created in the function above after checking to see if the inverse
## has already been calculated.
# Return the inverse of the special "matrix" `x` from makeCacheMatrix(),
# using the cached value when available.
cacheSolve <- function(x, ...) {
# Cache hit: announce and return the stored inverse.
s<-x$getsolve()
if(!is.null(s)){
message("getting cached data")
return(s)
}
# Cache miss: compute the inverse via solve(), store it, return it.
data<-x$get()
s<-solve(data,...)
x$setsolve(s)
s
}
|
## Exploratory Data Analysis Course Project 1
## read data
# Load the full dataset; "?" marks missing values in this file.
# NOTE(review): header=T uses the reassignable alias T; TRUE is safer style.
dataSets <- read.table("household_power_consumption.txt",
sep=";", header=T, na.strings="?")
## data conversion
dataSets$Date <- as.Date(dataSets$Date, format="%d/%m/%Y")
# Combine date and time into a single POSIXct timestamp column.
dateTime <- paste(dataSets$Date, dataSets$Time)
dataSets$DateTime <- as.POSIXct(dateTime)
##dataSets$DateTime <- strptime(dateTime, "%Y-%m-%d %H:%M:%S")
## filter data (only 2007-02-01 to 2007-02-02)
dataSets_F <- dataSets[dataSets$Date >= "2007-02-01" & dataSets$Date <= "2007-02-02",]
## plot: histogram of global active power, written to plot1.png (480x480)
png(filename="plot1.png",width=480,height=480)
hist(dataSets_F$Global_active_power, col="red",
main="Global Active Power",
xlab="Global Active Power (kilowatts)",
ylab="Frequency")
dev.off()
| /plot1.R | no_license | uueixiang/ExData_Plotting1 | R | false | false | 766 | r |
## Exploratory Data Analysis Course Project 1
## read data
dataSets <- read.table("household_power_consumption.txt",
sep=";", header=T, na.strings="?")
## data conversion
dataSets$Date <- as.Date(dataSets$Date, format="%d/%m/%Y")
dateTime <- paste(dataSets$Date, dataSets$Time)
dataSets$DateTime <- as.POSIXct(dateTime)
##dataSets$DateTime <- strptime(dateTime, "%Y-%m-%d %H:%M:%S")
## filter data (only 2007-02-01 to 2007-02-02)
dataSets_F <- dataSets[dataSets$Date >= "2007-02-01" & dataSets$Date <= "2007-02-02",]
## plot
png(filename="plot1.png",width=480,height=480)
hist(dataSets_F$Global_active_power, col="red",
main="Global Active Power",
xlab="Global Active Power (kilowatts)",
ylab="Frequency")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text-cleaning.R
\name{hacky_cleaning}
\alias{hacky_cleaning}
\title{Clean up funky text}
\usage{
hacky_cleaning(text)
}
\arguments{
\item{text}{String to update}
}
\value{
The text with replacement, if needed.
}
\description{
Removes html formatting, converts non-ascii characters to ascii, and does
several replacements:
Replaces the HTML entity for '&' ('amp;') with 'and'. Replaces the
apostrophe entity with a literal apostrophe. Replaces the double-quote
entity with a single quote.
}
| /man/hacky_cleaning.Rd | no_license | BrunoGrandePhD/porTools | R | false | true | 512 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text-cleaning.R
\name{hacky_cleaning}
\alias{hacky_cleaning}
\title{Clean up funky text}
\usage{
hacky_cleaning(text)
}
\arguments{
\item{text}{String to update}
}
\value{
The text with replacement, if needed.
}
\description{
Removes html formatting, converts non-ascii characters to ascii, and does
several replacements:
Replaces the HTML entity for '&' ('amp;') with 'and'. Replaces the
apostrophe entity with a literal apostrophe. Replaces the double-quote
entity with a single quote.
}
|
\name{do}
\alias{do}
\alias{do.tbl_sql}
\title{Apply a function to a tbl}
\usage{
do(.data, .f, ...)
\method{do}{tbl_sql} (.data, .f, ...,
.chunk_size = 10000L)
}
\arguments{
\item{.data}{a tbl}
\item{.f}{a function to apply to each piece. The first
unnamed argument supplied to \code{.f} will be a data
frame.}
\item{...}{other arguments passed on to the function \code{.f}}
\item{.chunk_size}{The size of each chunk to pull into R.
If this number is too big, the process will be slow
because R has to allocate and free a lot of memory. If
it's too small, it will be slow, because of the overhead
of talking to the database.}
}
\description{
This is a general purpose complement to the specialised
manipulation functions \code{\link{filter}},
\code{\link{select}}, \code{\link{mutate}},
\code{\link{summarise}} and \code{\link{arrange}}.
}
\examples{
by_dest <- group_by(hflights, Dest)
do(by_dest, nrow)
# Inefficient version of
group_size(by_dest)
# You can use it to do any arbitrary computation, like fitting a linear
# model. Let's explore how carrier departure delays vary over the course
# of a year
jan <- filter(hflights, Month == 1)
jan <- mutate(jan, date = ISOdate(Year, Month, DayofMonth))
carriers <- group_by(hflights, UniqueCarrier)
group_size(carriers)
mods <- do(carriers, failwith(NULL, lm), formula = ArrDelay ~ date)
sapply(mods, coef)
}
| /man/do.Rd | no_license | Funreason/dplyr | R | false | false | 1,396 | rd | \name{do}
\alias{do}
\alias{do.tbl_sql}
\title{Apply a function to a tbl}
\usage{
do(.data, .f, ...)
\method{do}{tbl_sql} (.data, .f, ...,
.chunk_size = 10000L)
}
\arguments{
\item{.data}{a tbl}
\item{.f}{a function to apply to each piece. The first
unnamed argument supplied to \code{.f} will be a data
frame.}
\item{...}{other arguments passed on to the function ()}
\item{.chunk_size}{The size of each chunk to pull into R.
If this number is too big, the process will be slow
because R has to allocate and free a lot of memory. If
it's too small, it will be slow, because of the overhead
of talking to the database.}
}
\description{
This is a general purpose complement to the specialised
manipulation functions \code{\link{filter}},
\code{\link{select}}, \code{\link{mutate}},
\code{\link{summarise}} and \code{\link{arrange}}.
}
\examples{
by_dest <- group_by(hflights, Dest)
do(by_dest, nrow)
# Inefficient version of
group_size(by_dest)
# You can use it to do any arbitrary computation, like fitting a linear
# model. Let's explore how carrier departure delays vary over the course
# of a year
jan <- filter(hflights, Month == 1)
jan <- mutate(jan, date = ISOdate(Year, Month, DayofMonth))
carriers <- group_by(hflights, UniqueCarrier)
group_size(carriers)
mods <- do(carriers, failwith(NULL, lm), formula = ArrDelay ~ date)
sapply(mods, coef)
}
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/gspinbutton.R
\name{gspinbutton}
\alias{gspinbutton}
\title{Basic spinbutton}
\usage{
gspinbutton(from = 0, to = 100, by = 1, value = from, handler = NULL,
action = NULL, container = NULL, ..., width = NULL, height = NULL,
ext.args = NULL)
}
\arguments{
\item{from}{from value}
\item{to}{to}
\item{by}{by. From to by are same as seq() usage}
\item{value}{initial value}
\item{handler}{optional means (to \code{\link{addHandlerChanged}})
to specify a handler for the default signal. A handler is a
function with signature \code{(h,...)} where \code{h} is a list
with components \code{obj} referring to the object emitting the
signal, \code{action} containing values passed to the
\code{action} argument, and possible other values.}
\item{action}{Passed to handler to parameterize a call}
\item{container}{A parent container. In \pkg{gWidgetsWWW2} a
parent container is not optional (though it can be substituted
with the \code{parent} argument in some circumstances). The parent
specifies the widget heirarchy and the \code{...} argument is used
to pass along arguments to layout the child component in the
parent container. Typically, these are passed to the \code{add}
method of the parent container.}
\item{...}{Used to pass along argument to the parent container's
\code{add} method and possible other arguments to the underlying
reference class constructors.}
\item{width}{width in pixels of component. Sizing in
\pkg{gWidgetsWWW2} is sometimes necessary as the arguments
\code{expand} and \code{fill} are not well implemented.}
\item{height}{height in pixels of the component.}
\item{ext.args}{The contructors of \pkg{gWidgetsWWW2} ultimately
call an Ext constructor. The options passed to the Ext constructor
may be added to or overridden by use of this argument. Values are
passed in as named list components and with values converted into JavaScript
objects by \code{asJSObject}.}
}
\value{
an GSpinbutton reference class instance
}
\description{
Basic spinbutton
}
\examples{
w <- gwindow()
sb <- gstatusbar("Powered by gWidgetsWWW and Rook", cont=w)
sp <- gspinbutton(cont=w)
}
| /man/gspinbutton.Rd | no_license | tokareff/gWidgetsWWW2 | R | false | false | 2,192 | rd | % Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/gspinbutton.R
\name{gspinbutton}
\alias{gspinbutton}
\title{Basic spinbutton}
\usage{
gspinbutton(from = 0, to = 100, by = 1, value = from, handler = NULL,
action = NULL, container = NULL, ..., width = NULL, height = NULL,
ext.args = NULL)
}
\arguments{
\item{from}{from value}
\item{to}{to}
\item{by}{by. From to by are same as seq() usage}
\item{value}{initial value}
\item{handler}{optional means (to \code{\link{addHandlerChanged}})
to specify a handler for the default signal. A handler is a
function with signature \code{(h,...)} where \code{h} is a list
with components \code{obj} referring to the object emitting the
signal, \code{action} containing values passed to the
\code{action} argument, and possible other values.}
\item{action}{Passed to handler to parameterize a call}
\item{container}{A parent container. In \pkg{gWidgetsWWW2} a
parent container is not optional (though it can be substituted
with the \code{parent} argument in some circumstances). The parent
specifies the widget heirarchy and the \code{...} argument is used
to pass along arguments to layout the child component in the
parent container. Typically, these are passed to the \code{add}
method of the parent container.}
\item{...}{Used to pass along argument to the parent container's
\code{add} method and possible other arguments to the underlying
reference class constructors.}
\item{width}{width in pixels of component. Sizing in
\pkg{gWidgetsWWW2} is sometimes necessary as the arguments
\code{expand} and \code{fill} are not well implemented.}
\item{height}{height in pixels of the component.}
\item{ext.args}{The contructors of \pkg{gWidgetsWWW2} ultimately
call an Ext constructor. The options passed to the Ext constructor
may be added to or overridden by use of this argument. Values are
passed in as named list components and with values converted into JavaScript
objects by \code{asJSObject}.}
}
\value{
an GSpinbutton reference class instance
}
\description{
Basic spinbutton
}
\examples{
w <- gwindow()
sb <- gstatusbar("Powered by gWidgetsWWW and Rook", cont=w)
sp <- gspinbutton(cont=w)
}
|
#' R graphs
#' List all possible R graphs based on given SIX nodes
#' @param x The vector representing nodes
#' @return A matrix listing edges of R graphs
#' @examples
#' R(c(1:6))
# Enumerate the edge rows of all R graphs on the given six nodes.
# NOTE(review): relies on package helpers c4() and deToIn(), whose
# definitions are not visible here -- c4() presumably enumerates the C4
# variants on a 4-node subset; confirm before relying on these comments.
R = function(x){
# Exactly six nodes are required.
if(length(x) != 6){
stop('The number of nodes should be SIX!')
}
# All ways of choosing 4 of the 6 nodes (columns of pair4).
pair4 = combn(x, 4)
x = sort(x)
# Edge-row accumulator; starts with a dummy NA row dropped at the end.
# NOTE(review): growing with rbind() in a loop is O(n^2).
R.map = matrix(,, ncol = 6)
for(i in 1: ncol(pair4)){
pair4.temp = pair4[ , i]
# The two nodes NOT in the current 4-subset.
rst = x[!x %in% pair4.temp]
c4.mat = c4(pair4.temp)
# connect rest nodes to c4 graph
nodesCnctC4 = matrix(,,ncol = 2)
for(j in 1:length(pair4.temp)){
temp.vec = c(deToIn(rst[1], pair4.temp[j]), deToIn(rst[2], pair4.temp[j]))
nodesCnctC4 = rbind(nodesCnctC4, temp.vec)
}
# Drop the dummy first row, then repeat the connection rows to pair with
# each of the 3 C4 variants.
nodesCnctC4 = nodesCnctC4[-1, ]
nodesCnctC4 = matrix(rep(nodesCnctC4, 3), ncol = 2, byrow = F)
temp.mat = cbind(matrix(rep(c4.mat, each = 4), ncol = 4, byrow = F), nodesCnctC4)
R.map = rbind(R.map, temp.mat)
}
# Drop the initial dummy NA row before returning.
return(R.map[-1, ])
}
| /R/R.R | no_license | placeboo/subgraph | R | false | false | 1,185 | r | #' R graphs
#' List all possible R graphs based on given SIX nodes
#' @param x The vector representing nodes
#' @return A matrix listing edges of R graphs
#' @examples
#' R(c(1:6))
R = function(x){
if(length(x) != 6){
stop('The number of nodes should be SIX!')
}
pair4 = combn(x, 4)
x = sort(x)
R.map = matrix(,, ncol = 6)
for(i in 1: ncol(pair4)){
pair4.temp = pair4[ , i]
rst = x[!x %in% pair4.temp]
c4.mat = c4(pair4.temp)
# connect rest nodes to c4 graph
nodesCnctC4 = matrix(,,ncol = 2)
for(j in 1:length(pair4.temp)){
temp.vec = c(deToIn(rst[1], pair4.temp[j]), deToIn(rst[2], pair4.temp[j]))
nodesCnctC4 = rbind(nodesCnctC4, temp.vec)
}
nodesCnctC4 = nodesCnctC4[-1, ]
nodesCnctC4 = matrix(rep(nodesCnctC4, 3), ncol = 2, byrow = F)
temp.mat = cbind(matrix(rep(c4.mat, each = 4), ncol = 4, byrow = F), nodesCnctC4)
R.map = rbind(R.map, temp.mat)
}
return(R.map[-1, ])
}
|
# datascience at NIT ------------------------------------------------------
# SALES ANALYSIS ----
# 1.0 Load libraries ----
library(tidyverse)
library(lubridate)
# 2.0 Importing Files ----
# A good convention is to use the csv file name and suffix it with tbl for the data structure tibble
order_items_tbl <- read_csv(file = "DS_business_case_01/00_data/01_e-commerce/01_raw_data/olist_order_items_dataset.csv")
products_tbl <- read_csv(file = "DS_business_case_01/00_data/01_e-commerce/01_raw_data/olist_products_dataset.csv")
orders_tbl <- read_csv(file = "DS_business_case_01/00_data/01_e-commerce/01_raw_data/olist_orders_dataset.csv")
# 3.0 Examining Data ----
# Method 1: Print it in the console
order_items_tbl
products_tbl
orders_tbl
# Method 2: Click on the file in the environment tab
# Method 3: glimpse() function
glimpse(order_items_tbl)
# 4.0 Joining Data ----
# by automatically detecting a common column
left_join(order_items_tbl, products_tbl)
# Chaining commands with the pipe and assigning it to order_items_joined_tbl
order_items_joined_tbl <- order_items_tbl %>%
left_join(orders_tbl) %>%
left_join(products_tbl)
# Examine the results with glimpse()
order_items_joined_tbl %>% glimpse()
# 5.0 Wrangling Data ----
#take a look at 1 column
order_items_joined_tbl$product.category.name
# All actions are chained with the pipe already. You can perform each step separately and use glimpse() or View() to validate your code. Store the result in a variable at the end of the steps.
order_items_wrangled_tbl <- order_items_joined_tbl %>%
# 5.1 Separate product category name in main and sub
separate(col = product.category.name,
into = c("main.category.name", "sub.category.name"),
sep = " - ",
# Setting remove to FALSE to keep the original column
remove = FALSE) %>%
# 5.2 Add the total price (price + freight value)
# Add a column to a tibble that uses a formula-style calculation of other columns
mutate(total.price = price + freight.value) %>%
# 5.3 Optional: Reorganize. Using select to grab or remove unnecessary columns
# 5.3.1 by exact column name
select(-shipping.limit.date, -order.approved.at) %>%
# 5.3.2 by a pattern (we don't need columns that start with "product." or end with ".date")
# You can use the select_helpers to define patterns.
# Type ?ends_with and click on Select helpers in the documentation
select(-starts_with("product.")) %>%
select(-ends_with(".date")) %>%
# 5.3.3 Actually we need the column "product.id". Let's bind it back to the data
bind_cols(order_items_joined_tbl %>% select(product.id)) %>%
# 5.3.4 You can reorder the data by selecting the columns in your desired order.
# You can use select_helpers like contains() or everything()
select(contains("timestamp"), contains(".id"),
main.category.name, sub.category.name, price, freight.value, total.price,
everything()) %>%
# 5.4 Rename columns because we actually wanted underscores instead of the dots
# (one at the time vs. multiple at once)
rename(order_date = order.purchase.timestamp) %>%
set_names(names(.) %>% str_replace_all("\\.", "_"))
# 6.0 Business Insights ----
# 6.1 Sales by Year ----
# Step 1 - Manipulate
revenue_by_year_tbl <- order_items_wrangled_tbl %>%
# Select columns
select(order_date, total_price) %>%
# Add year column
mutate(year = year(order_date)) %>%
# Grouping by year and summarizing sales
group_by(year) %>%
summarize(revenue = sum(total_price)) %>%
# Optional: Add a column that turns the numbers into a currency format (makes it in the plot optically more appealing)
mutate(revenue_text = scales::dollar(revenue))
revenue_by_year_tbl
# Step 2 - Visualize
revenue_by_year_tbl %>%
# Setup canvas with the columns year (x-axis) and revenue (y-axis)
ggplot(aes(x = year, y = revenue)) +
# Geometries
geom_col(fill = "#2DC6D6") + # Use geom_col for a bar plot
geom_label(aes(label = revenue_text)) + # Adding labels to the bars
geom_smooth(method = "lm", se = FALSE) + # Adding a trendline
# Formatting
scale_y_continuous(labels = scales::dollar) + # Change the y-axis
labs(
title = "Revenue by year",
subtitle = "Upward Trend",
x = "", # Override defaults for x and y
y = "Revenue"
)
# 6.2 Sales by Year and Category 2 ----
# Step 1 - Manipulate
revenue_by_year_cat_main_tbl <- order_items_wrangled_tbl %>%
# Select columns and add a year
select(order_date, total_price, main_category_name) %>%
mutate(year = year(order_date)) %>%
# Filter > 1.000.000
group_by(main_category_name) %>%
filter(sum(total_price) > 1000000) %>% # If you run the code up here, R will tell you that we have 6 groups
ungroup() %>%
# Group by and summarize year and main category
group_by(year, main_category_name) %>%
summarise(revenue = sum(total_price)) %>%
ungroup() %>%
# Format $ Text
mutate(revenue_text = scales::dollar(revenue))
revenue_by_year_cat_main_tbl
# Step 2 - Visualize
revenue_by_year_cat_main_tbl %>%
# Set up x, y, fill
ggplot(aes(x = year, y = revenue, fill = main_category_name)) +
# Geometries
geom_col() + # Run up to here to get a stacked bar plot
# Facet
facet_wrap(~ main_category_name) +
# Formatting
scale_y_continuous(labels = scales::dollar) +
labs(
title = "Revenue by year and main category",
subtitle = "Each product category has an upward trend",
fill = "Main category" # Changes the legend name
)
# 7.0 Writing Files ----
# If you want to interact with the filesystem use the fs package
install.packages("fs")
library(fs)
fs::dir_create("DS_business_case_01/00_data/01_e-commerce/04_wrangled_data_student")
# 7.1 Excel ----
install.packages("writexl")
library("writexl")
order_items_wrangled_tbl %>%
write_xlsx("DS_business_case_01/00_data/01_e-commerce/04_wrangled_data_student/order_items.xlsx")
# 7.2 CSV ----
order_items_wrangled_tbl %>%
write_csv("DS_business_case_01/00_data/01_e-commerce/04_wrangled_data_student/order_items.csv")
# 7.3 RDS ----
order_items_wrangled_tbl %>%
write_rds("DS_business_case_01/00_data/01_e-commerce/04_wrangled_data_student/order_items.rds")
| /DS_business_case_01/00_scripts/sales_analysis.R | no_license | olvera10/lab_journal_website | R | false | false | 6,279 | r | # datascience at NIT ------------------------------------------------------
# SALES ANALYSIS ----
# 1.0 Load libraries ----
library(tidyverse)
library(lubridate)
# 2.0 Importing Files ----
# A good convention is to use the csv file name and suffix it with tbl for the data structure tibble
order_items_tbl <- read_csv(file = "DS_business_case_01/00_data/01_e-commerce/01_raw_data/olist_order_items_dataset.csv")
products_tbl <- read_csv(file = "DS_business_case_01/00_data/01_e-commerce/01_raw_data/olist_products_dataset.csv")
orders_tbl <- read_csv(file = "DS_business_case_01/00_data/01_e-commerce/01_raw_data/olist_orders_dataset.csv")
# 3.0 Examining Data ----
# Method 1: Print it in the console
order_items_tbl
products_tbl
orders_tbl
# Method 2: Click on the file in the environment tab
# Method 3: glimpse() function
glimpse(order_items_tbl)
# 4.0 Joining Data ----
# by automatically detecting a common column
left_join(order_items_tbl, products_tbl)
# Chaining commands with the pipe and assigning it to order_items_joined_tbl
order_items_joined_tbl <- order_items_tbl %>%
left_join(orders_tbl) %>%
left_join(products_tbl)
# Examine the results with glimpse()
order_items_joined_tbl %>% glimpse()
# 5.0 Wrangling Data ----
# Take a look at 1 column
order_items_joined_tbl$product.category.name
# All actions are chained with the pipe already. You can perform each step separately and use glimpse() or View() to validate your code. Store the result in a variable at the end of the steps.
# NOTE(review): this pipeline is order-dependent -- the bind_cols() step in
# 5.3.3 relies on the row order/count being unchanged by all prior steps.
order_items_wrangled_tbl <- order_items_joined_tbl %>%
  # 5.1 Separate product category name in main and sub
  # NOTE(review): assumes category names use " - " as the main/sub delimiter;
  # rows without that delimiter get NA in sub.category.name -- verify with the data.
  separate(col = product.category.name,
           into = c("main.category.name", "sub.category.name"),
           sep = " - ",
           # Setting remove to FALSE to keep the original column
           remove = FALSE) %>%
  # 5.2 Add the total price (price + freight value)
  # Add a column to a tibble that uses a formula-style calculation of other columns
  mutate(total.price = price + freight.value) %>%
  # 5.3 Optional: Reorganize. Using select to grab or remove unnecessary columns
  # 5.3.1 by exact column name
  select(-shipping.limit.date, -order.approved.at) %>%
  # 5.3.2 by a pattern (we don't need columns that start with "product." or end with ".date")
  # You can use the select_helpers to define patterns.
  # Type ?ends_with and click on Select helpers in the documentation
  select(-starts_with("product.")) %>%
  select(-ends_with(".date")) %>%
  # 5.3.3 Actually we need the column "product.id". Let's bind it back to the data
  # (positional column bind: only valid because no rows were added/removed above)
  bind_cols(order_items_joined_tbl %>% select(product.id)) %>%
  # 5.3.4 You can reorder the data by selecting the columns in your desired order.
  # You can use select_helpers like contains() or everything()
  select(contains("timestamp"), contains(".id"),
         main.category.name, sub.category.name, price, freight.value, total.price,
         everything()) %>%
  # 5.4 Rename columns because we actually wanted underscores instead of the dots
  # (one at the time vs. multiple at once)
  rename(order_date = order.purchase.timestamp) %>%
  # The "." placeholder requires the magrittr pipe (%>%); it would not work
  # with the native |> pipe. "\\." escapes the dot so it is matched literally.
  set_names(names(.) %>% str_replace_all("\\.", "_"))
# 6.0 Business Insights ----
# 6.1 Sales by Year ----
# Step 1 - Manipulate
revenue_by_year_tbl <- order_items_wrangled_tbl %>%
  # Select columns
  select(order_date, total_price) %>%
  # Add year column
  mutate(year = year(order_date)) %>%
  # Grouping by year and summarizing sales
  # (summarize() drops the single grouping level, so the result is ungrouped)
  # NOTE(review): sum() has no na.rm here -- revenue becomes NA for a year
  # containing any NA total_price; confirm the data is complete.
  group_by(year) %>%
  summarize(revenue = sum(total_price)) %>%
  # Optional: Add a column that turns the numbers into a currency format (makes it in the plot optically more appealing)
  mutate(revenue_text = scales::dollar(revenue))
revenue_by_year_tbl
# Step 2 - Visualize
# (layer order matters: labels and trendline are drawn on top of the bars)
revenue_by_year_tbl %>%
  # Setup canvas with the columns year (x-axis) and revenue (y-axis)
  ggplot(aes(x = year, y = revenue)) +
  # Geometries
  geom_col(fill = "#2DC6D6") + # Use geom_col for a bar plot
  geom_label(aes(label = revenue_text)) + # Adding labels to the bars
  geom_smooth(method = "lm", se = FALSE) + # Adding a trendline
  # Formatting
  scale_y_continuous(labels = scales::dollar) + # Change the y-axis
  labs(
    title = "Revenue by year",
    subtitle = "Upward Trend",
    x = "", # Override defaults for x and y
    y = "Revenue"
  )
# 6.2 Sales by Year and Category 2 ----
# Step 1 - Manipulate
revenue_by_year_cat_main_tbl <- order_items_wrangled_tbl %>%
  # Select columns and add a year
  select(order_date, total_price, main_category_name) %>%
  mutate(year = year(order_date)) %>%
  # Filter > 1.000.000
  # (grouped filter keeps all rows of every category whose TOTAL revenue
  # exceeds 1,000,000 -- it does not filter individual rows)
  group_by(main_category_name) %>%
  filter(sum(total_price) > 1000000) %>% # If you run the code up here, R will tell you that we have 6 groups
  ungroup() %>%
  # Group by and summarize year and main category
  # (summarise() drops the last grouping level; the explicit ungroup()
  # removes the remaining grouping by year)
  group_by(year, main_category_name) %>%
  summarise(revenue = sum(total_price)) %>%
  ungroup() %>%
  # Format $ Text
  mutate(revenue_text = scales::dollar(revenue))
revenue_by_year_cat_main_tbl
# Step 2 - Visualize
revenue_by_year_cat_main_tbl %>%
  # Set up x, y, fill
  ggplot(aes(x = year, y = revenue, fill = main_category_name)) +
  # Geometries
  geom_col() + # Run up to here to get a stacked bar plot
  # Facet
  # (one panel per main category instead of a single stacked chart)
  facet_wrap(~ main_category_name) +
  # Formatting
  scale_y_continuous(labels = scales::dollar) +
  labs(
    title = "Revenue by year and main category",
    subtitle = "Each product category has an upward trend",
    fill = "Main category" # Changes the legend name
  )
# 7.0 Writing Files ----
# If you want to interact with the filesystem use the fs package.
# Guard the install: an unconditional install.packages() re-installs the
# package on every run and errors in non-interactive sessions when no
# CRAN mirror is configured.
if (!requireNamespace("fs", quietly = TRUE)) {
  install.packages("fs")
}
library(fs)
# dir_create() is a no-op if the directory already exists
fs::dir_create("DS_business_case_01/00_data/01_e-commerce/04_wrangled_data_student")

# 7.1 Excel ----
if (!requireNamespace("writexl", quietly = TRUE)) {
  install.packages("writexl")
}
library(writexl)
order_items_wrangled_tbl %>%
  write_xlsx("DS_business_case_01/00_data/01_e-commerce/04_wrangled_data_student/order_items.xlsx")

# 7.2 CSV ----
order_items_wrangled_tbl %>%
  write_csv("DS_business_case_01/00_data/01_e-commerce/04_wrangled_data_student/order_items.csv")

# 7.3 RDS ----
order_items_wrangled_tbl %>%
  write_rds("DS_business_case_01/00_data/01_e-commerce/04_wrangled_data_student/order_items.rds")
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
#' @include eventbridge_service.R
NULL
# Generated by make.paws -- do not edit by hand. Each *_input() captures the
# caller's arguments and pairs them with the tagged AWS request shape for
# paws.common::populate(); each *_output() declares the response shape.
# Operations with no response body return an empty list().
.eventbridge$activate_event_source_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$activate_event_source_output <- function(...) {
  list()
}
.eventbridge$create_event_bus_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EventSourceName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$create_event_bus_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(EventBusArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$create_partner_event_source_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Account = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$create_partner_event_source_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(EventSourceArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$deactivate_event_source_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$deactivate_event_source_output <- function(...) {
  list()
}
# Generated by make.paws -- do not edit by hand. Request/response shape
# constructors for the delete-* and DescribeEventBus operations (see the
# generated-file header at the top of this file).
.eventbridge$delete_event_bus_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$delete_event_bus_output <- function(...) {
  list()
}
.eventbridge$delete_partner_event_source_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Account = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$delete_partner_event_source_output <- function(...) {
  list()
}
.eventbridge$delete_rule_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), Force = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$delete_rule_output <- function(...) {
  list()
}
.eventbridge$describe_event_bus_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$describe_event_bus_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), Policy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated by make.paws -- do not edit by hand. Request/response shape
# constructors for the describe-* operations.
.eventbridge$describe_event_source_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$describe_event_source_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Arn = structure(logical(0), tags = list(type = "string")), CreatedBy = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), ExpirationTime = structure(logical(0), tags = list(type = "timestamp")), Name = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$describe_partner_event_source_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$describe_partner_event_source_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Arn = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$describe_rule_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$describe_rule_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), EventPattern = structure(logical(0), tags = list(type = "string")), ScheduleExpression = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), ManagedBy = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated by make.paws -- do not edit by hand. Request/response shape
# constructors for Disable/EnableRule and the event-bus/source list operations.
.eventbridge$disable_rule_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$disable_rule_output <- function(...) {
  list()
}
.eventbridge$enable_rule_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$enable_rule_output <- function(...) {
  list()
}
.eventbridge$list_event_buses_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(NamePrefix = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_event_buses_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(EventBuses = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), Policy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_event_sources_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(NamePrefix = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_event_sources_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(EventSources = structure(list(structure(list(Arn = structure(logical(0), tags = list(type = "string")), CreatedBy = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), ExpirationTime = structure(logical(0), tags = list(type = "timestamp")), Name = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated by make.paws -- do not edit by hand. Request/response shape
# constructors for the partner-event-source list operations and
# ListRuleNamesByTarget.
.eventbridge$list_partner_event_source_accounts_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(EventSourceName = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_partner_event_source_accounts_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(PartnerEventSourceAccounts = structure(list(structure(list(Account = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), ExpirationTime = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_partner_event_sources_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(NamePrefix = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_partner_event_sources_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(PartnerEventSources = structure(list(structure(list(Arn = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_rule_names_by_target_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(TargetArn = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_rule_names_by_target_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(RuleNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated by make.paws -- do not edit by hand. Request/response shape
# constructors for ListRules and ListTagsForResource.
.eventbridge$list_rules_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(NamePrefix = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_rules_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Rules = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), EventPattern = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), ScheduleExpression = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), ManagedBy = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_tags_for_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ResourceARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_tags_for_resource_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated by make.paws -- do not edit by hand. ListTargetsByRule shapes.
# The Target member is the full EventBridge Target union (input transformer,
# Kinesis/RunCommand/ECS/Batch/SQS parameters); the nested structure below
# mirrors the AWS API model one-to-one.
.eventbridge$list_targets_by_rule_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Rule = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$list_targets_by_rule_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Targets = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), Input = structure(logical(0), tags = list(type = "string")), InputPath = structure(logical(0), tags = list(type = "string")), InputTransformer = structure(list(InputPathsMap = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), InputTemplate = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisParameters = structure(list(PartitionKeyPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RunCommandParameters = structure(list(RunCommandTargets = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), EcsParameters = structure(list(TaskDefinitionArn = structure(logical(0), tags = list(type = "string")), TaskCount = structure(logical(0), tags = list(type = "integer")), LaunchType = structure(logical(0), tags = list(type = "string")), NetworkConfiguration = structure(list(awsvpcConfiguration = structure(list(Subnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SecurityGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AssignPublicIp = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), PlatformVersion = structure(logical(0), tags = list(type = "string")), Group = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), BatchParameters = structure(list(JobDefinition = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), ArrayProperties = structure(list(Size = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), RetryStrategy = structure(list(Attempts = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "structure")), SqsParameters = structure(list(MessageGroupId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated by make.paws -- do not edit by hand. Request/response shape
# constructors for PutEvents, PutPartnerEvents, PutPermission and PutRule.
.eventbridge$put_events_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Entries = structure(list(structure(list(Time = structure(logical(0), tags = list(type = "timestamp")), Source = structure(logical(0), tags = list(type = "string")), Resources = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DetailType = structure(logical(0), tags = list(type = "string")), Detail = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$put_events_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(FailedEntryCount = structure(logical(0), tags = list(type = "integer")), Entries = structure(list(structure(list(EventId = structure(logical(0), tags = list(type = "string")), ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$put_partner_events_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Entries = structure(list(structure(list(Time = structure(logical(0), tags = list(type = "timestamp")), Source = structure(logical(0), tags = list(type = "string")), Resources = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DetailType = structure(logical(0), tags = list(type = "string")), Detail = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$put_partner_events_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(FailedEntryCount = structure(logical(0), tags = list(type = "integer")), Entries = structure(list(structure(list(EventId = structure(logical(0), tags = list(type = "string")), ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$put_permission_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(EventBusName = structure(logical(0), tags = list(type = "string")), Action = structure(logical(0), tags = list(type = "string")), Principal = structure(logical(0), tags = list(type = "string")), StatementId = structure(logical(0), tags = list(type = "string")), Condition = structure(list(Type = structure(logical(0), tags = list(type = "string")), Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$put_permission_output <- function(...) {
  list()
}
.eventbridge$put_rule_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), ScheduleExpression = structure(logical(0), tags = list(type = "string")), EventPattern = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$put_rule_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(RuleArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated by make.paws -- do not edit by hand. PutTargets shapes; the
# Targets member embeds the same full EventBridge Target structure used by
# ListTargetsByRule (input transformer, Kinesis/RunCommand/ECS/Batch/SQS
# parameters), mirroring the AWS API model one-to-one.
.eventbridge$put_targets_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Rule = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), Targets = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), Input = structure(logical(0), tags = list(type = "string")), InputPath = structure(logical(0), tags = list(type = "string")), InputTransformer = structure(list(InputPathsMap = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), InputTemplate = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisParameters = structure(list(PartitionKeyPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RunCommandParameters = structure(list(RunCommandTargets = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), EcsParameters = structure(list(TaskDefinitionArn = structure(logical(0), tags = list(type = "string")), TaskCount = structure(logical(0), tags = list(type = "integer")), LaunchType = structure(logical(0), tags = list(type = "string")), NetworkConfiguration = structure(list(awsvpcConfiguration = structure(list(Subnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SecurityGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AssignPublicIp = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), PlatformVersion = structure(logical(0), tags = list(type = "string")), Group = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), BatchParameters = structure(list(JobDefinition = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), ArrayProperties = structure(list(Size = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), RetryStrategy = structure(list(Attempts = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "structure")), SqsParameters = structure(list(MessageGroupId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$put_targets_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(FailedEntryCount = structure(logical(0), tags = list(type = "integer")), FailedEntries = structure(list(structure(list(TargetId = structure(logical(0), tags = list(type = "string")), ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated by make.paws -- do not edit by hand. Request/response shape
# constructors for RemovePermission, RemoveTargets and TagResource.
.eventbridge$remove_permission_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(StatementId = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$remove_permission_output <- function(...) {
  list()
}
.eventbridge$remove_targets_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Rule = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), Ids = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), Force = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$remove_targets_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(FailedEntryCount = structure(logical(0), tags = list(type = "integer")), FailedEntries = structure(list(structure(list(TargetId = structure(logical(0), tags = list(type = "string")), ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$tag_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ResourceARN = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$tag_resource_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated by make.paws -- do not edit by hand. Request/response shape
# constructors for TestEventPattern and UntagResource.
.eventbridge$test_event_pattern_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(EventPattern = structure(logical(0), tags = list(type = "string")), Event = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$test_event_pattern_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Result = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$untag_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ResourceARN = structure(logical(0), tags = list(type = "string")), TagKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.eventbridge$untag_resource_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(), tags = list(type = "structure"))
  return(populate(args, shape))
}
| /cran/paws.application.integration/R/eventbridge_interfaces.R | permissive | johnnytommy/paws | R | false | false | 28,730 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
#' @include eventbridge_service.R
NULL
.eventbridge$activate_event_source_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$activate_event_source_output <- function(...) {
list()
}
.eventbridge$create_event_bus_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EventSourceName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$create_event_bus_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(EventBusArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$create_partner_event_source_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Account = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$create_partner_event_source_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(EventSourceArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$deactivate_event_source_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$deactivate_event_source_output <- function(...) {
list()
}
.eventbridge$delete_event_bus_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$delete_event_bus_output <- function(...) {
list()
}
.eventbridge$delete_partner_event_source_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Account = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$delete_partner_event_source_output <- function(...) {
list()
}
.eventbridge$delete_rule_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), Force = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$delete_rule_output <- function(...) {
list()
}
.eventbridge$describe_event_bus_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$describe_event_bus_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), Policy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$describe_event_source_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$describe_event_source_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Arn = structure(logical(0), tags = list(type = "string")), CreatedBy = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), ExpirationTime = structure(logical(0), tags = list(type = "timestamp")), Name = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$describe_partner_event_source_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$describe_partner_event_source_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Arn = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$describe_rule_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$describe_rule_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), EventPattern = structure(logical(0), tags = list(type = "string")), ScheduleExpression = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), ManagedBy = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$disable_rule_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$disable_rule_output <- function(...) {
list()
}
.eventbridge$enable_rule_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$enable_rule_output <- function(...) {
list()
}
.eventbridge$list_event_buses_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NamePrefix = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_event_buses_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(EventBuses = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), Policy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_event_sources_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NamePrefix = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_event_sources_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(EventSources = structure(list(structure(list(Arn = structure(logical(0), tags = list(type = "string")), CreatedBy = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), ExpirationTime = structure(logical(0), tags = list(type = "timestamp")), Name = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_partner_event_source_accounts_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(EventSourceName = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_partner_event_source_accounts_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PartnerEventSourceAccounts = structure(list(structure(list(Account = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), ExpirationTime = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_partner_event_sources_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NamePrefix = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_partner_event_sources_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PartnerEventSources = structure(list(structure(list(Arn = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_rule_names_by_target_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TargetArn = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_rule_names_by_target_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(RuleNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_rules_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NamePrefix = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_rules_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Rules = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), EventPattern = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), ScheduleExpression = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), ManagedBy = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_tags_for_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_tags_for_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_targets_by_rule_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Rule = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Limit = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$list_targets_by_rule_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Targets = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), Input = structure(logical(0), tags = list(type = "string")), InputPath = structure(logical(0), tags = list(type = "string")), InputTransformer = structure(list(InputPathsMap = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), InputTemplate = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisParameters = structure(list(PartitionKeyPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RunCommandParameters = structure(list(RunCommandTargets = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), EcsParameters = structure(list(TaskDefinitionArn = structure(logical(0), tags = list(type = "string")), TaskCount = structure(logical(0), tags = list(type = "integer")), LaunchType = structure(logical(0), tags = list(type = "string")), NetworkConfiguration = structure(list(awsvpcConfiguration = structure(list(Subnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SecurityGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AssignPublicIp = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), PlatformVersion = structure(logical(0), tags = list(type = "string")), Group = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), BatchParameters = 
structure(list(JobDefinition = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), ArrayProperties = structure(list(Size = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), RetryStrategy = structure(list(Attempts = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "structure")), SqsParameters = structure(list(MessageGroupId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$put_events_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Entries = structure(list(structure(list(Time = structure(logical(0), tags = list(type = "timestamp")), Source = structure(logical(0), tags = list(type = "string")), Resources = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DetailType = structure(logical(0), tags = list(type = "string")), Detail = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$put_events_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(FailedEntryCount = structure(logical(0), tags = list(type = "integer")), Entries = structure(list(structure(list(EventId = structure(logical(0), tags = list(type = "string")), ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$put_partner_events_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Entries = structure(list(structure(list(Time = structure(logical(0), tags = list(type = "timestamp")), Source = structure(logical(0), tags = list(type = "string")), Resources = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DetailType = structure(logical(0), tags = list(type = "string")), Detail = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$put_partner_events_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(FailedEntryCount = structure(logical(0), tags = list(type = "integer")), Entries = structure(list(structure(list(EventId = structure(logical(0), tags = list(type = "string")), ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$put_permission_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(EventBusName = structure(logical(0), tags = list(type = "string")), Action = structure(logical(0), tags = list(type = "string")), Principal = structure(logical(0), tags = list(type = "string")), StatementId = structure(logical(0), tags = list(type = "string")), Condition = structure(list(Type = structure(logical(0), tags = list(type = "string")), Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$put_permission_output <- function(...) {
list()
}
.eventbridge$put_rule_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), ScheduleExpression = structure(logical(0), tags = list(type = "string")), EventPattern = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$put_rule_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(RuleArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$put_targets_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Rule = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), Targets = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Arn = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), Input = structure(logical(0), tags = list(type = "string")), InputPath = structure(logical(0), tags = list(type = "string")), InputTransformer = structure(list(InputPathsMap = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), InputTemplate = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisParameters = structure(list(PartitionKeyPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RunCommandParameters = structure(list(RunCommandTargets = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), EcsParameters = structure(list(TaskDefinitionArn = structure(logical(0), tags = list(type = "string")), TaskCount = structure(logical(0), tags = list(type = "integer")), LaunchType = structure(logical(0), tags = list(type = "string")), NetworkConfiguration = structure(list(awsvpcConfiguration = structure(list(Subnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SecurityGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AssignPublicIp = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), PlatformVersion = structure(logical(0), tags = list(type = "string")), Group = 
structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), BatchParameters = structure(list(JobDefinition = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), ArrayProperties = structure(list(Size = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), RetryStrategy = structure(list(Attempts = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "structure")), SqsParameters = structure(list(MessageGroupId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$put_targets_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(FailedEntryCount = structure(logical(0), tags = list(type = "integer")), FailedEntries = structure(list(structure(list(TargetId = structure(logical(0), tags = list(type = "string")), ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$remove_permission_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(StatementId = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$remove_permission_output <- function(...) {
list()
}
.eventbridge$remove_targets_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Rule = structure(logical(0), tags = list(type = "string")), EventBusName = structure(logical(0), tags = list(type = "string")), Ids = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), Force = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$remove_targets_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(FailedEntryCount = structure(logical(0), tags = list(type = "integer")), FailedEntries = structure(list(structure(list(TargetId = structure(logical(0), tags = list(type = "string")), ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$tag_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceARN = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$tag_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$test_event_pattern_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(EventPattern = structure(logical(0), tags = list(type = "string")), Event = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$test_event_pattern_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Result = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$untag_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceARN = structure(logical(0), tags = list(type = "string")), TagKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.eventbridge$untag_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
|
\name{Heatmap}
\alias{Heatmap}
\title{
Constructor method for Heatmap class
}
\description{
Constructor method for Heatmap class
}
\usage{
Heatmap(matrix, col, name,
na_col = "grey",
color_space = "LAB",
rect_gp = gpar(col = NA),
cell_fun = function(j, i, x, y, width, height, fill) NULL,
row_title = character(0),
row_title_side = c("left", "right"),
row_title_gp = gpar(fontsize = 14),
row_title_rot = switch(row_title_side[1], "left" = 90, "right" = 270),
column_title = character(0),
column_title_side = c("top", "bottom"),
column_title_gp = gpar(fontsize = 14),
column_title_rot = 0,
cluster_rows = TRUE,
clustering_distance_rows = "euclidean",
clustering_method_rows = "complete",
row_dend_side = c("left", "right"),
row_dend_width = unit(10, "mm"),
show_row_dend = TRUE,
row_dend_reorder = TRUE,
row_dend_gp = gpar(),
row_hclust_side = row_dend_side,
row_hclust_width = row_dend_width,
show_row_hclust = show_row_dend,
row_hclust_reorder = row_dend_reorder,
row_hclust_gp = row_dend_gp,
cluster_columns = TRUE,
clustering_distance_columns = "euclidean",
clustering_method_columns = "complete",
column_dend_side = c("top", "bottom"),
column_dend_height = unit(10, "mm"),
show_column_dend = TRUE,
column_dend_gp = gpar(),
column_dend_reorder = TRUE,
column_hclust_side = column_dend_side,
column_hclust_height = column_dend_height,
show_column_hclust = show_column_dend,
column_hclust_gp = column_dend_gp,
column_hclust_reorder = column_dend_reorder,
row_order = NULL,
column_order = NULL,
row_names_side = c("right", "left"),
show_row_names = TRUE,
row_names_max_width = unit(4, "cm"),
row_names_gp = gpar(fontsize = 12),
column_names_side = c("bottom", "top"),
show_column_names = TRUE,
column_names_max_height = unit(4, "cm"),
column_names_gp = gpar(fontsize = 12),
top_annotation = new("HeatmapAnnotation"),
top_annotation_height = top_annotation@size,
bottom_annotation = new("HeatmapAnnotation"),
bottom_annotation_height = bottom_annotation@size,
km = 1,
split = NULL,
gap = unit(1, "mm"),
combined_name_fun = function(x) paste(x, collapse = "/"),
width = NULL,
show_heatmap_legend = TRUE,
heatmap_legend_param = list(title = name, color_bar = "discrete"))
}
\arguments{
\item{matrix}{a matrix. Either numeric or character. If it is a simple vector, it will be converted to a one-column matrix.}
\item{col}{a vector of colors if the color mapping is discrete or a color mapping function if the matrix is continuous numbers (should be generated by \code{\link[circlize]{colorRamp2}}. If the matrix is continuous, the value can also be a vector of colors so that colors will be interpolated. Pass to \code{\link{ColorMapping}}.}
\item{name}{name of the heatmap. The name is used as the title of the heatmap legend.}
\item{na_col}{color for \code{NA} values.}
\item{rect_gp}{graphic parameters for drawing rectangles (for heatmap body).}
\item{color_space}{the color space in which colors are interpolated. Only used if \code{matrix} is numeric and \code{col} is a vector of colors. Pass to \code{\link[circlize]{colorRamp2}}.}
\item{cell_fun}{self-defined function to add graphics on each cell. Seven parameters will be passed into this function: \code{i}, \code{j}, \code{x}, \code{y}, \code{width}, \code{height}, \code{fill} which are row index, column index in \code{matrix}, coordinate of the middle points in the heatmap body viewport, the width and height of the cell and the filled color. \code{x}, \code{y}, \code{width} and \code{height} are all \code{\link[grid]{unit}} objects.}
\item{row_title}{title on row.}
\item{row_title_side}{will the title be put on the left or right of the heatmap?}
\item{row_title_gp}{graphic parameters for drawing text.}
\item{row_title_rot}{rotation of row titles. Only 0, 90, 270 are allowed to set.}
\item{column_title}{title on column.}
\item{column_title_side}{will the title be put on the top or bottom of the heatmap?}
\item{column_title_gp}{graphic parameters for drawing text.}
\item{column_title_rot}{rotation of column titles. Only 0, 90, 270 are allowed to set.}
\item{cluster_rows}{If the value is a logical, it means whether make cluster on rows. The value can also be a \code{\link[stats]{hclust}} or a \code{\link[stats]{dendrogram}} that already contains clustering information. This means you can use any type of clustering methods and render the \code{\link[stats]{dendrogram}} object with self-defined graphic settings.}
\item{clustering_distance_rows}{it can be a pre-defined character which is in ("euclidean", "maximum", "manhattan", "canberra", "binary", "minkowski", "pearson", "spearman", "kendall"). It can also be a function. If the function has one argument, the input argument should be a matrix and the returned value should be a \code{\link[stats]{dist}} object. If the function has two arguments, the input arguments are two vectors and the function calculates distance between these two vectors.}
\item{clustering_method_rows}{method to make cluster, pass to \code{\link[stats]{hclust}}.}
\item{row_dend_side}{should the row cluster be put on the left or right of the heatmap?}
\item{row_dend_width}{width of the row cluster, should be a \code{\link[grid]{unit}} object.}
\item{show_row_dend}{whether show row clusters. }
\item{row_dend_gp}{graphics parameters for drawing lines. If users already provide a \code{\link[stats]{dendrogram}} object with edges rendered, this argument will be ignored.}
\item{row_dend_reorder}{apply reordering on rows. The value can be a logical value or a vector which contains weight which is used to reorder rows}
\item{row_hclust_side}{deprecated, use \code{row_dend_side} instead}
\item{row_hclust_width}{deprecated, use \code{row_dend_width} instead}
\item{show_row_hclust}{deprecated, use \code{show_row_dend} instead}
\item{row_hclust_gp}{deprecated, use \code{row_dend_gp} instead}
\item{row_hclust_reorder}{deprecated, use \code{row_dend_reorder} instead}
\item{cluster_columns}{whether make cluster on columns. Same settings as \code{cluster_rows}.}
\item{clustering_distance_columns}{same setting as \code{clustering_distance_rows}.}
\item{clustering_method_columns}{method to make cluster, pass to \code{\link[stats]{hclust}}.}
\item{column_dend_side}{should the column cluster be put on the top or bottom of the heatmap?}
\item{column_dend_height}{height of the column cluster, should be a \code{\link[grid]{unit}} object.}
\item{show_column_dend}{whether show column clusters.}
\item{column_dend_gp}{graphic parameters for drawing lines. Same settings as \code{row_dend_gp}.}
\item{column_dend_reorder}{apply reordering on columns. The value can be a logical value or a vector which contains weight which is used to reorder columns}
\item{column_hclust_side}{deprecated, use \code{column_dend_side} instead}
\item{column_hclust_height}{deprecated, use \code{column_dend_height} instead}
\item{show_column_hclust}{deprecated, use \code{show_column_dend} instead}
\item{column_hclust_gp}{deprecated, use \code{column_dend_gp} instead}
\item{column_hclust_reorder}{deprecated, use \code{column_dend_reorder} instead}
\item{row_order}{order of rows. It makes it easy to adjust row order for a list of heatmaps if this heatmap is selected as the main heatmap. Manually setting row order should turn off clustering}
\item{column_order}{order of column. It makes it easy to adjust column order for both matrix and column annotations.}
\item{row_names_side}{should the row names be put on the left or right of the heatmap?}
\item{show_row_names}{whether show row names.}
\item{row_names_max_width}{maximum width of row names viewport. Because sometimes row names can be very long, it is not reasonable to show them all.}
\item{row_names_gp}{graphic parameters for drawing text.}
\item{column_names_side}{should the column names be put on the top or bottom of the heatmap?}
\item{column_names_max_height}{maximum height of column names viewport.}
\item{show_column_names}{whether show column names.}
\item{column_names_gp}{graphic parameters for drawing text.}
\item{top_annotation}{a \code{\link{HeatmapAnnotation}} object which contains a list of annotations.}
\item{top_annotation_height}{total height of the column annotations on the top.}
\item{bottom_annotation}{a \code{\link{HeatmapAnnotation}} object.}
\item{bottom_annotation_height}{total height of the column annotations on the bottom.}
\item{km}{do k-means clustering on rows. If the value is larger than 1, the heatmap will be split by rows according to the k-means clustering. For each row-clusters, hierarchical clustering is still applied with parameters above.}
\item{split}{a vector or a data frame by which the rows are split. But if \code{cluster_rows} is a clustering object, \code{split} can be a single number indicating rows are to be split according to the split on the tree.}
\item{gap}{gap between row-slices if the heatmap is split by rows, should be \code{\link[grid]{unit}} object.}
\item{combined_name_fun}{if the heatmap is split by rows, how to make a combined row title for each slice? The input parameter for this function is a vector which contains level names under each column in \code{split}.}
\item{width}{the width of the single heatmap, should be a fixed \code{\link[grid]{unit}} object. It is used for the layout when the heatmap is appended to a list of heatmaps.}
\item{show_heatmap_legend}{whether show heatmap legend?}
\item{heatmap_legend_param}{a list contains parameters for the heatmap legend. See \code{\link{color_mapping_legend,ColorMapping-method}} for all available parameters.}
}
\details{
The initialization function only applies parameter checking and fill values to each slot with proper ones.
Then it will be ready for clustering and layout.
Following methods can be applied on the \code{\link{Heatmap-class}} object:
\itemize{
\item \code{\link{show,Heatmap-method}}: draw a single heatmap with default parameters
\item \code{\link{draw,Heatmap-method}}: draw a single heatmap.
\item \code{\link{add_heatmap,Heatmap-method}} append heatmaps and row annotations to a list of heatmaps.
}
The constructor function pretends to be a high-level graphic function because the \code{show} method
of the \code{\link{Heatmap-class}} object actually plots the graphics.
}
\value{
A \code{\link{Heatmap-class}} object.
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
mat = matrix(rnorm(80, 2), 8, 10)
mat = rbind(mat, matrix(rnorm(40, -2), 4, 10))
rownames(mat) = letters[1:12]
colnames(mat) = letters[1:10]
require(circlize)
Heatmap(mat)
Heatmap(mat, col = colorRamp2(c(-3, 0, 3), c("green", "white", "red")))
Heatmap(mat, name = "test")
Heatmap(mat, column_title = "blablabla")
Heatmap(mat, row_title = "blablabla")
Heatmap(mat, column_title = "blablabla", column_title_side = "bottom")
Heatmap(mat, column_title = "blablabla", column_title_gp = gpar(fontsize = 20,
fontface = "bold"))
Heatmap(mat, cluster_rows = FALSE)
Heatmap(mat, clustering_distance_rows = "pearson")
Heatmap(mat, clustering_distance_rows = function(x) dist(x))
Heatmap(mat, clustering_distance_rows = function(x, y) 1 - cor(x, y))
Heatmap(mat, clustering_method_rows = "single")
Heatmap(mat, row_dend_side = "right")
Heatmap(mat, row_dend_width = unit(1, "cm"))
Heatmap(mat, row_names_side = "left", row_dend_side = "right",
column_names_side = "top", column_dend_side = "bottom")
Heatmap(mat, show_row_names = FALSE)
mat2 = mat
rownames(mat2) = NULL
colnames(mat2) = NULL
Heatmap(mat2)
Heatmap(mat, row_names_gp = gpar(fontsize = 20))
Heatmap(mat, km = 2)
Heatmap(mat, split = rep(c("A", "B"), 6))
Heatmap(mat, split = data.frame(rep(c("A", "B"), 6), rep(c("C", "D"), each = 6)))
Heatmap(mat, split = data.frame(rep(c("A", "B"), 6), rep(c("C", "D"), each = 6)),
combined_name_fun = function(x) paste(x, collapse = "\n"))
annotation = HeatmapAnnotation(df = data.frame(type = c(rep("A", 6), rep("B", 6))))
Heatmap(mat, top_annotation = annotation)
annotation = HeatmapAnnotation(df = data.frame(type1 = rep(c("A", "B"), 6),
type2 = rep(c("C", "D"), each = 6)))
Heatmap(mat, bottom_annotation = annotation)
annotation = data.frame(value = rnorm(10))
annotation = HeatmapAnnotation(df = annotation)
Heatmap(mat, top_annotation = annotation)
annotation = data.frame(value = rnorm(10))
value = 1:10
ha = HeatmapAnnotation(df = annotation, points = anno_points(value),
annotation_height = c(1, 2))
Heatmap(mat, top_annotation = ha, top_annotation_height = unit(2, "cm"),
bottom_annotation = ha)
# character matrix
mat3 = matrix(sample(letters[1:6], 100, replace = TRUE), 10, 10)
rownames(mat3) = {x = letters[1:10]; x[1] = "aaaaaaaaaaaaaaaaaaaaaaa";x}
Heatmap(mat3, rect_gp = gpar(col = "white"))
mat = matrix(1:9, 3, 3)
rownames(mat) = letters[1:3]
colnames(mat) = letters[1:3]
Heatmap(mat, rect_gp = gpar(col = "white"),
cell_fun = function(i, j, x, y, width, height, fill) {
grid.text(mat[i, j], x = x, y = y)
},
cluster_rows = FALSE, cluster_columns = FALSE, row_names_side = "left",
column_names_side = "top")
}
| /man/Heatmap.rd | no_license | palc/ComplexHeatmap | R | false | false | 13,360 | rd | \name{Heatmap}
\alias{Heatmap}
\title{
Constructor method for Heatmap class
}
\description{
Constructor method for Heatmap class
}
\usage{
Heatmap(matrix, col, name,
na_col = "grey",
color_space = "LAB",
rect_gp = gpar(col = NA),
cell_fun = function(j, i, x, y, width, height, fill) NULL,
row_title = character(0),
row_title_side = c("left", "right"),
row_title_gp = gpar(fontsize = 14),
row_title_rot = switch(row_title_side[1], "left" = 90, "right" = 270),
column_title = character(0),
column_title_side = c("top", "bottom"),
column_title_gp = gpar(fontsize = 14),
column_title_rot = 0,
cluster_rows = TRUE,
clustering_distance_rows = "euclidean",
clustering_method_rows = "complete",
row_dend_side = c("left", "right"),
row_dend_width = unit(10, "mm"),
show_row_dend = TRUE,
row_dend_reorder = TRUE,
row_dend_gp = gpar(),
row_hclust_side = row_dend_side,
row_hclust_width = row_dend_width,
show_row_hclust = show_row_dend,
row_hclust_reorder = row_dend_reorder,
row_hclust_gp = row_dend_gp,
cluster_columns = TRUE,
clustering_distance_columns = "euclidean",
clustering_method_columns = "complete",
column_dend_side = c("top", "bottom"),
column_dend_height = unit(10, "mm"),
show_column_dend = TRUE,
column_dend_gp = gpar(),
column_dend_reorder = TRUE,
column_hclust_side = column_dend_side,
column_hclust_height = column_dend_height,
show_column_hclust = show_column_dend,
column_hclust_gp = column_dend_gp,
column_hclust_reorder = column_dend_reorder,
row_order = NULL,
column_order = NULL,
row_names_side = c("right", "left"),
show_row_names = TRUE,
row_names_max_width = unit(4, "cm"),
row_names_gp = gpar(fontsize = 12),
column_names_side = c("bottom", "top"),
show_column_names = TRUE,
column_names_max_height = unit(4, "cm"),
column_names_gp = gpar(fontsize = 12),
top_annotation = new("HeatmapAnnotation"),
top_annotation_height = top_annotation@size,
bottom_annotation = new("HeatmapAnnotation"),
bottom_annotation_height = bottom_annotation@size,
km = 1,
split = NULL,
gap = unit(1, "mm"),
combined_name_fun = function(x) paste(x, collapse = "/"),
width = NULL,
show_heatmap_legend = TRUE,
heatmap_legend_param = list(title = name, color_bar = "discrete"))
}
\arguments{
\item{matrix}{a matrix. Either numeric or character. If it is a simple vector, it will be converted to a one-column matrix.}
\item{col}{a vector of colors if the color mapping is discrete or a color mapping function if the matrix is continuous numbers (should be generated by \code{\link[circlize]{colorRamp2}}). If the matrix is continuous, the value can also be a vector of colors so that colors will be interpolated. Pass to \code{\link{ColorMapping}}.}
\item{name}{name of the heatmap. The name is used as the title of the heatmap legend.}
\item{na_col}{color for \code{NA} values.}
\item{rect_gp}{graphic parameters for drawing rectangles (for heatmap body).}
\item{color_space}{the color space in which colors are interpolated. Only used if \code{matrix} is numeric and \code{col} is a vector of colors. Pass to \code{\link[circlize]{colorRamp2}}.}
\item{cell_fun}{self-defined function to add graphics on each cell. Seven parameters will be passed into this function: \code{i}, \code{j}, \code{x}, \code{y}, \code{width}, \code{height}, \code{fill} which are row index, column index in \code{matrix}, coordinate of the middle points in the heatmap body viewport, the width and height of the cell and the filled color. \code{x}, \code{y}, \code{width} and \code{height} are all \code{\link[grid]{unit}} objects.}
\item{row_title}{title on row.}
\item{row_title_side}{will the title be put on the left or right of the heatmap?}
\item{row_title_gp}{graphic parameters for drawing text.}
\item{row_title_rot}{rotation of row titles. Only 0, 90, 270 are allowed to set.}
\item{column_title}{title on column.}
\item{column_title_side}{will the title be put on the top or bottom of the heatmap?}
\item{column_title_gp}{graphic parameters for drawing text.}
\item{column_title_rot}{rotation of column titles. Only 0, 90, 270 are allowed to set.}
\item{cluster_rows}{If the value is a logical, it means whether make cluster on rows. The value can also be a \code{\link[stats]{hclust}} or a \code{\link[stats]{dendrogram}} that already contains clustering information. This means you can use any type of clustering methods and render the \code{\link[stats]{dendrogram}} object with self-defined graphic settings.}
\item{clustering_distance_rows}{it can be a pre-defined character which is in ("euclidean", "maximum", "manhattan", "canberra", "binary", "minkowski", "pearson", "spearman", "kendall"). It can also be a function. If the function has one argument, the input argument should be a matrix and the returned value should be a \code{\link[stats]{dist}} object. If the function has two arguments, the input arguments are two vectors and the function calculates distance between these two vectors.}
\item{clustering_method_rows}{method to make cluster, pass to \code{\link[stats]{hclust}}.}
\item{row_dend_side}{should the row cluster be put on the left or right of the heatmap?}
\item{row_dend_width}{width of the row cluster, should be a \code{\link[grid]{unit}} object.}
\item{show_row_dend}{whether show row clusters. }
\item{row_dend_gp}{graphics parameters for drawing lines. If users already provide a \code{\link[stats]{dendrogram}} object with edges rendered, this argument will be ignored.}
\item{row_dend_reorder}{apply reordering on rows. The value can be a logical value or a vector which contains weight which is used to reorder rows}
\item{row_hclust_side}{deprecated, use \code{row_dend_side} instead}
\item{row_hclust_width}{deprecated, use \code{row_dend_width} instead}
\item{show_row_hclust}{deprecated, use \code{show_row_dend} instead}
\item{row_hclust_gp}{deprecated, use \code{row_dend_gp} instead}
\item{row_hclust_reorder}{deprecated, use \code{row_dend_reorder} instead}
\item{cluster_columns}{whether make cluster on columns. Same settings as \code{cluster_rows}.}
\item{clustering_distance_columns}{same setting as \code{clustering_distance_rows}.}
\item{clustering_method_columns}{method to make cluster, pass to \code{\link[stats]{hclust}}.}
\item{column_dend_side}{should the column cluster be put on the top or bottom of the heatmap?}
\item{column_dend_height}{height of the column cluster, should be a \code{\link[grid]{unit}} object.}
\item{show_column_dend}{whether show column clusters.}
\item{column_dend_gp}{graphic parameters for drawing lines. Same settings as \code{row_dend_gp}.}
\item{column_dend_reorder}{apply reordering on columns. The value can be a logical value or a vector which contains weight which is used to reorder columns}
\item{column_hclust_side}{deprecated, use \code{column_dend_side} instead}
\item{column_hclust_height}{deprecated, use \code{column_dend_height} instead}
\item{show_column_hclust}{deprecated, use \code{show_column_dend} instead}
\item{column_hclust_gp}{deprecated, use \code{column_dend_gp} instead}
\item{column_hclust_reorder}{deprecated, use \code{column_dend_reorder} instead}
\item{row_order}{order of rows. It makes it easy to adjust row order for a list of heatmaps if this heatmap is selected as the main heatmap. Manually setting row order should turn off clustering}
\item{column_order}{order of column. It makes it easy to adjust column order for both matrix and column annotations.}
\item{row_names_side}{should the row names be put on the left or right of the heatmap?}
\item{show_row_names}{whether show row names.}
\item{row_names_max_width}{maximum width of row names viewport. Because sometimes row names can be very long, it is not reasonable to show them all.}
\item{row_names_gp}{graphic parameters for drawing text.}
\item{column_names_side}{should the column names be put on the top or bottom of the heatmap?}
\item{column_names_max_height}{maximum height of column names viewport.}
\item{show_column_names}{whether show column names.}
\item{column_names_gp}{graphic parameters for drawing text.}
\item{top_annotation}{a \code{\link{HeatmapAnnotation}} object which contains a list of annotations.}
\item{top_annotation_height}{total height of the column annotations on the top.}
\item{bottom_annotation}{a \code{\link{HeatmapAnnotation}} object.}
\item{bottom_annotation_height}{total height of the column annotations on the bottom.}
\item{km}{do k-means clustering on rows. If the value is larger than 1, the heatmap will be split by rows according to the k-means clustering. For each row-clusters, hierarchical clustering is still applied with parameters above.}
\item{split}{a vector or a data frame by which the rows are split. But if \code{cluster_rows} is a clustering object, \code{split} can be a single number indicating rows are to be split according to the split on the tree.}
\item{gap}{gap between row-slices if the heatmap is split by rows, should be \code{\link[grid]{unit}} object.}
\item{combined_name_fun}{if the heatmap is split by rows, how to make a combined row title for each slice? The input parameter for this function is a vector which contains level names under each column in \code{split}.}
\item{width}{the width of the single heatmap, should be a fixed \code{\link[grid]{unit}} object. It is used for the layout when the heatmap is appended to a list of heatmaps.}
\item{show_heatmap_legend}{whether show heatmap legend?}
\item{heatmap_legend_param}{a list contains parameters for the heatmap legend. See \code{\link{color_mapping_legend,ColorMapping-method}} for all available parameters.}
}
\details{
The initialization function only applies parameter checking and fill values to each slot with proper ones.
Then it will be ready for clustering and layout.
Following methods can be applied on the \code{\link{Heatmap-class}} object:
\itemize{
\item \code{\link{show,Heatmap-method}}: draw a single heatmap with default parameters
\item \code{\link{draw,Heatmap-method}}: draw a single heatmap.
\item \code{\link{add_heatmap,Heatmap-method}} append heatmaps and row annotations to a list of heatmaps.
}
The constructor function pretends to be a high-level graphic function because the \code{show} method
of the \code{\link{Heatmap-class}} object actually plots the graphics.
}
\value{
A \code{\link{Heatmap-class}} object.
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
mat = matrix(rnorm(80, 2), 8, 10)
mat = rbind(mat, matrix(rnorm(40, -2), 4, 10))
rownames(mat) = letters[1:12]
colnames(mat) = letters[1:10]
require(circlize)
Heatmap(mat)
Heatmap(mat, col = colorRamp2(c(-3, 0, 3), c("green", "white", "red")))
Heatmap(mat, name = "test")
Heatmap(mat, column_title = "blablabla")
Heatmap(mat, row_title = "blablabla")
Heatmap(mat, column_title = "blablabla", column_title_side = "bottom")
Heatmap(mat, column_title = "blablabla", column_title_gp = gpar(fontsize = 20,
fontface = "bold"))
Heatmap(mat, cluster_rows = FALSE)
Heatmap(mat, clustering_distance_rows = "pearson")
Heatmap(mat, clustering_distance_rows = function(x) dist(x))
Heatmap(mat, clustering_distance_rows = function(x, y) 1 - cor(x, y))
Heatmap(mat, clustering_method_rows = "single")
Heatmap(mat, row_dend_side = "right")
Heatmap(mat, row_dend_width = unit(1, "cm"))
Heatmap(mat, row_names_side = "left", row_dend_side = "right",
column_names_side = "top", column_dend_side = "bottom")
Heatmap(mat, show_row_names = FALSE)
mat2 = mat
rownames(mat2) = NULL
colnames(mat2) = NULL
Heatmap(mat2)
Heatmap(mat, row_names_gp = gpar(fontsize = 20))
Heatmap(mat, km = 2)
Heatmap(mat, split = rep(c("A", "B"), 6))
Heatmap(mat, split = data.frame(rep(c("A", "B"), 6), rep(c("C", "D"), each = 6)))
Heatmap(mat, split = data.frame(rep(c("A", "B"), 6), rep(c("C", "D"), each = 6)),
combined_name_fun = function(x) paste(x, collapse = "\n"))
annotation = HeatmapAnnotation(df = data.frame(type = c(rep("A", 6), rep("B", 6))))
Heatmap(mat, top_annotation = annotation)
annotation = HeatmapAnnotation(df = data.frame(type1 = rep(c("A", "B"), 6),
type2 = rep(c("C", "D"), each = 6)))
Heatmap(mat, bottom_annotation = annotation)
annotation = data.frame(value = rnorm(10))
annotation = HeatmapAnnotation(df = annotation)
Heatmap(mat, top_annotation = annotation)
annotation = data.frame(value = rnorm(10))
value = 1:10
ha = HeatmapAnnotation(df = annotation, points = anno_points(value),
annotation_height = c(1, 2))
Heatmap(mat, top_annotation = ha, top_annotation_height = unit(2, "cm"),
bottom_annotation = ha)
# character matrix
mat3 = matrix(sample(letters[1:6], 100, replace = TRUE), 10, 10)
rownames(mat3) = {x = letters[1:10]; x[1] = "aaaaaaaaaaaaaaaaaaaaaaa";x}
Heatmap(mat3, rect_gp = gpar(col = "white"))
mat = matrix(1:9, 3, 3)
rownames(mat) = letters[1:3]
colnames(mat) = letters[1:3]
Heatmap(mat, rect_gp = gpar(col = "white"),
cell_fun = function(i, j, x, y, width, height, fill) {
grid.text(mat[i, j], x = x, y = y)
},
cluster_rows = FALSE, cluster_columns = FALSE, row_names_side = "left",
column_names_side = "top")
}
|
# get_median.R
# Reads Venmo transactions (one JSON-ish record per line), maintains a
# 60-second sliding window of payments, and after each transaction records the
# median vertex degree of the payment graph formed by the windowed actor/target
# pairs. One median per transaction is written to the output file.

# Get arguments from the shell script (input file and output file name).
args <- commandArgs(TRUE)
input.file <- args[1]
output.file <- args[2]

# The data is not in a proper JSON format, so read it as comma-separated fields.
venmo.data <- read.table(input.file, sep = ",")

# Assign column names, then rearrange to (actor, target, created_time).
colnames(venmo.data) <- c("created_time", "target", "actor")
venmo.data <- venmo.data[3:1]

# Clean column "actor": keep the text after ": " and strip the trailing quote;
# empty names become NA so the rows can be dropped below.
venmo.data$actor <- as.character(venmo.data$actor)
venmo.data$actor <- sapply(strsplit(venmo.data$actor, split = ": ", fixed = TRUE),
                           function(x) gsub(".{1}$", "", x[2]))
venmo.data[venmo.data$actor == "", "actor"] <- NA

# Clean column "target": keep the text after ": ".
venmo.data$target <- as.character(venmo.data$target)
venmo.data$target <- sapply(strsplit(venmo.data$target, split = ": ", fixed = TRUE),
                            function(x) x[2])

# Clean column "created_time": keep the text after ": " and parse the timestamp.
venmo.data$created_time <- as.character(venmo.data$created_time)
venmo.data$created_time <- sapply(strsplit(venmo.data$created_time, split = ": ", fixed = TRUE),
                                  function(x) x[2])
venmo.data$created_time <- strptime(venmo.data$created_time, "%Y-%m-%dT%H:%M:%S")

# Store the participants as factors and summarize the input data.
venmo.data$actor <- as.factor(venmo.data$actor)
venmo.data$target <- as.factor(venmo.data$target)
summary(venmo.data)

# Only the actor/target relation matters, so drop rows where either is missing.
venmo.data <- venmo.data[complete.cases(venmo.data[, c("actor", "target")]), ]
summary(venmo.data)

##########################################################
# Sliding-window median-degree computation
##########################################################
# buffer.list holds the transactions inside the current 60-second window;
# median.list collects one median value per processed transaction.
buffer.list <- NULL
n.txn <- nrow(venmo.data)
median.list <- numeric(n.txn)  # preallocated instead of growing with rbind()

for (txn in seq_len(n.txn)) {
  print(paste0("Transaction : ", txn))

  # The maximum timestamp seen so far defines the leading edge of the window.
  Max.TimeFrame <- max(venmo.data$created_time[1:txn])

  # Append the current transaction, recompute the age of every buffered row,
  # and keep only rows strictly inside the 60-second window.
  buffer.list$timediff <- NULL
  buffer.list <- rbind(buffer.list, venmo.data[txn, ])
  buffer.list$timediff <- as.numeric(buffer.list$created_time - Max.TimeFrame,
                                     units = "secs")
  buffer.list <- buffer.list[buffer.list$timediff > -60, ]

  # Vertex degree = number of times each name appears as actor or target in
  # the window. (Vectorized replacement for the original inner loop, which
  # also shadowed the outer loop index `i`.)
  list.names <- c(as.character(buffer.list[[1]]), as.character(buffer.list[[2]]))
  table.names <- table(list.names)

  # Capture the running median degree, rounded to two decimals.
  median.list[txn] <- round(median(table.names), 2)
  print("Median Calculated.")
  # cat() (not print()) so the separator's newlines are actually rendered.
  cat("\n#################################################################\n")
}

# Write the final medians to the output file, one value per line.
write.table(median.list, file = output.file, row.names = FALSE,
            col.names = FALSE, sep = ",")
| /src/get_median.R | no_license | PriyankDsilva/InsightDataScienceCodingChallange | R | false | false | 4,151 | r | #get Arguments from the Shell Script (input file and output file name)
args<-commandArgs(TRUE)
input.file <- args[1]
output.file <- args[2]
#library graph used to create network graph
#load the library
#library(igraph)
#since the data is not in a proper JSon format we use read table function with comma seperator
venmo.data <- read.table(input.file, sep=",")
#assign column names
colnames(venmo.data) <- c('created_time','target','actor')
#rearrange order
venmo.data <- venmo.data[3:1]
#clean column Actor
venmo.data$actor <- as.character(venmo.data$actor)
venmo.data$actor <- sapply(strsplit(venmo.data$actor, split=': ', fixed=TRUE), function(x) (gsub('.{1}$', '',x[2])))
venmo.data[ venmo.data$actor == '',"actor" ] = NA
#clean column Target
venmo.data$target <- as.character(venmo.data$target)
venmo.data$target <- sapply(strsplit(venmo.data$target, split=': ', fixed=TRUE), function(x) (x[2]))
#clean column Create date
venmo.data$created_time <- as.character(venmo.data$created_time)
venmo.data$created_time <- sapply(strsplit(venmo.data$created_time, split=': ', fixed=TRUE),
function(x) (x[2]))
venmo.data$created_time <- strptime(venmo.data$created_time, "%Y-%m-%dT%H:%M:%S")
#save as factors
venmo.data$actor <- as.factor(venmo.data$actor)
venmo.data$target <- as.factor(venmo.data$target)
#summary of the input Data
summary(venmo.data)
#as we need to find only the relation between the actor and target we filter the records
#that has null values
#cleansed from the source --not needed now
venmo.data <- venmo.data[complete.cases(venmo.data[,c('actor','target')]),]
summary(venmo.data)
##########################################################
#Graph and Median Function
##########################################################
# Rolling 60-second-window median of per-user transaction counts.
#
# For every incoming transaction (rows of `venmo.data`, processed in order),
# keep only the transactions whose created_time lies within 60 seconds of the
# maximum timestamp seen so far, count how often each participant name
# (columns 1 and 2) appears in that window, and record the median of those
# counts. Requires `venmo.data` (data frame with a created_time column and
# two name columns) and `output.file` to be defined earlier in the script.

# buffer.list stores valid transactions within the window;
# median.list captures one median value per processed transaction.
buffer.list <- NULL
median.list <- NULL
# loop through each and every record
for (i in seq_len(nrow(venmo.data))) {
  print(paste0('Transaction : ', i))
  # maximum timestamp observed up to (and including) the current record
  Max.TimeFrame <- max(venmo.data$created_time[1:i])
  # drop the stale timediff column before appending the new row
  buffer.list$timediff <- NULL
  # NOTE: rbind-in-a-loop is O(n^2); acceptable for modest inputs
  buffer.list <- rbind(buffer.list, venmo.data[i, ])
  # seconds between each buffered transaction and the window's upper edge
  # (values are <= 0 because Max.TimeFrame is the newest timestamp)
  buffer.list$timediff <- as.numeric(buffer.list$created_time - Max.TimeFrame, units = "secs")
  # filter records based on the 60-second window
  buffer.list <- buffer.list[buffer.list$timediff > -60, ]
  # collect both participant names of every buffered transaction
  # (index j, not i: reusing i would shadow the outer loop variable)
  list.names <- NULL
  for (j in seq_len(nrow(buffer.list))) {
    list.names <- rbind(list.names, as.character(buffer.list[j, 1]))
    list.names <- rbind(list.names, as.character(buffer.list[j, 2]))
  }
  # occurrences of each name inside the current window
  table.names <- table(list.names)
  # capture the median number of appearances per name
  median.list <- rbind(median.list, round(median(table.names), 2))
  print('Median Calculated.')
  # cat() interprets the \n escapes; print() would emit them literally
  cat('\n#################################################################\n')
}
# write the final output to the output file
write.table(median.list, file = output.file, row.names = FALSE, col.names = FALSE, sep = ",")
|
###############################################################################
# DESCRIPTION
# This R file reads in output from a DESeq2 run and plots an UpSet diagram
# showing significant and differentially expressed genes in multiple
# conditions.
#
# AUTHOR
# Akshay Paropkari
#
# VERSION
# 0.0.9
###############################################################################
# library imports
library(reshape2)
library(tools)
library(UpSetR)
# note that all deseq2 tab-separated output files must end with "_deseq2_output.tsv"
# and be available inside the input directory
input.folder <- "<INPUT_DIRECTORY>"  # CHANGE THIS TO YOUR INPUT FOLDER LOCATION
setwd(input.folder)
# anchored regex: the original pattern "*_deseq2_output.tsv" was a file glob,
# which as a regex has a dangling "*" and an unescaped "."
deseq.res.files <- list.files(path = ".", pattern = "_deseq2_output\\.tsv$",
                              full.names = TRUE)
# initiate list to save all significant ORFs
final.list <- list()
# label for the ORF set collected below; used to build output file names
# (defined before the loop so it exists even when no input files are found)
expr <- "upregulated"
# iterate through all files in input folder, and calculate significantly
# upregulated ORFs
for (f in deseq.res.files) {
  message(paste0("PROCESSING ", f))
  # take the last path component (handles "./name" as well as bare "name")
  parts <- unlist(strsplit(x = f, split = "/"))
  mutant <- parts[length(parts)]
  # mutant name is the file-name prefix before the first underscore
  mutant <- unlist(strsplit(mutant, split = "_"))[1]
  message(paste0("FORMATTING ", toTitleCase(mutant), "-KO DATA SET"))
  deseq.data <- read.csv(file = f, sep = "\t", header = TRUE, row.names = 1)
  # consider entries with ORF ids only
  # ("^orf" — the original "^orf*" also matched ids starting with just "or")
  deseq.data <- deseq.data[grep(pattern = "^orf", x = rownames(deseq.data)), ]
  # remove ORFs with NA values in any column
  deseq.data <- deseq.data[complete.cases(deseq.data), ]
  # collect significant and upregulated ORFs (padj < 0.05, positive log2FC)
  message(paste0("COLLECTING SIGNIFICANTLY UPREGULATED ORFs IN ",
                 toTitleCase(mutant), "-KO DATA SET"))
  orf.vec <- row.names(deseq.data)[which(deseq.data$padj < 0.05 & deseq.data$log2FoldChange > 0)]
  # significant and downregulated ORFs (alternative selection, disabled)
  # expr <- "downregulated"
  # orf.vec <- row.names(deseq.data)[which(deseq.data$padj < 0.05 & deseq.data$log2FoldChange < 0)]
  # all significant ORFs regardless of direction (alternative, disabled)
  # expr <- "differentially_expressed"
  # orf.vec <- row.names(deseq.data)[which(deseq.data$padj < 0.05)]
  # save ORF list into the result list, keyed "<Mutant>-KO"
  message(paste0("SAVING ORF LIST FOR ", toTitleCase(mutant), "-KO DATA SET"))
  name <- paste0(toTitleCase(mutant), "-KO")
  final.list[[name]] <- orf.vec
  message("==========================================================")
}
# save ORF membership table (one column per KO data set) to file
fnh <- paste0(input.folder, "/", expr, "_orf_list.tsv")
message(paste0("SAVING ORF LIST DATA TO FILE AT ", fnh))
final.list.df <- melt(data = final.list, value.name = "orf.ids")
final.list.df <- dcast(data = final.list.df, formula = orf.ids ~ L1,
                       value.var = "orf.ids")
final.list.df$orf.ids <- NULL
# number of data sets in which each ORF is significant
final.list.df$row_sum <- apply(X = final.list.df, MARGIN = 1,
                               FUN = function(x) length(which(!is.na(x))))
write.table(x = final.list.df, file = fnh, quote = FALSE, sep = "\t", row.names = FALSE)
# plot upset plot and save as JPEG file
# NO SPACES in file name or output folder
image.fnh <- paste0(input.folder, "/", expr, "_orf_upset_plot.jpeg")
message(paste0("SAVING UPSET PLOT TO FILE AT ", image.fnh))
jpeg(filename = image.fnh, width = 21, height = 9, units = "in", res = 300)
upset(data = fromList(final.list), nsets = 100, nintersects = NA,
      order.by = "freq", point.size = 4,
      mainbar.y.label = "Intersection set size",
      sets.x.label = "Set size",
      text.scale = c(2.5, 0, 2, 1.3, 2, 1.75))
dev.off()
| /R/upsetR_venn_alternative_example.R | permissive | akshayparopkari/kadambari | R | false | false | 3,898 | r | ###############################################################################
# DESCRIPTION
# This R file reads in output from a DESeq2 run and plots an UpSet diagram
# showing significant and differentially expressed genes in multiple
# conditions.
#
# AUTHOR
# Akshay Paropkari
#
# VERSION
# 0.0.9
###############################################################################
# library imports
library(reshape2)
library(tools)
library(UpSetR)
# note that all deseq2 tab-separated output files must end with "_deseq2_output.tsv"
# and be available inside the input directory
input.folder <- "<INPUT_DIRECTORY>"  # CHANGE THIS TO YOUR INPUT FOLDER LOCATION
setwd(input.folder)
# anchored regex: the original pattern "*_deseq2_output.tsv" was a file glob,
# which as a regex has a dangling "*" and an unescaped "."
deseq.res.files <- list.files(path = ".", pattern = "_deseq2_output\\.tsv$",
                              full.names = TRUE)
# initiate list to save all significant ORFs
final.list <- list()
# label for the ORF set collected below; used to build output file names
# (defined before the loop so it exists even when no input files are found)
expr <- "upregulated"
# iterate through all files in input folder, and calculate significantly
# upregulated ORFs
for (f in deseq.res.files) {
  message(paste0("PROCESSING ", f))
  # take the last path component (handles "./name" as well as bare "name")
  parts <- unlist(strsplit(x = f, split = "/"))
  mutant <- parts[length(parts)]
  # mutant name is the file-name prefix before the first underscore
  mutant <- unlist(strsplit(mutant, split = "_"))[1]
  message(paste0("FORMATTING ", toTitleCase(mutant), "-KO DATA SET"))
  deseq.data <- read.csv(file = f, sep = "\t", header = TRUE, row.names = 1)
  # consider entries with ORF ids only
  # ("^orf" — the original "^orf*" also matched ids starting with just "or")
  deseq.data <- deseq.data[grep(pattern = "^orf", x = rownames(deseq.data)), ]
  # remove ORFs with NA values in any column
  deseq.data <- deseq.data[complete.cases(deseq.data), ]
  # collect significant and upregulated ORFs (padj < 0.05, positive log2FC)
  message(paste0("COLLECTING SIGNIFICANTLY UPREGULATED ORFs IN ",
                 toTitleCase(mutant), "-KO DATA SET"))
  orf.vec <- row.names(deseq.data)[which(deseq.data$padj < 0.05 & deseq.data$log2FoldChange > 0)]
  # significant and downregulated ORFs (alternative selection, disabled)
  # expr <- "downregulated"
  # orf.vec <- row.names(deseq.data)[which(deseq.data$padj < 0.05 & deseq.data$log2FoldChange < 0)]
  # all significant ORFs regardless of direction (alternative, disabled)
  # expr <- "differentially_expressed"
  # orf.vec <- row.names(deseq.data)[which(deseq.data$padj < 0.05)]
  # save ORF list into the result list, keyed "<Mutant>-KO"
  message(paste0("SAVING ORF LIST FOR ", toTitleCase(mutant), "-KO DATA SET"))
  name <- paste0(toTitleCase(mutant), "-KO")
  final.list[[name]] <- orf.vec
  message("==========================================================")
}
# save ORF membership table (one column per KO data set) to file
fnh <- paste0(input.folder, "/", expr, "_orf_list.tsv")
message(paste0("SAVING ORF LIST DATA TO FILE AT ", fnh))
final.list.df <- melt(data = final.list, value.name = "orf.ids")
final.list.df <- dcast(data = final.list.df, formula = orf.ids ~ L1,
                       value.var = "orf.ids")
final.list.df$orf.ids <- NULL
# number of data sets in which each ORF is significant
final.list.df$row_sum <- apply(X = final.list.df, MARGIN = 1,
                               FUN = function(x) length(which(!is.na(x))))
write.table(x = final.list.df, file = fnh, quote = FALSE, sep = "\t", row.names = FALSE)
# plot upset plot and save as JPEG file
# NO SPACES in file name or output folder
image.fnh <- paste0(input.folder, "/", expr, "_orf_upset_plot.jpeg")
message(paste0("SAVING UPSET PLOT TO FILE AT ", image.fnh))
jpeg(filename = image.fnh, width = 21, height = 9, units = "in", res = 300)
upset(data = fromList(final.list), nsets = 100, nintersects = NA,
      order.by = "freq", point.size = 4,
      mainbar.y.label = "Intersection set size",
      sets.x.label = "Set size",
      text.scale = c(2.5, 0, 2, 1.3, 2, 1.75))
dev.off()
|
# MAGeCK-generated plotting script: ranked RRA-score / p-value plots for the
# "2_vs_0" comparison, written to S1A_c0_t2.pdf.
pdf(file='S1A_c0_t2.pdf',width=4.5,height=4.5);
# gene_summary table produced by MAGeCK (one row per gene)
gstable=read.table('S1A_c0_t2.gene_summary.txt',header=T)
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# column index of gstable holding the negative-selection RRA score
startindex=3
# outputfile='__OUTPUT_FILE__'
# genes to highlight in the ranked plots below
targetgenelist=c("ROSA26","CTRL","PROM1","LRIG1","AAVS1","CCR5","mKate2","KRT20","SOX9","EGFP")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='2_vs_0 neg.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# fixed hex palette (brewer-style) so RColorBrewer is not required
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
# Plot values (RRA scores or p values) that are already sorted ascending,
# as a line on a log-scaled, inverted y axis, and mark the genes in `tglist`
# with coloured points plus a legend. Reads the global `colors` palette.
#
# val    : named numeric vector (names are gene ids), pre-sorted by caller.
# tglist : character vector of gene names to highlight (may be empty).
# ...    : further arguments forwarded to plot() (xlab, ylab, main, ...).
plotrankedvalues <- function(val, tglist, ...) {
  plot(val, log = 'y', ylim = c(max(val), min(val)), type = 'l', lwd = 2, ...)
  if (length(tglist) > 0) {
    for (i in seq_along(tglist)) {
      targetgene <- tglist[i]
      tx <- which(names(val) == targetgene)
      ty <- val[targetgene]
      # 1-based modular colour index: the original `i %% length(colors)`
      # yields the invalid subscript 0 whenever i is a multiple of
      # length(colors); identical for every index actually reached here
      points(tx, ty, col = colors[((i - 1) %% length(colors)) + 1], cex = 2, pch = 20)
    }
    legend('topright', tglist, pch = 20, pt.cex = 2, cex = 1, col = colors)
  }
}
# Scatter-plot `val` after shuffling it, highlighting the genes in
# `targetgenelist`. Out of 20 random shuffles, keep the one whose highlighted
# genes are most evenly spread (largest minimum gap between their positions)
# and do not all sit in the top 10% of ranks. Reads the global `colors`
# palette. NOTE: uses sample(), so output depends on the RNG state unless the
# caller sets a seed.
plotrandvalues <- function(val, targetgenelist, ...) {
  # choose the shuffle with the best distance distribution
  mindiffvalue <- 0
  randval <- val
  for (i in 1:20) {
    randval0 <- sample(val)
    vindex <- sort(which(names(randval0) %in% targetgenelist))
    if (max(vindex) > 0.9 * length(val)) {
      next
    }
    mindiffind <- min(diff(vindex))
    if (mindiffind > mindiffvalue) {
      mindiffvalue <- mindiffind
      randval <- randval0
    }
  }
  plot(randval, log = 'y', ylim = c(max(randval), min(randval)), pch = 20, col = 'grey', ...)
  if (length(targetgenelist) > 0) {
    for (i in seq_along(targetgenelist)) {
      targetgene <- targetgenelist[i]
      tx <- which(names(randval) == targetgene)
      ty <- randval[targetgene]
      # 1-based modular colour index; the original `i %% length(colors)`
      # produced the invalid subscript 0 at multiples of length(colors),
      # and text() used colors[i], which becomes NA once i > length(colors)
      col.i <- colors[((i - 1) %% length(colors)) + 1]
      points(tx, ty, col = col.i, cex = 2, pch = 20)
      text(tx + 50, ty, targetgene, col = col.i)
    }
  }
}
# set.seed(1235)
# ranked plot of RRA scores (column `startindex` of gstable), named by gene id
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# ranked plot of p values (the column immediately after the RRA score)
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# ---------------------------------------------------------------------------
# Per-gene sgRNA read-count plots (MAGeCK-generated section, consolidated).
# The generator emitted ten byte-identical chunks differing only in the
# read-count data and gene name; they are folded into one helper below.
# Plot order and plotted values are unchanged.
# ---------------------------------------------------------------------------

# Plot the read counts of every sgRNA targeting one gene across samples,
# one line per sgRNA, on a log-scaled y axis. Reads the global `colors`.
# targetmat : list of numeric vectors, one per sgRNA (read count per sample)
# targetgene: gene symbol used in the plot title
# collabel  : sample labels drawn on the x axis
plot_sgrna_counts <- function(targetmat, targetgene, collabel) {
  # a pseudo-count of 1 keeps zero counts representable on the log axis
  targetmatvec <- unlist(targetmat) + 1
  yrange <- range(targetmatvec[targetmatvec > 0])
  for (i in seq_along(targetmat)) {
    vali <- targetmat[[i]] + 1
    # 1-based modular colour index (the generated `i %% length(colors)`
    # would hit the invalid subscript 0 at multiples of length(colors))
    sg.col <- colors[((i - 1) %% length(colors)) + 1]
    if (i == 1) {
      plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
           main = paste('sgRNAs in', targetgene), ylab = 'Read counts',
           xlab = 'Samples', xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
           col = sg.col, xaxt = 'n', log = 'y')
      axis(1, at = seq_along(vali), labels = collabel, las = 2)
    } else {
      lines(seq_along(vali), vali, type = 'b', pch = 20, col = sg.col)
    }
  }
}

# fixed hex palette (defined identically in every generated chunk)
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")

plot_sgrna_counts(list(c(11699.459164920907,6954.034192576033),c(5522.919636024938,3374.896636630584),c(4756.43239051896,2915.969751664048),c(4066.593869563581,2635.446079352064),c(3121.5403646650025,2111.3097442428298),c(5465.643666031085,3495.472601045384),c(4406.8805148211795,2749.8702088477417),c(3345.5904825821344,2315.5506635576953),c(1322.401071916906,874.7909254992144),c(3790.321543710877,2311.859562606222)), "ROSA26", collabel)
plot_sgrna_counts(list(c(3747.3645662154872,2671.126721882974),c(3412.1316830161695,2743.718373928619),c(12389.297685876287,8073.668147856322),c(2046.7736336038736,1473.9796466217424),c(4238.42177954514,2928.273421502293),c(6259.9266028576085,4338.273984965162),c(4529.855391572688,3260.472507134906),c(4283.905638069671,3020.5509452891297),c(4191.25333366785,2907.3571827772766),c(6801.521436770074,5119.557019693715)), "CTRL", collabel)
plot_sgrna_counts(list(c(3710.303644454759,2373.377911797447),c(6274.245595356072,4611.415455374199),c(3129.9633014288042,1909.5295588956128),c(5714.120300563242,3668.9543457646378),c(2649.8559058920932,1695.445703710151),c(7861.96917533274,5033.431330826)), "PROM1", collabel)
plot_sgrna_counts(list(c(4214.837556606495,2896.283879922856),c(2638.0637944227706,1378.011021883432),c(2771.9884889672217,2055.9432299707273),c(5557.453676756526,3463.4830594659475),c(7455.141329641106,4850.106650236151),c(2269.1391641682453,1274.6601952421745)), "LRIG1", collabel)
plot_sgrna_counts(list(c(3973.0992714853796,2383.2208476680426),c(4298.224630568135,2756.022043766864),c(2612.794984131365,1645.000657373347),c(1599.5156914459901,1136.8590930538314),c(3209.1389070085424,2060.8646979060254),c(1177.5265595795124,803.4296404373938),c(2504.1390998783195,1546.5712986673875),c(4423.726388348784,2929.5037884861176),c(1310.6089604475833,804.6600074212183),c(2184.0675028538453,1350.9429482392932)), "AAVS1", collabel)
plot_sgrna_counts(list(c(2539.5154342862875,1589.6341431012447),c(4626.719164356411,3243.2473693613633),c(2360.949174893686,1797.566163367584),c(1634.8920258539583,1103.63918449057),c(3341.379014200233,2255.262681350295),c(2742.5082102939145,1502.2780872497058),c(5687.166902919076,3958.0905869633934),c(3402.8664525759873,2281.1003880106095),c(2310.4115543108746,1449.3723069452526),c(3197.34679553922,2308.1684616547486)), "CCR5", collabel)
plot_sgrna_counts(list(c(2788.834362494825,2149.451120741389),c(3599.120879172573,2821.2314939095622),c(3499.73022535971,2434.896260988671),c(2635.53691339363,1884.922219219123),c(2029.0854663998896,1318.9534066598562),c(5492.597063675251,3846.1271914353647)), "mKate2", collabel)
plot_sgrna_counts(list(c(2366.0029369519675,1657.304327211592),c(3864.4433872323343,2796.624154233072),c(3144.282293927268,2094.084606469287),c(1680.3758843784888,1056.8852391052394),c(5709.0665385049615,4152.488570407663),c(2106.5764846268676,1534.2676288291425)), "KRT20", collabel)
plot_sgrna_counts(list(c(2228.709067701996,4835.342246430257),c(1591.0927546821881,4960.839678780355),c(839.7667953510544,1253.7439565171583),c(892.8312969630066,2454.582132729863),c(1384.7308039690404,5465.2901421483975),c(2616.1641588368852,3574.2160880101515)), "SOX9", collabel)
plot_sgrna_counts(list(c(810.2865166777475,12371.340022355276),c(1255.0175778064904,18750.79283348527),c(827.1323902053514,10299.40202159483),c(721.8456806578271,7404.348508655798),c(1020.8599357727962,16052.598037958158),c(2694.4974707402434,13018.51305584696)), "EGFP", collabel)
#
#
# Second ranked-plot section: positive-selection ("pos.") scores for the same
# "2_vs_0" comparison, drawn on the already-open PDF device.
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# column index of gstable holding the positive-selection RRA score
startindex=9
# outputfile='__OUTPUT_FILE__'
# genes to highlight in the ranked plots below
targetgenelist=c("EGFP","SOX9","KRT20","AAVS1","ROSA26","mKate2","LRIG1","CCR5","CTRL","PROM1")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='2_vs_0 pos.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# fixed hex palette (brewer-style) so RColorBrewer is not required
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
# Plot values (RRA scores or p values) that are already sorted ascending,
# as a line on a log-scaled, inverted y axis, and mark the genes in `tglist`
# with coloured points plus a legend. Reads the global `colors` palette.
#
# val    : named numeric vector (names are gene ids), pre-sorted by caller.
# tglist : character vector of gene names to highlight (may be empty).
# ...    : further arguments forwarded to plot() (xlab, ylab, main, ...).
plotrankedvalues <- function(val, tglist, ...) {
  plot(val, log = 'y', ylim = c(max(val), min(val)), type = 'l', lwd = 2, ...)
  if (length(tglist) > 0) {
    for (i in seq_along(tglist)) {
      targetgene <- tglist[i]
      tx <- which(names(val) == targetgene)
      ty <- val[targetgene]
      # 1-based modular colour index: the original `i %% length(colors)`
      # yields the invalid subscript 0 whenever i is a multiple of
      # length(colors); identical for every index actually reached here
      points(tx, ty, col = colors[((i - 1) %% length(colors)) + 1], cex = 2, pch = 20)
    }
    legend('topright', tglist, pch = 20, pt.cex = 2, cex = 1, col = colors)
  }
}
# Scatter-plot `val` after shuffling it, highlighting the genes in
# `targetgenelist`. Out of 20 random shuffles, keep the one whose highlighted
# genes are most evenly spread (largest minimum gap between their positions)
# and do not all sit in the top 10% of ranks. Reads the global `colors`
# palette. NOTE: uses sample(), so output depends on the RNG state unless the
# caller sets a seed.
plotrandvalues <- function(val, targetgenelist, ...) {
  # choose the shuffle with the best distance distribution
  mindiffvalue <- 0
  randval <- val
  for (i in 1:20) {
    randval0 <- sample(val)
    vindex <- sort(which(names(randval0) %in% targetgenelist))
    if (max(vindex) > 0.9 * length(val)) {
      next
    }
    mindiffind <- min(diff(vindex))
    if (mindiffind > mindiffvalue) {
      mindiffvalue <- mindiffind
      randval <- randval0
    }
  }
  plot(randval, log = 'y', ylim = c(max(randval), min(randval)), pch = 20, col = 'grey', ...)
  if (length(targetgenelist) > 0) {
    for (i in seq_along(targetgenelist)) {
      targetgene <- targetgenelist[i]
      tx <- which(names(randval) == targetgene)
      ty <- randval[targetgene]
      # 1-based modular colour index; the original `i %% length(colors)`
      # produced the invalid subscript 0 at multiples of length(colors),
      # and text() used colors[i], which becomes NA once i > length(colors)
      col.i <- colors[((i - 1) %% length(colors)) + 1]
      points(tx, ty, col = col.i, cex = 2, pch = 20)
      text(tx + 50, ty, targetgene, col = col.i)
    }
  }
}
# set.seed(1235)
# ranked plot of RRA scores (column `startindex` of gstable), named by gene id
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# ranked plot of p values (the column immediately after the RRA score)
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# ---------------------------------------------------------------------------
# Per-gene sgRNA read-count plots for the positive-selection section
# (MAGeCK-generated, consolidated). The generator emitted byte-identical
# chunks differing only in the read-count data and gene name; they are
# folded into one helper below. Plot order and plotted values are unchanged.
# ---------------------------------------------------------------------------

# Plot the read counts of every sgRNA targeting one gene across samples,
# one line per sgRNA, on a log-scaled y axis. Reads the global `colors`.
# targetmat : list of numeric vectors, one per sgRNA (read count per sample)
# targetgene: gene symbol used in the plot title
# collabel  : sample labels drawn on the x axis
plot_sgrna_counts <- function(targetmat, targetgene, collabel) {
  # a pseudo-count of 1 keeps zero counts representable on the log axis
  targetmatvec <- unlist(targetmat) + 1
  yrange <- range(targetmatvec[targetmatvec > 0])
  for (i in seq_along(targetmat)) {
    vali <- targetmat[[i]] + 1
    # 1-based modular colour index (the generated `i %% length(colors)`
    # would hit the invalid subscript 0 at multiples of length(colors))
    sg.col <- colors[((i - 1) %% length(colors)) + 1]
    if (i == 1) {
      plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
           main = paste('sgRNAs in', targetgene), ylab = 'Read counts',
           xlab = 'Samples', xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
           col = sg.col, xaxt = 'n', log = 'y')
      axis(1, at = seq_along(vali), labels = collabel, las = 2)
    } else {
      lines(seq_along(vali), vali, type = 'b', pch = 20, col = sg.col)
    }
  }
}

# fixed hex palette (defined identically in every generated chunk)
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")

plot_sgrna_counts(list(c(810.2865166777475,12371.340022355276),c(1255.0175778064904,18750.79283348527),c(827.1323902053514,10299.40202159483),c(721.8456806578271,7404.348508655798),c(1020.8599357727962,16052.598037958158),c(2694.4974707402434,13018.51305584696)), "EGFP", collabel)
plot_sgrna_counts(list(c(2228.709067701996,4835.342246430257),c(1591.0927546821881,4960.839678780355),c(839.7667953510544,1253.7439565171583),c(892.8312969630066,2454.582132729863),c(1384.7308039690404,5465.2901421483975),c(2616.1641588368852,3574.2160880101515)), "SOX9", collabel)
plot_sgrna_counts(list(c(2366.0029369519675,1657.304327211592),c(3864.4433872323343,2796.624154233072),c(3144.282293927268,2094.084606469287),c(1680.3758843784888,1056.8852391052394),c(5709.0665385049615,4152.488570407663),c(2106.5764846268676,1534.2676288291425)), "KRT20", collabel)
plot_sgrna_counts(list(c(3973.0992714853796,2383.2208476680426),c(4298.224630568135,2756.022043766864),c(2612.794984131365,1645.000657373347),c(1599.5156914459901,1136.8590930538314),c(3209.1389070085424,2060.8646979060254),c(1177.5265595795124,803.4296404373938),c(2504.1390998783195,1546.5712986673875),c(4423.726388348784,2929.5037884861176),c(1310.6089604475833,804.6600074212183),c(2184.0675028538453,1350.9429482392932)), "AAVS1", collabel)
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(11699.459164920907,6954.034192576033),c(5522.919636024938,3374.896636630584),c(4756.43239051896,2915.969751664048),c(4066.593869563581,2635.446079352064),c(3121.5403646650025,2111.3097442428298),c(5465.643666031085,3495.472601045384),c(4406.8805148211795,2749.8702088477417),c(3345.5904825821344,2315.5506635576953),c(1322.401071916906,874.7909254992144),c(3790.321543710877,2311.859562606222))
targetgene="ROSA26"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(2788.834362494825,2149.451120741389),c(3599.120879172573,2821.2314939095622),c(3499.73022535971,2434.896260988671),c(2635.53691339363,1884.922219219123),c(2029.0854663998896,1318.9534066598562),c(5492.597063675251,3846.1271914353647))
targetgene="mKate2"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(4214.837556606495,2896.283879922856),c(2638.0637944227706,1378.011021883432),c(2771.9884889672217,2055.9432299707273),c(5557.453676756526,3463.4830594659475),c(7455.141329641106,4850.106650236151),c(2269.1391641682453,1274.6601952421745))
targetgene="LRIG1"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(2539.5154342862875,1589.6341431012447),c(4626.719164356411,3243.2473693613633),c(2360.949174893686,1797.566163367584),c(1634.8920258539583,1103.63918449057),c(3341.379014200233,2255.262681350295),c(2742.5082102939145,1502.2780872497058),c(5687.166902919076,3958.0905869633934),c(3402.8664525759873,2281.1003880106095),c(2310.4115543108746,1449.3723069452526),c(3197.34679553922,2308.1684616547486))
targetgene="CCR5"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(3747.3645662154872,2671.126721882974),c(3412.1316830161695,2743.718373928619),c(12389.297685876287,8073.668147856322),c(2046.7736336038736,1473.9796466217424),c(4238.42177954514,2928.273421502293),c(6259.9266028576085,4338.273984965162),c(4529.855391572688,3260.472507134906),c(4283.905638069671,3020.5509452891297),c(4191.25333366785,2907.3571827772766),c(6801.521436770074,5119.557019693715))
targetgene="CTRL"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(3710.303644454759,2373.377911797447),c(6274.245595356072,4611.415455374199),c(3129.9633014288042,1909.5295588956128),c(5714.120300563242,3668.9543457646378),c(2649.8559058920932,1695.445703710151),c(7861.96917533274,5033.431330826))
targetgene="PROM1"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# Close the PDF device that collected all plots above.
dev.off()
# Weave the companion Sweave report and compile the resulting TeX to PDF.
Sweave("S1A_c0_t2_summary.Rnw");
library(tools);
texi2dvi("S1A_c0_t2_summary.tex",pdf=TRUE);
pdf(file='S1A_c0_t2.pdf',width=4.5,height=4.5);
# Load the MAGeCK gene-summary table; columns are read by numeric index below.
gstable=read.table('S1A_c0_t2.gene_summary.txt',header=T)
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# Column index of the negative-selection RRA score in the gene summary table.
startindex=3
# outputfile='__OUTPUT_FILE__'
# Genes to highlight in the rank plots that follow.
targetgenelist=c("ROSA26","CTRL","PROM1","LRIG1","AAVS1","CCR5","mKate2","KRT20","SOX9","EGFP")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='2_vs_0 neg.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (Set1 + Set2 + Set3 hex codes) used by all plots.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
# Plot ranked scores (RRA scores or p values) as a line on a reversed log
# y axis, highlighting the genes named in `tglist`.
#
# val    : named numeric vector of positive scores (names are gene ids),
#          already sorted by the caller.
# tglist : character vector of gene names to mark with colored points.
# ...    : forwarded to plot() (xlab, ylab, main, ...).
# Reads the global `colors` palette defined earlier in this file.
plotrankedvalues<-function(val, tglist, ...){
  # ylim = c(max, min) flips the axis so the smallest (best) scores sit on top.
  plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
  if(length(tglist)>0){
    for(i in seq_along(tglist)){
      targetgene=tglist[i];
      tx=which(names(val)==targetgene);ty=val[targetgene];
      # ((i-1) %% n) + 1 maps i into 1..n; the original `i %% n` produced the
      # invalid index 0 (empty color vector) whenever i was a multiple of n.
      points(tx,ty,col=colors[((i-1) %% length(colors))+1],cex=2,pch=20)
      # text(tx+50,ty,targetgene,col=colors[i])
    }
    legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors)
  }
}
# Scatter-plot `val` in a randomized order (log y axis, reversed), labeling
# the genes in `targetgenelist`. The permutation is chosen so highlighted
# genes are spread apart and their labels stay inside the plot region.
#
# val            : named numeric vector of positive scores.
# targetgenelist : character vector of gene names to highlight and label.
# ...            : forwarded to plot().
# Reads the global `colors` palette defined earlier in this file.
plotrandvalues<-function(val,targetgenelist, ...){
  # Try up to 20 random permutations and keep the one whose highlighted genes
  # have the largest minimal gap between their positions.
  mindiffvalue=0;
  randval=val;
  for(i in 1:20){
    randval0=sample(val)
    vindex=sort(which(names(randval0) %in% targetgenelist))
    if(max(vindex)>0.9*length(val)){
      # Reject layouts with a highlighted gene near the right edge, where
      # its label (drawn at tx+50) would fall outside the plot.
      next;
    }
    mindiffind=min(diff(vindex));
    if (mindiffind > mindiffvalue){
      mindiffvalue=mindiffind;
      randval=randval0;
    }
  }
  plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
  if(length(targetgenelist)>0){
    for(i in seq_along(targetgenelist)){
      targetgene=targetgenelist[i];
      tx=which(names(randval)==targetgene);ty=randval[targetgene];
      # ((i-1) %% n) + 1 keeps the palette index in 1..n; the original
      # `i %% n` hit the invalid index 0 when i was a multiple of n.
      ci=((i-1) %% length(colors))+1
      points(tx,ty,col=colors[ci],cex=2,pch=20)
      # Use the same wrapped index for the label: the original `colors[i]`
      # went out of bounds (NA color) beyond the palette length.
      text(tx+50,ty,targetgene,col=colors[ci])
    }
  }
}
# set.seed(1235)
# Rank plot of the negative-selection RRA scores (column `startindex`).
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# Rank plot of the corresponding p values (the column after the RRA score).
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# ==== Per-gene sgRNA read-count panels for this comparison (auto-generated) ====
# Same pattern as earlier in the file: one panel per gene, one line per sgRNA,
# normalized read counts (+1 pseudocount) on a log-scaled y axis.
# ---- ROSA26 ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(11699.459164920907,6954.034192576033),c(5522.919636024938,3374.896636630584),c(4756.43239051896,2915.969751664048),c(4066.593869563581,2635.446079352064),c(3121.5403646650025,2111.3097442428298),c(5465.643666031085,3495.472601045384),c(4406.8805148211795,2749.8702088477417),c(3345.5904825821344,2315.5506635576953),c(1322.401071916906,874.7909254992144),c(3790.321543710877,2311.859562606222))
targetgene="ROSA26"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# ---- CTRL (non-targeting controls; same plotting pattern) ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(3747.3645662154872,2671.126721882974),c(3412.1316830161695,2743.718373928619),c(12389.297685876287,8073.668147856322),c(2046.7736336038736,1473.9796466217424),c(4238.42177954514,2928.273421502293),c(6259.9266028576085,4338.273984965162),c(4529.855391572688,3260.472507134906),c(4283.905638069671,3020.5509452891297),c(4191.25333366785,2907.3571827772766),c(6801.521436770074,5119.557019693715))
targetgene="CTRL"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# ---- PROM1 (same plotting pattern) ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(3710.303644454759,2373.377911797447),c(6274.245595356072,4611.415455374199),c(3129.9633014288042,1909.5295588956128),c(5714.120300563242,3668.9543457646378),c(2649.8559058920932,1695.445703710151),c(7861.96917533274,5033.431330826))
targetgene="PROM1"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# ---- LRIG1 (same plotting pattern) ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(4214.837556606495,2896.283879922856),c(2638.0637944227706,1378.011021883432),c(2771.9884889672217,2055.9432299707273),c(5557.453676756526,3463.4830594659475),c(7455.141329641106,4850.106650236151),c(2269.1391641682453,1274.6601952421745))
targetgene="LRIG1"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# ---- AAVS1 (same plotting pattern) ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(3973.0992714853796,2383.2208476680426),c(4298.224630568135,2756.022043766864),c(2612.794984131365,1645.000657373347),c(1599.5156914459901,1136.8590930538314),c(3209.1389070085424,2060.8646979060254),c(1177.5265595795124,803.4296404373938),c(2504.1390998783195,1546.5712986673875),c(4423.726388348784,2929.5037884861176),c(1310.6089604475833,804.6600074212183),c(2184.0675028538453,1350.9429482392932))
targetgene="AAVS1"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# ---- CCR5 (same plotting pattern) ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(2539.5154342862875,1589.6341431012447),c(4626.719164356411,3243.2473693613633),c(2360.949174893686,1797.566163367584),c(1634.8920258539583,1103.63918449057),c(3341.379014200233,2255.262681350295),c(2742.5082102939145,1502.2780872497058),c(5687.166902919076,3958.0905869633934),c(3402.8664525759873,2281.1003880106095),c(2310.4115543108746,1449.3723069452526),c(3197.34679553922,2308.1684616547486))
targetgene="CCR5"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# ---- mKate2 (same plotting pattern) ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(2788.834362494825,2149.451120741389),c(3599.120879172573,2821.2314939095622),c(3499.73022535971,2434.896260988671),c(2635.53691339363,1884.922219219123),c(2029.0854663998896,1318.9534066598562),c(5492.597063675251,3846.1271914353647))
targetgene="mKate2"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# ---- KRT20 (same plotting pattern) ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(2366.0029369519675,1657.304327211592),c(3864.4433872323343,2796.624154233072),c(3144.282293927268,2094.084606469287),c(1680.3758843784888,1056.8852391052394),c(5709.0665385049615,4152.488570407663),c(2106.5764846268676,1534.2676288291425))
targetgene="KRT20"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# ---- SOX9 (same plotting pattern) ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(2228.709067701996,4835.342246430257),c(1591.0927546821881,4960.839678780355),c(839.7667953510544,1253.7439565171583),c(892.8312969630066,2454.582132729863),c(1384.7308039690404,5465.2901421483975),c(2616.1641588368852,3574.2160880101515))
targetgene="SOX9"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# ---- EGFP (same plotting pattern) ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(810.2865166777475,12371.340022355276),c(1255.0175778064904,18750.79283348527),c(827.1323902053514,10299.40202159483),c(721.8456806578271,7404.348508655798),c(1020.8599357727962,16052.598037958158),c(2694.4974707402434,13018.51305584696))
targetgene="EGFP"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# Column index of the positive-selection RRA score in the gene summary table.
startindex=9
# outputfile='__OUTPUT_FILE__'
# Genes to highlight in the positive-selection rank plots that follow.
targetgenelist=c("EGFP","SOX9","KRT20","AAVS1","ROSA26","mKate2","LRIG1","CCR5","CTRL","PROM1")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='2_vs_0 pos.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (Set1 + Set2 + Set3 hex codes) used by all plots.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
# Plot ranked scores (RRA scores or p values) as a line on a reversed log
# y axis, highlighting the genes named in `tglist`.
# (Auto-generated redefinition; identical contract to the earlier copy.)
#
# val    : named numeric vector of positive scores (names are gene ids),
#          already sorted by the caller.
# tglist : character vector of gene names to mark with colored points.
# ...    : forwarded to plot() (xlab, ylab, main, ...).
# Reads the global `colors` palette defined earlier in this file.
plotrankedvalues<-function(val, tglist, ...){
  # ylim = c(max, min) flips the axis so the smallest (best) scores sit on top.
  plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
  if(length(tglist)>0){
    for(i in seq_along(tglist)){
      targetgene=tglist[i];
      tx=which(names(val)==targetgene);ty=val[targetgene];
      # ((i-1) %% n) + 1 maps i into 1..n; the original `i %% n` produced the
      # invalid index 0 (empty color vector) whenever i was a multiple of n.
      points(tx,ty,col=colors[((i-1) %% length(colors))+1],cex=2,pch=20)
      # text(tx+50,ty,targetgene,col=colors[i])
    }
    legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors)
  }
}
# Scatter-plot `val` in a randomized order (log y axis, reversed), labeling
# the genes in `targetgenelist`. The permutation is chosen so highlighted
# genes are spread apart and their labels stay inside the plot region.
# (Auto-generated redefinition; identical contract to the earlier copy.)
#
# val            : named numeric vector of positive scores.
# targetgenelist : character vector of gene names to highlight and label.
# ...            : forwarded to plot().
# Reads the global `colors` palette defined earlier in this file.
plotrandvalues<-function(val,targetgenelist, ...){
  # Try up to 20 random permutations and keep the one whose highlighted genes
  # have the largest minimal gap between their positions.
  mindiffvalue=0;
  randval=val;
  for(i in 1:20){
    randval0=sample(val)
    vindex=sort(which(names(randval0) %in% targetgenelist))
    if(max(vindex)>0.9*length(val)){
      # Reject layouts with a highlighted gene near the right edge, where
      # its label (drawn at tx+50) would fall outside the plot.
      next;
    }
    mindiffind=min(diff(vindex));
    if (mindiffind > mindiffvalue){
      mindiffvalue=mindiffind;
      randval=randval0;
    }
  }
  plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
  if(length(targetgenelist)>0){
    for(i in seq_along(targetgenelist)){
      targetgene=targetgenelist[i];
      tx=which(names(randval)==targetgene);ty=randval[targetgene];
      # ((i-1) %% n) + 1 keeps the palette index in 1..n; the original
      # `i %% n` hit the invalid index 0 when i was a multiple of n.
      ci=((i-1) %% length(colors))+1
      points(tx,ty,col=colors[ci],cex=2,pch=20)
      # Use the same wrapped index for the label: the original `colors[i]`
      # went out of bounds (NA color) beyond the palette length.
      text(tx+50,ty,targetgene,col=colors[ci])
    }
  }
}
# set.seed(1235)
# Rank plot of the positive-selection RRA scores (column `startindex`).
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# Rank plot of the corresponding p values (the column after the RRA score).
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# ==== Per-gene sgRNA read-count panels (positive-selection ordering) ====
# ---- EGFP ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(810.2865166777475,12371.340022355276),c(1255.0175778064904,18750.79283348527),c(827.1323902053514,10299.40202159483),c(721.8456806578271,7404.348508655798),c(1020.8599357727962,16052.598037958158),c(2694.4974707402434,13018.51305584696))
targetgene="EGFP"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# +1 pseudocount keeps zero counts representable on the log-scaled axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA draws the plot frame; the remaining sgRNAs are overlaid lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# ---- SOX9 (same plotting pattern as above) ----
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(2228.709067701996,4835.342246430257),c(1591.0927546821881,4960.839678780355),c(839.7667953510544,1253.7439565171583),c(892.8312969630066,2454.582132729863),c(1384.7308039690404,5465.2901421483975),c(2616.1641588368852,3574.2160880101515))
targetgene="SOX9"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(2366.0029369519675,1657.304327211592),c(3864.4433872323343,2796.624154233072),c(3144.282293927268,2094.084606469287),c(1680.3758843784888,1056.8852391052394),c(5709.0665385049615,4152.488570407663),c(2106.5764846268676,1534.2676288291425))
targetgene="KRT20"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(3973.0992714853796,2383.2208476680426),c(4298.224630568135,2756.022043766864),c(2612.794984131365,1645.000657373347),c(1599.5156914459901,1136.8590930538314),c(3209.1389070085424,2060.8646979060254),c(1177.5265595795124,803.4296404373938),c(2504.1390998783195,1546.5712986673875),c(4423.726388348784,2929.5037884861176),c(1310.6089604475833,804.6600074212183),c(2184.0675028538453,1350.9429482392932))
targetgene="AAVS1"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(11699.459164920907,6954.034192576033),c(5522.919636024938,3374.896636630584),c(4756.43239051896,2915.969751664048),c(4066.593869563581,2635.446079352064),c(3121.5403646650025,2111.3097442428298),c(5465.643666031085,3495.472601045384),c(4406.8805148211795,2749.8702088477417),c(3345.5904825821344,2315.5506635576953),c(1322.401071916906,874.7909254992144),c(3790.321543710877,2311.859562606222))
targetgene="ROSA26"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(2788.834362494825,2149.451120741389),c(3599.120879172573,2821.2314939095622),c(3499.73022535971,2434.896260988671),c(2635.53691339363,1884.922219219123),c(2029.0854663998896,1318.9534066598562),c(5492.597063675251,3846.1271914353647))
targetgene="mKate2"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(4214.837556606495,2896.283879922856),c(2638.0637944227706,1378.011021883432),c(2771.9884889672217,2055.9432299707273),c(5557.453676756526,3463.4830594659475),c(7455.141329641106,4850.106650236151),c(2269.1391641682453,1274.6601952421745))
targetgene="LRIG1"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(2539.5154342862875,1589.6341431012447),c(4626.719164356411,3243.2473693613633),c(2360.949174893686,1797.566163367584),c(1634.8920258539583,1103.63918449057),c(3341.379014200233,2255.262681350295),c(2742.5082102939145,1502.2780872497058),c(5687.166902919076,3958.0905869633934),c(3402.8664525759873,2281.1003880106095),c(2310.4115543108746,1449.3723069452526),c(3197.34679553922,2308.1684616547486))
targetgene="CCR5"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(3747.3645662154872,2671.126721882974),c(3412.1316830161695,2743.718373928619),c(12389.297685876287,8073.668147856322),c(2046.7736336038736,1473.9796466217424),c(4238.42177954514,2928.273421502293),c(6259.9266028576085,4338.273984965162),c(4529.855391572688,3260.472507134906),c(4283.905638069671,3020.5509452891297),c(4191.25333366785,2907.3571827772766),c(6801.521436770074,5119.557019693715))
targetgene="CTRL"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(3710.303644454759,2373.377911797447),c(6274.245595356072,4611.415455374199),c(3129.9633014288042,1909.5295588956128),c(5714.120300563242,3668.9543457646378),c(2649.8559058920932,1695.445703710151),c(7861.96917533274,5033.431330826))
targetgene="PROM1"
collabel=c("S01_MNSC501_F1","S03_MNSC501_F3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
dev.off()
Sweave("S1A_c0_t2_summary.Rnw");
library(tools);
texi2dvi("S1A_c0_t2_summary.tex",pdf=TRUE);
|
\name{SSplotYield}
\alias{SSplotYield}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot yield and surplus production.
}
\description{
Plot yield and surplus production from Stock Synthesis output. Surplus
production is based on Walters et al. (2008).
}
\usage{
SSplotYield(replist, subplots = 1:2, add = FALSE, plot = TRUE, print =
FALSE, labels = c("Relative depletion", "Equilibrium yield (mt)",
"Total biomass (mt)", "Surplus production (mt)"),
col = "blue", lty = 1, lwd = 2, cex.main = 1,
pwidth = 7, pheight = 7, punits = "in", res = 300, ptsize = 12,
plotdir = "default", verbose = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{replist}{
list created by \code{SS_output}
}
\item{subplots}{
vector controlling which subplots to create
}
\item{add}{
add to existing plot? (not yet implemented)
}
\item{plot}{
plot to active plot device?
}
\item{print}{
print to PNG files?
}
\item{labels}{
vector of labels for plots (titles and axis labels)
}
\item{col}{
line color (only applied to equilibrium yield plot at this time)
}
\item{lty}{
line type (only applied to equilibrium yield plot at this time)
}
\item{lwd}{
line width (only applied to equilibrium yield plot at this time)
}
\item{cex.main}{
character expansion for plot titles
}
\item{pwidth}{
width of plot written to PNG file
}
\item{pheight}{
height of plot written to PNG file
}
\item{punits}{
units for PNG file
}
\item{res}{
resolution for PNG file
}
\item{ptsize}{
ptsize for PNG file
}
\item{plotdir}{
directory where PNG files will be written. by default it will
be the directory where the model was run.
}
\item{verbose}{
report progress to R GUI?
}
}
% \details{
% %% ~~ If necessary, more details than the description above ~~
% }
% \value{
% %% ~Describe the value returned
% %% If it is a LIST, use
% %% \item{comp1 }{Description of 'comp1'}
% %% \item{comp2 }{Description of 'comp2'}
% %% ...
% }
\references{ Walters, Hilborn, and Christensen, 2008, Surplus
production dynamics in declining and recovering fish populations.
Can. J. Fish. Aquat. Sci. 65: 2536-2551
}
\author{
Ian Stewart, Ian Taylor
}
% \note{
% %% ~~further notes~~
% }
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{SS_plots}}, \code{\link{SS_output}}
}
% \examples{
% }
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ hplot }
| /man/SSplotYield.Rd | no_license | tectronics/r4ss | R | false | false | 2,713 | rd | \name{SSplotYield}
\alias{SSplotYield}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot yield and surplus production.
}
\description{
Plot yield and surplus production from Stock Synthesis output. Surplus
production is based on Walters et al. (2008).
}
\usage{
SSplotYield(replist, subplots = 1:2, add = FALSE, plot = TRUE, print =
FALSE, labels = c("Relative depletion", "Equilibrium yield (mt)",
"Total biomass (mt)", "Surplus production (mt)"),
col = "blue", lty = 1, lwd = 2, cex.main = 1,
pwidth = 7, pheight = 7, punits = "in", res = 300, ptsize = 12,
plotdir = "default", verbose = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{replist}{
list created by \code{SS_output}
}
\item{subplots}{
vector controlling which subplots to create
}
\item{add}{
add to existing plot? (not yet implemented)
}
\item{plot}{
plot to active plot device?
}
\item{print}{
print to PNG files?
}
\item{labels}{
vector of labels for plots (titles and axis labels)
}
\item{col}{
line color (only applied to equilibrium yield plot at this time)
}
\item{lty}{
line type (only applied to equilibrium yield plot at this time)
}
\item{lwd}{
line width (only applied to equilibrium yield plot at this time)
}
\item{cex.main}{
character expansion for plot titles
}
\item{pwidth}{
width of plot written to PNG file
}
\item{pheight}{
height of plot written to PNG file
}
\item{punits}{
units for PNG file
}
\item{res}{
resolution for PNG file
}
\item{ptsize}{
ptsize for PNG file
}
\item{plotdir}{
directory where PNG files will be written. by default it will
be the directory where the model was run.
}
\item{verbose}{
report progress to R GUI?
}
}
% \details{
% %% ~~ If necessary, more details than the description above ~~
% }
% \value{
% %% ~Describe the value returned
% %% If it is a LIST, use
% %% \item{comp1 }{Description of 'comp1'}
% %% \item{comp2 }{Description of 'comp2'}
% %% ...
% }
\references{ Walters, Hilborn, and Christensen, 2008, Surplus
production dynamics in declining and recovering fish populations.
Can. J. Fish. Aquat. Sci. 65: 2536-2551
}
\author{
Ian Stewart, Ian Taylor
}
% \note{
% %% ~~further notes~~
% }
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{SS_plots}}, \code{\link{SS_output}}
}
% \examples{
% }
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ hplot }
|
# Empirical distribution and expectation of the maximum of several dice;
# maxdice() is defined in function2.txt.
source("function2.txt")
par(mfrow = c(1, 1))

# --- Maximum of 5 dice --------------------------------------------------
maximaOf5Dice <- maxdice(1000000, 5)
uniques <- unique(maximaOf5Dice, incomparables = FALSE)
randomVar <- sort(uniques)
# Empirical pmf: relative frequency of each observed maximum.
probability <- vapply(randomVar,
                      function(v) length(which(maximaOf5Dice == v)) / length(maximaOf5Dice),
                      numeric(1))
# Contribution of each value to E[X] = sum_x x * P(X = x).
expectation <- randomVar * probability
cbind(randomVar, probability, expectation)
sum(expectation)
#second part of B
# --- Maximum of 2 dice --------------------------------------------------
maximaOf2Dice <- maxdice(1000000, 2)
uniques <- unique(maximaOf2Dice, incomparables = FALSE)
randomVar <- sort(uniques)
probability <- vapply(randomVar,
                      function(v) length(which(maximaOf2Dice == v)) / length(maximaOf2Dice),
                      numeric(1))
expectation <- randomVar * probability
cbind(randomVar, probability)
sum(probability)
# Third entry of the empirical pmf of the 2-dice maximum.
probability[[3]]
| /Assignment 2/Excersises/24b.R | no_license | rhmvu/VU-Statistics | R | false | false | 798 | r | source("function2.txt")
# Duplicate of the dice-maximum analysis; maxdice() comes from function2.txt.
par(mfrow = c(1, 1))

# --- Maximum of 5 dice --------------------------------------------------
maximaOf5Dice <- maxdice(1000000, 5)
uniques <- unique(maximaOf5Dice, incomparables = FALSE)
randomVar <- sort(uniques)
# Empirical pmf: relative frequency of each observed maximum.
probability <- vapply(randomVar,
                      function(v) length(which(maximaOf5Dice == v)) / length(maximaOf5Dice),
                      numeric(1))
expectation <- randomVar * probability
cbind(randomVar, probability, expectation)
sum(expectation)
#second part of B
# --- Maximum of 2 dice --------------------------------------------------
maximaOf2Dice <- maxdice(1000000, 2)
uniques <- unique(maximaOf2Dice, incomparables = FALSE)
randomVar <- sort(uniques)
probability <- vapply(randomVar,
                      function(v) length(which(maximaOf2Dice == v)) / length(maximaOf2Dice),
                      numeric(1))
expectation <- randomVar * probability
cbind(randomVar, probability)
sum(probability)
# Third entry of the empirical pmf of the 2-dice maximum.
probability[[3]]
|
utils::globalVariables('biocLite')
#' Install package if necessary
#'
#' Checks whether \code{pkg} can be loaded; when it cannot, sources
#' Bioconductor's \code{biocLite.R} installer script and installs the
#' package with \code{biocLite()}.  Warnings are promoted to errors
#' (\code{warn = 2}) for the duration of the installation, and the previous
#' setting is restored on exit.
#'
#' @param pkg package
#' @export
install_package_if_necessary <- function(pkg){
  if (requireNamespace(pkg, quietly = TRUE)) {
    # Already installed: nothing to do.
    return(invisible(NULL))
  }
  message("The package ", pkg, " is not available; trying to install it.")
  previousOptions <- options(warn = 2)  # fail loudly during installation
  on.exit(options(previousOptions))
  source("https://bioconductor.org/biocLite.R")
  biocLite(pkg)
}
| /autonomics.annotate/R/install_package_if_necessary.R | no_license | bhagwataditya/autonomics0 | R | false | false | 420 | r | utils::globalVariables('biocLite')
#' Install package if necessary
#'
#' Checks whether \code{pkg} can be loaded; when it cannot, sources
#' Bioconductor's \code{biocLite.R} installer script and installs the
#' package with \code{biocLite()}.  Warnings are promoted to errors
#' (\code{warn = 2}) for the duration of the installation, and the previous
#' setting is restored on exit.
#'
#' @param pkg package
#' @export
install_package_if_necessary <- function(pkg){
  if (requireNamespace(pkg, quietly = TRUE)) {
    # Already installed: nothing to do.
    return(invisible(NULL))
  }
  message("The package ", pkg, " is not available; trying to install it.")
  previousOptions <- options(warn = 2)  # fail loudly during installation
  on.exit(options(previousOptions))
  source("https://bioconductor.org/biocLite.R")
  biocLite(pkg)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_tlevel_out.R
\name{read.tlevel.out}
\alias{read.tlevel.out}
\title{READ outputs of TLEVEL.OUT}
\usage{
read.tlevel.out(
project.path,
out.file = "T_Level.out",
output = NULL,
warn = FALSE,
...
)
}
\arguments{
\item{...}{Additional arguments.}
}
\value{
}
\description{
READ outputs of TLEVEL.OUT
}
| /man/read.tlevel.out.Rd | no_license | ovevans/hydrusR | R | false | true | 371 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_tlevel_out.R
\name{read.tlevel.out}
\alias{read.tlevel.out}
\title{READ outputs of TLEVEL.OUT}
\usage{
read.tlevel.out(
project.path,
out.file = "T_Level.out",
output = NULL,
warn = FALSE,
...
)
}
\arguments{
\item{...}{Additional arguments.}
}
\value{
}
\description{
READ outputs of TLEVEL.OUT
}
|
# Profiled run of an individual-based stochastic simulation using a
# thinning / rejection-sampling scheme: candidate events are generated at
# constant per-capita upper-bound rates and accepted with probability
# (true rate / bound).  pop holds counts of individuals per infection class
# (pop is indexed pop[i + 1] for class value i in 0..max_i); every accepted
# event is appended as a row "time pop..." to out_ad.txt.
Rprof("out_ad.out")
# Model parameters (names suggest: lambda = per-infection transmission,
# lambda_ex = external infection, alpha = infection-induced death, mu =
# recovery, r = birth, d = background death, K = carrying capacity).
parms = list(
max_i = 20,
lambda = 0.001,
lambda_ex = 0.1,
alpha = 0.01,
mu = 0.01,
r = 0.5,
d = 0.1,
K = 100,
init_pop = 100,
t_max = 300
)
t = 0
pop = rep(0, parms$max_i + 1)
is = 0:(parms$max_i)
pop[1] = parms$init_pop
# Start a fresh output file and record the initial state.
fname = "out_ad.txt"
if(file.exists(fname)) file.remove(fname)
outfile = file(fname, "w")
writeChar(paste(c(t, pop, "\n"), collapse=" "), con=outfile, eos=NULL)
# Per-capita upper bounds for the four candidate event types
# (infection / recovery / death / birth, per the P_* names).
max_rates_per_capita = c(
P_inf_max = parms$lambda_ex + parms$lambda * 200, #parms$max_i * parms$K + ,
P_rec_max = parms$mu * parms$max_i,
P_die_max = parms$alpha * parms$max_i + parms$d,
P_birth_max = parms$r)
sum_max_rates_per_capita = sum(max_rates_per_capita)
infections = sum(is*pop)
N = sum(pop)
t_next = t
while (t < parms$t_max) {
# Stop once the population is extinct.
if (N == 0) break()
# Waiting time to the next candidate event, then a focal individual's class
# (weighted by class sizes) and a candidate event type (weighted by bounds).
t_next = t_next + rexp(1, sum_max_rates_per_capita*N)
i = sample(is, 1, prob=pop)
event = sample(1:4, 1, prob=max_rates_per_capita)
# Accept with probability true rate / bounding rate.
# NOTE(review): the true rates use (i - 1) while pop is indexed pop[i + 1]
# for class i -- looks like a possible off-by-one; confirm against the model.
occurs = runif(1) < switch(event,
(parms$lambda * infections + parms$lambda_ex)/max_rates_per_capita[1],
(parms$mu * (i - 1))/max_rates_per_capita[2],
(parms$alpha * (i - 1) + parms$d)/max_rates_per_capita[3],
(parms$r * (1 - N/parms$K))/max_rates_per_capita[4])
if(occurs) {
switch(event, {
# Event 1: infection -- move one individual up a class (capped at max_i).
if (i < length(pop) - 1) {
pop[i + 1] = pop[i + 1] - 1
pop[i + 2] = pop[i + 2] + 1
}
}, {
# Event 2: recovery -- move one individual down a class.
pop[i+1] = pop[i+1] - 1
pop[i] = pop[i] + 1
}, {
# Event 3: death -- remove the focal individual.
pop[i+1] = pop[i+1] - 1
}, {
# Event 4: birth -- add an individual to the lowest class.
pop[1] = pop[1] + 1
})
# Time advances (and a row is logged) only on accepted events; t_next has
# already accumulated the waiting times of any rejected candidates.
t = t_next
writeChar(paste(c(t, pop, "\n"), collapse=" "), con=outfile, eos=NULL)
infections = sum(is*pop)
N = sum(pop)
}
#browser()
}
close(outfile)
Rprof(NULL)
| /inst/run_prototype_ad_algo.R | no_license | noamross/spore | R | false | false | 1,612 | r | Rprof("out_ad.out")
# Model parameters for the thinning-based stochastic simulation
# (the Rprof() call opening this duplicate copy precedes this line).
parms = list(
max_i = 20,
lambda = 0.001,
lambda_ex = 0.1,
alpha = 0.01,
mu = 0.01,
r = 0.5,
d = 0.1,
K = 100,
init_pop = 100,
t_max = 300
)
# pop[i + 1] counts individuals in infection class i (i in 0..max_i).
t = 0
pop = rep(0, parms$max_i + 1)
is = 0:(parms$max_i)
pop[1] = parms$init_pop
# Start a fresh output file and record the initial state.
fname = "out_ad.txt"
if(file.exists(fname)) file.remove(fname)
outfile = file(fname, "w")
writeChar(paste(c(t, pop, "\n"), collapse=" "), con=outfile, eos=NULL)
# Per-capita upper bounds for the four candidate event types
# (infection / recovery / death / birth, per the P_* names).
max_rates_per_capita = c(
P_inf_max = parms$lambda_ex + parms$lambda * 200, #parms$max_i * parms$K + ,
P_rec_max = parms$mu * parms$max_i,
P_die_max = parms$alpha * parms$max_i + parms$d,
P_birth_max = parms$r)
sum_max_rates_per_capita = sum(max_rates_per_capita)
infections = sum(is*pop)
N = sum(pop)
t_next = t
while (t < parms$t_max) {
if (N == 0) break()
# Candidate event: waiting time, focal class, and event type.
t_next = t_next + rexp(1, sum_max_rates_per_capita*N)
i = sample(is, 1, prob=pop)
event = sample(1:4, 1, prob=max_rates_per_capita)
# Thinning step: accept with probability true rate / bounding rate.
occurs = runif(1) < switch(event,
(parms$lambda * infections + parms$lambda_ex)/max_rates_per_capita[1],
(parms$mu * (i - 1))/max_rates_per_capita[2],
(parms$alpha * (i - 1) + parms$d)/max_rates_per_capita[3],
(parms$r * (1 - N/parms$K))/max_rates_per_capita[4])
if(occurs) {
# Apply the accepted event, then log the new state.
switch(event, {
if (i < length(pop) - 1) {
pop[i + 1] = pop[i + 1] - 1
pop[i + 2] = pop[i + 2] + 1
}
}, {
pop[i+1] = pop[i+1] - 1
pop[i] = pop[i] + 1
}, {
pop[i+1] = pop[i+1] - 1
}, {
pop[1] = pop[1] + 1
})
t = t_next
writeChar(paste(c(t, pop, "\n"), collapse=" "), con=outfile, eos=NULL)
infections = sum(is*pop)
N = sum(pop)
}
#browser()
}
close(outfile)
Rprof(NULL)
|
#' gym
#'@description Given a city and your Yelp API credentials, returns the name,
#'rating and street address of up to 50 gyms in that city as reported by the
#'Yelp search API.
#' @author Jiatong Li
#'
#' @title Get information of gyms from yelp using your own yelp api key
#' @return A data frame with one row per business and columns \code{name},
#'   \code{rating} and \code{address1}.
#' @export
#' @usage gym(api1="your client id ",api_key1= "your client_secret",i="City,State")
#'
#' @examples gym(api1="q-f2lLVGZkys5AVkVPWsuQ",api_key1 = "tLcN4rX8BzXZTSE5fYKHntHgSbjHilyGQV8VoXq0zGgiwP7RM8rS0PCOuRyRpn6UkMsEE9gnXWqEs2V32IreXfwVjao7iiAgKeJQ85u2n_mbuZn3-Tuq3R7qs4CPX3Yx",i="San Jose,CA")
gym=function(api1,api_key1,i){
  library(httr)
  # OAuth handshake; note the search request below authenticates with the
  # client secret as a bearer token, so `token` itself is not reused.
  auth_res <- POST("https://api.yelp.com/oauth2/token",
                   body = list(grant_type = "client_credentials",
                               client_id = api1,
                               client_secret = api_key1))
  token <- content(auth_res)$access_token
  # Build the /v3/businesses/search request: up to 50 gyms within 8 km of
  # the requested location.
  search_url <- modify_url("https://api.yelp.com",
                           path = c("v3", "businesses", "search"),
                           query = list(term = "gym", location = i,
                                        limit = 50, radius = 8000))
  search_res <- GET(search_url,
                    add_headers('Authorization' = paste("bearer", api_key1)))
  results <- content(search_res)
  # Turn one business record into a one-row data frame, substituting ""
  # for any missing field.
  parse_business <- function(x) {
    fields <- list(name = x$name,
                   rating = x$rating,
                   address1 = x$location$address1)
    fields <- lapply(fields, function(f) ifelse(is.null(f), "", f))
    data.frame(name = fields$name,
               rating = fields$rating,
               address1 = fields$address1)
  }
  GYM <- do.call("rbind", lapply(results$businesses, parse_business))
  GYM
}
| /R/gym.R | permissive | JT944/Uclapack3 | R | false | false | 1,892 | r | #' gym
#'@description Given a city and your Yelp API credentials, returns the name,
#'rating and street address of up to 50 gyms in that city as reported by the
#'Yelp search API.
#' @author Jiatong Li
#'
#' @title Get information of gyms from yelp using your own yelp api key
#' @return A data frame with one row per business and columns \code{name},
#'   \code{rating} and \code{address1}.
#' @export
#' @usage gym(api1="your client id ",api_key1= "your client_secret",i="City,State")
#'
#' @examples gym(api1="q-f2lLVGZkys5AVkVPWsuQ",api_key1 = "tLcN4rX8BzXZTSE5fYKHntHgSbjHilyGQV8VoXq0zGgiwP7RM8rS0PCOuRyRpn6UkMsEE9gnXWqEs2V32IreXfwVjao7iiAgKeJQ85u2n_mbuZn3-Tuq3R7qs4CPX3Yx",i="San Jose,CA")
gym=function(api1,api_key1,i){
  library(httr)
  # OAuth handshake; note the search request below authenticates with the
  # client secret as a bearer token, so `token` itself is not reused.
  auth_res <- POST("https://api.yelp.com/oauth2/token",
                   body = list(grant_type = "client_credentials",
                               client_id = api1,
                               client_secret = api_key1))
  token <- content(auth_res)$access_token
  # Build the /v3/businesses/search request: up to 50 gyms within 8 km of
  # the requested location.
  search_url <- modify_url("https://api.yelp.com",
                           path = c("v3", "businesses", "search"),
                           query = list(term = "gym", location = i,
                                        limit = 50, radius = 8000))
  search_res <- GET(search_url,
                    add_headers('Authorization' = paste("bearer", api_key1)))
  results <- content(search_res)
  # Turn one business record into a one-row data frame, substituting ""
  # for any missing field.
  parse_business <- function(x) {
    fields <- list(name = x$name,
                   rating = x$rating,
                   address1 = x$location$address1)
    fields <- lapply(fields, function(f) ifelse(is.null(f), "", f))
    data.frame(name = fields$name,
               rating = fields$rating,
               address1 = fields$address1)
  }
  GYM <- do.call("rbind", lapply(results$businesses, parse_business))
  GYM
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biocro-package.R
\docType{data}
\name{doy124}
\alias{doy124}
\title{Weather data}
\format{data frame of dimensions 24 by 8.}
\source{
simulated
}
\description{
Example for a given day of the year to illustrate the \code{\link{CanA}}
function.
}
\details{
LAI: leaf area index.
year: year.
doy: 124 in this case.
hour: hour of the day, (0--23).
solarR: direct solar radiation.
DailyTemp.C: hourly air temperature (Celsius).
RH: relative humidity, (0--1).
WindSpeed: 4.1 m \eqn{s^{-1}} average daily value in this case.
}
\keyword{datasets}
| /man/doy124.Rd | permissive | kimberlyh66/biocro | R | false | true | 624 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biocro-package.R
\docType{data}
\name{doy124}
\alias{doy124}
\title{Weather data}
\format{data frame of dimensions 24 by 8.}
\source{
simulated
}
\description{
Example for a given day of the year to illustrate the \code{\link{CanA}}
function.
}
\details{
LAI: leaf area index.
year: year.
doy: 124 in this case.
hour: hour of the day, (0--23).
solarR: direct solar radiation.
DailyTemp.C: hourly air temperature (Celsius).
RH: relative humidity, (0--1).
WindSpeed: 4.1 m \eqn{s^{-1}} average daily value in this case.
}
\keyword{datasets}
|
# Read the "flags" data set from the directory `path`.
#
# Returns a data.frame with one row per record of <path>/flag.data: the file
# is comma-separated with no header row; empty fields are read as NA
# (na.strings = ""), character columns are kept as character (not factors),
# and surrounding whitespace is stripped from each field.
getData <- function(path)
{
    # file.path() builds the path portably (was paste(..., sep = "/"))
    # and there is no need for a vector-then-index dance on a single file.
    datafile <- file.path(path, "flag.data")
    dat <- read.table(datafile,
                      sep=",",
                      comment.char="",
                      na.strings="",
                      stringsAsFactors=FALSE, strip.white=TRUE)
    dat
}
| /datasets/UCI_ML_DataDescription/scripts/flags.R | no_license | MaciejPiernik/clustering-generated-features | R | false | false | 252 | r | getData <- function(path)
{
    # <path>/flag.data: comma-separated, headerless; empty fields become NA,
    # strings stay character, and surrounding whitespace is stripped.
    datafile <- paste(path, "flag.data", sep = "/")
    flags <- read.table(datafile,
                        sep = ",",
                        comment.char = "",
                        na.strings = "",
                        stringsAsFactors = FALSE, strip.white = TRUE)
    flags
}
|
# Sample paths of fractional Brownian motion for three Hurst exponents,
# generated with somebm::fbm() (10,000 steps each).
library("somebm")
# Generate and preview each path on its own first.
FBM1 = fbm(hurst = 0.1, n = 10000)
plot(FBM1)
FBM5 = fbm(hurst = 0.5, n = 10000)
plot(FBM5)
FBM10 = fbm(hurst = 0.9, n = 10000)
plot(FBM10)
# Then re-plot the same three paths side by side, with titles, in one
# 1x3 figure (H = 0.1 negative correlation, 0.5 Brownian, 0.9 positive).
par(mfrow=c(1,3))
plot(FBM1, main = "H = 0.1 - Negative Correlation", ylab = "Value")
plot(FBM5, main = "H = 0.5 - Brownian Motion", ylab = "Value")
plot(FBM10, main = "H = 0.9 - Positive Correlation", ylab = "Value")
| /MT CODE/FBM plots.R | no_license | alebellotta/MTWbackup | R | false | false | 379 | r | library("somebm")
# Fractional Brownian motion sample paths for three Hurst exponents
# (somebm is attached by the library() call fused into the preceding line).
FBM1 = fbm(hurst = 0.1, n = 10000)
plot(FBM1)
FBM5 = fbm(hurst = 0.5, n = 10000)
plot(FBM5)
FBM10 = fbm(hurst = 0.9, n = 10000)
plot(FBM10)
# Re-plot the three paths side by side, with titles, in one 1x3 figure.
par(mfrow=c(1,3))
plot(FBM1, main = "H = 0.1 - Negative Correlation", ylab = "Value")
plot(FBM5, main = "H = 0.5 - Brownian Motion", ylab = "Value")
plot(FBM10, main = "H = 0.9 - Positive Correlation", ylab = "Value")
|
#cut(Sys.Date(), "month")
#ymd(Sys.Date()) - years(1)
#
# Shiny server for a ticket dashboard: builds a consolidated table of monthly
# open/closed ticket counts, renders a per-location yearly bar chart, and
# serves downloads of the chart (png) and of open/closed ticket tables (csv).
# NOTE(review): `tickets`, `all_merged`, `cbPalette`, `integer_breaks`,
# `integer_breaks2`, ddply (plyr), dcast/melt (reshape2) and showshinyalert
# are not defined here -- presumably loaded by global.R; confirm there.
shinyServer(function(input, output,session){
library(lubridate)
library(shiny)
library(ggplot2)
library(shinyBS)
#2014 Kilifi
#to create a subset of the admissions data for graphs
# consolidated(): reactive data frame of ticket counts per month, year,
# location and type ("open"/"closed"), restricted to tickets reported after
# one year (plus a month) before the month selected in input$month and
# closed before it.
consolidated <- reactive( {
last_D <- format(ymd(Sys.Date()) - years(1), "%Y-%m-%d")
last_D <- format(ymd(last_D) - months(1), "%Y-%m-%d")
last_D <- cut(as.Date(last_D , format = "%Y-%m-%d"), "month")
cur_D <- cut(Sys.Date(), "month")
#change the selected dates to last year
selected <- paste("01" , input$month , format(Sys.Date(), "%Y") , sep = "-")
selected_D <-as.Date(selected , format = "%d-%b-%Y")
##change to last year
selected_D_last <- as.Date(ymd(selected_D) - years(1))
selected_D_last <- format(ymd(selected_D_last) - months(1), "%Y-%m-%d")
tickets <- tickets[tickets$CLOSE_DATE < selected_D & tickets$REPORT_DATE> selected_D_last & !is.na(tickets$REPORT_DATE ) ,]
#consolidated tickets
# Count tickets by (month, year, location): once by report date ("open")
# and once by close date ("closed"), then stack the two counts.
open_all <-ddply(tickets,.(month_report,yr_report, LOCATION_NAME,month_report2) ,nrow )
closed_all <-ddply(tickets,.(month_close,yr_close, LOCATION_NAME , month_close2) ,nrow )
names(open_all) <- c( 'month_report', 'yr_report','LOCATION','month_2' ,'count')
names(closed_all) <- c( 'month_report', 'yr_report','LOCATION','month_2' , 'count')
open_all$type <- 'open'
closed_all$type <- 'closed'
cons_merged <- rbind(open_all,closed_all)
cons_merged$yr_report <- as.numeric(as.character(cons_merged$yr_report))
cons_merged$month <- as.numeric(as.character(cons_merged$month_report))
cons_merged$month_yr <- paste(cons_merged$yr_report , cons_merged$month_report)
cons_merged$month_yr2 <- paste(cons_merged$yr_report , cons_merged$month_2)
# table(cons_merged$month_yr2)
cons_merged$month_report <- factor(cons_merged$month_report,labels=c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"),ordered=TRUE)
cons_merged
})
# Timer that re-sources global.R (refreshing the underlying data) each time
# it fires; 360000000 ms is ~100 hours between refreshes.
autoInvalidate <- reactiveTimer(360000000, session)
observe({
# Invalidate and re-execute this reactive expression every time the
# timer fires.
autoInvalidate()
source("global.R")
# Do something each time this is invalidated.
# The isolate() makes this observer _not_ get invalidated and re-executed
# when input$n changes.
# print(paste("The value of input$x is", isolate(input$xMonth)))
})
# plotInput(): builds the dodged open/closed bar chart for the location
# chosen in input$location.  If the selected month is in the future, no plot
# is built and a shinyalert warning is shown instead.
plotInput <- reactive( {
selected2 <- paste("01" , input$month , format(Sys.Date(), "%Y") , sep = "-")
selected_D2 <-as.Date(selected2 , format = "%d-%b-%Y")
if (input$location=="Kilifi" & Sys.Date()>selected_D2) {
cons_mergedAll <- consolidated()
cons_mergedAll <- cons_mergedAll[ cons_mergedAll$LOCATION =="Kilifi" & !is.na(cons_mergedAll$month) ,]
# Numeric "yyyymm"-style key used only to put the bars in calendar order.
cons_mergedAll$month_yr_s <- as.numeric(gsub(" " ,"",x=cons_mergedAll$month_yr))
cons_mergedAll <- cons_mergedAll[order(cons_mergedAll$month_yr_s),]
cons_mergedAll$order = c(1:length(cons_mergedAll$count))
graphKlf <- ggplot(cons_mergedAll , aes( reorder(month_yr2, order) ,y=count, fill=type )) + geom_bar( stat="identity",position="dodge") +
ylab("Number of Tickets") + xlab("Month") + ggtitle("Tickets in Kilifi") +theme(axis.text.x=element_text(angle = 45, hjust = 1)) +
theme_bw() + scale_fill_manual(values=cbPalette) + scale_y_continuous(breaks=integer_breaks) + scale_x_discrete(drop = FALSE) +
geom_text(aes(label = count), size = 3, hjust = 0.5, vjust = 3, position = position_dodge(width = 0.8))
#ggsave(file="images/Consolidated Kilifi 2015.png", width=12.03, height=10.9)
# print(graphKlf)
}
else if (input$location=="Nairobi" & Sys.Date()>selected_D2) {
cons_mergedAll <- consolidated()
cons_mergedAll <- cons_mergedAll[ cons_mergedAll$LOCATION =="Nairobi" & !is.na(cons_mergedAll$month) ,]
cons_mergedAll$month_yr_s <- as.numeric(gsub(" " ,"",x=cons_mergedAll$month_yr))
cons_mergedAll <- cons_mergedAll[order(cons_mergedAll$month_yr_s),]
cons_mergedAll$order = c(1:length(cons_mergedAll$count))
graphNrb <- ggplot(cons_mergedAll , aes( reorder(month_yr2, order) ,y=count, fill=type )) + geom_bar( stat="identity",position="dodge") +
ylab("Number of Tickets") + xlab("Month") + ggtitle("Tickets in Nairobi") +theme(axis.text.x=element_text(angle = 45, hjust = 1)) +
theme_bw() + scale_fill_manual(values=cbPalette) + scale_y_continuous(breaks=integer_breaks2) + scale_x_discrete(drop = FALSE)+
geom_text(aes(label = count), size = 3, hjust = 0.5, vjust = 3, position = position_dodge(width = 0.8))
}
else if (input$location=="Mtwapa" & Sys.Date()>selected_D2) {
cons_mergedAll <- consolidated()
cons_mergedAll <- cons_mergedAll[ cons_mergedAll$LOCATION =="Mtwapa" & !is.na(cons_mergedAll$month) ,]
cons_mergedAll$month_yr_s <- as.numeric(gsub(" " ,"",x=cons_mergedAll$month_yr))
cons_mergedAll <- cons_mergedAll[order(cons_mergedAll$month_yr_s),]
cons_mergedAll$order = c(1:length(cons_mergedAll$count))
graphMtp <- ggplot(cons_mergedAll , aes( reorder(month_yr2, order) ,y=count, fill=type )) + geom_bar( stat="identity",position="dodge") +
ylab("Number of Tickets") + xlab("Month") + ggtitle("Tickets in Mtwapa") +theme(axis.text.x=element_text(angle = 45, hjust = 1)) +
theme_bw() + scale_fill_manual(values=cbPalette) + scale_y_continuous(breaks=integer_breaks2) + scale_x_discrete(drop = FALSE) +
geom_text(aes(label = count), size = 3, hjust = 0.5, vjust = 3, position = position_dodge(width = 0.8))
#ggsave(file="images/Consolidated Kilifi 2015.png", width=12.03, height=10.9)
# print(graphMtp)
}
else {
#warning("Selected a future month")
# addPopover(session,id="yearlyPlot", title="!!FUTURE MONTH!!", content = paste0("You have selected a month which is
# after the current month"), trigger = 'hover')
#
showshinyalert(session, "shinyalert1", paste("Select a month less than" ,format(Sys.Date(), '%b'),
"WARNING: You have selected a future month"), styleclass = "danger")
}
})
# Render the chart built by plotInput() into the main plot area.
output$yearlyPlot <- renderPlot({
print(plotInput())
})
# output$downloadGraph <- downloadHandler(
# filename <- function() { paste(input$location,"_consolidated" , '.png', sep='') },
# #filename <- paste(input$location,"_consolidated" , '.jpeg', sep=''),
# content <- function(file) {
#
# # jpeg(file)
# plot <- plotInput()
# print(plot)
# dev.copy(png,file)
# dev.off()
# #
# })
# Save the current chart with ggsave, then copy it to the download target as
# "<location>_consolidated.png".
output$downloadGraph <- downloadHandler(
filename = function() {
paste(input$location,"_consolidated" , '.png', sep='')
},
content = function(file) {
name <- paste(input$location,"_consolidated" , '.png', sep='')
ggsave(name, plotInput() ,width=16.67 , height=5.33)
file.copy(name, file, overwrite=TRUE)
}
)
#------------------------------------------------------------------------------------#
#this creates the tables for closed and open tickets
# One row per (NAME, LOCATION) with one column per month_report level,
# produced by melt()/dcast() for the year chosen in input$yrTicket.
output$ticket_tables <- renderDataTable({
if (input$type=="Open") {
all_mergedOpen <- all_merged[all_merged$yr_report==input$yrTicket & all_merged$type=="open",]
all_mergedOpen <- all_mergedOpen[c("NAME" , "month_report", "LOCATION" , "count" )]
all_mergedOpen <- dcast(melt(all_mergedOpen), NAME + LOCATION ~ month_report )
}
else if(input$type=="Closed") {
all_mergedClosed <- all_merged[all_merged$yr_report==input$yrTicket & all_merged$type=="closed",]
all_mergedClosed <- all_mergedClosed[c("NAME" , "month_report", "LOCATION" , "count" )]
all_mergedClosed <- all_mergedClosed[!is.na(all_mergedClosed$count),]
all_mergedClosed <- dcast(melt(all_mergedClosed), NAME + LOCATION ~ month_report )
}
} , options = list(paging = FALSE, searching = FALSE)) #end renderDataTable
# Same selection/reshape logic as output$ticket_tables above, duplicated as a
# reactive so the CSV download handler below can reuse the data.
ticket_tablesData <- reactive({
if (input$type=="Open") {
all_mergedOpen <- all_merged[all_merged$yr_report==input$yrTicket & all_merged$type=="open",]
all_mergedOpen <- all_mergedOpen[c("NAME" , "month_report", "LOCATION" , "count" )]
all_mergedOpen <- dcast(melt(all_mergedOpen), NAME + LOCATION ~ month_report )
}
else if(input$type=="Closed") {
all_mergedClosed <- all_merged[all_merged$yr_report==input$yrTicket & all_merged$type=="closed",]
all_mergedClosed <- all_mergedClosed[c("NAME" , "month_report", "LOCATION" , "count" )]
all_mergedClosed <- all_mergedClosed[!is.na(all_mergedClosed$count),]
all_mergedClosed <- dcast(melt(all_mergedClosed), NAME + LOCATION ~ month_report )
}
})
#this downloads the table of the created tickets
output$downloadTicketTable <- downloadHandler(
# This function returns a string which tells the client
# browser what name to use when saving the file.
filename = function() {
month<- format( Sys.Date(), "%b")
name <- paste(month , input$yrTicket , sep = "_")
paste(name, "csv", sep = ".")
},
# This function should write data to a file given to it by
# the argument 'file'.
content = function(file) {
# data <- switch(input$type, "Open" = all_mergedOpen ,"Closed" =all_mergedClosed )
data <- ticket_tablesData()
# Write to a file specified by the 'file' argument
write.csv(data, file, row.names = F, na="")
}
)
})#end server
| /server.R | no_license | mwaithaka/tickets | R | false | false | 9,877 | r | #cut(Sys.Date(), "month")
#ymd(Sys.Date()) - years(1)
#
shinyServer(function(input, output,session){
library(lubridate)
library(shiny)
library(ggplot2)
library(shinyBS)
#2014 Kilifi
#to create a subset of the admsions data for graphs
consolidated <- reactive( {
last_D <- format(ymd(Sys.Date()) - years(1), "%Y-%m-%d")
last_D <- format(ymd(last_D) - months(1), "%Y-%m-%d")
last_D <- cut(as.Date(last_D , format = "%Y-%m-%d"), "month")
cur_D <- cut(Sys.Date(), "month")
#change the selected dates to last year
selected <- paste("01" , input$month , format(Sys.Date(), "%Y") , sep = "-")
selected_D <-as.Date(selected , format = "%d-%b-%Y")
##change to last year
selected_D_last <- as.Date(ymd(selected_D) - years(1))
selected_D_last <- format(ymd(selected_D_last) - months(1), "%Y-%m-%d")
tickets <- tickets[tickets$CLOSE_DATE < selected_D & tickets$REPORT_DATE> selected_D_last & !is.na(tickets$REPORT_DATE ) ,]
#consolidted tickets
open_all <-ddply(tickets,.(month_report,yr_report, LOCATION_NAME,month_report2) ,nrow )
closed_all <-ddply(tickets,.(month_close,yr_close, LOCATION_NAME , month_close2) ,nrow )
names(open_all) <- c( 'month_report', 'yr_report','LOCATION','month_2' ,'count')
names(closed_all) <- c( 'month_report', 'yr_report','LOCATION','month_2' , 'count')
open_all$type <- 'open'
closed_all$type <- 'closed'
cons_merged <- rbind(open_all,closed_all)
cons_merged$yr_report <- as.numeric(as.character(cons_merged$yr_report))
cons_merged$month <- as.numeric(as.character(cons_merged$month_report))
cons_merged$month_yr <- paste(cons_merged$yr_report , cons_merged$month_report)
cons_merged$month_yr2 <- paste(cons_merged$yr_report , cons_merged$month_2)
# table(cons_merged$month_yr2)
cons_merged$month_report <- factor(cons_merged$month_report,labels=c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"),ordered=TRUE)
cons_merged
})
autoInvalidate <- reactiveTimer(360000000, session)
observe({
# Invalidate and re-execute this reactive expression every time the
# timer fires.
autoInvalidate()
source("global.R")
# Do something each time this is invalidated.
# The isolate() makes this observer _not_ get invalidated and re-executed
# when input$n changes.
# print(paste("The value of input$x is", isolate(input$xMonth)))
})
plotInput <- reactive( {
selected2 <- paste("01" , input$month , format(Sys.Date(), "%Y") , sep = "-")
selected_D2 <-as.Date(selected2 , format = "%d-%b-%Y")
if (input$location=="Kilifi" & Sys.Date()>selected_D2) {
cons_mergedAll <- consolidated()
cons_mergedAll <- cons_mergedAll[ cons_mergedAll$LOCATION =="Kilifi" & !is.na(cons_mergedAll$month) ,]
cons_mergedAll$month_yr_s <- as.numeric(gsub(" " ,"",x=cons_mergedAll$month_yr))
cons_mergedAll <- cons_mergedAll[order(cons_mergedAll$month_yr_s),]
cons_mergedAll$order = c(1:length(cons_mergedAll$count))
graphKlf <- ggplot(cons_mergedAll , aes( reorder(month_yr2, order) ,y=count, fill=type )) + geom_bar( stat="identity",position="dodge") +
ylab("Number of Tickets") + xlab("Month") + ggtitle("Tickets in Kilifi") +theme(axis.text.x=element_text(angle = 45, hjust = 1)) +
theme_bw() + scale_fill_manual(values=cbPalette) + scale_y_continuous(breaks=integer_breaks) + scale_x_discrete(drop = FALSE) +
geom_text(aes(label = count), size = 3, hjust = 0.5, vjust = 3, position = position_dodge(width = 0.8))
#ggsave(file="images/Consolidated Kilifi 2015.png", width=12.03, height=10.9)
# print(graphKlf)
}
else if (input$location=="Nairobi" & Sys.Date()>selected_D2) {
cons_mergedAll <- consolidated()
cons_mergedAll <- cons_mergedAll[ cons_mergedAll$LOCATION =="Nairobi" & !is.na(cons_mergedAll$month) ,]
cons_mergedAll$month_yr_s <- as.numeric(gsub(" " ,"",x=cons_mergedAll$month_yr))
cons_mergedAll <- cons_mergedAll[order(cons_mergedAll$month_yr_s),]
cons_mergedAll$order = c(1:length(cons_mergedAll$count))
graphNrb <- ggplot(cons_mergedAll , aes( reorder(month_yr2, order) ,y=count, fill=type )) + geom_bar( stat="identity",position="dodge") +
ylab("Number of Tickets") + xlab("Month") + ggtitle("Tickets in Nairobi") +theme(axis.text.x=element_text(angle = 45, hjust = 1)) +
theme_bw() + scale_fill_manual(values=cbPalette) + scale_y_continuous(breaks=integer_breaks2) + scale_x_discrete(drop = FALSE)+
geom_text(aes(label = count), size = 3, hjust = 0.5, vjust = 3, position = position_dodge(width = 0.8))
}
else if (input$location=="Mtwapa" & Sys.Date()>selected_D2) {
cons_mergedAll <- consolidated()
cons_mergedAll <- cons_mergedAll[ cons_mergedAll$LOCATION =="Mtwapa" & !is.na(cons_mergedAll$month) ,]
cons_mergedAll$month_yr_s <- as.numeric(gsub(" " ,"",x=cons_mergedAll$month_yr))
cons_mergedAll <- cons_mergedAll[order(cons_mergedAll$month_yr_s),]
cons_mergedAll$order = c(1:length(cons_mergedAll$count))
graphMtp <- ggplot(cons_mergedAll , aes( reorder(month_yr2, order) ,y=count, fill=type )) + geom_bar( stat="identity",position="dodge") +
ylab("Number of Tickets") + xlab("Month") + ggtitle("Tickets in Mtwapa") +theme(axis.text.x=element_text(angle = 45, hjust = 1)) +
theme_bw() + scale_fill_manual(values=cbPalette) + scale_y_continuous(breaks=integer_breaks2) + scale_x_discrete(drop = FALSE) +
geom_text(aes(label = count), size = 3, hjust = 0.5, vjust = 3, position = position_dodge(width = 0.8))
#ggsave(file="images/Consolidated Kilifi 2015.png", width=12.03, height=10.9)
# print(graphMtp)
}
else {
#warning("Selected a future month")
# addPopover(session,id="yearlyPlot", title="!!FUTURE MONTH!!", content = paste0("You have selected a month which is
# after the current month"), trigger = 'hover')
#
showshinyalert(session, "shinyalert1", paste("Select a month less than" ,format(Sys.Date(), '%b'),
"WARNING: You have selected a future month"), styleclass = "danger")
}
})
output$yearlyPlot <- renderPlot({
print(plotInput())
})
# output$downloadGraph <- downloadHandler(
# filename <- function() { paste(input$location,"_consolidated" , '.png', sep='') },
# #filename <- paste(input$location,"_consolidated" , '.jpeg', sep=''),
# content <- function(file) {
#
# # jpeg(file)
# plot <- plotInput()
# print(plot)
# dev.copy(png,file)
# dev.off()
# #
# })
output$downloadGraph <- downloadHandler(
filename = function() {
paste(input$location,"_consolidated" , '.png', sep='')
},
content = function(file) {
name <- paste(input$location,"_consolidated" , '.png', sep='')
ggsave(name, plotInput() ,width=16.67 , height=5.33)
file.copy(name, file, overwrite=TRUE)
}
)
#------------------------------------------------------------------------------------#
#this creates the tables for closed and open tickets
output$ticket_tables <- renderDataTable({
if (input$type=="Open") {
all_mergedOpen <- all_merged[all_merged$yr_report==input$yrTicket & all_merged$type=="open",]
all_mergedOpen <- all_mergedOpen[c("NAME" , "month_report", "LOCATION" , "count" )]
all_mergedOpen <- dcast(melt(all_mergedOpen), NAME + LOCATION ~ month_report )
}
else if(input$type=="Closed") {
all_mergedClosed <- all_merged[all_merged$yr_report==input$yrTicket & all_merged$type=="closed",]
all_mergedClosed <- all_mergedClosed[c("NAME" , "month_report", "LOCATION" , "count" )]
all_mergedClosed <- all_mergedClosed[!is.na(all_mergedClosed$count),]
all_mergedClosed <- dcast(melt(all_mergedClosed), NAME + LOCATION ~ month_report )
}
} , options = list(paging = FALSE, searching = FALSE)) #end renderDataTable
ticket_tablesData <- reactive({
if (input$type=="Open") {
all_mergedOpen <- all_merged[all_merged$yr_report==input$yrTicket & all_merged$type=="open",]
all_mergedOpen <- all_mergedOpen[c("NAME" , "month_report", "LOCATION" , "count" )]
all_mergedOpen <- dcast(melt(all_mergedOpen), NAME + LOCATION ~ month_report )
}
else if(input$type=="Closed") {
all_mergedClosed <- all_merged[all_merged$yr_report==input$yrTicket & all_merged$type=="closed",]
all_mergedClosed <- all_mergedClosed[c("NAME" , "month_report", "LOCATION" , "count" )]
all_mergedClosed <- all_mergedClosed[!is.na(all_mergedClosed$count),]
all_mergedClosed <- dcast(melt(all_mergedClosed), NAME + LOCATION ~ month_report )
}
})
#this downloads the table of the created tickets
output$downloadTicketTable <- downloadHandler(
# This function returns a string which tells the client
# browser what name to use when saving the file.
filename = function() {
month<- format( Sys.Date(), "%b")
name <- paste(month , input$yrTicket , sep = "_")
paste(name, "csv", sep = ".")
},
# This function should write data to a file given to it by
# the argument 'file'.
content = function(file) {
# data <- switch(input$type, "Open" = all_mergedOpen ,"Closed" =all_mergedClosed )
data <- ticket_tablesData()
# Write to a file specified by the 'file' argument
write.csv(data, file, row.names = F, na="")
}
)
})#end server
|
\name{mcmcplot1}
\alias{mcmcplot1}
\title{ MCMC Diagnostics Plots for one Model Parameter}
\description{
Creates a graph window containing three different plots---a trace plot, a density plot, and an autocorrelation plot---for one parameter in an MCMC run. This function is used by \code{mcmcplot} to construct an html file of MCMC diagnostics. This function is intended for internal use only.
}
\usage{
mcmcplot1(x, col = mcmcplotsPalette(n), lty = 1, xlim = NULL, ylim =
NULL, style = c("gray", "plain"), greek = FALSE)
}
\arguments{
\item{x}{an \code{mcmc} object with a single variable.}
\item{col}{ colors for plotting each parallel chain. The default is \code{seq(nchains)+1} where \code{nchains} is the number of parallel chains in \code{mcmcout}. If there is only one parallel chain, then the default is 1.}
\item{lty}{ line types for plotting each parallel chain. The default is 1 for all parallel chains.}
\item{xlim}{ limits for the x axis of the density plot.}
\item{ylim}{ limits for the y axis of the density plot.}
\item{style}{ if "gray", then the plotting region is printed with a gray background, otherwise the default plotting region is used.}
\item{greek}{if \code{TRUE}, the names of greek letters in the \code{labels} will be displayed as greek characters on the plot.}
}
\value{
Creates a plot.
}
\references{ No references. }
\author{ S. McKay Curtis }
\note{
Only the first parallel chain is used to create the autocorrelation plot. This function is used by \code{mcmcplot} to create html output for all the parameters of an MCMC simulation.
}
\seealso{ \code{\link{mcmcplot}} }
\examples{
## Create fake MCMC output
fakemcmc <- as.mcmc.list(mcmc(sapply(1:5, function(dum) rnorm(1000))))
varnames(fakemcmc) <- c("gamma[1,1]", "gamma[1,2]", "gamma[1,3]", "sigma[1]", "sigma[2]")
mcmcplot1(fakemcmc[, "sigma[1]", drop=FALSE])
mcmcplot1(fakemcmc[, "gamma[1,3]", drop=FALSE], style="plain")
}
\keyword{hplot}
| /mcmcplots/man/mcmcplot1.Rd | no_license | sumtxt/mcmcplots | R | false | false | 1,969 | rd | \name{mcmcplot1}
\alias{mcmcplot1}
\title{ MCMC Diagnostics Plots for one Model Parameter}
\description{
Creates a graph window containing three different plots---a trace plot, a density plot, and an autocorrelation plot---for one parameter in an MCMC run. This function is used by \code{mcmcplot} to construct an html file of MCMC diagnostics. This function is intended for internal use only.
}
\usage{
mcmcplot1(x, col = mcmcplotsPalette(n), lty = 1, xlim = NULL, ylim =
NULL, style = c("gray", "plain"), greek = FALSE)
}
\arguments{
\item{x}{an \code{mcmc} object with a single variable.}
\item{col}{ colors for plotting each parallel chain. The default is \code{seq(nchains)+1} where \code{nchains} is the number of parallel chains in \code{mcmcout}. If there is only one parallel chain, then the default is 1.}
\item{lty}{ line types for plotting each parallel chain. The default is 1 for all parallel chains.}
\item{xlim}{ limits for the x axis of the density plot.}
\item{ylim}{ limits for the y axis of the density plot.}
\item{style}{ if "gray", then the plotting region is printed with a gray background, otherwise the default plotting region is used.}
\item{greek}{if \code{TRUE}, the names of greek letters in the \code{labels} will be displayed as greek characters on the plot.}
}
\value{
Creates a plot.
}
\references{ No references. }
\author{ S. McKay Curtis }
\note{
Only the first parallel chain is used to create the autocorrelation plot. This function is used by \code{mcmcplot} to create html output for all the parameters of an MCMC simulation.
}
\seealso{ \code{\link{mcmcplot}} }
\examples{
## Create fake MCMC output
fakemcmc <- as.mcmc.list(mcmc(sapply(1:5, function(dum) rnorm(1000))))
varnames(fakemcmc) <- c("gamma[1,1]", "gamma[1,2]", "gamma[1,3]", "sigma[1]", "sigma[2]")
mcmcplot1(fakemcmc[, "sigma[1]", drop=FALSE])
mcmcplot1(fakemcmc[, "gamma[1,3]", drop=FALSE], style="plain")
}
\keyword{hplot}
|
#' @method gplot matrix
#' @export
#' @export gplot.matrix
"gplot.matrix" <-
function(x, y, se.y = NULL, xlab, ylab, residuals = NULL, rugplot = FALSE, scale =
0, se = FALSE, fit, ...)
{
# Perspective plot of a bivariate term: interpolates y over the (x1, x2)
# grid and draws the surface with persp().  `x` must have exactly two
# columns; otherwise a warning is issued and 0 is returned invisibly.
# Returns (invisibly) the scale used, so callers can align panels.
if(ncol(x) != 2) {
warning(paste0("A perspective plot was requested for \"", ylab,
"\" but the \"x\" variable has dimension other than 2"))
# BUG FIX: the original wrote invisible(return(0)); return() exits
# before invisible() can apply, so the 0 was returned visibly.
return(invisible(0))
}
# TRUE for rows whose (x1, x2) pair duplicates an earlier row; encoding
# the pair as a complex number lets duplicated() compare both at once.
bivar.dup <- function(x)
{
if(is.null(dx <- dim(x)) || dx[2] > 2)
stop("x must be bivariate")
duplicated(x[, 1] + (1i) * x[, 2])
}
xname <- dimnames(x)[[2]]
dups <- bivar.dup(x)
if (requireNamespace("interp", quietly = TRUE)) {
xyz <- interp::interp(x[!dups, 1], x[!dups, 2], y[!dups])
} else {
stop("You need to install the package 'interp' from the R contributed libraries to use this plotting method for bivariate functions")
}
# interp() returns NA for grid cells outside the data's convex hull; fill
# those with the minimum so persp() gets a complete surface.
zmin <- min(xyz$z[!is.na(xyz$z)])
z <- ifelse(is.na(xyz$z), zmin, xyz$z)
scale2 <- diff(range(z))
# Adjust scale
scale <- max(scale, scale2)
# persp(xyz$x, xyz$y, (z - zmin)/scale, xlab = xname[1], ylab = xname[
# 2], zlab = ylab, ...)
persp(xyz$x, xyz$y, z, xlab = xname[1], ylab = xname[2], zlab = ylab,
...)
invisible(scale)
}
| /R/gplot.matrix.R | no_license | cran/gam | R | false | false | 1,375 | r | #' @method gplot matrix
#' @export
#' @export gplot.matrix
"gplot.matrix" <-
function(x, y, se.y = NULL, xlab, ylab, residuals = NULL, rugplot = FALSE, scale =
0, se = FALSE, fit, ...)
{
if(ncol(x) != 2) {
warning(paste("A perspective plot was requested for \"", ylab,
"\" but the \"x\" variable has dimension other than 2",
sep = ""))
invisible(return(0))
}
bivar.dup <- function(x)
{
if(is.null(dx <- dim(x)) || dx[2] > 2)
stop("x must be bivariate")
duplicated(x[, 1] + (1i) * x[, 2])
}
xname <- dimnames(x)[[2]]
dups <- bivar.dup(x)
if (requireNamespace("interp", quietly = TRUE)) {
xyz <- interp::interp(x[!dups, 1], x[!dups, 2], y[!dups])
} else {
stop("You need to install the package 'interp' from the R contributed libraries to use this plotting method for bivariate functions")
}
zmin <- min(xyz$z[!is.na(xyz$z)])
z <- ifelse(is.na(xyz$z), zmin, xyz$z)
scale2 <- diff(range(z))
# Adjust scale
scale <- max(scale, scale2)
# persp(xyz$x, xyz$y, (z - zmin)/scale, xlab = xname[1], ylab = xname[
# 2], zlab = ylab, ...)
persp(xyz$x, xyz$y, z, xlab = xname[1], ylab = xname[2], zlab = ylab,
...)
invisible(scale)
}
|
CoefVar <- function(data, type = 1) {
# Coefficient of variation (in percent) of `data`.
# Function developed by Paulo Cesar Ossani in 05/2016.
#
# Args:
#   data: matrix or data frame with the values to analyse.
#   type: 1 - overall coefficient of variation (default)
#         2 - coefficient of variation per column
#
# Returns:
#   CVar: one-column matrix with the coefficient(s) of variation in %.
if (!is.data.frame(data) && !is.matrix(data))
stop("'data' input is incorrect, it should be of type data frame. Verify!")
if (type!=1 && type!=2)
stop("'type' input is incorrect, it should be numeric, being 1 or 2. Verify!")
data <- as.matrix(data) # values to analyse
if (type==1) { # overall coefficient of variation over all elements
CVar <- as.matrix(sd(data)/mean(data) * 100)
colnames(CVar) <- c("C.V. in %")
}
if (type==2) { # coefficient of variation per column
Media <- apply(data, 2, mean) # column means
data <- sweep(data, 2, Media, FUN = "-") # center each column on its mean
Desvio <- sqrt(colSums(data^2)/(nrow(data)-1)) # sample standard deviation per column
CVar <- as.matrix(Desvio/Media * 100)
# FIX: this English version labelled the type-2 result with the
# Portuguese "C.V. em %" while type 1 used "C.V. in %"; made consistent.
colnames(CVar) <- c("C.V. in %")
}
return(CVar)
}
| /R/CoefVar_English.R | no_license | cran/MVar | R | false | false | 1,218 | r | CoefVar <- function(data, type = 1) {
# Encontra o Coeficiente de Variacao dos dados,
# funcao desenvolvida por Paulo Cesar Ossani em 05/2016
# data - Dados a serem analizados
# type - 1 Coefiente de variacao global (default)
# 2 Coefiente de variacao por coluna
# Retorna:
# CVar - Coeficiente de variacao
if (!is.data.frame(data) && !is.matrix(data))
stop("'data' input is incorrect, it should be of type data frame. Verify!")
if (type!=1 && type!=2)
stop("'type' input is incorrect, it should be numeric, being 1 or 2. Verify!")
data <- as.matrix(data) # Dados a serem analizados
if (type==1) { # Coeficiente de variacao global
CVar <- as.matrix(sd(data)/mean(data) * 100)
colnames(CVar) <- c("C.V. in %")
}
if (type==2) { # Coeficiente de variacao por coluna
Media <- apply(data, 2, mean) # encontra as medias por colunas
data <- sweep(data, 2, Media, FUN = "-") # Centraliza na media
Desvio <- sqrt(colSums(data^2)/(nrow(data)-1)) # raiz da soma do quadrado - desvio padrao amostral
CVar <- as.matrix(Desvio/Media * 100)
colnames(CVar) <- c("C.V. em %")
}
return(CVar)
}
|
# We want to get the ROC information, then compute the AUC (adjusting for the
# maximum FPR amongst the paths). Then, take an average over 100 runs so that
# we get an accurate idea of how the estimator performs.
nruns<-100
# One AUC value per simulation run.
auc=matrix(0,nrow=nruns,ncol=1)
for (t in 1:nruns){
# Generate MVN data and true structure
# NOTE(review): Generate.R is expected to define `Gaussdata` and the true
# structure `a` used below -- confirm against that script.  sugm()
# presumably comes from the 'flare' package and huge.roc() from 'huge';
# neither is attached here, so they must be loaded elsewhere.
source('~/Tripos/III/[Essay] Graphical Modelling for High-Dimensional Data/R scripts/Generate.R')
out.clime = sugm(Gaussdata,method="clime",nlambda=30) #perform CLIME
h<-huge.roc(out.clime$path,a)
auc[t]<-h$AUC + h$tp[30]*(1-h$fp[30]) + 0.5*(1-h$tp[30])*(1-h$fp[30])
# this adjusts for the maximum TPR and FPR on the curve, by filling the rest in linearly
print(t)
}
# Summarise performance over the runs.
mean_area<-mean(auc)
median_area<-median(auc)
print(mean_area)
print(median_area) | /R scripts/rocCLIME.R | no_license | oksmith/Part-III-essay | R | false | false | 801 | r | # We want to get the ROC information, then compute the AUC (adjusting for the
# maximum FPR amongst the paths). Then, take an average over 100 runs so that
# we get an accurate idea of how the estimator performs.
nruns<-100
auc=matrix(0,nrow=nruns,ncol=1)
for (t in 1:nruns){
# Generate MVN data and true structure
source('~/Tripos/III/[Essay] Graphical Modelling for High-Dimensional Data/R scripts/Generate.R')
out.clime = sugm(Gaussdata,method="clime",nlambda=30) #perform CLIME
h<-huge.roc(out.clime$path,a)
auc[t]<-h$AUC + h$tp[30]*(1-h$fp[30]) + 0.5*(1-h$tp[30])*(1-h$fp[30])
# this adjusts for the maximum TPR and FPR on the curve, by filling the rest in linearly
print(t)
}
mean_area<-mean(auc)
median_area<-median(auc)
print(mean_area)
print(median_area) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merge-fields.R
\name{merge_field_create}
\alias{merge_field_create}
\title{Create a merge field for a list}
\usage{
merge_field_create(
api = NULL,
list_id,
name,
type = c("text", "number", "address", "phone", "date", "url", "image", "url", "radio",
"dropdown", "birthday", "zip"),
required = FALSE
)
}
\arguments{
\item{api}{Character. Your private api key. If api is `NULL`, the environment variable `Sys.getenv("mailchimp_api")` is used.}
\item{list_id}{Character. The ID of a list. See `list_get()`.}
\item{name}{Character. The name of the merge_field. See `merge_field_get()`.}
\item{type}{Character. The type of the merge field. Can be one of "text", "number", "address",
"phone", "date", "url", "image", "radio", "dropdown", "birthday" or "zip".}
\item{required}{Boolean. Is this merge field required?}
}
\description{
Create a merge field for a list
}
| /man/merge_field_create.Rd | no_license | theoroe3/mailchimpr | R | false | true | 963 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merge-fields.R
\name{merge_field_create}
\alias{merge_field_create}
\title{Create a merge field for a list}
\usage{
merge_field_create(
api = NULL,
list_id,
name,
type = c("text", "number", "address", "phone", "date", "url", "image", "url", "radio",
"dropdown", "birthday", "zip"),
required = FALSE
)
}
\arguments{
\item{api}{Character. Your private api key. If api is `NULL`, the environment variable `Sys.getenv("mailchimp_api")` is used.}
\item{list_id}{Character. The ID of a list. See `list_get()`.}
\item{name}{Character. The name of the merge_field. See `merge_field_get()`.}
\item{type}{Character. The type of the merge field. Can be one of "text", "number", "address",
"phone", "date", "url", "image", "url", "radio", "dropdown", "birthday" or "zip".}
\item{required}{Boolean. Is this merge field required?}
}
\description{
Create a merge field for a list
}
|
library(shiny)
library(shinycssloaders)
library(shinyhelper)
#create_help_files(files = c("numericInput", "plotOutput"),
# help_dir = "helpfiles")
# Build a numericInput wrapped with a shinyhelper "?" icon; clicking the icon
# shows the markdown help file "numericInput" from the help directory.
numericInput_h <- function(id, label, value, ...) {
  widget <- numericInput(id, label, value, ...)
  helper(
    widget,
    icon = "question-circle",
    colour = "blue",
    type = "markdown",
    content = "numericInput"
  )
}
# Build a spinner-wrapped plotOutput with a shinyhelper "?" icon; clicking the
# icon shows the markdown help file "plotOutput" from the help directory.
plotOutput_h <- function(id, ...) {
  widget <- withSpinner(plotOutput(id, ...))
  helper(
    widget,
    icon = "question-circle",
    colour = "blue",
    type = "markdown",
    content = "plotOutput"
  )
}
# UI: two helper-decorated plots stacked above the numeric input that drives
# both of them.
ui <- fluidPage(
  plotOutput_h("simple_plot", height = "350px"),
  plotOutput_h("simpler_plot", height = "350px"),
  numericInput_h("n_points", label = "Number of points", value = 15)
)
# Server: register the shinyhelper click observers, then render both plots
# from the shared point-count input (the second plot doubles the y values).
server <- function(input, output) {
  observe_helpers()
  output$simple_plot <- renderPlot({
    plot(1L:input$n_points, type = "b")
  })
  output$simpler_plot <- renderPlot({
    plot(1L:input$n_points * 2, type = "b")
  })
}
shinyApp(ui = ui, server = server) | /programative-component-generation/Przybyłek_Paulina/polecenie_2.R | no_license | p-przybylek/SandalenCamp2021 | R | false | false | 1,118 | r | library(shiny)
library(shinycssloaders)
library(shinyhelper)
#create_help_files(files = c("numericInput", "plotOutput"),
# help_dir = "helpfiles")
numericInput_h <- function(id, label, value,...){
helper(numericInput(id, label, value, ...),
icon = "question-circle",
colour = "blue",
type = "markdown",
content = "numericInput")
}
plotOutput_h <- function(id, ...){
helper(withSpinner(plotOutput(id, ...)),
icon = "question-circle",
colour = "blue",
type = "markdown",
content = "plotOutput")
}
ui <- fluidPage(plotOutput_h("simple_plot", height = "350px"),
plotOutput_h("simpler_plot", height = "350px"),
numericInput_h("n_points", label = "Number of points", value = 15))
server <- function(input, output) {
observe_helpers()
output[["simple_plot"]] <- renderPlot({
plot(1L:input[["n_points"]], type = "b")
})
output[["simpler_plot"]] <- renderPlot({
plot(1L:input[["n_points"]] * 2, type = "b")
})
}
shinyApp(ui = ui, server = server) |
#10-5-20
# Read the ortholog CDS multiple-sequence-alignment tables, one per
# xylose-pathway gene; the first column (V1) holds the taxon identifiers
# used throughout the rest of this script.
#read in ortholog cds matrices
xyl1<- read.delim("~/xylose_optimization_project/data/orthologs/cds/xyl1_cds_MSA.tsv",
stringsAsFactors=FALSE)
xyl2<- read.delim("~/xylose_optimization_project/data/orthologs/cds/xyl2_cds_MSA.tsv",
stringsAsFactors=FALSE)
xyl3<- read.delim("~/xylose_optimization_project/data/orthologs/cds/xyl3_cds_MSA.tsv",
stringsAsFactors=FALSE)
tal1<- read.delim("~/xylose_optimization_project/data/orthologs/cds/tal1_cds_MSA.tsv",
stringsAsFactors=FALSE)
tkl1<- read.delim("~/xylose_optimization_project/data/orthologs/cds/tkl1_cds_MSA.tsv",
stringsAsFactors=FALSE)
###NOTE - as of 10/5/20 - the ortholog search for TKL genes looks like it was overly conservative,
#or perhaps not sensitive enough. I am switching to using the orthoMCL cluster containing Scer tkl1
#from Shen et al. 2018 Cell. For now we can use the old file, and I'll push the new file
#ASAP.
#remove identical sequences using the unique() command
#then, note which taxa remain duplicated with which() command
# unique() on a data frame drops fully identical rows only, so a taxon that
# appears with different sequences is kept more than once (see the commented
# *_dups lines for how such taxa were inspected).
xyl1<-unique(xyl1)
#xyl1_dups<-xyl1[which(duplicated(xyl1$V1)), 1]
xyl2<-unique(xyl2)
#xyl2_dups<-xyl2[which(duplicated(xyl2$V1)), 1]
xyl3<-unique(xyl3)
#xyl3_dups<-xyl3[which(duplicated(xyl3$V1)), 1]
tal1<-unique(tal1)
#tal1_dups<-tal1[which(duplicated(tal1$V1)), 1]
tkl1<-unique(tkl1)
#tkl1_dups<-tkl1[which(duplicated(tkl1$V1)), 1]
#make a master list of all taxa for all genes
all_taxa <- unique(c(xyl1$V1, xyl2$V1, xyl3$V1, tal1$V1, tkl1$V1))
#make a matrix of taxa x gene ID (1 = ortholog found for that taxon)
##ex.
######## xyl1 ##### xyl2 ##### xyl3 #####
#taxa1 1 0 1
######
#taxa2 0 1 1
xylose_utilization_gene_presence <- data.frame(all_taxa,
                                               stringsAsFactors = FALSE)
# Vectorized presence/absence: %in% over the whole taxon vector replaces
# the original five copy-pasted row-by-row loops (identical result,
# same column order: xyl1, xyl2, xyl3, tkl1, tal1).
xylose_utilization_gene_presence$xyl1 <- as.integer(all_taxa %in% xyl1[, 1])
xylose_utilization_gene_presence$xyl2 <- as.integer(all_taxa %in% xyl2[, 1])
xylose_utilization_gene_presence$xyl3 <- as.integer(all_taxa %in% xyl3[, 1])
xylose_utilization_gene_presence$tkl1 <- as.integer(all_taxa %in% tkl1[, 1])
xylose_utilization_gene_presence$tal1 <- as.integer(all_taxa %in% tal1[, 1])
write.table(xylose_utilization_gene_presence,
            "~/xylose_optimization_project/data/XYLpthwy_gene_presence_absence_matrix.txt",
            sep = "\t", quote = FALSE, row.names = FALSE)
# Per-species relative-adaptiveness (wi) values per codon from
# Labella et al.: one row per species, one column per codon.
wi_vals<-read.delim("~/xylose_optimization_project/data/labella_et_al/wi_values.txt",
stringsAsFactors=FALSE)
# are there any spp. in our lists that are not in abbey's wi values?
which(!xylose_utilization_gene_presence[,1] %in% wi_vals[,1])
xylose_utilization_gene_presence[which(!xylose_utilization_gene_presence[,1] %in% wi_vals[,1]), 1]
# are there any spp. in abbey's list that are not in ours?
which(!wi_vals[,1] %in% xylose_utilization_gene_presence[,1])->x
wi_vals[x,1]
### start a new section of code! ;) In this section we will
#write code to calculate the mean wi value for each gene (estAI)
# EnvStats supplies geoMean(), used below for the stAI geometric mean.
require(EnvStats)
# Start from the presence/absence matrix so rows line up by species,
# then blank the gene columns to hold stAI values instead of 0/1.
stAI_dataframe<-xylose_utilization_gene_presence
stAI_dataframe$xyl1<-NA
stAI_dataframe$xyl2<-NA
stAI_dataframe$xyl3<-NA
stAI_dataframe$tal1<-NA
stAI_dataframe$tkl1<-NA
#make new columns for 2nd copy of genes
# CAUTION: these are appended in the order xyl1.2, xyl2.2, xyl3.2,
# tal1.2, tkl1.2 -- i.e. tal1.2 is column 10 and tkl1.2 is column 11,
# the REVERSE of the tkl1(5)/tal1(6) order above.  Any code addressing
# the second-copy columns by position must account for this.
stAI_dataframe$xyl1.2<-NA
stAI_dataframe$xyl2.2<-NA
stAI_dataframe$xyl3.2<-NA
stAI_dataframe$tal1.2<-NA
stAI_dataframe$tkl1.2<-NA
#iterate by row of the xylose_ut_df and check for values of 1
#start
# ---------------------------------------------------------------------------
# Per-gene stAI (species tRNA Adaptation Index) computation.
#
# Replaces five nearly identical copy-pasted loops (one per gene) with one
# helper applied to each gene.  This also fixes a real bug: the originals
# indexed the "second copy" columns by POSITION (7..11), but the *.2
# columns were created in the order xyl1.2, xyl2.2, xyl3.2, tal1.2, tkl1.2,
# so the tkl1 loop wrote its second-copy stAI into the tal1.2 column (10)
# and the tal1 loop wrote into tkl1.2 (11).  Writing by column NAME makes
# that impossible.
# ---------------------------------------------------------------------------

# stAI of one coding sequence: geometric mean of the per-codon wi values.
#   seq_chars : character vector of single nucleotides (one CDS row)
#   wi_row    : row index into wi_vals for this species
codon_stAI <- function(seq_chars, wi_row) {
  wi_by_codon <- numeric()
  for (j in seq(1, length(seq_chars), 3)) {
    codon <- paste(seq_chars[j], seq_chars[j + 1], seq_chars[j + 2], sep = "")
    # NOTE(review): grepl() does substring matching, so a codon could in
    # principle match several column names; assumes wi_vals column names
    # are exactly the codons -- confirm against the input file.
    codon_column <- which(grepl(codon, colnames(wi_vals), ignore.case = TRUE))
    wi_by_codon <- append(wi_by_codon, as.numeric(wi_vals[wi_row, codon_column]))
  }
  geoMean(wi_by_codon)
}

# Extract CDS row `row_idx` of `gene_df` (nucleotides start at column 5)
# and trim the trailing NA padding left by the MSA alignment, if any.
extract_cds <- function(gene_df, row_idx) {
  cds <- as.character(gene_df[row_idx, (5:ncol(gene_df))])
  if (is.na(gene_df[row_idx, ncol(gene_df)])) {
    first_na <- which(is.na(cds))[1]
    cds <- cds[1:(first_na - 1)]
  }
  cds
}

# For every species that has the gene (per the presence/absence matrix),
# fill the gene's stAI column -- and, when the species carries a second
# ortholog copy, the matching "<gene>.2" column.
gene_dfs <- list(xyl1 = xyl1, xyl2 = xyl2, xyl3 = xyl3,
                 tkl1 = tkl1, tal1 = tal1)
for (gene_name in names(gene_dfs)) {
  gene_df <- gene_dfs[[gene_name]]
  copy2_name <- paste(gene_name, "2", sep = ".")
  for (i in seq_len(nrow(xylose_utilization_gene_presence))) {
    if (xylose_utilization_gene_presence[i, gene_name] == 1) {
      sp <- xylose_utilization_gene_presence[i, 1]
      x <- which(gene_df[, 1] == sp)   # CDS row(s) for this species
      y <- which(wi_vals[, 1] == sp)   # wi-value row for this species
      stAI_dataframe[i, gene_name] <- codon_stAI(extract_cds(gene_df, x[1]), y)
      # Second ortholog copy, if present, goes into "<gene>.2" BY NAME.
      if (length(x) > 1) {
        stAI_dataframe[i, copy2_name] <- codon_stAI(extract_cds(gene_df, x[2]), y)
      }
    }
  }
}
# Drop loop temporaries from the stAI computation above.
rm(all_taxa, codon, codon_column, codon_wi_val, gene_x,
gene_x_wi_vals,i, j, stAI, x, y, z)
#import genome-wide stAI values
# One tAI value per gene per species; used below as the reference
# distribution each pathway gene's stAI is ranked against (estAI).
genome_wide_tAI<-read.delim("~/xylose_optimization_project/data/labella_et_al/cds_mito_processed_tAI_recalc/genome_wide_tAI_all_spp.txt")
#after unzipping before import had to correct taxa names in file on command line using sed to replace
#underscores with spaces.
#have to fix taxa names
x<-as.character(genome_wide_tAI$taxa)
taxa_IDs<-x
# Reduce each label to "genus species": labels carrying a yHMP/yHAB
# strain-collection prefix keep words 2-3, all others keep words 1-2.
for (i in 1:length(x)){
if (grepl("yHMP", x[i]) | grepl("yHAB", x[i])){
name<-paste(strsplit(x[i], " ")[[1]][2], strsplit(x[i], " ")[[1]][3], sep=" ")
}
else{
name<-paste(strsplit(x[i], " ")[[1]][1], strsplit(x[i], " ")[[1]][2], sep=" ")
}
taxa_IDs[i]<-name
}
####
genome_wide_tAI$taxa<-taxa_IDs
###NAMES FIXED ABOVE
#Hash out and import in the future.
# NOTE(review): this overwrites the input file in place with the cleaned
# names; rerunning appears safe only because the loop above is a no-op on
# already-clean "genus species" labels -- confirm before relying on it.
write.table(genome_wide_tAI, "~/xylose_optimization_project/data/labella_et_al/cds_mito_processed_tAI_recalc/genome_wide_tAI_all_spp.txt", sep="\t", quote=FALSE, row.names=FALSE)
#Create empty dataframe to populate with estAI values
# Copy the stAI frame's shape, then blank every value column in one
# vectorized assignment (replaces the original cell-by-cell double loop).
estAI_df <- stAI_dataframe
estAI_df[, 2:ncol(estAI_df)] <- NA
genome_wide_tAI$taxa <- as.character(genome_wide_tAI$taxa)
stAI_dataframe$all_taxa <- as.character(stAI_dataframe$all_taxa)
# estAI = empirical quantile of the gene's stAI within that species'
# genome-wide tAI distribution (Labella et al. approach, via ecdf()).
for (i in 2:ncol(stAI_dataframe)) {
  for (j in seq_len(nrow(stAI_dataframe))) {
    if (!is.na(stAI_dataframe[j, i])) {
      species_tAI_vals <- genome_wide_tAI[which(genome_wide_tAI$taxa == stAI_dataframe$all_taxa[j]), 2]
      ecdf_func <- ecdf(species_tAI_vals)
      estAI_df[j, i] <- ecdf_func(stAI_dataframe[j, i])
    }
  }
}
rm(ecdf_func, i, j, name, species_tAI_vals, taxa_IDs, x)
####writing tAI, estAI results as tables to import
# FIX: write.table's argument is `row.names`, not `row.name` -- the
# original relied on partial argument matching.
write.table(stAI_dataframe, "~/xylose_optimization_project/data/spp_by_gene_tAI_vals.txt", sep="\t", quote=FALSE, row.names=FALSE)
write.table(estAI_df, "~/xylose_optimization_project/data/spp_by_gene_estAI_vals.txt", sep="\t", quote=FALSE, row.names=FALSE)
###Abbe's paper uses the empirical distribution function to find
#the percent of genes with lower tAI vals. Does this differ from
#our division function?
# create ecdf distribution function
# ecdf_func<-ecdf(species_tAI_vals)
# ecdf_func(stAI_dataframe[j,i])
# I did a couple by hand and the numbers are the exact same.
# I guess there's no harm in doing EXACTLY what Abbe did,
# I'll adjust the code
#Compare histograms of estAI vals between genes
# NOTE(review): quartz() opens a graphics device on macOS only --
# use x11()/windows() (or simply omit it) on other platforms.
quartz()
par(mar=c(2,2,2,2))
par(mfrow=c(2,3))
hist(estAI_df$xyl1)
hist(estAI_df$xyl2)
hist(estAI_df$xyl3)
hist(estAI_df$tkl1)
hist(estAI_df$tal1)
#strategy for finding good xylose species - find species for which
#all genes are in top 10% of respective distributions
#we can't directly compare tAI values (raw data)
#we have to compare estAI values (normalized data)
#first just making a df of just the highest estAI
#for those spp with more than one.
max_estAI_df <- estAI_df
# BUG FIX: the original compared estAI_df[j, i] with estAI_df[(j+5), i],
# i.e. the same gene for the species FIVE ROWS DOWN.  The duplicate-copy
# values live in extra COLUMNS (xyl1.2 ... tkl1.2), not rows -- and the
# *.2 columns are not even at a fixed +5 offset (tal1.2/tkl1.2 are
# swapped relative to columns 5/6), so the copy-2 column is looked up
# by NAME.
for (i in 2:6) {
  copy2 <- paste(colnames(estAI_df)[i], "2", sep = ".")
  for (j in seq_len(nrow(estAI_df))) {
    if (!is.na(estAI_df[j, i]) & !is.na(estAI_df[j, copy2])) {
      max_estAI_df[j, i] <- max(estAI_df[j, i], estAI_df[j, copy2])
    }
  }
}
# keep only the taxon column plus the five per-gene maxima
max_estAI_df <- max_estAI_df[1:6]
#can import from now on
max_estAI_df$all_taxa <- tolower(max_estAI_df$all_taxa)
write.table(max_estAI_df, "~/xylose_optimization_project/data/spp_by_gene_maximum_paralog_estAI_vals.txt", sep="\t", quote=FALSE, row.names=FALSE)
#now finding spp in the top X percent of every gene's estAI distribution
# Shared helper replacing two copy-pasted loops.  quantile(na.rm = TRUE)
# matches the original, whose sort() silently dropped NAs before the
# quantile call.  Returns the species present above the `q` quantile of
# EVERY gene column simultaneously.
spp_above_quantile <- function(df, q) {
  spp_set <- as.character(unique(df$all_taxa))
  for (i in 2:ncol(df)) {
    cutoff <- quantile(df[, i], q, na.rm = TRUE)
    passing <- df[which(df[, i] >= cutoff), 1]
    spp_set <- spp_set[which(spp_set %in% passing)]
  }
  spp_set
}
toptens <- spp_above_quantile(max_estAI_df, 0.90)
##3 spp in 90th percentile for xyl1, xyl2, xyl3:
#spathaspora gorwiae
#kluyveromyces aestuarii
#sugiyamaella lignohabitans
##1 sp in 90th percentile for xyl1, xyl2, xyl3, tkl1:
#spathaspora gorwiae
#0 spp in top 10% for all 5 genes
#wider net of top 25% (75th percentile)
top25s <- spp_above_quantile(max_estAI_df, 0.75)
#in top 25 for all 5 genes:
# spathaspora gorwiae
# spathaspora hagerdaliae
# spathaspora girioi
# kodamaea ohmeri
###EXAMINGING CORRELLATIONS BTW XYLOSE ESTAI AND XYLOSE GROWTH DATA
#data from Dana Opulente
#have to merge spp. to keys.
# key maps the growth assay's "PU." strain index to species names.
key<-read.delim("~/xylose_optimization_project/data/Spp_indices.txt", strip.white = TRUE)
key$Species<-tolower(key$Species)
growth_data<-read.delim("~/xylose_optimization_project/data/Xylose_growth_data_DO.txt", strip.white = TRUE)
x<-merge(growth_data, key[c(1,3)], by="PU.")
# NOTE(review): x$all_taxa is referenced here, yet column 3 of x is only
# renamed to "all_taxa" at the END of this section -- this works only if
# the merged column already carries that name; confirm against the file.
which(!x$all_taxa %in% max_estAI_df$all_taxa)->not_in_data
#Ok - the spp. we have that are NOT in the tree mostly seem to be due to renaming.
#looking up possible new spp. names using second name of sp.
# For each unmatched species, look for a UNIQUE estAI taxon sharing the
# same species epithet (second word) and adopt that spelling.
for (i in 1:length(not_in_data)){
x[(not_in_data[i]), 3]->old_sp
spName<-strsplit(old_sp, " ")[[1]][2]
possiblechange<-max_estAI_df$all_taxa[which(grepl(spName, max_estAI_df$all_taxa))]
if (length(possiblechange)==1){
x[(not_in_data[i]), 3]<-possiblechange
}
}
# Remaining mismatches are renamed by hand.  These indices into
# not_in_data are position-sensitive: the block silently breaks if the
# input files (and hence the mismatch order) ever change.
which(!x$all_taxa %in% max_estAI_df$all_taxa)->not_in_data
x[(not_in_data[1]), 3]<-"hanseniaspora vinae"
x[(not_in_data[2]), 3]<-"spencermartinsiella europaea"
x[(not_in_data[3]), 3]<-"martiniozyma abiesophila"
x[(not_in_data[4]), 3]<-"nakaseomyces castellii"
#x[(not_in_data[5]), 3]<-albicans - which we don't have
x[(not_in_data[6]), 3]<-"suhomyces pyralidae"
x[(not_in_data[7]), 3]<-"metschnikowia lockheadii"
x[(not_in_data[8]), 3]<-"metschnikowia dekortum"
#x[(not_in_data[9]), 3]<-"metschnikowia gruessii"-> not sure who this is.
colnames(x)[3]<-"all_taxa"
# Join the per-gene max-paralog estAI values onto the growth data.
growth_rates_df<-merge(max_estAI_df, x, by="all_taxa")
# FIX: the original wrote to the RELATIVE path "xylose_optimization_project/..."
# while every other read/write in this script -- including the read-back
# in the correlation section below -- uses the home-anchored
# "~/xylose_optimization_project/..." path.
write.table(growth_rates_df, "~/xylose_optimization_project/data/growth_rate_master_df.txt", sep="\t", quote=FALSE, row.names=FALSE)
#######work from above table from now on
# Install ade4 only when missing -- an unconditional install.packages()
# in a script re-downloads on every run and may prompt for a CRAN
# mirror in non-interactive sessions.  (Replaces the original
# install.packages + require + library triplet.)
if (!requireNamespace("ade4", quietly = TRUE)) {
  install.packages("ade4")
}
library(ade4)
growth_rates<-read.delim("~/xylose_optimization_project/data/growth_rate_master_df.txt", stringsAsFactors = FALSE)
# Raw scatter first, then drop non-growers (rate <= 0) before correlating.
plot(x=growth_rates$xyl1, y=growth_rates$Growth.Rate)
filtered_growth_rates<-growth_rates[which(growth_rates$Growth.Rate>0), ]
plot(x=filtered_growth_rates$xyl1, y=filtered_growth_rates$Growth.Rate)
# Pearson correlation of each gene's max-paralog estAI vs. growth rate.
cor.test(x=filtered_growth_rates$xyl1, y=filtered_growth_rates$Growth.Rate, method = "pearson")
cor.test(x=filtered_growth_rates$xyl2, y=filtered_growth_rates$Growth.Rate, method = "pearson")
cor.test(x=filtered_growth_rates$xyl3, y=filtered_growth_rates$Growth.Rate, method = "pearson")
cor.test(x=filtered_growth_rates$tkl1, y=filtered_growth_rates$Growth.Rate, method = "pearson")
cor.test(x=filtered_growth_rates$tal1, y=filtered_growth_rates$Growth.Rate, method = "pearson")
########
library(ape)
require(stringr)
# 332-taxon species tree; tip labels are normalized to lowercase
# "genus species" to match the data frames built above.
tree<-read.tree("~/xylose_optimization_project/data/iTol_files/332_Newick_tree.txt")
key<-read.delim("~/xylose_optimization_project/data/Spp_indices.txt", strip.white = TRUE, stringsAsFactors = FALSE)
tree$tip.label
# NOTE: str_replace (not str_replace_all) swaps ONLY the FIRST
# underscore -- the genus/species separator -- so variety/nomen
# suffixes like "fantastica_nom_nud" keep their underscores, which the
# manual renames below rely on.
tree$tip.label<-str_replace(string = tree$tip.label, pattern = "_", replacement = " ")
tree$tip.label<-tolower(tree$tip.label)
tree$tip.label
growth_data<-read.delim("~/xylose_optimization_project/data/growth_rate_master_df.txt", stringsAsFactors = FALSE)
x<-growth_data
x$all_taxa[which(!x$all_taxa %in% tree$tip.label)]
#Ok - the spp. we have that are NOT in the tree mostly seem to be due to renaming.
#looking up possible new spp. names using second name of sp.
# Hand-curated renames; indices into not_in_data are position-sensitive
# and silently break if the inputs change.
which(!x$all_taxa %in% tree$tip.label)->not_in_data
x[(not_in_data[1]), 1]<-"blastobotrys raffinosifermentans"
#x[(not_in_data[2]), 1]<-"spencermartinsiella europaea"
x[(not_in_data[3]), 1]<-"hanseniaspora vineae"
x[(not_in_data[4]), 1]<-"lachancea fantastica_nom_nud"
x[(not_in_data[5]), 1]<-"magnusiomyces tetraspermus"
x[(not_in_data[6]), 1]<-"metschnikowia dekortorum"
x[(not_in_data[7]), 1]<-"metschnikowia lochheadii"
x[(not_in_data[8]), 1]<-"metschnikowia matae_var._matae"
x[(not_in_data[9]), 1]<-"metschnikowia matae_var._maris"
x[(not_in_data[10]), 1]<-"candida castellii"
x[(not_in_data[11]), 1]<-"ogataea philodendri"
x[(not_in_data[12]), 1]<-"ogataea populialbae"
#pulling candida azyma out b/c it's not in the tree
# NOTE(review): hard-coded row index -- verify row 24 is still
# candida azyma whenever the upstream data changes.
x<-x[-24, ]
#now need NA rows for those in the tree but not in the data
tree$tip.label[which(!tree$tip.label %in% x$all_taxa)]->all_taxa
tobind<-data.frame(all_taxa)
tobind$"xyl1"<-NA
tobind$"xyl2"<-NA
tobind$"xyl3"<-NA
tobind$"tkl1"<-NA
tobind$"tal1"<-NA
tobind$"PU."<-NA
tobind$"Growth.Rate"<-NA
x<-rbind(x, tobind)
# Record each species' position in the tree's tip order so the data
# frame can be sorted to match the phylogeny.
labs<-data.frame(tree$tip.label, c(1:length(tree$tip.label)))
colnames(labs)<-c("all_taxa", "phylo_order")
x<-merge(x, labs, by="all_taxa")
x<-x[order(x$phylo_order), ]
####################################################################
##IGNORE ABOVE -> fixed names in data to match 332 tree
###############
##Import the below written dataframe for PIC analysis
###################################################################
#write.table(x, "~/xylose_optimization_project/data/growth_rate_master_df_treematchednames.txt", sep="\t", quote=FALSE, row.names = FALSE)
#growth data and estAI vals
growth_data<-read.delim("~/xylose_optimization_project/data/growth_rate_master_df_treematchednames.txt")
#S values supplied by abbe's supp data
s_values<-read.delim("~/xylose_optimization_project/data/labella_et_al/s_values.txt", stringsAsFactors = FALSE)
#only taking spp with s values greater than .5
s_values<-s_values[which(s_values$species.name %in% growth_data$all_taxa), ]
colnames(s_values)<-c("all_taxa", "s_value")
growth_data<-merge(growth_data, s_values, by="all_taxa")
growth_data<-growth_data[which(growth_data$s_value > .5),]
#332 tree
library(ape)
require(stringr)
require(ggpubr)
# Same tip-label normalization as earlier: first underscore -> space,
# then lowercase, so tips match growth_data$all_taxa.
tree<-read.tree("~/xylose_optimization_project/data/iTol_files/332_Newick_tree.txt")
tree$tip.label<-str_replace(string = tree$tip.label, pattern = "_", replacement = " ")
tree$tip.label<-tolower(tree$tip.label)
#
require(ade4)
require(adephylo)
#for each orthogram I need to drop the tree taxa that are missing data
#they're in the same order - so should be easy
# NOTE(review): dropping tips by ROW INDEX of growth_data assumes its
# rows are in exactly the tree's tip order (the phylo_order sort done
# when the file was written) -- confirm before rerunning.
removes<-which(is.na(growth_data$xyl1))
xyl1_tree<-drop.tip(tree, tree$tip.label[removes])
removes<-which(is.na(growth_data$xyl2))
xyl2_tree<-drop.tip(tree, tree$tip.label[removes])
removes<-which(is.na(growth_data$xyl3))
xyl3_tree<-drop.tip(tree, tree$tip.label[removes])
removes<-which(is.na(growth_data$tkl1))
tkl1_tree<-drop.tip(tree, tree$tip.label[removes])
removes<-which(is.na(growth_data$tal1))
tal1_tree<-drop.tip(tree, tree$tip.label[removes])
removes<-which(is.na(growth_data$Growth.Rate))
growth_tree<-drop.tip(tree, tree$tip.label[removes])
#phylo <- ape::read.tree(text = tree)
# Orthogram tests for phylogenetic signal in each trait; author's read
# of each result is recorded after each call.
xyl1<-as.numeric(na.omit(growth_data$xyl1))
orthogram(xyl1, tre = xyl1_tree)
#xyl1=diffuse dependence
xyl2<-as.numeric(na.omit(growth_data$xyl2))
orthogram(xyl2, tre=xyl2_tree)
#xyl2= no phylogenetic dependence
xyl3<-as.numeric(na.omit(growth_data$xyl3))
orthogram(xyl3, tre=xyl3_tree)
#xyl3=diffuse dependence and specific node importance
tal1<-as.numeric(na.omit(growth_data$tal1))
orthogram(tal1, tre=tal1_tree)
#tal1 = no phylo dependence
tkl1<-as.numeric(na.omit(growth_data$tkl1))
orthogram(tkl1, tre=tkl1_tree)
#tkl1 = no phylo dependence
growth<-as.numeric(na.omit(growth_data$Growth.Rate))
orthogram(growth, growth_tree)
#growth = diffuse phylo dependence
##PIC below:
# Phylogenetically independent contrasts of one gene's estAI vs. xylose
# growth rate.  One helper replaces three copy-pasted sections
# (xyl1 / xyl2 / xyl3).
#   gene : column name in growth_data ("xyl1", "xyl2", "xyl3", ...)
# Prints the cor.test result and the annotated scatter plot; returns
# the contrast data frame invisibly.
pic_vs_growth <- function(gene) {
  # Drop species missing either the gene's estAI or the growth rate,
  # then prune the tree to the species that remain.
  removes <- which(is.na(growth_data[[gene]]) | is.na(growth_data$Growth.Rate))
  df <- growth_data[-removes, ]
  tip_drop <- which(!tree$tip.label %in% df$all_taxa)
  pic_tree <- drop.tip(tree, tree$tip.label[tip_drop])
  #calculate and compare PIC values
  PIC.gene <- pic(df[[gene]], pic_tree)
  PIC.growth <- pic(df$Growth.Rate, pic_tree)
  # Explicit print() needed: auto-printing does not happen inside a
  # function (neither for cor.test results nor for ggplot objects).
  print(cor.test(PIC.gene, PIC.growth))
  cordf <- data.frame(PIC.gene, PIC.growth)
  print(ggscatter(data = cordf, x = "PIC.gene", y = "PIC.growth",
                  col = "red", size = 2, add = "reg.line",
                  add.params = list(color = "blue", fill = "gray"),
                  cor.coeff.args = list(method = "pearson", label.sep = "\n"),
                  conf.int = TRUE, cor.coef = TRUE, cor.method = "pearson",
                  xlab = paste("PIC", gene), ylab = "PIC xylose growth rate"))
  invisible(cordf)
}
#PIC growth data x XYL1
pic_vs_growth("xyl1")
#PIC growth data x XYL2
pic_vs_growth("xyl2")
#PIC growth data x XYL3
pic_vs_growth("xyl3")
###### Comparison of growers and non-growers
growers<-growth_data[which(growth_data$Growth.Rate>0), ]
nongrowers<-growth_data[which(growth_data$Growth.Rate <= 0), ]
growth_data<-growth_data[which(!is.na(growth_data$Growth.Rate)), ]
# Vectorized replacement of the original per-row loop, with semantics
# kept exactly: rate > 0 -> 1, rate == 0 -> 0, and (as in the original,
# which handled only those two cases) a NEGATIVE rate remains NA.
growth_data$growth.binary <- ifelse(growth_data$Growth.Rate > 0, 1,
                                    ifelse(growth_data$Growth.Rate == 0, 0, NA))
# Codon optimization (xyl1 estAI) split by grower/non-grower status.
boxplot(growth_data$xyl1 ~ as.factor(growth_data$growth.binary),
        col="lightblue", xlab="codon optimization")
getwd()
# How many s-value species are absent from the tree?
length(which(!s_values$all_taxa %in% tree$tip.label))
| /scripts/Processing_xylose_MSA_tsvs.R | no_license | KatieFish/xylose_optimization_project | R | false | false | 28,203 | r |
#10-5-20
#read in ortholog cds matrices
# (Duplicated copy of the script's opening section.)
# Each *_cds_MSA.tsv holds one gene's aligned coding sequences:
# column 1 (V1) is the taxon name, and (per the slicing later in this
# script) the nucleotides occupy columns 5 onward, one base per cell.
# Assumes columns 2-4 are alignment metadata -- TODO confirm.
xyl1<- read.delim("~/xylose_optimization_project/data/orthologs/cds/xyl1_cds_MSA.tsv",
stringsAsFactors=FALSE)
xyl2<- read.delim("~/xylose_optimization_project/data/orthologs/cds/xyl2_cds_MSA.tsv",
stringsAsFactors=FALSE)
xyl3<- read.delim("~/xylose_optimization_project/data/orthologs/cds/xyl3_cds_MSA.tsv",
stringsAsFactors=FALSE)
tal1<- read.delim("~/xylose_optimization_project/data/orthologs/cds/tal1_cds_MSA.tsv",
stringsAsFactors=FALSE)
tkl1<- read.delim("~/xylose_optimization_project/data/orthologs/cds/tkl1_cds_MSA.tsv",
stringsAsFactors=FALSE)
###NOTE - as of 10/5/20 - the ortholog search for TKL genes looks like it was overly conservative,
#or perhaps not sensitive enough. I am switching to using the orthoMCL cluster containing Scer tkl1
#from Shen et al. 2018 Cell. For now we can use the old file, and I'll push the new file
#ASAP.
#remove identical sequences using the unique() command
#then, note which taxa remain duplicated with which() command
# unique() drops byte-identical ROWS only; a taxon can still appear
# twice with two distinct ortholog copies (handled later through the
# *.2 "second copy" columns).
xyl1<-unique(xyl1)
#xyl1_dups<-xyl1[which(duplicated(xyl1$V1)), 1]
xyl2<-unique(xyl2)
#xyl2_dups<-xyl2[which(duplicated(xyl2$V1)), 1]
xyl3<-unique(xyl3)
#xyl3_dups<-xyl3[which(duplicated(xyl3$V1)), 1]
tal1<-unique(tal1)
#tal1_dups<-tal1[which(duplicated(tal1$V1)), 1]
tkl1<-unique(tkl1)
#tkl1_dups<-tkl1[which(duplicated(tkl1$V1)), 1]
#make a master list of all taxa for all genes
all_taxa <- unique(c(xyl1$V1, xyl2$V1, xyl3$V1, tal1$V1, tkl1$V1))
#make a matrix of taxa x gene ID (1 = ortholog found for that taxon)
##ex.
######## xyl1 ##### xyl2 ##### xyl3 #####
#taxa1 1 0 1
######
#taxa2 0 1 1
xylose_utilization_gene_presence <- data.frame(all_taxa,
                                               stringsAsFactors = FALSE)
# Vectorized presence/absence: %in% over the whole taxon vector replaces
# the original five copy-pasted row-by-row loops (identical result,
# same column order: xyl1, xyl2, xyl3, tkl1, tal1).
xylose_utilization_gene_presence$xyl1 <- as.integer(all_taxa %in% xyl1[, 1])
xylose_utilization_gene_presence$xyl2 <- as.integer(all_taxa %in% xyl2[, 1])
xylose_utilization_gene_presence$xyl3 <- as.integer(all_taxa %in% xyl3[, 1])
xylose_utilization_gene_presence$tkl1 <- as.integer(all_taxa %in% tkl1[, 1])
xylose_utilization_gene_presence$tal1 <- as.integer(all_taxa %in% tal1[, 1])
write.table(xylose_utilization_gene_presence,
            "~/xylose_optimization_project/data/XYLpthwy_gene_presence_absence_matrix.txt",
            sep = "\t", quote = FALSE, row.names = FALSE)
# Per-species relative-adaptiveness (wi) values per codon from
# Labella et al.: one row per species, one column per codon.
wi_vals<-read.delim("~/xylose_optimization_project/data/labella_et_al/wi_values.txt",
stringsAsFactors=FALSE)
# are there any spp. in our lists that are not in abbey's wi values?
which(!xylose_utilization_gene_presence[,1] %in% wi_vals[,1])
xylose_utilization_gene_presence[which(!xylose_utilization_gene_presence[,1] %in% wi_vals[,1]), 1]
# are there any spp. in abbey's list that are not in ours?
which(!wi_vals[,1] %in% xylose_utilization_gene_presence[,1])->x
wi_vals[x,1]
### start a new section of code! ;) In this section we will
#write code to calculate the mean wi value for each gene (estAI)
# EnvStats supplies geoMean(), used below for the stAI geometric mean.
require(EnvStats)
# Start from the presence/absence matrix so rows line up by species,
# then blank the gene columns to hold stAI values instead of 0/1.
stAI_dataframe<-xylose_utilization_gene_presence
stAI_dataframe$xyl1<-NA
stAI_dataframe$xyl2<-NA
stAI_dataframe$xyl3<-NA
stAI_dataframe$tal1<-NA
stAI_dataframe$tkl1<-NA
#make new columns for 2nd copy of genes
# CAUTION: these are appended in the order xyl1.2, xyl2.2, xyl3.2,
# tal1.2, tkl1.2 -- i.e. tal1.2 is column 10 and tkl1.2 is column 11,
# the REVERSE of the tkl1(5)/tal1(6) order above.  Any code addressing
# the second-copy columns by position must account for this.
stAI_dataframe$xyl1.2<-NA
stAI_dataframe$xyl2.2<-NA
stAI_dataframe$xyl3.2<-NA
stAI_dataframe$tal1.2<-NA
stAI_dataframe$tkl1.2<-NA
#iterate by row of the xylose_ut_df and check for values of 1
#start
for(i in 1:nrow(xylose_utilization_gene_presence)){
#check if gene is present for species
if(xylose_utilization_gene_presence[i,2]==1){
# create x variable that refers to row number in xyl1 of species
x<- which(xyl1[,1] == xylose_utilization_gene_presence[i,1])
# set up if statement to check if x has 2 numbers
y<- which(wi_vals[,1] == xylose_utilization_gene_presence[i,1])
#identify the gene sequence for the respective gene:
gene_x<-as.character(xyl1[x[1],(5:ncol(xyl1))])
#below if statement checks to see if NAs are at end of gene and then removes them
if(is.na(xyl1[x[1], ncol(xyl1)])){
z<-which(is.na(gene_x))
gene_x<-gene_x[1:(z[1]-1)]
}
#create a vector to store the wi values
gene_x_wi_vals<-numeric()
for(j in seq(1,length(gene_x), 3)){
codon<-paste(gene_x[j], gene_x[j+1], gene_x[j+2], sep="")
codon_column<-which(grepl(codon, colnames(wi_vals), ignore.case=TRUE))
codon_wi_val<-as.numeric(wi_vals[y,codon_column])
gene_x_wi_vals<-append(gene_x_wi_vals, codon_wi_val)
}
stAI<-geoMean(gene_x_wi_vals)
stAI_dataframe[i,2]<-stAI
# add an if statement for if there's a second copy
if(length(x)>1){
gene_x<-as.character(xyl1[x[2],(5:ncol(xyl1))])
if(is.na(xyl1[x[2], ncol(xyl1)])){
z<-which(is.na(gene_x))
gene_x<-gene_x[1:(z[1]-1)]
}
#create a vector to store the wi values
gene_x_wi_vals<-numeric()
for(j in seq(1,length(gene_x), 3)){
codon<-paste(gene_x[j], gene_x[j+1], gene_x[j+2], sep="")
codon_column<-which(grepl(codon, colnames(wi_vals), ignore.case=TRUE))
codon_wi_val<-as.numeric(wi_vals[y,codon_column])
gene_x_wi_vals<-append(gene_x_wi_vals, codon_wi_val)
}
stAI<-geoMean(gene_x_wi_vals)
stAI_dataframe[i,7]<-stAI
}
}
}
### Per-gene stAI calculation (previously four near-identical copy-pasted
### loops for xyl2, xyl3, tkl1 and tal1). Consolidated into two helpers plus
### four calls; the logic and the stAI_dataframe column layout are unchanged.

# Compute the stAI (geometric mean of per-codon wi values) for one gene copy.
#
# gene_df:  per-species gene table; column 1 = species name, columns 5..ncol
#           hold one nucleotide per cell (trailing cells may be NA padding).
# gene_row: row index in gene_df of the copy to score.
# wi_vals:  per-species codon wi table; column 1 = species name, remaining
#           columns named by codon.
# wi_row:   row index in wi_vals of the same species.
# Returns geoMean() over the wi values of every codon in the gene.
compute_copy_stAI <- function(gene_df, gene_row, wi_vals, wi_row) {
  gene_seq <- as.character(gene_df[gene_row, (5:ncol(gene_df))])
  # Trailing NAs mean this gene is shorter than the widest gene in the table;
  # trim everything from the first NA onward (same rule as the original loops).
  if (is.na(gene_df[gene_row, ncol(gene_df)])) {
    first_na <- which(is.na(gene_seq))
    gene_seq <- gene_seq[1:(first_na[1] - 1)]
  }
  copy_wi_vals <- numeric()
  # Walk the sequence three bases at a time and look up each codon's wi value.
  for (j in seq(1, length(gene_seq), 3)) {
    codon <- paste0(gene_seq[j], gene_seq[j + 1], gene_seq[j + 2])
    # NOTE(review): grepl() does substring matching, so a codon could in
    # principle match more than one column name; kept as in the original.
    codon_column <- which(grepl(codon, colnames(wi_vals), ignore.case = TRUE))
    copy_wi_vals <- append(copy_wi_vals, as.numeric(wi_vals[wi_row, codon_column]))
  }
  geoMean(copy_wi_vals)
}

# Fill one gene's stAI columns of stai_df for every species where the gene is
# present. presence_col flags presence in `presence` (column 1 = species name);
# primary_col receives the first copy's stAI and paralog_col the second copy's
# stAI when the species carries two copies. Returns the updated stai_df.
fill_gene_stAI <- function(stai_df, gene_df, presence, presence_col,
                           primary_col, paralog_col, wi_vals) {
  for (i in seq_len(nrow(presence))) {
    if (presence[i, presence_col] == 1) {
      # Rows of this species in the gene table (two rows == two gene copies).
      copies <- which(gene_df[, 1] == presence[i, 1])
      wi_row <- which(wi_vals[, 1] == presence[i, 1])
      stai_df[i, primary_col] <- compute_copy_stAI(gene_df, copies[1], wi_vals, wi_row)
      if (length(copies) > 1) {
        stai_df[i, paralog_col] <- compute_copy_stAI(gene_df, copies[2], wi_vals, wi_row)
      }
    }
  }
  stai_df
}

# xyl2 / xyl3 / tkl1 / tal1 (xyl1 is handled by the loop above). Primary stAI
# goes to columns 3:6, second-copy stAI to columns 8:11, as before.
stAI_dataframe <- fill_gene_stAI(stAI_dataframe, xyl2, xylose_utilization_gene_presence, 3, 3, 8, wi_vals)
stAI_dataframe <- fill_gene_stAI(stAI_dataframe, xyl3, xylose_utilization_gene_presence, 4, 4, 9, wi_vals)
stAI_dataframe <- fill_gene_stAI(stAI_dataframe, tkl1, xylose_utilization_gene_presence, 5, 5, 10, wi_vals)
stAI_dataframe <- fill_gene_stAI(stAI_dataframe, tal1, xylose_utilization_gene_presence, 6, 6, 11, wi_vals)
# Drop the per-iteration scratch variables left over from the stAI loops above.
rm(all_taxa, codon, codon_column, codon_wi_val, gene_x,
gene_x_wi_vals,i, j, stAI, x, y, z)
#import genome-wide stAI values (per-gene tAI values for every species)
genome_wide_tAI<-read.delim("~/xylose_optimization_project/data/labella_et_al/cds_mito_processed_tAI_recalc/genome_wide_tAI_all_spp.txt")
#after unzipping before import had to correct taxa names in file on command line using sed to replace
#underscores with spaces.
#have to fix taxa names: reduce each entry to "genus species" only.
x<-as.character(genome_wide_tAI$taxa)
taxa_IDs<-x
for (i in 1:length(x)){
# yHMP/yHAB entries carry a strain/collection ID as the first token, so the
# genus and species are presumably tokens 2 and 3 -- TODO confirm against the
# raw file.
if (grepl("yHMP", x[i]) | grepl("yHAB", x[i])){
name<-paste(strsplit(x[i], " ")[[1]][2], strsplit(x[i], " ")[[1]][3], sep=" ")
}
else{
name<-paste(strsplit(x[i], " ")[[1]][1], strsplit(x[i], " ")[[1]][2], sep=" ")
}
taxa_IDs[i]<-name
}
####
genome_wide_tAI$taxa<-taxa_IDs
###NAMES FIXED ABOVE
#Hash out and import in the future: the cleaned names are written back over the
#source file, so this fix-up block only needs to run once.
write.table(genome_wide_tAI, "~/xylose_optimization_project/data/labella_et_al/cds_mito_processed_tAI_recalc/genome_wide_tAI_all_spp.txt", sep="\t", quote=FALSE, row.names=FALSE)
#Create empty dataframe to populate with estAI values.
# Copy stAI_dataframe to inherit its dimensions and column names, then blank
# out every value column in one vectorized assignment (the original looped
# over every cell). NA_real_ keeps the columns numeric, as the cell-by-cell
# loop did; column 1 (taxa names) is kept.
estAI_df <- stAI_dataframe
estAI_df[, 2:ncol(estAI_df)] <- NA_real_
# Ensure both taxa-name columns are plain character vectors before matching.
genome_wide_tAI$taxa<-as.character(genome_wide_tAI$taxa)
stAI_dataframe$all_taxa<-as.character(stAI_dataframe$all_taxa)
# iterate through stAI df and populate estAI df: each gene's stAI is converted
# to its ECDF quantile within that species' genome-wide tAI distribution,
# which makes values comparable across species.
for(i in 2:ncol(stAI_dataframe)){
for(j in 1:nrow(stAI_dataframe)){
# grab the species tAI vals for all genes (column 2 of genome_wide_tAI holds
# the tAI values)
if(!is.na(stAI_dataframe[j,i])){
species_tAI_vals<-genome_wide_tAI[which(genome_wide_tAI$taxa==stAI_dataframe$all_taxa[j]), 2]
ecdf_func<-ecdf(species_tAI_vals)
estAI_df[j,i]<-ecdf_func(stAI_dataframe[j,i])
}
}
}
# Drop loop scratch variables before writing results.
rm(ecdf_func, i, j, name, species_tAI_vals, taxa_IDs, x)
####writing tAI, estAI results as tables to import later
# `row.names` spelled out in full (was `row.name`, which only worked through
# R's partial argument matching).
write.table(stAI_dataframe, "~/xylose_optimization_project/data/spp_by_gene_tAI_vals.txt", sep="\t", quote=FALSE, row.names=FALSE)
write.table(estAI_df, "~/xylose_optimization_project/data/spp_by_gene_estAI_vals.txt", sep="\t", quote=FALSE, row.names=FALSE)
###Abbe's paper uses the empirical distribution function to find
#the percent of genes with lower tAI vals. Does this differ from
#our division function?
# create ecdf distribution function
# ecdf_func<-ecdf(species_tAI_vals)
# ecdf_func(stAI_dataframe[j,i])
# I did a couple by hand and the numbers are the exact same.
# I guess there's no harm in doing EXACTLY what Abbe did,
# I'll adjust the code
#Compare histograms of estAI vals between genes
# NOTE(review): quartz() opens a plotting device on macOS only; use x11() or
# windows() if running this elsewhere.
quartz()
par(mar=c(2,2,2,2))
par(mfrow=c(2,3)) # five histograms in a 2x3 grid, one panel per gene
hist(estAI_df$xyl1)
hist(estAI_df$xyl2)
hist(estAI_df$xyl3)
hist(estAI_df$tkl1)
hist(estAI_df$tal1)
#strategy for finding good xylose species - find species for which
#all genes are in top 10% of respective distributions
#we can't directly compare tAI values (raw data)
#we have to compare estAI values (normalized data)
#first just making a df of just the highest estAI
#for those spp with more than one copy.
# Columns 2:6 hold the primary copy's estAI and columns 7:11 the paralog's
# estAI, on the SAME row. Where a paralog exists, keep the larger value.
# BUG FIX: the original compared estAI_df[(j+5), i] -- i.e. a DIFFERENT
# species' row -- instead of the paralog column estAI_df[j, (i+5)], so
# paralogs were never actually considered.
max_estAI_df <- estAI_df
for (i in 2:6) {
  for (j in seq_len(nrow(estAI_df))) {
    if (!is.na(estAI_df[j, i]) & !is.na(estAI_df[j, (i + 5)])) {
      max_estAI_df[j, i] <- max(estAI_df[j, i], estAI_df[j, (i + 5)])
    }
  }
}
# Drop the paralog columns; keep taxa plus the five per-gene maxima.
max_estAI_df <- max_estAI_df[1:6]
#can import from now on
max_estAI_df$all_taxa <- tolower(max_estAI_df$all_taxa)
write.table(max_estAI_df, "~/xylose_optimization_project/data/spp_by_gene_maximum_paralog_estAI_vals.txt", sep="\t", quote=FALSE, row.names=FALSE)
#now finding spp in the top fraction of every gene's distribution
# Species whose max-paralog estAI is at or above the given quantile in EVERY
# gene column of df (column 1 = species names). Replaces two copy-pasted
# loops that differed only in the quantile.
species_in_top_quantile <- function(df, prob) {
  shared <- as.character(unique(df$all_taxa))
  for (i in 2:ncol(df)) {
    vec <- sort(df[, i], decreasing = TRUE) # sort() drops NAs by default
    spp <- df[which(df[, i] >= quantile(vec, prob)), 1]
    shared <- shared[shared %in% spp]
  }
  shared
}
# Top 10% (90th percentile) for all genes.
toptens <- species_in_top_quantile(max_estAI_df, 0.90)
##3 spp in 90th percentile for xyl1, xyl2, xyl3:
#spathaspora gorwiae
#kluyveromyces aestuarii
#sugiyamaella lignohabitans
##1 sp in 90th percentile for xyl1, xyl2, xyl3, tkl1:
#spathaspora gorwiae
#0 spp in top 10% for all 5 genes
#wider net of top 25% (75th percentile)
top25s <- species_in_top_quantile(max_estAI_df, 0.75)
#in top 25 for all 5 genes:
# spathaspora gorwiae
# spathaspora hagerdaliae
# spathaspora girioi
# kodamaea ohmeri
###EXAMINING CORRELATIONS BETWEEN XYLOSE ESTAI AND XYLOSE GROWTH DATA
#data from Dana Opulente
#have to merge spp. to keys.
key<-read.delim("~/xylose_optimization_project/data/Spp_indices.txt", strip.white = TRUE)
key$Species<-tolower(key$Species)
growth_data<-read.delim("~/xylose_optimization_project/data/Xylose_growth_data_DO.txt", strip.white = TRUE)
# Join growth rates to the species key on the "PU." identifier; key columns 1
# and 3 are presumably the PU. id and the taxon name -- TODO confirm.
x<-merge(growth_data, key[c(1,3)], by="PU.")
which(!x$all_taxa %in% max_estAI_df$all_taxa)->not_in_data
#Ok - the spp. we have that are NOT in the tree mostly seem to be due to renaming.
#looking up possible new spp. names using second name of sp.
# Search for the species epithet (second word) among the estAI taxa and rename
# only when the match is unambiguous (exactly one candidate).
for (i in 1:length(not_in_data)){
x[(not_in_data[i]), 3]->old_sp
spName<-strsplit(old_sp, " ")[[1]][2]
possiblechange<-max_estAI_df$all_taxa[which(grepl(spName, max_estAI_df$all_taxa))]
if (length(possiblechange)==1){
x[(not_in_data[i]), 3]<-possiblechange
}
}
# Manually rename the remainders. NOTE(review): these hard-coded positions
# silently shift if the input files change, and some spellings differ from the
# tree-matching block further below ("vinae"/"vineae", "lockheadii"/
# "lochheadii", "dekortum"/"dekortorum") -- verify which spellings actually
# occur in max_estAI_df$all_taxa.
which(!x$all_taxa %in% max_estAI_df$all_taxa)->not_in_data
x[(not_in_data[1]), 3]<-"hanseniaspora vinae"
x[(not_in_data[2]), 3]<-"spencermartinsiella europaea"
x[(not_in_data[3]), 3]<-"martiniozyma abiesophila"
x[(not_in_data[4]), 3]<-"nakaseomyces castellii"
#x[(not_in_data[5]), 3]<-albicans - which we don't have
x[(not_in_data[6]), 3]<-"suhomyces pyralidae"
x[(not_in_data[7]), 3]<-"metschnikowia lockheadii"
x[(not_in_data[8]), 3]<-"metschnikowia dekortum"
#x[(not_in_data[9]), 3]<-"metschnikowia gruessii"-> not sure who this is.
# NOTE(review): x$all_taxa was already referenced above, which implies column
# 3 is presumably already named "all_taxa" and this rename is redundant --
# confirm against the key file's header.
colnames(x)[3]<-"all_taxa"
# Attach the growth data to the per-gene maximum estAI values by taxon.
growth_rates_df<-merge(max_estAI_df, x, by="all_taxa")
# BUG FIX: the path was missing the leading "~/" used everywhere else in this
# script, so the file landed relative to the working directory and the
# read.delim("~/...") a few lines below could not find it.
write.table(growth_rates_df, "~/xylose_optimization_project/data/growth_rate_master_df.txt", sep="\t", quote=FALSE, row.names=FALSE)
#######work from above table from now on
# Install ade4 only when it is missing: an unconditional install.packages()
# in a script re-downloads the package on every run.
if (!requireNamespace("ade4", quietly = TRUE)) {
  install.packages("ade4")
}
library(ade4)
growth_rates<-read.delim("~/xylose_optimization_project/data/growth_rate_master_df.txt", stringsAsFactors = FALSE)
plot(x=growth_rates$xyl1, y=growth_rates$Growth.Rate)
# Restrict to species that actually grew on xylose before correlating.
filtered_growth_rates<-growth_rates[which(growth_rates$Growth.Rate>0), ]
plot(x=filtered_growth_rates$xyl1, y=filtered_growth_rates$Growth.Rate)
# Pearson correlation of each gene's codon optimization (estAI) with growth rate.
cor.test(x=filtered_growth_rates$xyl1, y=filtered_growth_rates$Growth.Rate, method = "pearson")
cor.test(x=filtered_growth_rates$xyl2, y=filtered_growth_rates$Growth.Rate, method = "pearson")
cor.test(x=filtered_growth_rates$xyl3, y=filtered_growth_rates$Growth.Rate, method = "pearson")
cor.test(x=filtered_growth_rates$tkl1, y=filtered_growth_rates$Growth.Rate, method = "pearson")
cor.test(x=filtered_growth_rates$tal1, y=filtered_growth_rates$Growth.Rate, method = "pearson")
######## Match growth-data species names to the 332-taxon phylogeny ########
library(ape)
require(stringr)
tree<-read.tree("~/xylose_optimization_project/data/iTol_files/332_Newick_tree.txt")
key<-read.delim("~/xylose_optimization_project/data/Spp_indices.txt", strip.white = TRUE, stringsAsFactors = FALSE)
tree$tip.label
# str_replace replaces only the FIRST underscore (the genus_species
# separator); later underscores (e.g. "fantastica_nom_nud") are deliberately
# kept, as the manual renames below rely on them.
tree$tip.label<-str_replace(string = tree$tip.label, pattern = "_", replacement = " ")
tree$tip.label<-tolower(tree$tip.label)
tree$tip.label
growth_data<-read.delim("~/xylose_optimization_project/data/growth_rate_master_df.txt", stringsAsFactors = FALSE)
x<-growth_data
# Which of our species are missing from the tree?
x$all_taxa[which(!x$all_taxa %in% tree$tip.label)]
#Ok - the spp. we have that are NOT in the tree mostly seem to be due to renaming.
#looking up possible new spp. names using second name of sp.
# NOTE(review): hard-coded positions in not_in_data (including the
# commented-out [2]) are fragile -- they silently break if the inputs change.
which(!x$all_taxa %in% tree$tip.label)->not_in_data
x[(not_in_data[1]), 1]<-"blastobotrys raffinosifermentans"
#x[(not_in_data[2]), 1]<-"spencermartinsiella europaea"
x[(not_in_data[3]), 1]<-"hanseniaspora vineae"
x[(not_in_data[4]), 1]<-"lachancea fantastica_nom_nud"
x[(not_in_data[5]), 1]<-"magnusiomyces tetraspermus"
x[(not_in_data[6]), 1]<-"metschnikowia dekortorum"
x[(not_in_data[7]), 1]<-"metschnikowia lochheadii"
x[(not_in_data[8]), 1]<-"metschnikowia matae_var._matae"
x[(not_in_data[9]), 1]<-"metschnikowia matae_var._maris"
x[(not_in_data[10]), 1]<-"candida castellii"
x[(not_in_data[11]), 1]<-"ogataea philodendri"
x[(not_in_data[12]), 1]<-"ogataea populialbae"
#pulling candida azyma out b/c it's not in the tree
# NOTE(review): row 24 is hard-coded -- confirm it is still candida azyma if
# the master table is regenerated.
x<-x[-24, ]
#now need NA rows for those in the tree but not in the data
tree$tip.label[which(!tree$tip.label %in% x$all_taxa)]->all_taxa
tobind<-data.frame(all_taxa)
tobind$"xyl1"<-NA
tobind$"xyl2"<-NA
tobind$"xyl3"<-NA
tobind$"tkl1"<-NA
tobind$"tal1"<-NA
tobind$"PU."<-NA
tobind$"Growth.Rate"<-NA
x<-rbind(x, tobind)
# Re-order the rows into the tree's tip order for downstream phylo analyses.
labs<-data.frame(tree$tip.label, c(1:length(tree$tip.label)))
colnames(labs)<-c("all_taxa", "phylo_order")
x<-merge(x, labs, by="all_taxa")
x<-x[order(x$phylo_order), ]
####################################################################
##IGNORE ABOVE -> fixed names in data to match 332 tree
###############
##Import the below written dataframe for PIC analysis
###################################################################
#write.table(x, "~/xylose_optimization_project/data/growth_rate_master_df_treematchednames.txt", sep="\t", quote=FALSE, row.names = FALSE)
#growth data and estAI vals (names already fixed to match the 332 tree)
growth_data<-read.delim("~/xylose_optimization_project/data/growth_rate_master_df_treematchednames.txt")
#S values supplied by abbe's supp data
s_values<-read.delim("~/xylose_optimization_project/data/labella_et_al/s_values.txt", stringsAsFactors = FALSE)
#only taking spp with s values greater than .5 (this line only restricts to
#species present in our data; the >.5 filter itself is applied below)
s_values<-s_values[which(s_values$species.name %in% growth_data$all_taxa), ]
colnames(s_values)<-c("all_taxa", "s_value")
growth_data<-merge(growth_data, s_values, by="all_taxa")
growth_data<-growth_data[which(growth_data$s_value > .5),]
#332 tree
library(ape)
require(stringr)
require(ggpubr)
tree<-read.tree("~/xylose_optimization_project/data/iTol_files/332_Newick_tree.txt")
# Same tip-label normalization as before: first underscore -> space, lowercase.
tree$tip.label<-str_replace(string = tree$tip.label, pattern = "_", replacement = " ")
tree$tip.label<-tolower(tree$tip.label)
#
require(ade4)
require(adephylo)
#for each orthogram I need to drop the tree taxa that are missing data
#they're in the same order - so should be easy
# NOTE(review): indexing tree$tip.label with row numbers from growth_data
# assumes growth_data rows are in exactly the tree's tip order -- TODO confirm
# (the merge with s_values above may have re-sorted the rows alphabetically).
removes<-which(is.na(growth_data$xyl1))
xyl1_tree<-drop.tip(tree, tree$tip.label[removes])
removes<-which(is.na(growth_data$xyl2))
xyl2_tree<-drop.tip(tree, tree$tip.label[removes])
removes<-which(is.na(growth_data$xyl3))
xyl3_tree<-drop.tip(tree, tree$tip.label[removes])
removes<-which(is.na(growth_data$tkl1))
tkl1_tree<-drop.tip(tree, tree$tip.label[removes])
removes<-which(is.na(growth_data$tal1))
tal1_tree<-drop.tip(tree, tree$tip.label[removes])
removes<-which(is.na(growth_data$Growth.Rate))
growth_tree<-drop.tip(tree, tree$tip.label[removes])
#phylo <- ape::read.tree(text = tree)
# Orthogram tests (adephylo) for phylogenetic signal in each trait.
# NOTE(review): these assignments shadow the earlier gene sequence tables
# (xyl1, xyl2, ...) with numeric vectors; nothing below uses those tables,
# but beware if re-running parts of this script out of order.
xyl1<-as.numeric(na.omit(growth_data$xyl1))
orthogram(xyl1, tre = xyl1_tree)
#xyl1=diffuse dependence
xyl2<-as.numeric(na.omit(growth_data$xyl2))
orthogram(xyl2, tre=xyl2_tree)
#xyl2= no phylogenetic dependence
xyl3<-as.numeric(na.omit(growth_data$xyl3))
orthogram(xyl3, tre=xyl3_tree)
#xyl3=diffuse dependence and specific node importance
tal1<-as.numeric(na.omit(growth_data$tal1))
orthogram(tal1, tre=tal1_tree)
#tal1 = no phylo dependence
tkl1<-as.numeric(na.omit(growth_data$tkl1))
orthogram(tkl1, tre=tkl1_tree)
#tkl1 = no phylo dependence
growth<-as.numeric(na.omit(growth_data$Growth.Rate))
orthogram(growth, growth_tree)
#growth = diffuse phylo dependence
##PIC below:
# Phylogenetically independent contrasts (PIC) of one gene's estAI against
# xylose growth rate: drop species missing either value, prune the tree to
# the remaining taxa, compute PICs, then report and plot their Pearson
# correlation. Replaces three copy-pasted blocks (xyl1/xyl2/xyl3).
#
# gene:       column name in growth_data holding the gene's estAI values.
# gene_label: axis label text (kept identical to the original plots).
#
# BUG FIX: the originals did growth_data[-removes, ], which returns ZERO rows
# when `removes` is empty (x[-integer(0)] selects nothing in R); the removal
# is now guarded.
run_pic_analysis <- function(gene, gene_label) {
  removes <- which(is.na(growth_data[[gene]]) | is.na(growth_data$Growth.Rate))
  df <- if (length(removes) > 0) growth_data[-removes, ] else growth_data
  pruned_tree <- drop.tip(tree, tree$tip.label[which(!tree$tip.label %in% df$all_taxa)])
  #calculate and compare PIC values
  pic_gene <- pic(df[[gene]], pruned_tree)
  pic_growth <- pic(df$Growth.Rate, pruned_tree)
  # print() is needed inside a function to reproduce the original top-level output.
  print(cor.test(pic_gene, pic_growth))
  cordf <- data.frame(pic_gene, pic_growth)
  print(ggscatter(data=cordf, x="pic_gene", y="pic_growth",
                  col="red", size=2, add="reg.line", add.params = list(color = "blue", fill = "gray"),
                  cor.coeff.args = list(method = "pearson", label.sep = "\n"), conf.int=TRUE, cor.coef=TRUE, cor.method="pearson",
                  xlab=paste("PIC", gene_label), ylab="PIC xylose growth rate"))
  invisible(cordf)
}
#PIC growth data x XYL1 / XYL2 / XYL3 (labels match the original plots)
run_pic_analysis("xyl1", "XYL1")
run_pic_analysis("xyl2", "xyl2")
run_pic_analysis("xyl3", "xyl3")
###### Comparison of growers and non-growers
growers<-growth_data[which(growth_data$Growth.Rate>0), ]
nongrowers<-growth_data[which(growth_data$Growth.Rate <= 0), ]
# Keep only species with a measured growth rate before deriving the flag.
growth_data<-growth_data[which(!is.na(growth_data$Growth.Rate)), ]
# Binary grower flag, vectorized (replaces the original row-by-row loop).
# As before, a negative growth rate is neither >0 nor ==0 and stays NA.
growth_data$growth.binary <- ifelse(growth_data$Growth.Rate > 0, 1,
                                    ifelse(growth_data$Growth.Rate == 0, 0, NA))
# Codon optimization of xyl1 in growers (1) vs non-growers (0).
boxplot(growth_data$xyl1 ~ as.factor(growth_data$growth.binary),
        col="lightblue", xlab="codon optimization")
getwd()
# How many s-value species are absent from the tree?
length(which(!s_values$all_taxa %in% tree$tip.label))
|
## Two functions: makeCacheMatrix() creates a special "matrix" object that can
## cache its inverse, and cacheSolve() computes the inverse only when it is
## not already cached.
## makeCacheMatrix wraps matrix `x` in four accessor closures that share `x`
## and the cached inverse `mat` (NULL until cacheSolve() stores a result).
makeCacheMatrix <- function(x = matrix()) {
  mat <- NULL # cached inverse; NULL means "not computed yet"
  ## Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    mat <<- NULL ## mat must be reset so a stale inverse is never served
  }
  ## Return the stored matrix.
  get <- function() x
  ## Store a computed inverse in the cache (parameter renamed from `solve`,
  ## which shadowed base::solve and was misleading).
  setmatrix <- function(inverse) mat <<- inverse
  ## Return the cached inverse, or NULL if none has been stored yet.
  getmatrix <- function() mat
  list(set = set, get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## cacheSolve: returns the inverse of the matrix held by a makeCacheMatrix()
## object. The inverse is computed with solve() at most once; subsequent
## calls announce the cache hit and return the stored copy.
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix() # previously computed inverse, if any
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result.
    source_matrix <- x$get()
    cached <- solve(source_matrix, ...)
    x$setmatrix(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | aravindan25/Assignment2 | R | false | false | 1,629 | r | ## Two functions one is to create a special vector and other
## ...and the second computes the inverse of a matrix, computing only when
## needed and otherwise returning the cached result.
## makeCacheMatrix builds the wrapper: a list of getter/setter closures that
## share the matrix `x` and its cached inverse via their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL # no inverse computed yet
  list(
    ## Swap in a new matrix; any previously cached inverse is now stale.
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    ## Return the current matrix.
    get = function() x,
    ## Record a freshly computed inverse in the cache.
    setmatrix = function(solve) cached_inverse <<- solve,
    ## Return the cached inverse, or NULL when not yet computed.
    getmatrix = function() cached_inverse
  )
}
## cacheSolve: inverse of the matrix held by a makeCacheMatrix() object.
## Invokes solve() only on a cache miss; otherwise announces the hit and
## returns the cached inverse unchanged.
cacheSolve <- function(x, ...) {
  hit <- x$getmatrix()
  if (!is.null(hit)) {
    message("getting cached data")
    return(hit)
  }
  ## Cache miss: invert, store for next time, and return the inverse.
  inverse <- solve(x$get(), ...)
  x$setmatrix(inverse)
  inverse
}
|
#' manhattanLTFHSNP plot
#'
#' This function plots a manhattan plot and highlights a specific SNP for the
#' LTFH-method.
#' @param SNPno The specific SNP we are looking at
#' @param total_indiv Number of individuals in the dataset
#' @param SNP Is the number of SNPs in the data
#' @param h Is the heritability usually 0.5
#' @param sib Is a binary for indicating if we look at sibling history
#' @return A ggplot object, or the "no data" message string when the
#'   association file does not exist.
#' @keywords manhattanLTFHSNP
#' @export
#' @importFrom dplyr %>%
#' @examples
#' manhattanLTFHSNP(SNPno = 10, total_indiv = 1000, SNP = 1000, h = 0.5, sib=0)
manhattanLTFHSNP <- function(SNPno, total_indiv, SNP, h, sib){
  # Association results produced for this simulation configuration.
  stringer <- paste("./data/LTFH","_",format(total_indiv,scientific = F),"_",format(SNP,scientific = F),"_",h*100,"_5_",sib,".qassoc", sep="")
  if (file.exists(stringer)) {
    # data.table:: qualified (fread was previously called unqualified even
    # though data.table is never attached by this package).
    assoc_file <- data.table::fread(stringer)
    # Flag the SNP that should be highlighted and labelled.
    assoc_file$is_annotate <- ifelse(assoc_file$SNP==SNPno,1,0)
    don <- assoc_file %>%
      dplyr::filter(-log10(P)>1 | is_annotate == 1) # to make plot lighter
    plot1 <- ggplot2::ggplot(don, ggplot2::aes(x=SNP, y=-log10(P))) +
      # Show all points
      ggplot2::geom_point(color="darkgrey", alpha=0.8, size=1) +
      ggplot2::geom_point(data=subset(don, is_annotate==1), color="red", size=2.5) +
      ggrepel::geom_label_repel(data=subset(don, is_annotate==1), ggplot2::aes(label=SNP),
                                fontface = 'bold',
                                box.padding = 10,
                                point.padding = 0.75,
                                nudge_x = .15,
                                nudge_y = .5,
                                segment.linetype = 1,
                                # grid:: qualified (arrow/unit were unqualified)
                                arrow = grid::arrow(length = grid::unit(0.015, "npc"))
      ) +
      # Custom the theme:
      ggplot2::theme_light() +
      ggplot2::theme(
        legend.position="none",
        panel.border = ggplot2::element_blank(),
        panel.grid.major.x = ggplot2::element_blank(),
        panel.grid.minor.x = ggplot2::element_blank()
      )
    return(plot1)
  } else {
    print("No data exists! - Try another predefined or run our simulation")
  }
}
| /GWAS/R/manhattenLTFHSNP.R | permissive | madsemilmj/DataProjectGwas | R | false | false | 2,090 | r | #' manhattanLTFHSNP plot
#'
#' Draws a manhattan plot for the LTFH method and highlights one chosen SNP.
#' @param SNPno The specific SNP we are looking at
#' @param total_indiv Number of individuals in the dataset
#' @param SNP Is the number of SNPs in the data
#' @param h Is the heritability usually 0.5
#' @param sib Is a binary for indicating if we look at sibling history
#' @keywords manhattanLTFHSNP
#' @export
#' @importFrom dplyr %>%
#' @examples
#' manhattanLTFHSNP(SNPno = 10, total_indiv = 1000, SNP = 1000, h = 0.5, sib=0)
manhattanLTFHSNP <- function(SNPno, total_indiv, SNP, h, sib){
  # Path of the association output for this simulation configuration.
  assoc_path <- paste("./data/LTFH", "_", format(total_indiv, scientific = F), "_",
                      format(SNP, scientific = F), "_", h * 100, "_5_", sib,
                      ".qassoc", sep = "")
  if (!file.exists(assoc_path)) {
    return(print("No data exists! - Try another predefined or run our simulation"))
  }
  assoc <- fread(assoc_path)
  # Mark the SNP that should be highlighted and labelled.
  assoc$is_annotate <- ifelse(assoc$SNP == SNPno, 1, 0)
  # Drop weak, unannotated hits so the plot stays light.
  plot_data <- assoc %>%
    dplyr::filter(-log10(P) > 1 | is_annotate == 1)
  highlight <- subset(plot_data, is_annotate == 1)
  manhattan <- ggplot2::ggplot(plot_data, ggplot2::aes(x = SNP, y = -log10(P))) +
    # All retained points in grey, the chosen SNP emphasized in red.
    ggplot2::geom_point(color = "darkgrey", alpha = 0.8, size = 1) +
    ggplot2::geom_point(data = highlight, color = "red", size = 2.5) +
    ggrepel::geom_label_repel(data = highlight, ggplot2::aes(label = SNP),
                              fontface = 'bold',
                              box.padding = 10,
                              point.padding = 0.75,
                              nudge_x = .15,
                              nudge_y = .5,
                              segment.linetype = 1,
                              arrow = arrow(length = unit(0.015, "npc"))) +
    # Light theme without panel border or vertical gridlines.
    ggplot2::theme_light() +
    ggplot2::theme(
      legend.position = "none",
      panel.border = element_blank(),
      panel.grid.major.x = element_blank(),
      panel.grid.minor.x = element_blank()
    )
  return(manhattan)
}
|
#The purpose of this file was to conduct a PCA on temperature related variables for ALL of my available populations
#This PCA aided my selection of populations which were used in my temperature growth experiments by showing how populations were distributed across the range of temperature-related bioclimatic variables, which allowed me to select populations without over-representing specific thermal environments
# load libraries
library(tidyverse)
library(raster)
library(sf)
library(spData)
library(viridis)
library(readr)
library(ggplot2)
library(FactoMineR)
library(maps)
library(mapdata)
library(tidyr)
library(broom)
library(knitr)
library(ggfortify)
#Read in files. Rename them.
guttatus_worldclim_seed_collections <- read_csv("Raw-data/guttatus_worldclim_seed_collections.csv")
View(guttatus_worldclim_seed_collections)
#Boxplot of US vs UK: TEMPERATURE
# bio_1 (annual mean temperature) is stored in tenths of a degree C, hence /10.
boxplot((guttatus_worldclim_seed_collections$bio_1/10)~guttatus_worldclim_seed_collections$ID1)
#T-tests for variables of interest (annual mean temperature, temperature seasonality)
# BUG FIX: these t-tests previously referenced mg_wcTEMP, which is not created
# until section 1 further down this script; run them on the data frame already
# read in above (it is the same CSV).
t.test(guttatus_worldclim_seed_collections$bio_1 ~ guttatus_worldclim_seed_collections$ID1)
t.test(guttatus_worldclim_seed_collections$bio_4 ~ guttatus_worldclim_seed_collections$ID1)
###################################################################################
#PCA to tell us which clim variables are most valuable for clustering data #
###################################################################################
#####################
## 1. Prepare data ##
#####################
## read CSV file ##
mg_wcTEMP <- read_csv("Raw-data/guttatus_worldclim_seed_collections.csv")
View(mg_wcTEMP)
## remove variables unnecessary for TEMPERATURE PCA and rows with NA ##
## (bio_12..bio_19 are precipitation variables; coordinates dropped too)
mg_wcTEMP <- subset(mg_wcTEMP, select = -c(bio_12, bio_13, bio_14, bio_15, bio_16, bio_17, bio_18, bio_19, Longitude, Latitude))
mg_wcTEMP <- na.omit(mg_wcTEMP)
## mg_wcTEMP2 (the PCA input) is made BEFORE the rename below, so its columns
## deliberately keep the original bio_* names; only ID1 (region) is dropped.
mg_wcTEMP2 <- subset(mg_wcTEMP, select = -c(ID1))
##change column names to abbreviated variable names##
names(mg_wcTEMP) <- c("Region", "Population", "MAT", "MDR", "ISO", "TS", "MTWM", "MTCM", "TAR", "MTWQ", "MTDQ", "MTWAQ", "MTCQ")
#################################
## 2. run PCA using FactoMineR ##
#################################
library(FactoMineR)
library(multcompView)
# Scaled PCA; the first (qualitative) column is treated as supplementary and
# does not contribute to the components.
mg_pcaTEMP <- PCA(mg_wcTEMP2, scale.unit=TRUE, quali.sup = 1, graph = FALSE)
## (optional) view PCA summary and variable and individual ##
## values (coordinates, correlation, cosine2, contribution) ##
summary(mg_pcaTEMP)
write.csv(mg_pcaTEMP$var, "Processed-data/pcaTEMPvar.csv")
mg_pcaTEMP$var
mg_pcaTEMP$ind
#########################
## 3. Plot PCA results ##
#########################
## extract individual pc1 and pc2 coordinates from FactoMineR PCA ##
mg_wcTEMP$pc1 <- mg_pcaTEMP$ind$coord[,1]
mg_wcTEMP$pc2 <- mg_pcaTEMP$ind$coord[,2]
#geom_text repel for non-overlapping point labels
library(ggrepel)
## create PCA plot, point color by country ##
ggplot(data=mg_wcTEMP, aes(x=pc1, y=pc2, label=Population)) + scale_fill_manual(values=c("red","blue","green","purple"), name="") + geom_point(aes(fill=Region), shape=21, color="gray90", size=5) + labs(x="PC1",y="PC2")
#save as IMG: 600x600--MAINTAIN ASPECT RATIO
#####################################################################################
#Plot with labelled variables
#Extract loadings and create a new dataframe#
# pc1/pc2 are named vectors (names = bio_* variable codes from the PCA input),
# so `load` inherits those names as row names; the subset below indexes by them.
pc1 <- mg_pcaTEMP$var$cor[,1]
pc2 <- mg_pcaTEMP$var$cor[,2]
load <- data.frame("var"=c("MAT", "MDR", "ISO", "TS", "MTWM", "MTCM", "TAR", "MTWQ", "MTDQ", "MTWAQ", "MTCQ"), pc1, pc2)
# (BUG FIX: an exact duplicate of the data.frame() line above was removed here)
#subset to only include important variables#
load <- load[c("bio_1", "bio_5", "bio_6", "bio_7", "bio_9", "bio_10" ),]
#create lengthen value for scaling the plotted loading lines#
lengthen <-6
#Final PCA with important variables plotted
ggplot(data=mg_wcTEMP, aes(x=pc1, y=pc2)) + scale_fill_manual(values=c("red","blue"), labels=c("UK","US"), name="") +
geom_point(aes(fill=Region),shape=21, color="gray90", size=6) + labs(x="PC1",y="PC2") + geom_segment(data=load, aes(x=0, y=0, xend=pc1*lengthen, yend=pc2*lengthen), color="gray75") +
geom_text(data=load, aes(x=pc1*lengthen*1.1, y=pc2*lengthen*1.1, label=var),color="black", size=5) + theme(text=element_text(size=20), title=element_text(size=20), axis.text=element_text(size=20))
#save as IMG: 900x900--MAINTAIN ASPECT RATIO
#####################################################################################
#export data
write.csv(mg_wcTEMP, "Processed-data/PCATEMP.csv")
#What populations are most similar based on the top 3 climate variables from PCA?
#a Tukey test to group them together?
#Once you know which are most similar, create lists of each group
#********************************
# 4. Definitions and units of bioclim variables (from http://www.worldclim.org/bioclim)
#********************************
# BIO1 = Annual Mean Temperature CHECK
# BIO2 = Mean Diurnal Range (Mean of monthly (max temp - min temp))
# BIO3 = Isothermality (BIO2/BIO7) (* 100)
# BIO4 = Temperature Seasonality (standard deviation *100) (CHECK)
# BIO5 = Max Temperature of Warmest Month
# BIO6 = Min Temperature of Coldest Month
# BIO7 = Temperature Annual Range (BIO5-BIO6)
# BIO8 = Mean Temperature of Wettest Quarter
# BIO9 = Mean Temperature of Driest Quarter CHECK
# BIO10 = Mean Temperature of Warmest Quarter
# BIO11 = Mean Temperature of Coldest Quarter
| /Scripts/1_PCA/1A_PCAJustTemp.R | no_license | akquerns/Guttatus_EvolTPCClines | R | false | false | 5,401 | r | #The purpose of this file was to conduct a PCA on temperature related variables for ALL of my available populations
#This PCA aided my selection of populations which were used in my temperature growth experiments by showing how populations were distributed across the range of temperature-related bioclimatic variables, which allowed me to select populations without over-representing specific thermal environments
# load libraries
library(tidyverse)
library(raster)
library(sf)
library(spData)
library(viridis)
library(readr)
library(ggplot2)
library(FactoMineR)
library(maps)
library(mapdata)
library(tidyr)
library(broom)
library(knitr)
library(ggfortify)
#Read in files. Rename them.
guttatus_worldclim_seed_collections <- read_csv("Raw-data/guttatus_worldclim_seed_collections.csv")
View(guttatus_worldclim_seed_collections)
#Boxplot of US vs UK: TEMPERATURE
# bio_1 is divided by 10 here -- presumably stored as deg C * 10 (WorldClim v1 convention); confirm against the raw data
boxplot((guttatus_worldclim_seed_collections$bio_1/10)~guttatus_worldclim_seed_collections$ID1)
#T-tests for variables of interest
# Fixed: these originally referenced mg_wcTEMP, which is not created until the
# "Prepare data" section below, so the script errored when run top-to-bottom.
# The same columns exist in the raw seed-collection table.
t.test(guttatus_worldclim_seed_collections$bio_1 ~ guttatus_worldclim_seed_collections$ID1)
t.test(guttatus_worldclim_seed_collections$bio_4 ~ guttatus_worldclim_seed_collections$ID1)
###################################################################################
#PCA to tell us which clim variables are most valuable for clustering data #
###################################################################################
#####################
## 1. Prepare data ##
#####################
## read CSV file ##
mg_wcTEMP <- read_csv("Raw-data/guttatus_worldclim_seed_collections.csv")
View(mg_wcTEMP)
## remove variables unnecessary for TEMPERATURE PCA (precipitation bioclims bio_12..bio_19
## and the coordinate columns) and rows with NA ##
mg_wcTEMP <- subset(mg_wcTEMP, select = -c(bio_12, bio_13, bio_14, bio_15, bio_16, bio_17, bio_18, bio_19, Longitude, Latitude))
mg_wcTEMP <- na.omit(mg_wcTEMP)
## mg_wcTEMP2 keeps the original bio_* column names: the PCA loading rownames
## used for subsetting further below are inherited from these
mg_wcTEMP2 <- subset(mg_wcTEMP, select = -c(ID1))
##change column names to abbreviated variable names##
names(mg_wcTEMP) <- c("Region", "Population", "MAT", "MDR", "ISO", "TS", "MTWM", "MTCM", "TAR", "MTWQ", "MTDQ", "MTWAQ", "MTCQ")
#################################
## 2. run PCA using FactoMineR ##
#################################
library(multcompView)
## column 1 of mg_wcTEMP2 (Population) is treated as a qualitative supplementary variable
mg_pcaTEMP <- PCA(mg_wcTEMP2, scale.unit=TRUE, quali.sup = 1, graph = FALSE)
## (optional) view PCA summary and variable and individual ##
## values (coordinates, correlation, cosine2, contribution) ##
summary(mg_pcaTEMP)
write.csv(mg_pcaTEMP$var, "Processed-data/pcaTEMPvar.csv")
mg_pcaTEMP$var
mg_pcaTEMP$ind
#########################
## 3. Plot PCA results ##
#########################
## extract individual pc1 and pc2 coordinates from FactoMineR PCA ##
mg_wcTEMP$pc1 <- mg_pcaTEMP$ind$coord[,1]
mg_wcTEMP$pc2 <- mg_pcaTEMP$ind$coord[,2]
#geom_text_repel for non-overlapping point labels
library(ggrepel)
## create PCA plot, point color by country ##
ggplot(data=mg_wcTEMP, aes(x=pc1, y=pc2, label=Population)) + scale_fill_manual(values=c("red","blue","green","purple"), name="") + geom_point(aes(fill=Region), shape=21, color="gray90", size=5) + labs(x="PC1",y="PC2")
#save as IMG: 600x600--MAINTAIN ASPECT RATIO
#####################################################################################
#Plot with labelled variables
#Extract loadings (variable-PC correlations) and create a new dataframe#
pc1 <- mg_pcaTEMP$var$cor[,1]
pc2 <- mg_pcaTEMP$var$cor[,2]
## renamed 'load' -> 'loadings' to avoid shadowing base::load();
## also removed an accidental duplicate of this assignment
loadings <- data.frame("var"=c("MAT", "MDR", "ISO", "TS", "MTWM", "MTCM", "TAR", "MTWQ", "MTDQ", "MTWAQ", "MTCQ"), pc1, pc2)
#subset to only include important variables (rownames are the bio_* names inherited from mg_wcTEMP2)#
loadings <- loadings[c("bio_1", "bio_5", "bio_6", "bio_7", "bio_9", "bio_10" ),]
#create lengthen value for scaling the plotted loading lines#
lengthen <- 6
#Final PCA with important variables plotted
ggplot(data=mg_wcTEMP, aes(x=pc1, y=pc2)) + scale_fill_manual(values=c("red","blue"), labels=c("UK","US"), name="") +
  geom_point(aes(fill=Region),shape=21, color="gray90", size=6) + labs(x="PC1",y="PC2") + geom_segment(data=loadings, aes(x=0, y=0, xend=pc1*lengthen, yend=pc2*lengthen), color="gray75") +
  geom_text(data=loadings, aes(x=pc1*lengthen*1.1, y=pc2*lengthen*1.1, label=var),color="black", size=5) + theme(text=element_text(size=20), title=element_text(size=20), axis.text=element_text(size=20))
#save as IMG: 900x900--MAINTAIN ASPECT RATIO
#####################################################################################
#export data
write.csv(mg_wcTEMP, "Processed-data/PCATEMP.csv")
#What populations are most similar based on the top 3 climate variables from PCA?
#a Tukey test to group them together?
#Once you know which are most similar, create lists of each group
#********************************
# 4. Definitions and units of bioclim variables (from http://www.worldclim.org/bioclim)
#********************************
# BIO1 = Annual Mean Temperature CHECK
# BIO2 = Mean Diurnal Range (Mean of monthly (max temp - min temp))
# BIO3 = Isothermality (BIO2/BIO7) (* 100)
# BIO4 = Temperature Seasonality (standard deviation *100) (CHECK)
# BIO5 = Max Temperature of Warmest Month
# BIO6 = Min Temperature of Coldest Month
# BIO7 = Temperature Annual Range (BIO5-BIO6)
# BIO8 = Mean Temperature of Wettest Quarter
# BIO9 = Mean Temperature of Driest Quarter CHECK
# BIO10 = Mean Temperature of Warmest Quarter
# BIO11 = Mean Temperature of Coldest Quarter
|
# Read two days of household power data and draw four base-graphics panels on screen.
# NOTE(review): machine-specific absolute path -- will fail on any other machine; consider relative paths.
setwd("~/OneDrive/Transfer/Coursera/Tasks/Exploratoy Data Analysis/Week 1/ExData_Plotting1")
library(RSQLite)
library(sqldf)
library(lubridate)
#Use SQL commands to read in just the lines with the specified dates
#(avoids loading the full ~2M-row file into memory)
df<- read.csv.sql("household_power_consumption.txt", sql = 'select * from file where Date == "1/2/2007" OR Date == "2/2/2007"', sep = ";")
# close the temporary SQLite connection opened by read.csv.sql
closeAllConnections()
#Set all ? to NA ("?" is the file's missing-value marker)
# NOTE(review): if any "?" were present in the selected rows, those columns would be
# character and the plot() calls below would fail; presumably these two dates are
# fully observed -- confirm.
df[df == "?"] <- NA
#Create new Variable containing Date and Time (POSIX datetime via lubridate)
df$datetime<- with(df, dmy(Date) + hms(Time))
#Plot 4
#Set System to English because otherwise weekdays in legend would appear in German
Sys.setlocale("LC_TIME", "en_US")
#Prepare multiple plots in one view: 2x2 grid, filled column-wise (mfcol)
par(mfcol = c(2,2))
par(cex.lab=0.75,cex.axis=0.75)
#Create Plot topleft
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Global_active_power, type = "n", xlab = "", cex.lab = 0.75, cex.axis = 0.75,
ylab="Global Active Power")
#Insert line in plot
lines(df$datetime,df$Global_active_power, type = "l")
#Create Plot bottomleft
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Sub_metering_1, type = "n", xlab = "",
ylab="Energy sub metering")
#Insert lines in plot (one line per sub-meter, colors matched to the legend below)
lines(df$datetime,df$Sub_metering_1, type = "l")
lines(df$datetime,df$Sub_metering_2, type = "l", col = "red")
lines(df$datetime,df$Sub_metering_3, type = "l", col = "blue")
#Insert legend (bty = "n" suppresses the legend box)
legend("topright", lty = "solid", col = c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),cex=0.75, bty = "n")
#Create Plot topright
plot(df$datetime,df$Voltage, type = "n", xlab = "datetime", cex.lab = 0.75, cex.axis = 0.75,
ylab="Voltage")
#Insert line in plot
lines(df$datetime,df$Voltage, type = "l")
#Create Plot bottomright
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Global_reactive_power, type = "n", xlab = "datetime", cex.lab = 0.75, cex.axis = 0.75,
ylab="Global_reactive_power")
#Insert line in plot
lines(df$datetime,df$Global_reactive_power, type = "l")
#Repeat code but save to .png
# Fixed: png("plot4.png", width) passed the undefined symbol `width` with no value,
# which errors at runtime ("object 'width' not found"). 480x480 px made explicit
# (the device default, and the assignment's required size).
png("plot4.png", width = 480, height = 480)
#Set System to English because otherwise weekdays in legend would appear in German
Sys.setlocale("LC_TIME", "en_US")
#Prepare multiple plots in one view: 2x2 grid, filled column-wise
par(mfcol = c(2,2))
par(cex.lab=0.75,cex.axis=0.75)
#Create Plot topleft
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Global_active_power, type = "n", xlab = "", cex.lab = 0.75, cex.axis = 0.75,
ylab="Global Active Power")
#Insert line in plot
lines(df$datetime,df$Global_active_power, type = "l")
#Create Plot bottomleft
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Sub_metering_1, type = "n", xlab = "",
ylab="Energy sub metering")
#Insert lines in plot
lines(df$datetime,df$Sub_metering_1, type = "l")
lines(df$datetime,df$Sub_metering_2, type = "l", col = "red")
lines(df$datetime,df$Sub_metering_3, type = "l", col = "blue")
#Insert legend
legend("topright", lty = "solid", col = c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),cex=0.75, bty = "n")
#Create Plot topright
plot(df$datetime,df$Voltage, type = "n", xlab = "datetime", cex.lab = 0.75, cex.axis = 0.75,
ylab="Voltage")
#Insert line in plot
lines(df$datetime,df$Voltage, type = "l")
#Create Plot bottomright
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Global_reactive_power, type = "n", xlab = "datetime", cex.lab = 0.75, cex.axis = 0.75,
ylab="Global_reactive_power")
#Insert line in plot
lines(df$datetime,df$Global_reactive_power, type = "l")
# close the png device so the file is flushed to disk
dev.off()
| /plot4.R | no_license | setX76/ExData_Plotting1 | R | false | false | 3,614 | r | setwd("~/OneDrive/Transfer/Coursera/Tasks/Exploratoy Data Analysis/Week 1/ExData_Plotting1")
library(RSQLite)
library(sqldf)
library(lubridate)
#Use SQL commands to read in just the lines with the specified dates
df<- read.csv.sql("household_power_consumption.txt", sql = 'select * from file where Date == "1/2/2007" OR Date == "2/2/2007"', sep = ";")
closeAllConnections()
#Set all ? to NA
df[df == "?"] <- NA
#Create new Variable containing Date and Time
df$datetime<- with(df, dmy(Date) + hms(Time))
#Plot 4
#Set System to English because otherwise weekdays in legend would appear in German
Sys.setlocale("LC_TIME", "en_US")
#Prepare multiple plots in one view
par(mfcol = c(2,2))
par(cex.lab=0.75,cex.axis=0.75)
#Create Plot topleft
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Global_active_power, type = "n", xlab = "", cex.lab = 0.75, cex.axis = 0.75,
ylab="Global Active Power")
#Insert line in plot
lines(df$datetime,df$Global_active_power, type = "l")
#Create Plot bottomleft
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Sub_metering_1, type = "n", xlab = "",
ylab="Energy sub metering")
#Insert lines in plot
lines(df$datetime,df$Sub_metering_1, type = "l")
lines(df$datetime,df$Sub_metering_2, type = "l", col = "red")
lines(df$datetime,df$Sub_metering_3, type = "l", col = "blue")
#Insert legend
legend("topright", lty = "solid", col = c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),cex=0.75, bty = "n")
#Create Plot topright
plot(df$datetime,df$Voltage, type = "n", xlab = "datetime", cex.lab = 0.75, cex.axis = 0.75,
ylab="Voltage")
#Insert line in plot
lines(df$datetime,df$Voltage, type = "l")
#Create Plot bottomright
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Global_reactive_power, type = "n", xlab = "datetime", cex.lab = 0.75, cex.axis = 0.75,
ylab="Global_reactive_power")
#Insert line in plot
lines(df$datetime,df$Global_reactive_power, type = "l")
#Repeat code but save to .png
# Fixed: png("plot4.png", width) passed the undefined symbol `width` with no value,
# which errors at runtime ("object 'width' not found"). 480x480 px made explicit
# (the device default, and the assignment's required size).
png("plot4.png", width = 480, height = 480)
#Set System to English because otherwise weekdays in legend would appear in German
Sys.setlocale("LC_TIME", "en_US")
#Prepare multiple plots in one view: 2x2 grid, filled column-wise
par(mfcol = c(2,2))
par(cex.lab=0.75,cex.axis=0.75)
#Create Plot topleft
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Global_active_power, type = "n", xlab = "", cex.lab = 0.75, cex.axis = 0.75,
ylab="Global Active Power")
#Insert line in plot
lines(df$datetime,df$Global_active_power, type = "l")
#Create Plot bottomleft
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Sub_metering_1, type = "n", xlab = "",
ylab="Energy sub metering")
#Insert lines in plot
lines(df$datetime,df$Sub_metering_1, type = "l")
lines(df$datetime,df$Sub_metering_2, type = "l", col = "red")
lines(df$datetime,df$Sub_metering_3, type = "l", col = "blue")
#Insert legend
legend("topright", lty = "solid", col = c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),cex=0.75, bty = "n")
#Create Plot topright
plot(df$datetime,df$Voltage, type = "n", xlab = "datetime", cex.lab = 0.75, cex.axis = 0.75,
ylab="Voltage")
#Insert line in plot
lines(df$datetime,df$Voltage, type = "l")
#Create Plot bottomright
#Prepare plot w/o data, no X-Label, and Y-Label name
plot(df$datetime,df$Global_reactive_power, type = "n", xlab = "datetime", cex.lab = 0.75, cex.axis = 0.75,
ylab="Global_reactive_power")
#Insert line in plot
lines(df$datetime,df$Global_reactive_power, type = "l")
# close the png device so the file is flushed to disk
dev.off()
|
## Course Project Data Products Coursera
## Author: "Dale Stewart"
## date: "Nov 7, 2017"
## Build User interface (ui.R), which will control the layout and appearance
## Load Shiny library
library(shiny)
## Page layout: title, a sidebar with the league/year inputs, and a main panel
## with a data table, a text echo of the league, and a plot. The matching
## server.R must render output ids 'Details', 'league', and 'plot1'.
shinyUI(fluidPage(
# Application title
titlePanel("Number of Major Championships per State in 4 Major U.S. Sporting Leagues"),
br(),
br(),
sidebarPanel(
helpText("Documentation and Instructions:"),
helpText("In the input boxes below please choose one of the 4 major sporting leagues in the United States as well as a range of years. The output on the left will display associated counts for championships won in that state as well as a horizontal bar chart representing the same data."),
# input$league: which of the 4 leagues to summarise
selectInput("league",
label = "Choose a league",
choices = c("NBA", "MLB",
"NFL", "NHL"),
selected = "NBA"),
# input$year: inclusive year range; sep="" suppresses thousands separators in labels
sliderInput("year", "Year Range:", min = 1950, max = 2017, value = c(1993, 2011), sep = ""),
helpText("Based on the league selection that you have made this page will show you the data and a histogram of the states that have won championships in the time frame selected")),
br(),
br(),
# Call Data table
mainPanel(dataTableOutput('Details'),
# Echo the selected league
# NOTE(review): output id "league" is the same as the selectInput's input id;
# Shiny inputs and outputs share one id namespace per session, so this can
# conflict -- confirm against server.R and rename one of them if so.
textOutput("league"),
br(),
br(),
# Bar chart of championships per state
# (previous comment said "coliform levels" -- apparent copy-paste leftover from another app)
plotOutput("plot1"))
)
) | /ui.R | no_license | bigstewosu/Champs | R | false | false | 1,464 | r | ## Course Project Data Products Coursera
## Author: "Dale Stewart"
## date: "Nov 7, 2017"
## Build User interface (ui.R), which will control the layout and appearance
## Load Shiny library
library(shiny)
shinyUI(fluidPage(
# Application title
titlePanel("Number of Major Championships per State in 4 Major U.S. Sporting Leagues"),
br(),
br(),
sidebarPanel(
helpText("Documentation and Instructions:"),
helpText("In the input boxes below please choose one of the 4 major sporting leagues in the United States as well as a range of years. The output on the left will display associated counts for championships won in that state as well as a horizontal bar chart representing the same data."),
selectInput("league",
label = "Choose a league",
choices = c("NBA", "MLB",
"NFL", "NHL"),
selected = "NBA"),
sliderInput("year", "Year Range:", min = 1950, max = 2017, value = c(1993, 2011), sep = ""),
helpText("Based on the league selection that you have made this page will show you the data and a histogram of the states that have won championships in the time frame selected")),
br(),
br(),
# Call Data table
mainPanel(dataTableOutput('Details'),
# Pass state name
textOutput("league"),
br(),
br(),
# Plot coliform levels state-wise
plotOutput("plot1"))
)
) |
##Rule3
##This player attempted but did not match the
##reasoning wheel sentence correctly
##at all during the last case played.
##rsnwhl = Reasoning Wheel Performance
##rsnwhl: 0 = missing; 1 = attempted but wrong; 2 = attempted and correct
## Expects `inData` (supplied by the job runner) with at least columns
## sid, CaseOrder, rsnwhl, and dplyr/magrittr already attached.
aw <- inData
## Rows for each player's last case played.
## Fixed: the original wrote aw$CaseOrder inside the dplyr verbs, which
## (a) in select() passes the column's *values* as positional column indices,
## and (b) in filter() compares every row against the global maximum CaseOrder,
## defeating the per-sid grouping. Bare column names give the intended
## "last case per player" semantics.
r3set1 <- aw %>%
  group_by(sid) %>%
  select(sid, CaseOrder, rsnwhl) %>%
  filter(CaseOrder == max(CaseOrder))
#list(r3set1)
# str(aw)
## Best (max) reasoning-wheel outcome within each player's last case.
## summarise_each()/funs() are deprecated; plain summarise() is equivalent here.
r3set2 <- aw %>%
  group_by(sid) %>%
  select(sid, CaseOrder, rsnwhl) %>%
  filter(CaseOrder == max(CaseOrder)) %>%
  summarise(rsnwhl = max(rsnwhl))
#r3set2 <- mutate(r3set2, rwhl1 = rsnwhl == 1);
#list(r3set2)
#r3set3 <- merge(r3set1, r3set2, by=c("sid"))
#list(r3set3)
| /servers/lib/aeng/engines/r/games/AW/aw_job5_rule3.r | no_license | GlasslabGames/Assessment | R | false | false | 685 | r | ##Rule3
##This player attempted but did not match the
##reasoning wheel sentence correctly
##at all during the last case played.
##rsnwhl = Reasoning Wheel Performance
aw <- inData
r3set1 <- aw %>%
group_by(sid) %>%
select(sid, aw$CaseOrder, rsnwhl) %>%
filter(aw$CaseOrder==max(aw$CaseOrder))
##rsnwhl: 0 = missing; 1 = attempted but wrong; 2 = attempted and correct
#list(r3set1)
# str(aw)
r3set2 <- aw %>%
group_by(sid) %>%
select(sid, aw$CaseOrder, rsnwhl) %>%
filter(aw$CaseOrder==max(aw$CaseOrder)) %>%
summarise_each(funs(max), rsnwhl)
#r3set2 <- mutate(r3set2, rwhl1 = rsnwhl == 1);
#list(r3set2)
#r3set3 <- merge(r3set1, r3set2, by=c("sid"))
#list(r3set3)
|
/lectures/11 Spatial Simulations/11.1a ABM Diagnostic Graphs.r | no_license | nmmarquez/MPSpatialDemog | R | false | false | 15,108 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_column_names.R
\name{generate_column_names}
\alias{generate_column_names}
\title{Generate the column names from a prefix for our time periods of interest.}
\usage{
generate_column_names(prefix, from = 1976, to = 2018)
}
\arguments{
\item{prefix}{The prefix of the variable name.}
\item{from}{The starting year, as an integer, for our time periods of interest.}
\item{to}{The ending year, as an integer, for our time periods of interest.}
}
\value{
A character vector of the column names.
}
\description{
Generate the column names from a prefix for our time periods of interest.
}
\examples{
generate_column_names(prefix = 'irt', from = 1990, to = 2002)
}
| /man/generate_column_names.Rd | permissive | nurseshealthstudy/Nurses | R | false | true | 744 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_column_names.R
\name{generate_column_names}
\alias{generate_column_names}
\title{Generate the column names from a prefix for our time periods of interest.}
\usage{
generate_column_names(prefix, from = 1976, to = 2018)
}
\arguments{
\item{prefix}{The prefix of the variable name.}
\item{from}{The starting year, as an integer, for our time periods of interest.}
\item{to}{The ending year, as an integer, for our time periods of interest.}
}
\value{
A character vector of the column names.
}
\description{
Generate the column names from a prefix for our time periods of interest.
}
\examples{
generate_column_names(prefix = 'irt', from = 1990, to = 2002)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DAMisc_functions.R
\name{crTest}
\alias{crTest}
\title{Test of linearity for Component + Residual Plots}
\usage{
crTest(
model,
adjust.method = "none",
cat = 5,
var = NULL,
span.as = TRUE,
span = 0.75,
...
)
}
\arguments{
\item{model}{A model object of class \code{lm}}
\item{adjust.method}{Adjustment method for multiple-testing procedure, using
\code{p.adjust} from \code{stats}.}
\item{cat}{Number of unique values below which numeric variables are
considered categorical for the purposes of the smooth.}
\item{var}{Character string indicating the term desired for testing. If
left \code{NULL}, the default value, all numeric variables will be tested.}
\item{span.as}{Logical indicating whether the span should be automatically
selected through AICC or GCV}
\item{span}{Span to be passed down to the \code{loess} function if
\code{span.as=FALSE}.}
\item{...}{Other arguments to be passed down to the call to \code{loess}.}
}
\value{
A matrix with the following columns for each variable:
\item{RSSp}{Residual sum-of-squares for the parametric (linear) model.}
\item{RSSnp}{Residual sum-of-squares for the non-parametric (loess) model.}
\item{DFnum}{Numerator degrees of freedom for the F-test: tr(S)-(k+1).}
\item{DFdenom}{Denominator degrees of freedom for the F-test: n-tr(S)}
\item{F}{F-statistic} \item{p}{p-value, potentially adjusted for multiple
comparisons.}
}
\description{
This function estimates a linear model and a loess model on the
component-plus-residual plot (i.e., a partial residual plot) for each
quantitative variable in the model. The residual sums of squares for each
are used to calculate an F-test for each quantitative variable.
}
\examples{
data(Prestige, package="carData")
mod <- lm(prestige ~ income + education + women, data=Prestige)
crTest(mod)
}
\author{
Dave Armstrong
}
| /man/crTest.Rd | permissive | davidaarmstrong/damisc | R | false | true | 1,911 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DAMisc_functions.R
\name{crTest}
\alias{crTest}
\title{Test of linearity for Component + Residual Plots}
\usage{
crTest(
model,
adjust.method = "none",
cat = 5,
var = NULL,
span.as = TRUE,
span = 0.75,
...
)
}
\arguments{
\item{model}{A model object of class \code{lm}}
\item{adjust.method}{Adjustment method for multiple-testing procedure, using
\code{p.adjust} from \code{stats}.}
\item{cat}{Number of unique values below which numeric variables are
considered categorical for the purposes of the smooth.}
\item{var}{Character string indicating the term desired for testing. If
left \code{NULL}, the default value, all numeric variables will be tested.}
\item{span.as}{Logical indicating whether the span should be automatically
selected through AICC or GCV}
\item{span}{Span to be passed down to the \code{loess} function if
\code{span.as=FALSE}.}
\item{...}{Other arguments to be passed down to the call to \code{loess}.}
}
\value{
A matrix with the following columns for each variable:
\item{RSSp}{Residual sum-of-squares for the parametric (linear) model.}
\item{RSSnp}{Residual sum-of-squares for the non-parametric (loess) model.}
\item{DFnum}{Numerator degrees of freedom for the F-test: tr(S)-(k+1).}
\item{DFdenom}{Denominator degrees of freedom for the F-test: n-tr(S)}
\item{F}{F-statistic} \item{p}{p-value, potentially adjusted for multiple
comparisons.}
}
\description{
This function estimates a linear model and a loess model on the
component-plus-residual plot (i.e., a partial residual plot) for each
quantitative variable in the model. The residual sums of squares for each
are used to calculate an F-test for each quantitative variable.
}
\examples{
data(Prestige, package="carData")
mod <- lm(prestige ~ income + education + women, data=Prestige)
crTest(mod)
}
\author{
Dave Armstrong
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.