content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# CA5: R Calculator - 10357791 Alex Brown
# 1.Addition
# 1. Addition: return the sum of two numbers.
addition <- function(numb1, numb2) {
  numb1 + numb2
}
#Test Addition
addition(4,5)
addition(45,2)
addition(45,-6)
#2.Subtraction
# 2. Subtraction: return numb1 minus numb2.
subtraction <- function(numb1, numb2) {
  numb1 - numb2
}
#Test Subtraction
subtraction(6,7)
subtraction(23,56)
subtraction(34,-78)
#3.Division
# 3. Division: return numb1 divided by numb2 (numb2 == 0 yields Inf/NaN,
# as with R's `/` operator).
division <- function(numb1, numb2){
  numb1 / numb2
}
#Test Division
division(34,2)
division(45,9)
division(6,-2)
#4.Multiplication
# 4. Multiplication: return the product of two numbers.
multiply <- function(numb1, numb2){
  numb1 * numb2
}
#Test Multiplication
multiply(9,4)
multiply(45,2)
multiply(2,-2)
#5.Cube
# 5. Cube: return the third power of numb1.
cube <- function(numb1){
  numb1^3
}
#Test Cube
cube(4)
cube(12)
cube(8)
#6.Remainder
# 6. Remainder: num1 modulo num2 (R's %% — result takes the sign of num2).
remainder <- function(num1, num2){
  num1 %% num2
}
#.Test Remainder
remainder(3, 7)
remainder(5, 7)
remainder(3, 11)
#7.Exponential
# 7. Exponential: raise numb1 to the power numb2 (`^` is the documented
# synonym of `**`).
exponential <- function(numb1, numb2){
  numb1^numb2
}
#Test Exponential
exponential(4, 7)
exponential(9, 4)
exponential(13, 45)
#8.Permutations
# 8. Permutations: number of ordered arrangements of x items drawn from n,
#    i.e. n! / (n - x)!.
# NOTE: when x > n, factorial(n - x) is NaN (negative argument), so the
# result is NaN with a warning -- original behaviour preserved.
# (Idiom fix: `<-` for assignment instead of `=`.)
perm <- function(n, x) {
  factorial(n) / factorial(n - x)
}
#Test Permutations
perm(34, 8)
perm(7, 9)
#9.Cube Root
# 9. Cube Root: return numb1^(1/3).
# BUG FIX: the original body was `numb1 ** 1. /3`, which parses as
# (numb1^1.0)/3 because unary `^` binds tighter than `/` -- it divided by 3
# instead of taking a cube root. numb2 was never used; kept with a default
# so existing two-argument calls remain valid.
cube_root <- function(numb1, numb2 = NULL){
  numb1^(1/3)
}
#Test Cube Root
cube_root(4, 9)
#10.Square Root
# 10. Square Root: thin wrapper around base sqrt() for a single number.
sqrt_func <- function(numb1){
  sqrt(numb1)
}
#Test Square Root C
sqrt_func(4)
sqrt_func(5)
sqrt_func(8)
#Test Permutations
perm (16, 24)
perm (11, 5)
|
/CA5/CA5_R.r
|
no_license
|
Ystwryth/DBS
|
R
| false
| false
| 1,538
|
r
|
# CA5: R Calculator - 10357791 Alex Brown
# 1.Addition
addition <- function(numb1, numb2) {
return(numb1 + numb2)
}
#Test Addition
addition(4,5)
addition(45,2)
addition(45,-6)
#2.Subtraction
subtraction <- function(numb1, numb2) {
return(numb1 - numb2)
}
#Test Subtraction
subtraction(6,7)
subtraction(23,56)
subtraction(34,-78)
#3.Division
division <- function(numb1, numb2){
return(numb1 / numb2)
}
#Test Division
division(34,2)
division(45,9)
division(6,-2)
#4.Multiplication
multiply <- function(numb1, numb2){
return(numb1 * numb2)
}
#Test Multiplication
multiply(9,4)
multiply(45,2)
multiply(2,-2)
#5.Cube
cube <- function(numb1){
return(numb1 ** 3)
}
#Test Cube
cube(4)
cube(12)
cube(8)
#6.Remainder
remainder <- function(num1, num2){
return(num1 %% num2)
}
#.Test Remainder
remainder(3, 7)
remainder(5, 7)
remainder(3, 11)
#7.Exponential
exponential <- function(numb1, numb2){
return(numb1 ** numb2)}
#Test Exponential
exponential(4, 7)
exponential(9, 4)
exponential(13, 45)
#8.Permutations
# 8. Permutations: number of ordered arrangements of x items drawn from n,
#    i.e. n! / (n - x)!.
# NOTE: when x > n, factorial(n - x) is NaN (negative argument), so the
# result is NaN with a warning -- original behaviour preserved.
# (Idiom fix: `<-` for assignment instead of `=`.)
perm <- function(n, x) {
  factorial(n) / factorial(n - x)
}
#Test Permutations
perm(34, 8)
perm(7, 9)
#9.Cube Root
# 9. Cube Root: return numb1^(1/3).
# BUG FIX: the original body was `numb1 ** 1. /3`, which parses as
# (numb1^1.0)/3 because unary `^` binds tighter than `/` -- it divided by 3
# instead of taking a cube root. numb2 was never used; kept with a default
# so existing two-argument calls remain valid.
cube_root <- function(numb1, numb2 = NULL){
  numb1^(1/3)
}
#Test Cube Root
cube_root(4, 9)
#10.Square Root
sqrt_func <- function(numb1){
return(sqrt(numb1))
}
#Test Square Root C
sqrt_func(4)
sqrt_func(5)
sqrt_func(8)
#Test Permutations
perm (16, 24)
perm (11, 5)
|
## Train a random forest on "FinalTrain.csv" and write class predictions for
## "TestInternshipStudent.csv" to "RandomPredict1.csv" (all paths relative to
## the working directory).
library(randomForest)
library(SPECIES)
traindata = read.csv("FinalTrain.csv")
test = read.csv("TestInternshipStudent.csv")
#tr = read.csv("DemoTrain.csv")
#te = read.csv("DemoTest.csv")
# Drop the first two columns of the training data (presumably id columns --
# confirm against the CSV layout).
col1<-ncol(traindata)
traindata<-traindata[,c(3:col1)]
# The last remaining column holds the class label.
col<-ncol(traindata)
trainlabel <- traindata[,col]
# NOTE(review): `1:col-1` parses as (1:col)-1, i.e. 0:(col-1); the 0 index is
# silently ignored when subsetting, so this selects columns 1..col-1 as
# intended -- but only by accident. The explicit form is 1:(col-1).
traindata<- traindata[,c(1:col-1)]
trainlabel <- as.factor(trainlabel)
# Recombine features with the factor label so the formula below can find it.
x <- cbind(traindata,trainlabel)
# Apply the same leading-column drop to the test set.
test <- test[,c(3:ncol(test))]
# 100-tree forest; do.trace=100 prints progress every 100 trees.
fit <- randomForest(trainlabel ~., data=x,ntree=100, keep.forest=TRUE, do.trace=100)
predicted <- predict(fit,test)
print(fit)
importance(fit)
# NOTE(review): write.table's default separator is a space, so despite the
# .csv extension this is not a comma-separated file -- confirm the reader.
write.table(predicted,file="RandomPredict1.csv")
#l <- list(predicted)
#le<-length(l[[1]])
#print(le)
#for (i in 1:le){
# write(l[[1]][i],file="RandomPredict.csv",append=TRUE) #write to a file
#}
|
/randomForest.R
|
no_license
|
charusharma1991/RandomForest
|
R
| false
| false
| 775
|
r
|
library(randomForest)
library(SPECIES)
traindata = read.csv("FinalTrain.csv")
test = read.csv("TestInternshipStudent.csv")
#tr = read.csv("DemoTrain.csv")
#te = read.csv("DemoTest.csv")
col1<-ncol(traindata)
traindata<-traindata[,c(3:col1)]
col<-ncol(traindata)
trainlabel <- traindata[,col]
traindata<- traindata[,c(1:col-1)]
trainlabel <- as.factor(trainlabel)
x <- cbind(traindata,trainlabel)
test <- test[,c(3:ncol(test))]
fit <- randomForest(trainlabel ~., data=x,ntree=100, keep.forest=TRUE, do.trace=100)
predicted <- predict(fit,test)
print(fit)
importance(fit)
write.table(predicted,file="RandomPredict1.csv")
#l <- list(predicted)
#le<-length(l[[1]])
#print(le)
#for (i in 1:le){
# write(l[[1]][i],file="RandomPredict.csv",append=TRUE) #write to a file
#}
|
library("twitteR")
library("ROAuth")
library(wordcloud)
library(RColorBrewer)
library(tm)
library(plyr)
library(ggplot2)
library(sentiment)
library(data.table)
library(topicmodels)
#authentication
# Load a previously saved OAuth credential object (expects an object named
# `cred` inside the .Rdata file) and register it with the legacy twitteR
# auth flow.
load("twitter authentication.Rdata")
registerTwitterOAuth(cred)
#data collection
# Pull up to n English tweets per cancer-related hashtag within a 1000-mile
# radius of a geocode. Variable prefixes appear to encode the region:
# m* = 18.9750,72.8258 (approx. Mumbai), c* = 13.0839,80.2700 (approx.
# Chennai), lon* = 51.5072,0.1275 (approx. London); chemo1/lon13 have no
# geocode restriction.
m8 = searchTwitter("#prostatecancer", n=200, geocode="18.9750,72.8258,1000mi",lang="en",cainfo="cacert.pem")
c1 = searchTwitter("#prostatecancer", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c2 = searchTwitter("#lung cancer", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c3 = searchTwitter("#breastcancer", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c4 = searchTwitter("#oralcancer", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c5 = searchTwitter("#lymphoma", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c6 = searchTwitter("#cancer research", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c7 = searchTwitter("#cancer treatment", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c8 = searchTwitter("#cancer patient", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
m9 = searchTwitter("#cancer treatment", n=200, geocode="18.9750,72.8258,1000mi",lang="en",cainfo="cacert.pem")
m10 = searchTwitter("#cancer patient", n=200, geocode="18.9750,72.8258,1000mi",lang="en",cainfo="cacert.pem")
m11 = searchTwitter("#cancer help", n=200, geocode="18.9750,72.8258,1000mi",lang="en",cainfo="cacert.pem")
c11 = searchTwitter("#cancer help", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
m3 = searchTwitter("#liver cancer", n=200, geocode="18.9750,72.8258,1000mi",lang="en",cainfo="cacert.pem")
c9 = searchTwitter("#liver cancer", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
chemo1 = searchTwitter("#chemotherapy", n=1500,lang="en",cainfo="cacert.pem")
lon1= searchTwitter("#breastcancer",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon2= searchTwitter("#leukemia",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon3= searchTwitter("#lungcancer",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon4= searchTwitter("#lymphoma",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon5= searchTwitter("#tumor",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon6= searchTwitter("#cervicalcancer",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon7= searchTwitter("#prostatecancer",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon8= searchTwitter("#cancerresearch",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon9= searchTwitter("#bladder cancer",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon10= searchTwitter("#cancer treatment",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon11= searchTwitter("#cancer patient",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon12= searchTwitter("#cancer help",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon13= searchTwitter("#chemotherapy", n=1500, lang="en", cainfo="cacert.pem")
#dataset
# NOTE(review): m1, m2, m4, m5, m6 and m7 are referenced below but never
# defined in this script -- this line errors unless those objects already
# exist in the workspace (presumably collected interactively); confirm.
india=c(c1,c11,c2,c3,c4,c5,c6,c7,c8,c9,m1,m10,m11,m2,m3,m4,m5,m6,m7,m8,m9,chemo1)
uk=c(lon1,lon2,lon3,lon4,lon5,lon6,lon7,lon8,lon9,lon10,lon11,lon12,lon13)
#data frame
# Flatten each list of status objects into one data.frame per region.
india.df <- do.call("rbind", lapply(india, as.data.frame))
uk.df <- do.call("rbind", lapply(uk, as.data.frame))
#corpus and text cleaning
# Build a tm corpus from the tweet text, then lower-case and strip
# punctuation and digits.
india.corpus <- Corpus(VectorSource(india.df$text))
india.corpus <- tm_map(india.corpus, content_transformer(tolower))
india.corpus <- tm_map(india.corpus, content_transformer(removePunctuation))
india.corpus <- tm_map(india.corpus, content_transformer(removeNumbers))
# Strip URLs. The pattern only matches "http" followed by alphanumerics, but
# since removePunctuation already ran, whole URLs have collapsed into a
# single alphanumeric run (e.g. "httpstcoxyz"), so this removes them fully.
removeURL <- function(x) gsub("http[[:alnum:]]*", "", x)
india.corpus <- tm_map(india.corpus, content_transformer(removeURL))
# Standard English stopwords plus hand-curated noise terms seen in this
# collection.
myStopwords <- c(stopwords("english"), "available","via","and","or","about","heres","amp","nci","ncjs","et","ncis","can","according","associated","butt","buynowgt","highly","know","one","polls","side","today","use","wearnig","back","lady","new","min","r","stats","whoops","every","take","tweet","glaring","miss","million","us","screening","must","min","varadhkrish")
india.corpus <- tm_map(india.corpus, removeWords, myStopwords)
# Same pipeline for the UK corpus (note: myStopwords is reassigned below,
# clobbering the India list -- harmless since it was already applied).
uk.Corpus <- Corpus(VectorSource(uk.df$text))
uk.Corpus <- tm_map(uk.Corpus, content_transformer(tolower))
uk.Corpus <- tm_map(uk.Corpus, content_transformer(removePunctuation))
uk.Corpus <- tm_map(uk.Corpus, content_transformer(removeNumbers))
removeURL <- function(x) gsub("http[[:alnum:]]*", "", x)
uk.Corpus <- tm_map(uk.Corpus, content_transformer(removeURL))
myStopwords <- c(stopwords("english"), "available","via","and","or","about","heres","amp","nci","ncjs","et","ncis","can","lady","tells","wef","w","wearing","turned","driving","tools","side","personalize")
uk.Corpus <- tm_map(uk.Corpus, removeWords, myStopwords)
#TDM
# Term-document matrices; wordLengths=c(1,Inf) keeps even 1-letter terms
# (tm's default drops terms shorter than 3 characters).
india.tdm <- TermDocumentMatrix(india.corpus, control=list(wordLengths=c(1,Inf)))
uk.tdm <- TermDocumentMatrix(uk.Corpus, control=list(wordLengths=c(1,Inf)))
#inspect(uk.tdm[,])
#frequency bar plot
# Plot terms appearing at least 15 (India) / 20 (UK) times.
findFreqTerms(india.tdm, lowfreq=15)
india.tf <- rowSums(as.matrix(india.tdm))
india.tf <- subset(india.tf, india.tf>=15)
barplot(india.tf, las=2)
findFreqTerms(uk.tdm, lowfreq=20)
uk.tf <- rowSums(as.matrix(uk.tdm))
uk.tf <- subset(uk.tf, uk.tf>=20)
barplot(uk.tf, las=2)
#association
# Terms correlated (>= 0.25) with a chosen hashtag term in each TDM.
findAssocs(india.tdm,"cervicalcancer", 0.25)
findAssocs(uk.tdm, "lungcancer", 0.25)
#word cloud
# Word clouds from overall term frequencies; min.freq differs per region
# (5 for India, 2 for UK). Seed fixed only for the India cloud.
wordFreq <- sort(rowSums(as.matrix(india.tdm)), decreasing=TRUE)
set.seed(375)
wordcloud(words=names(wordFreq), freq=wordFreq, min.freq=5, random.order=F,colors=brewer.pal(8,"Dark2"),random.color=TRUE)
wordFreq <- sort(rowSums(as.matrix(uk.tdm)), decreasing=TRUE)
#set.seed(375)
wordcloud(words=names(wordFreq), freq=wordFreq, min.freq=2, random.order=F,colors=brewer.pal(8, "Dark2"),random.color= TRUE)
#hierarchical clustering
# Drop terms absent from >= 95% of documents, then Ward-cluster the scaled
# term vectors and cut the dendrogram into 5 groups.
india.tdm2 <- removeSparseTerms(india.tdm, sparse=0.95)
india.m2 <- as.matrix(india.tdm2)
india.distMatrix <- dist(scale(india.m2))
fit <- hclust(india.distMatrix, method="ward.D")
plot(fit)
rect.hclust(fit, k=5)
# Outer parentheses make the assignment auto-print the group membership.
(groups <- cutree(fit, k=5))
uk.tdm2 <- removeSparseTerms(uk.tdm, sparse=0.95)
uk.m2 <- as.matrix(uk.tdm2)
uk.distMatrix <- dist(scale(uk.m2))
fit <- hclust(uk.distMatrix, method="ward.D")
plot(fit)
rect.hclust(fit, k=5)
(groups <- cutree(fit, k=5))
#k-means clustering
# Transpose so rows are documents and columns are terms, then k-means with
# k=5; the loop prints the 3 highest-weighted terms per cluster centre.
india.m3 <- t(india.m2)
set.seed(122)
k <- 5
india.kmeansResult <- kmeans(india.m3, k)
round(india.kmeansResult$centers, digits=3)
for (i in 1:k) {
cat(paste("cluster ", i, ": ", sep=""))
s <- sort(india.kmeansResult$centers[i,], decreasing=T)
cat(names(s)[1:3], "\n")
}
# Same procedure for the UK matrix (same seed for reproducibility).
uk.m3 <- t(uk.m2)
set.seed(122)
k <- 5
uk.kmeansResult <- kmeans(uk.m3, k)
round(uk.kmeansResult$centers, digits=3)
for (i in 1:k) {
cat(paste("cluster ", i, ": ", sep=""))
s <- sort(uk.kmeansResult$centers[i,], decreasing=T)
cat(names(s)[1:3], "\n")
}
#sentiment analysis
# Re-extract raw tweet text from the status objects (plyr::laply), then
# strip retweet markers, @mentions, punctuation, digits, URLs, runs of
# whitespace, and leading/trailing whitespace.
india.text = laply(india, function(t) t$getText() )
india.text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", india.text)
india.text = gsub("@\\w+", "", india.text)
india.text = gsub("[[:punct:]]", "", india.text)
india.text = gsub("[[:digit:]]", "", india.text)
india.text = gsub("http\\w+", "", india.text)
india.text = gsub("[ \t]{2,}", "", india.text)
india.text = gsub("^\\s+|\\s+$", "", india.text)
# Lower-case x, returning NA instead of erroring when tolower() fails
# (e.g. on invalidly-encoded tweet text). Applied per-element via sapply().
#
# Args:
#   x: value to lower-case.
# Returns: tolower(x) on success, NA on error.
#
# The original probed with tryCatch, tested inherits(), and then called
# tolower() a second time; tryCatch's error handler can return the NA
# fallback directly, running the conversion only once.
try.error <- function(x)
{
  tryCatch(tolower(x), error = function(e) NA)
}
# Apply the safe lower-caser and drop any tweets it could not convert.
india.text = sapply(india.text, try.error)
india.text = india.text[!is.na(india.text)]
names(india.text) = NULL
# Naive-Bayes emotion and polarity classification (sentiment package).
# Column 7 / column 4 appear to be the "best fit" label columns of the
# returned matrices -- confirm against the sentiment package docs.
class_emo = classify_emotion(india.text, algorithm="bayes", prior=1.0)
emotion = class_emo[,7]
emotion[is.na(emotion)] = "unknown"
class_pol = classify_polarity(india.text, algorithm="bayes")
polarity = class_pol[,4]
sent_df = data.frame(text=india.text, emotion=emotion, polarity=polarity,stringsAsFactors=FALSE)
# Reorder emotion levels by descending frequency so bars plot largest-first.
sent_df = within(sent_df, emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))
ggplot(sent_df, aes(x=emotion)) + geom_bar(aes(y=..count.., fill=emotion))+scale_fill_brewer(palette="Dark2") + labs(x="emotion categories", y="number of tweets") + ggtitle("Sentiment Analysis of Tweets about Cancer\n(classification by emotion)")
ggplot(sent_df, aes(x=polarity)) + geom_bar(aes(y=..count.., fill=polarity)) + scale_fill_brewer(palette="RdGy") + labs(x="polarity categories", y="number of tweets") + ggtitle("Sentiment Analysis of Tweets about Cancer\n(classification by polarity)")
# Same regex cleaning pipeline for the UK tweets.
uk.text = laply(uk, function(t) t$getText() )
uk.text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", uk.text)
uk.text = gsub("@\\w+", "", uk.text)
uk.text = gsub("[[:punct:]]", "", uk.text)
uk.text = gsub("[[:digit:]]", "", uk.text)
uk.text = gsub("http\\w+", "", uk.text)
uk.text = gsub("[ \t]{2,}", "", uk.text)
uk.text = gsub("^\\s+|\\s+$", "", uk.text)
# Lower-case x, returning NA instead of erroring when tolower() fails.
# (Verbatim redefinition of the try.error defined earlier in this script;
# it simply rebinds the same name.)
#
# Args:
#   x: value to lower-case.
# Returns: tolower(x) on success, NA on error.
#
# The original probed with tryCatch, tested inherits(), and then called
# tolower() a second time; the error handler can return NA directly,
# running the conversion only once.
try.error <- function(x)
{
  tryCatch(tolower(x), error = function(e) NA)
}
# Apply the safe lower-caser and drop any tweets it could not convert.
uk.text = sapply(uk.text, try.error)
uk.text = uk.text[!is.na(uk.text)]
names(uk.text) = NULL
# Naive-Bayes emotion/polarity classification, as for the India set.
class_emo = classify_emotion(uk.text, algorithm="bayes", prior=1.0)
emotion = class_emo[,7]
emotion[is.na(emotion)] = "unknown"
class_pol = classify_polarity(uk.text, algorithm="bayes")
polarity = class_pol[,4]
sent_df = data.frame(text=uk.text, emotion=emotion, polarity=polarity,stringsAsFactors=FALSE)
sent_df = within(sent_df, emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))
ggplot(sent_df, aes(x=emotion)) + geom_bar(aes(y=..count.., fill=emotion))+scale_fill_brewer(palette="Dark2") + labs(x="emotion categories", y="number of tweets") + ggtitle("Sentiment Analysis of Tweets about Cancer\n(classification by emotion)")
ggplot(sent_df, aes(x=polarity)) + geom_bar(aes(y=..count.., fill=polarity)) + scale_fill_brewer(palette="RdGy") + labs(x="polarity categories", y="number of tweets") + ggtitle("Sentiment Analysis of Tweets about Cancer\n(classification by polarity)")
#topic modelling LDA
# Fit an 8-topic LDA per region; label each topic by its top 4 terms and
# plot topic density over tweet creation date (data.table::as.IDate).
# NOTE(review): LDA errors if any document is empty after cleaning --
# confirm no all-zero rows in the DTMs.
india.dtm <- as.DocumentTermMatrix(india.tdm)
india.lda <- LDA(india.dtm, k = 8)
term <- terms(india.lda, 4)
term <- apply(term, MARGIN = 2, paste, collapse = ", ")
term
topic <- topics(india.lda, 1)
topics <- data.frame(date=as.IDate(india.df$created), topic)
qplot(date, ..count.., data=topics, geom="density",fill=term[topic], position="stack")
uk.dtm <- as.DocumentTermMatrix(uk.tdm)
uk.lda <- LDA(uk.dtm, k = 8)
term <- terms(uk.lda, 4)
term <- apply(term, MARGIN = 2, paste, collapse = ", ")
term
topic <- topics(uk.lda, 1)
topics <- data.frame(date=as.IDate(uk.df$created), topic)
qplot(date, ..count.., data=topics, geom="density",fill=term[topic], position="stack")
|
/cancer.R
|
no_license
|
divyachandraprakash/Exploratory-Analysis-on-Cancer-using-Twitter-and-R
|
R
| false
| false
| 11,164
|
r
|
library("twitteR")
library("ROAuth")
library(wordcloud)
library(RColorBrewer)
library(tm)
library(plyr)
library(ggplot2)
library(sentiment)
library(data.table)
library(topicmodels)
#authentication
load("twitter authentication.Rdata")
registerTwitterOAuth(cred)
#data collection
m8 = searchTwitter("#prostatecancer", n=200, geocode="18.9750,72.8258,1000mi",lang="en",cainfo="cacert.pem")
c1 = searchTwitter("#prostatecancer", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c2 = searchTwitter("#lung cancer", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c3 = searchTwitter("#breastcancer", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c4 = searchTwitter("#oralcancer", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c5 = searchTwitter("#lymphoma", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c6 = searchTwitter("#cancer research", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c7 = searchTwitter("#cancer treatment", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
c8 = searchTwitter("#cancer patient", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
m9 = searchTwitter("#cancer treatment", n=200, geocode="18.9750,72.8258,1000mi",lang="en",cainfo="cacert.pem")
m10 = searchTwitter("#cancer patient", n=200, geocode="18.9750,72.8258,1000mi",lang="en",cainfo="cacert.pem")
m11 = searchTwitter("#cancer help", n=200, geocode="18.9750,72.8258,1000mi",lang="en",cainfo="cacert.pem")
c11 = searchTwitter("#cancer help", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
m3 = searchTwitter("#liver cancer", n=200, geocode="18.9750,72.8258,1000mi",lang="en",cainfo="cacert.pem")
c9 = searchTwitter("#liver cancer", n=200, geocode="13.0839,80.2700,1000mi",lang="en",cainfo="cacert.pem")
chemo1 = searchTwitter("#chemotherapy", n=1500,lang="en",cainfo="cacert.pem")
lon1= searchTwitter("#breastcancer",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon2= searchTwitter("#leukemia",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon3= searchTwitter("#lungcancer",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon4= searchTwitter("#lymphoma",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon5= searchTwitter("#tumor",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon6= searchTwitter("#cervicalcancer",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon7= searchTwitter("#prostatecancer",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon8= searchTwitter("#cancerresearch",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon9= searchTwitter("#bladder cancer",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon10= searchTwitter("#cancer treatment",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon11= searchTwitter("#cancer patient",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon12= searchTwitter("#cancer help",geocode="51.5072,0.1275,1000mi", n=1500, lang="en", cainfo="cacert.pem")
lon13= searchTwitter("#chemotherapy", n=1500, lang="en", cainfo="cacert.pem")
#dataset
india=c(c1,c11,c2,c3,c4,c5,c6,c7,c8,c9,m1,m10,m11,m2,m3,m4,m5,m6,m7,m8,m9,chemo1)
uk=c(lon1,lon2,lon3,lon4,lon5,lon6,lon7,lon8,lon9,lon10,lon11,lon12,lon13)
#data frame
india.df <- do.call("rbind", lapply(india, as.data.frame))
uk.df <- do.call("rbind", lapply(uk, as.data.frame))
#corpus and text cleaning
india.corpus <- Corpus(VectorSource(india.df$text))
india.corpus <- tm_map(india.corpus, content_transformer(tolower))
india.corpus <- tm_map(india.corpus, content_transformer(removePunctuation))
india.corpus <- tm_map(india.corpus, content_transformer(removeNumbers))
removeURL <- function(x) gsub("http[[:alnum:]]*", "", x)
india.corpus <- tm_map(india.corpus, content_transformer(removeURL))
myStopwords <- c(stopwords("english"), "available","via","and","or","about","heres","amp","nci","ncjs","et","ncis","can","according","associated","butt","buynowgt","highly","know","one","polls","side","today","use","wearnig","back","lady","new","min","r","stats","whoops","every","take","tweet","glaring","miss","million","us","screening","must","min","varadhkrish")
india.corpus <- tm_map(india.corpus, removeWords, myStopwords)
uk.Corpus <- Corpus(VectorSource(uk.df$text))
uk.Corpus <- tm_map(uk.Corpus, content_transformer(tolower))
uk.Corpus <- tm_map(uk.Corpus, content_transformer(removePunctuation))
uk.Corpus <- tm_map(uk.Corpus, content_transformer(removeNumbers))
removeURL <- function(x) gsub("http[[:alnum:]]*", "", x)
uk.Corpus <- tm_map(uk.Corpus, content_transformer(removeURL))
myStopwords <- c(stopwords("english"), "available","via","and","or","about","heres","amp","nci","ncjs","et","ncis","can","lady","tells","wef","w","wearing","turned","driving","tools","side","personalize")
uk.Corpus <- tm_map(uk.Corpus, removeWords, myStopwords)
#TDM
india.tdm <- TermDocumentMatrix(india.corpus, control=list(wordLengths=c(1,Inf)))
uk.tdm <- TermDocumentMatrix(uk.Corpus, control=list(wordLengths=c(1,Inf)))
#inspect(uk.tdm[,])
#frequency bar plot
findFreqTerms(india.tdm, lowfreq=15)
india.tf <- rowSums(as.matrix(india.tdm))
india.tf <- subset(india.tf, india.tf>=15)
barplot(india.tf, las=2)
findFreqTerms(uk.tdm, lowfreq=20)
uk.tf <- rowSums(as.matrix(uk.tdm))
uk.tf <- subset(uk.tf, uk.tf>=20)
barplot(uk.tf, las=2)
#association
findAssocs(india.tdm,"cervicalcancer", 0.25)
findAssocs(uk.tdm, "lungcancer", 0.25)
#word cloud
wordFreq <- sort(rowSums(as.matrix(india.tdm)), decreasing=TRUE)
set.seed(375)
wordcloud(words=names(wordFreq), freq=wordFreq, min.freq=5, random.order=F,colors=brewer.pal(8,"Dark2"),random.color=TRUE)
wordFreq <- sort(rowSums(as.matrix(uk.tdm)), decreasing=TRUE)
#set.seed(375)
wordcloud(words=names(wordFreq), freq=wordFreq, min.freq=2, random.order=F,colors=brewer.pal(8, "Dark2"),random.color= TRUE)
#hierarchical clustering
india.tdm2 <- removeSparseTerms(india.tdm, sparse=0.95)
india.m2 <- as.matrix(india.tdm2)
india.distMatrix <- dist(scale(india.m2))
fit <- hclust(india.distMatrix, method="ward.D")
plot(fit)
rect.hclust(fit, k=5)
(groups <- cutree(fit, k=5))
uk.tdm2 <- removeSparseTerms(uk.tdm, sparse=0.95)
uk.m2 <- as.matrix(uk.tdm2)
uk.distMatrix <- dist(scale(uk.m2))
fit <- hclust(uk.distMatrix, method="ward.D")
plot(fit)
rect.hclust(fit, k=5)
(groups <- cutree(fit, k=5))
#k-means clustering
india.m3 <- t(india.m2)
set.seed(122)
k <- 5
india.kmeansResult <- kmeans(india.m3, k)
round(india.kmeansResult$centers, digits=3)
for (i in 1:k) {
cat(paste("cluster ", i, ": ", sep=""))
s <- sort(india.kmeansResult$centers[i,], decreasing=T)
cat(names(s)[1:3], "\n")
}
uk.m3 <- t(uk.m2)
set.seed(122)
k <- 5
uk.kmeansResult <- kmeans(uk.m3, k)
round(uk.kmeansResult$centers, digits=3)
for (i in 1:k) {
cat(paste("cluster ", i, ": ", sep=""))
s <- sort(uk.kmeansResult$centers[i,], decreasing=T)
cat(names(s)[1:3], "\n")
}
#sentiment analysis
india.text = laply(india, function(t) t$getText() )
india.text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", india.text)
india.text = gsub("@\\w+", "", india.text)
india.text = gsub("[[:punct:]]", "", india.text)
india.text = gsub("[[:digit:]]", "", india.text)
india.text = gsub("http\\w+", "", india.text)
india.text = gsub("[ \t]{2,}", "", india.text)
india.text = gsub("^\\s+|\\s+$", "", india.text)
# Lower-case x, returning NA instead of erroring when tolower() fails
# (e.g. on invalidly-encoded tweet text). Applied per-element via sapply().
#
# Args:
#   x: value to lower-case.
# Returns: tolower(x) on success, NA on error.
#
# The original probed with tryCatch, tested inherits(), and then called
# tolower() a second time; tryCatch's error handler can return the NA
# fallback directly, running the conversion only once.
try.error <- function(x)
{
  tryCatch(tolower(x), error = function(e) NA)
}
india.text = sapply(india.text, try.error)
india.text = india.text[!is.na(india.text)]
names(india.text) = NULL
class_emo = classify_emotion(india.text, algorithm="bayes", prior=1.0)
emotion = class_emo[,7]
emotion[is.na(emotion)] = "unknown"
class_pol = classify_polarity(india.text, algorithm="bayes")
polarity = class_pol[,4]
sent_df = data.frame(text=india.text, emotion=emotion, polarity=polarity,stringsAsFactors=FALSE)
sent_df = within(sent_df, emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))
ggplot(sent_df, aes(x=emotion)) + geom_bar(aes(y=..count.., fill=emotion))+scale_fill_brewer(palette="Dark2") + labs(x="emotion categories", y="number of tweets") + ggtitle("Sentiment Analysis of Tweets about Cancer\n(classification by emotion)")
ggplot(sent_df, aes(x=polarity)) + geom_bar(aes(y=..count.., fill=polarity)) + scale_fill_brewer(palette="RdGy") + labs(x="polarity categories", y="number of tweets") + ggtitle("Sentiment Analysis of Tweets about Cancer\n(classification by polarity)")
uk.text = laply(uk, function(t) t$getText() )
uk.text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", uk.text)
uk.text = gsub("@\\w+", "", uk.text)
uk.text = gsub("[[:punct:]]", "", uk.text)
uk.text = gsub("[[:digit:]]", "", uk.text)
uk.text = gsub("http\\w+", "", uk.text)
uk.text = gsub("[ \t]{2,}", "", uk.text)
uk.text = gsub("^\\s+|\\s+$", "", uk.text)
# Lower-case x, returning NA instead of erroring when tolower() fails.
# (Verbatim redefinition of the try.error defined earlier in this script;
# it simply rebinds the same name.)
#
# Args:
#   x: value to lower-case.
# Returns: tolower(x) on success, NA on error.
#
# The original probed with tryCatch, tested inherits(), and then called
# tolower() a second time; the error handler can return NA directly,
# running the conversion only once.
try.error <- function(x)
{
  tryCatch(tolower(x), error = function(e) NA)
}
# Lower-case the cleaned tweets; drop any that failed conversion (NA) and
# discard the names sapply() attached.
uk.text = sapply(uk.text, try.error)
uk.text = uk.text[!is.na(uk.text)]
names(uk.text) = NULL
# Naive-Bayes emotion classification (sentiment package); column 7 of the
# result is used here as the emotion label.
class_emo = classify_emotion(uk.text, algorithm="bayes", prior=1.0)
emotion = class_emo[,7]
# Tweets with no confident emotion get the explicit level "unknown".
emotion[is.na(emotion)] = "unknown"
# Polarity classification; column 4 of the result is used as the label.
class_pol = classify_polarity(uk.text, algorithm="bayes")
polarity = class_pol[,4]
sent_df = data.frame(text=uk.text, emotion=emotion, polarity=polarity,stringsAsFactors=FALSE)
# Reorder the emotion factor levels by descending frequency so the bar
# chart plots the most common emotion first.
sent_df = within(sent_df, emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))
# Same pair of bar charts as above, now for the uk tweet set.
ggplot(sent_df, aes(x=emotion)) + geom_bar(aes(y=..count.., fill=emotion))+scale_fill_brewer(palette="Dark2") + labs(x="emotion categories", y="number of tweets") + ggtitle("Sentiment Analysis of Tweets about Cancer\n(classification by emotion)")
ggplot(sent_df, aes(x=polarity)) + geom_bar(aes(y=..count.., fill=polarity)) + scale_fill_brewer(palette="RdGy") + labs(x="polarity categories", y="number of tweets") + ggtitle("Sentiment Analysis of Tweets about Cancer\n(classification by polarity)")
#topic modelling LDA
# Fit an 8-topic LDA model to the india term-document matrix; show the top
# 4 terms of each topic joined into one label per topic.
india.dtm <- as.DocumentTermMatrix(india.tdm)
india.lda <- LDA(india.dtm, k = 8)
term <- terms(india.lda, 4)
term <- apply(term, MARGIN = 2, paste, collapse = ", ")
term
# Most likely topic per document, plotted as a stacked density over the
# tweet creation dates.
topic <- topics(india.lda, 1)
topics <- data.frame(date=as.IDate(india.df$created), topic)
qplot(date, ..count.., data=topics, geom="density",fill=term[topic], position="stack")
# Repeat the topic modelling for the uk tweet set.
uk.dtm <- as.DocumentTermMatrix(uk.tdm)
uk.lda <- LDA(uk.dtm, k = 8)
term <- terms(uk.lda, 4)
term <- apply(term, MARGIN = 2, paste, collapse = ", ")
term
topic <- topics(uk.lda, 1)
topics <- data.frame(date=as.IDate(uk.df$created), topic)
qplot(date, ..count.., data=topics, geom="density",fill=term[topic], position="stack")
|
# RScript that aggregates reports from MiXCR's alignment and assembly tools
# into a single per-sample QC summary table.
#
# Usage: Rscript mixcr.rnaseq.QC.R <alignReportDir> <assembleReportDir> <outDir>
#
# At this point the script simply accumulates results, but it'd be easy to add
# some visualization, analysis, etc. once the data is aggregated

### Load dependencies
#.libPaths("/home/exacloud/gscratch/CoussensLab/howellsf/R-4.0.2/library")
library(data.table)

## Get command-line arguments
arguments <- commandArgs(trailingOnly = TRUE)
alignDir_v <- arguments[1]     # directory of MiXCR align reports
assembleDir_v <- arguments[2]  # directory of MiXCR assemble reports
outDir_v <- arguments[3]       # output directory for the summary table

## Get files
alignFiles_v <- list.files(alignDir_v)
assembleFiles_v <- list.files(assembleDir_v)

## Sort files by the numeric part of their "S<num>_" prefix so the align and
## assemble reports line up sample-by-sample.
alignFiles_v <- alignFiles_v[order(as.numeric(gsub("^S|_align.*", "", alignFiles_v)))]
assembleFiles_v <- assembleFiles_v[order(as.numeric(gsub("^S|_assemble.*", "", assembleFiles_v)))]

## Get sample numbers (everything before the first underscore)
alignNumbers_v <- gsub("_.*", "", alignFiles_v)
assembleNumbers_v <- gsub("_.*", "", assembleFiles_v)

## Make sure the two report sets describe the same samples
mismatch_v <- which(alignNumbers_v != assembleNumbers_v)
if (length(mismatch_v) > 0) stop("Mismatched report files: align and assemble sample numbers disagree.", call. = FALSE)

## Pre-allocate one value per sample. (Initializing with NA_real_ instead of
## zero-length numeric() avoids a length-mismatch error when creating
## columns on a table that already has rows.)
nSample_v <- length(alignFiles_v)
inputReads_v <- rep(NA_real_, nSample_v)
alignedReads_v <- rep(NA_real_, nSample_v)
totalClones_v <- rep(NA_real_, nSample_v)
readsInClones_v <- rep(NA_real_, nSample_v)

## Iterate over report pairs and extract QC values
for (i in seq_along(alignFiles_v)) {

  ## Read the paired reports
  currAlign_data <- readLines(file.path(alignDir_v, alignFiles_v[i]))
  currAssemble_data <- readLines(file.path(assembleDir_v, assembleFiles_v[i]))

  ## Input reads: "Total sequencing reads: N"
  currInputReads_v <- grep("Total sequencing reads", currAlign_data, value = TRUE)
  inputReads_v[i] <- as.numeric(gsub("^.*: ", "", currInputReads_v))

  ## Aligned reads: "Successfully aligned reads: N (P%)" -- keep the count,
  ## strip the trailing percentage. (The percentage itself was extracted but
  ## never used in the original script, so it is not computed here.)
  currAlignedReads_v <- grep("Successfully aligned reads", currAlign_data, value = TRUE)
  alignedReads_v[i] <- as.numeric(gsub("^.*: | \\(.*$", "", currAlignedReads_v))

  ## Clonotype count: "Final clonotype count: N"
  currClones_v <- grep("Final clonotype count", currAssemble_data, value = TRUE)
  totalClones_v[i] <- as.numeric(gsub(".*: ", "", currClones_v))

  ## Reads used in clonotypes: "Reads used in clonotypes, percent of total: N (P%)"
  currReadsInClones_v <- grep("Reads used in clonotypes, percent of total", currAssemble_data, value = TRUE)
  readsInClones_v[i] <- as.numeric(gsub(".*: | \\(.*$", "", currReadsInClones_v))

} # for i

## Assemble the summary table
output_dt <- data.table(Sample = alignNumbers_v,
                        Input.Reads = inputReads_v,
                        Aligned.Reads = alignedReads_v,
                        Total.Clones = totalClones_v,
                        Reads.In.Clones = readsInClones_v)

### Write output
outName_v <- file.path(outDir_v, "mixcr.QC.summary.txt")
write.table(output_dt, outName_v, quote = FALSE, sep = '\t', row.names = FALSE)
|
/50_QC/mixcr.rnaseq.QC.R
|
no_license
|
CoussensLabOHSU/tcr-seq_pipeline
|
R
| false
| false
| 2,818
|
r
|
# RScript that aggregates reports from MiXCR's alignment tool
#
# At this point the script simply accumulates results, but it'd be easy to add
# some visualization, analysis, etc. once the data is aggregated
#
# Builds a per-sample QC table (input reads, aligned reads, clone counts)
# from paired MiXCR "align" and "assemble" report directories, then writes
# it to <outDir>/mixcr.QC.summary.txt.
## Get command-line arguments
### Load dependencies
#.libPaths("/home/exacloud/gscratch/CoussensLab/howellsf/R-4.0.2/library")
library(data.table)
# args: 1 = align report dir, 2 = assemble report dir, 3 = output dir
arguments <- commandArgs(trailingOnly=TRUE);
alignDir_v <- arguments[1]
assembleDir_v <- arguments[2]
outDir_v <- arguments[3]
## Get files
alignFiles_v <- list.files(alignDir_v)
assembleFiles_v <- list.files(assembleDir_v)
## Sort files
# Order by the numeric portion of the "S<num>_" prefix so align and
# assemble reports line up sample-by-sample.
alignFiles_v <- alignFiles_v[order(as.numeric(gsub("^S|_align.*", "", alignFiles_v)))]
assembleFiles_v <- assembleFiles_v[order(as.numeric(gsub("^S|_assemble.*", "", assembleFiles_v)))]
## Get sample numbers (everything before the first underscore)
alignNumbers_v <- gsub("_.*", "", alignFiles_v)
assembleNumbers_v <- gsub("_.*", "", assembleFiles_v)
## Make sure they match
mismatch_v <- which(alignNumbers_v != assembleNumbers_v)
# NOTE(review): "Mismtach" is a typo in this error message ("Mismatch").
if (length(mismatch_v) > 0) stop("Mismtach report files")
## Make data.frame
output_dt <- data.table(Sample = alignNumbers_v)
# NOTE(review): assigning zero-length numeric() columns to a table that
# already has rows is likely to error for non-empty inputs; consider
# initializing with rep(NA_real_, length(alignNumbers_v)) instead.
output_dt$Input.Reads <- numeric()
output_dt$Aligned.Reads <- numeric()
output_dt$Total.Clones <- numeric()
output_dt$Reads.In.Clones <- numeric()
## Iterate over files and extract info
for (i in 1:length(alignFiles_v)){
## Get files
currAlign_v <- alignFiles_v[i]
currAssemble_v <- assembleFiles_v[i]
## Get data
currAlign_data <- readLines(file.path(alignDir_v, currAlign_v))
currAssemble_data <- readLines(file.path(assembleDir_v, currAssemble_v))
## Get Input reads
# "Total sequencing reads: N" -> N
currInputReads_v <- grep("Total sequencing reads", currAlign_data, value = T)
currInputReads_v <- as.numeric(gsub("^.*: ", "", currInputReads_v))
## Get Aligned Reads
# "Successfully aligned reads: N (P%)" -> percentage, then count.
# NOTE(review): currAlignedPct_v is computed but never used afterwards.
currAlignedReads_v <- grep("Successfully aligned reads", currAlign_data, value = T)
currAlignedPct_v <- as.numeric(gsub("^.*\\(|%\\)$", "", currAlignedReads_v))
currAlignedReads_v <- as.numeric(gsub("^.*: | \\(.*$", "", currAlignedReads_v))
## Get Clones
# "Final clonotype count: N" -> N
currClones_v <- grep("Final clonotype count", currAssemble_data, value = T)
currClones_v <- as.numeric(gsub(".*: ", "", currClones_v))
## Get reads used in clones
currReadsInClones_v <- grep("Reads used in clonotypes, percent of total", currAssemble_data, value = T)
currReadsInClones_v <- as.numeric(gsub(".*: | \\(.*$", "", currReadsInClones_v))
## Update data.table
output_dt[i, "Input.Reads" := currInputReads_v]
output_dt[i, "Aligned.Reads" := currAlignedReads_v]
output_dt[i, "Total.Clones" := currClones_v]
output_dt[i, "Reads.In.Clones" := currReadsInClones_v]
}
### Write output
outName_v <- file.path(outDir_v, "mixcr.QC.summary.txt")
write.table(output_dt, outName_v, quote = F, sep = '\t', row.names = F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pedotransfer.R
\name{ksat}
\alias{ksat}
\title{Saturated hydraulic conductivity, including gravel effects.}
\usage{
ksat(sand, clay, soc, DF = 1, gravel = 0)
}
\arguments{
\item{sand}{Fraction of sand}
\item{clay}{Fraction of clay}
\item{soc}{Soil organic matter percent}
\item{DF}{Density factor between 0.9 and 1.3, normal (default) at 1}
\item{gravel}{Gravel percent by weight (0 by default)}
}
\description{
Saturated hydraulic conductivity, including gravel effects.
}
\keyword{internal}
|
/man/ksat.Rd
|
no_license
|
grahamjeffries/rcropmod
|
R
| false
| true
| 576
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pedotransfer.R
\name{ksat}
\alias{ksat}
\title{Saturated hydraulic conductivity, including gravel effects.}
\usage{
ksat(sand, clay, soc, DF = 1, gravel = 0)
}
\arguments{
\item{sand}{Fraction of sand}
\item{clay}{Fraction of clay}
\item{soc}{Soil organic matter percent}
\item{DF}{Density factor between 0.9 and 1.3, normal (default) at 1}
\item{gravel}{Gravel percent by weight (0 by default)}
}
\description{
Saturated hydraulic conductivity, including gravel effects.
}
\keyword{internal}
|
# Misc methods

# Generate `n` evenly spaced hue colours (ggplot2-style discrete palette):
# hues evenly spaced around the colour wheel at fixed luminance (65) and
# chroma (100).
#
# @param n Number of colours to generate (non-negative integer).
# @return A character vector of `n` hex colour codes.
get_color_hexa <- function(n) {
  # n + 1 points from 15 to 375 degrees; the last wraps onto the first
  # (375 == 15 mod 360), so only the first n are kept.
  hues <- seq(15, 375, length.out = n + 1)
  # seq_len(n) instead of 1:n so n == 0 yields an empty vector rather
  # than indexing with c(1, 0) (which returned one colour).
  hcl(h = hues, l = 65, c = 100)[seq_len(n)]
}
|
/R/Misc_methods.R
|
no_license
|
DeprezM/SCsim
|
R
| false
| false
| 133
|
r
|
# Misc methods
# Generate n evenly spaced hues (ggplot2's default discrete palette look):
# points around the colour wheel at luminance 65 / chroma 100.
get_color_hexa <- function(n) {
  # n + 1 points from 15 to 375 degrees; the last duplicates the first
  # (375 == 15 mod 360), so only the first n are kept below.
  hues = seq(15, 375, length = n + 1)
  # NOTE(review): 1:n misbehaves when n == 0 (indexes with c(1, 0) and
  # returns one colour instead of zero) -- consider seq_len(n).
  hcl(h = hues, l = 65, c = 100)[1:n]
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WebApiTools.R
\name{getPriorityVocabKey}
\alias{getPriorityVocabKey}
\title{Get Priority Vocab Source Key}
\usage{
getPriorityVocabKey(baseUrl)
}
\arguments{
\item{baseUrl}{The base URL for the WebApi instance, for example:
"http://api.ohdsi.org:80/WebAPI".}
}
\value{
A string with the source key of the default OMOP Vocab in Atlas.
}
\description{
Get Priority Vocab Source Key
}
\details{
Obtains the source key of the default OMOP Vocab in Atlas.
}
|
/man/getPriorityVocabKey.Rd
|
permissive
|
dikshya5119/OhdsiRTools
|
R
| false
| true
| 531
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WebApiTools.R
\name{getPriorityVocabKey}
\alias{getPriorityVocabKey}
\title{Get Priority Vocab Source Key}
\usage{
getPriorityVocabKey(baseUrl)
}
\arguments{
\item{baseUrl}{The base URL for the WebApi instance, for example:
"http://api.ohdsi.org:80/WebAPI".}
}
\value{
A string with the source key of the default OMOP Vocab in Atlas.
}
\description{
Get Priority Vocab Source Key
}
\details{
Obtains the source key of the default OMOP Vocab in Atlas.
}
|
#' Splits a composite figure that contains multiple plots.
#'
#' Automatically detects divisions among multiple plots found within a single
#' figure image file. It then uses these divisions to split the image into
#' multiple image files; each containing only a single X-Y plot. Currently only
#' works on composite figures that have a matrix-style presentation where each
#' sub-plot has the same size.
#'
#' @param file The file name and location of a composite figure. Prompts
#'    for file name if none is explicitly called.
#' @param binary_threshold A proportion from zero to one designating the
#'    gray-scale threshold to convert pixels into black or white. Pixel
#'    intensities below the proportion will be converted to black, and those
#'    above white.
#' @param space_sensitivity_X A proportion ranging from zero to one that
#'    designates the size of the separation among sub-plots along the X-axis
#'    relative to the largest empty space detected in the figure image. As
#'    space_sensitivity_X approaches 1, finer empty spaces (e.g., empty spaces
#'    found in between plot captions and the axis line) will be treated as plot
#'    divisions.
#' @param space_sensitivity_Y A proportion ranging from zero to one that
#'    designates the size of the separation among sub-plots along the Y-axis
#'    relative to the largest empty space detected in the figure image. As
#'    space_sensitivity_Y approaches 1, finer empty spaces (e.g., empty spaces
#'    found in between plot captions and the axis line) will be treated as plot
#'    divisions.
#' @param border_buffer An integer value designating the amount of empty space
#'    around the figure image that should be ignored. As the number increases,
#'    more blank space near the image's edge will be ignored.
#' @param guess_limit An integer value designating the number of candidate
#'    division guesses kept per axis. The default keeps the top 10 guesses.
#'    Increase this number if there are more than 6 subplots per axis.
#' @param ignoreX When \code{TRUE}, ignores detection of sub-plots along the
#'    X-axis.
#' @param ignoreY When \code{TRUE}, ignores detection of sub-plots along the
#'    Y-axis.
#' @param quiet When \code{TRUE}, does not print to console the saved file names.
#'
#' @return The number of sub-plots saved to file.
#'
#' @importFrom EBImage readImage
#' @export
figure_splitPlot <- function (file = file.choose(),
                              binary_threshold = 0.6,
                              space_sensitivity_X = 0.4,
                              space_sensitivity_Y = 0.6,
                              border_buffer = 5,
                              guess_limit = 10,
                              ignoreX = FALSE,
                              ignoreY = FALSE,
                              quiet = FALSE) {

  theFigure <- readImage(file)

  # Convert to a binary (black/white) image so empty gutters between
  # sub-plots can be detected as runs of blank pixels.
  aBinaryFigure <- figure_transformToBinary(theFigure, binary_threshold)

  # Candidate cut coordinates along X (image columns), bracketed by the
  # image edges (0 and xDim) so each cell has both boundaries.
  xDim <- dim(aBinaryFigure)[1]
  if (isTRUE(ignoreX)) {
    theXtemp <- NULL
  } else {
    theXtemp <- figureCutCoord(theFig = aBinaryFigure,
                               axis = "X",
                               sensitivity = space_sensitivity_X,
                               border = border_buffer,
                               top = guess_limit)
  }
  theX <- c(0, theXtemp, xDim)

  # Candidate cut coordinates along Y (image rows), likewise bracketed.
  yDim <- dim(aBinaryFigure)[2]
  if (isTRUE(ignoreY)) {
    theYtemp <- NULL
  } else {
    theYtemp <- figureCutCoord(theFig = aBinaryFigure,
                               axis = "Y",
                               sensitivity = space_sensitivity_Y,
                               border = border_buffer,
                               top = guess_limit)
  }
  theY <- c(0, theYtemp, yDim)

  # Crop each (X, Y) cell and save it as "<file>_subPlot_<k>_of_<total>.jpg".
  countFig <- 1
  totalFigs <- (length(theX) - 1) * (length(theY) - 1)
  if (totalFigs > 1) {
    for (i in seq_len(length(theX) - 1)) {
      for (j in seq_len(length(theY) - 1)) {
        croppedFig <- theFigure[theX[i]:theX[i + 1], theY[j]:theY[j + 1], ]
        # file_path_sans_ext() comes from the tools package -- presumably
        # imported at the package level; verify the NAMESPACE (review note).
        newFileName <- paste0(file_path_sans_ext(file), "_subPlot_", countFig,
                              "_of_", totalFigs, ".jpg")
        if (!quiet) print(newFileName)
        figure_write(croppedFig, file = newFileName)
        countFig <- countFig + 1
      }
    }
  } else {
    .metagearPROBLEM("warning", "sub-plots were not detected in this figure image")
  }

  return(totalFigs)
}
|
/R/figure_split.R
|
no_license
|
Anj-prog/metagear
|
R
| false
| false
| 4,599
|
r
|
#' Splits a composite figure that contains multiple plots.
#'
#' Automatically detects divisions among multiple plots found within a single
#' figure image file. It then uses these divisions to split the image into
#' multiple image files; each containing only a single X-Y plot. Currently only
#' works on composite figures that have a matrix-style presentation where each
#' sub-plot has the same size.
#'
#' @param file The file name and location of a composite figure. Prompts
#'    for file name if none is explicitly called.
#' @param binary_threshold A proportion from zero to one designating the
#'    gray-scale threshold to convert pixels into black or white. Pixel
#'    intensities below the proportion will be converted to black, and those
#'    above white.
#' @param space_sensitivity_X A proportion ranging from zero to one that
#'    designates the size of the separation among sub-plots along the X-axis
#'    relative to the largest empty space detected in the figure image. As
#'    space_sensitivity_X approaches 1, finer empty spaces (e.g., empty spaces
#'    found in between plot captions and the axis line) will be treated as plot
#'    divisions.
#' @param space_sensitivity_Y A proportion ranging from zero to one that
#'    designates the size of the separation among sub-plots along the Y-axis
#'    relative to the largest empty space detected in the figure image. As
#'    space_sensitivity_Y approaches 1, finer empty spaces (e.g., empty spaces
#'    found in between plot captions and the axis line) will be treated as plot
#'    divisions.
#' @param border_buffer An integer value designating the amount of empty space
#'    around the figure image that should be ignored. As the number increases,
#'    more blank space near the image's edge will be ignored.
#' @param guess_limit An integer value designating the number of candidate
#'    division guesses kept per axis. The default keeps the top 10 guesses.
#'    Increase this number if there are more than 6 subplots per axis.
#' @param ignoreX When \code{TRUE}, ignores detection of sub-plots along the
#'    X-axis.
#' @param ignoreY When \code{TRUE}, ignores detection of sub-plots along the
#'    Y-axis.
#' @param quiet When \code{TRUE}, does not print to console the saved file names.
#'
#' @return The number of sub-plots saved to file.
#'
#' @importFrom EBImage readImage
#' @export
figure_splitPlot <- function (file = file.choose(),
                              binary_threshold = 0.6,
                              space_sensitivity_X = 0.4,
                              space_sensitivity_Y = 0.6,
                              border_buffer = 5,
                              guess_limit = 10,
                              ignoreX = FALSE,
                              ignoreY = FALSE,
                              quiet = FALSE) {
theFigure <- readImage(file)
# load figure and convert to binary (searchable) format
aBinaryFigure <- figure_transformToBinary(theFigure,
                                        binary_threshold)
# get image dimensions & best guesses for splits
# Candidate X cuts are bracketed with the image edges (0 and xDim) below,
# so every cell has both boundaries.
xDim <- dim(aBinaryFigure)[1]
if(ignoreX == TRUE) {
theXtemp <- NULL
}
else {
theXtemp <- figureCutCoord(theFig = aBinaryFigure,
                           axis = "X",
                           sensitivity = space_sensitivity_X,
                           border = border_buffer,
                           top = guess_limit)
}
theX <- c(0, theXtemp, xDim)
# Same for Y (image rows).
yDim <- dim(aBinaryFigure)[2];
if(ignoreY == TRUE) {
theYtemp <- NULL
}
else {
theYtemp <- figureCutCoord(theFig = aBinaryFigure,
                           axis = "Y",
                           sensitivity = space_sensitivity_Y,
                           border = border_buffer,
                           top = guess_limit)
}
theY <- c(0, theYtemp, yDim)
# save splits as separate images
# Each (X, Y) cell is cropped and written as
# "<file>_subPlot_<k>_of_<total>.jpg".
countFig <- 1
totalFigs <- (length(theX) - 1) * (length(theY) - 1)
if(totalFigs > 1) {
for (i in 1:(length(theX) - 1)) {
for (j in 1:(length(theY) - 1)) {
croppedFig <- theFigure[theX[i]:theX[i + 1], theY[j]:theY[j + 1], ]
# NOTE(review): file_path_sans_ext() is from the tools package; ensure it
# is imported in the package NAMESPACE.
newFileName <- paste0(file_path_sans_ext(file), "_subPlot_", countFig, "_of_", totalFigs, ".jpg")
if(quiet != TRUE) print(newFileName)
figure_write(croppedFig, file = newFileName)
countFig <- countFig + 1
}
}
} else {
.metagearPROBLEM("warning", "sub-plots were not detected in this figure image")
}
return(totalFigs)
}
|
# Boxplots of throughput per architecture variant for the Azure Spring Cloud
# JMeter "route" test results. Plot labels are in Polish by design.
Sys.setlocale(category = "LC_ALL", locale = "Polish")
# NOTE(review): hard-coded absolute path -- the script only runs as-is on
# the author's machine.
setwd('C:\\Users\\anna.ojdowska\\Google Drive\\Praca magisterska\\JMeter\\Results\\Processed\\SpringCloud');
route = read.csv('route.csv');
library(ggplot2)
library(reshape)
library(grid)
# Keep the Spec facet order as it appears in the CSV (not alphabetical).
route$Spec = factor(route$Spec,levels=unique(route$Spec))
p = ggplot(route, aes(fill=variable, y=value, x=factor(variable)))
p = p + geom_boxplot() + facet_grid(~Spec, scales="free", space = "free")
# Y label: throughput [requests/s]; legend: architecture.
p=p+ylab('Przepustowość [l. zapytań/s]')+xlab("")
p=p+labs(fill = "Architektura",
         title='Testy na Azure Spring Cloud',
         subtitle='100 zapytań o zapytań o najkrótszą trasę (Route)')
# Center the title and subtitle.
p=p+theme(plot.title = element_text(hjust = 0.5, size=14),
          plot.subtitle = element_text(hjust = 0.5, size = 13))
print(p)
|
/AzureSpringCloud/route_plot.R
|
no_license
|
annaojdowska/mono-vs-ms-results
|
R
| false
| false
| 799
|
r
|
# Boxplots of throughput per architecture variant for the Azure Spring Cloud
# JMeter "route" test results. Plot labels are in Polish by design.
Sys.setlocale(category = "LC_ALL", locale = "Polish")
# NOTE(review): hard-coded absolute path -- the script only runs as-is on
# the author's machine.
setwd('C:\\Users\\anna.ojdowska\\Google Drive\\Praca magisterska\\JMeter\\Results\\Processed\\SpringCloud');
route = read.csv('route.csv');
library(ggplot2)
library(reshape)
library(grid)
# Keep the Spec facet order as it appears in the CSV (not alphabetical).
route$Spec = factor(route$Spec,levels=unique(route$Spec))
p = ggplot(route, aes(fill=variable, y=value, x=factor(variable)))
p = p + geom_boxplot() + facet_grid(~Spec, scales="free", space = "free")
# Y label: throughput [requests/s]; legend: architecture.
p=p+ylab('Przepustowość [l. zapytań/s]')+xlab("")
p=p+labs(fill = "Architektura",
         title='Testy na Azure Spring Cloud',
         subtitle='100 zapytań o zapytań o najkrótszą trasę (Route)')
# Center the title and subtitle.
p=p+theme(plot.title = element_text(hjust = 0.5, size=14),
          plot.subtitle = element_text(hjust = 0.5, size = 13))
print(p)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modeltime-calibrate.R
\name{modeltime_calibrate}
\alias{modeltime_calibrate}
\title{Preparation for forecasting}
\usage{
modeltime_calibrate(object, new_data, id = NULL, quiet = TRUE, ...)
}
\arguments{
\item{object}{A fitted model object that is either:
\enumerate{
\item A modeltime table that has been created using \code{\link[=modeltime_table]{modeltime_table()}}
\item A workflow that has been fit by \code{\link[=fit.workflow]{fit.workflow()}} or
\item A parsnip model that has been fit using \code{\link[=fit.model_spec]{fit.model_spec()}}
}}
\item{new_data}{A test data set \code{tibble} containing future information (timestamps and actual values).}
\item{id}{A quoted column name containing an identifier column identifying time series that are grouped.}
\item{quiet}{Hide errors (\code{TRUE}, the default), or display them as they occur?}
\item{...}{Additional arguments passed to \code{\link[=modeltime_forecast]{modeltime_forecast()}}.}
}
\value{
A Modeltime Table (\code{mdl_time_tbl}) with nested \code{.calibration_data} added
}
\description{
Calibration sets the stage for accuracy and forecast confidence
by computing predictions and residuals from out of sample data.
}
\details{
The results of calibration are used for:
\itemize{
\item \strong{Forecast Confidence Interval Estimation}: The out of sample residual data is used to calculate the
confidence interval. Refer to \code{\link[=modeltime_forecast]{modeltime_forecast()}}.
\item \strong{Accuracy Calculations:} The out of sample actual and prediction values are used to calculate
performance metrics. Refer to \code{\link[=modeltime_accuracy]{modeltime_accuracy()}}
}
The calibration steps include:
\enumerate{
\item If not a Modeltime Table, objects are converted to Modeltime Tables internally
\item Two Columns are added:
}
\itemize{
\item \code{.type}: Indicates the sample type. This is:
\itemize{
\item "Test" if predicted, or
\item "Fitted" if residuals were stored during modeling.
}
\item \code{.calibration_data}:
\itemize{
\item Contains a tibble with Timestamps, Actual Values, Predictions and Residuals
calculated from \code{new_data} (Test Data)
\item If \code{id} is provided, will contain a 5th column that is the identifier variable.
}
}
}
\examples{
library(tidyverse)
library(lubridate)
library(timetk)
library(parsnip)
library(rsample)
# Data
m750 <- m4_monthly \%>\% filter(id == "M750")
# Split Data 90/10 (prop = 0.9)
splits <- initial_time_split(m750, prop = 0.9)
# --- MODELS ---
# Model 1: auto_arima ----
model_fit_arima <- arima_reg() \%>\%
set_engine(engine = "auto_arima") \%>\%
fit(value ~ date, data = training(splits))
# ---- MODELTIME TABLE ----
models_tbl <- modeltime_table(
model_fit_arima
)
# ---- CALIBRATE ----
calibration_tbl <- models_tbl \%>\%
modeltime_calibrate(
new_data = testing(splits)
)
# ---- ACCURACY ----
calibration_tbl \%>\%
modeltime_accuracy()
# ---- FORECAST ----
calibration_tbl \%>\%
modeltime_forecast(
new_data = testing(splits),
actual_data = m750
)
}
|
/man/modeltime_calibrate.Rd
|
permissive
|
ggardiakos/modeltime
|
R
| false
| true
| 3,137
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modeltime-calibrate.R
\name{modeltime_calibrate}
\alias{modeltime_calibrate}
\title{Preparation for forecasting}
\usage{
modeltime_calibrate(object, new_data, id = NULL, quiet = TRUE, ...)
}
\arguments{
\item{object}{A fitted model object that is either:
\enumerate{
\item A modeltime table that has been created using \code{\link[=modeltime_table]{modeltime_table()}}
\item A workflow that has been fit by \code{\link[=fit.workflow]{fit.workflow()}} or
\item A parsnip model that has been fit using \code{\link[=fit.model_spec]{fit.model_spec()}}
}}
\item{new_data}{A test data set \code{tibble} containing future information (timestamps and actual values).}
\item{id}{A quoted column name containing an identifier column identifying time series that are grouped.}
\item{quiet}{Hide errors (\code{TRUE}, the default), or display them as they occur?}
\item{...}{Additional arguments passed to \code{\link[=modeltime_forecast]{modeltime_forecast()}}.}
}
\value{
A Modeltime Table (\code{mdl_time_tbl}) with nested \code{.calibration_data} added
}
\description{
Calibration sets the stage for accuracy and forecast confidence
by computing predictions and residuals from out of sample data.
}
\details{
The results of calibration are used for:
\itemize{
\item \strong{Forecast Confidence Interval Estimation}: The out of sample residual data is used to calculate the
confidence interval. Refer to \code{\link[=modeltime_forecast]{modeltime_forecast()}}.
\item \strong{Accuracy Calculations:} The out of sample actual and prediction values are used to calculate
performance metrics. Refer to \code{\link[=modeltime_accuracy]{modeltime_accuracy()}}
}
The calibration steps include:
\enumerate{
\item If not a Modeltime Table, objects are converted to Modeltime Tables internally
\item Two Columns are added:
}
\itemize{
\item \code{.type}: Indicates the sample type. This is:
\itemize{
\item "Test" if predicted, or
\item "Fitted" if residuals were stored during modeling.
}
\item \code{.calibration_data}:
\itemize{
\item Contains a tibble with Timestamps, Actual Values, Predictions and Residuals
calculated from \code{new_data} (Test Data)
\item If \code{id} is provided, will contain a 5th column that is the identifier variable.
}
}
}
\examples{
library(tidyverse)
library(lubridate)
library(timetk)
library(parsnip)
library(rsample)
# Data
m750 <- m4_monthly \%>\% filter(id == "M750")
# Split Data 90/10 (prop = 0.9)
splits <- initial_time_split(m750, prop = 0.9)
# --- MODELS ---
# Model 1: auto_arima ----
model_fit_arima <- arima_reg() \%>\%
set_engine(engine = "auto_arima") \%>\%
fit(value ~ date, data = training(splits))
# ---- MODELTIME TABLE ----
models_tbl <- modeltime_table(
model_fit_arima
)
# ---- CALIBRATE ----
calibration_tbl <- models_tbl \%>\%
modeltime_calibrate(
new_data = testing(splits)
)
# ---- ACCURACY ----
calibration_tbl \%>\%
modeltime_accuracy()
# ---- FORECAST ----
calibration_tbl \%>\%
modeltime_forecast(
new_data = testing(splits),
actual_data = m750
)
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/cluster.R
\docType{methods}
\name{id,Cluster-method}
\alias{id,Cluster-method}
\title{Returns a cluster id}
\usage{
\S4method{id}{Cluster}(object)
}
\arguments{
\item{object}{a Cluster}
}
\value{
the id
}
\description{
Returns a cluster id
}
\author{
Jose A. Dianes
}
|
/vignettes/man/id-Cluster-method.Rd
|
no_license
|
gccong/ddiR-sirius
|
R
| false
| false
| 355
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/cluster.R
\docType{methods}
\name{id,Cluster-method}
\alias{id,Cluster-method}
\title{Returns a cluster id}
\usage{
\S4method{id}{Cluster}(object)
}
\arguments{
\item{object}{a Cluster}
}
\value{
the id
}
\description{
Returns a cluster id
}
\author{
Jose A. Dianes
}
|
\name{fscaret}
\alias{fscaret}
\title{
feature selection caret
}
\description{
Main function for fast feature selection. It utilizes other functions, such as regPredImp or impCalc, to obtain results in a list of data frames.
}
\usage{
fscaret(trainDF, testDF, installReqPckg = FALSE, preprocessData = FALSE,
with.labels = TRUE, classPred = FALSE, regPred = TRUE, skel_outfile = NULL,
impCalcMet = "RMSE&MSE", myTimeLimit = 24 * 60 * 60, Used.funcRegPred = NULL,
Used.funcClassPred = NULL, no.cores = NULL, method = "boot", returnResamp = "all",
missData=NULL, supress.output=FALSE, saveModel=FALSE, lvlScale=FALSE, ...)
}
\arguments{
\item{trainDF}{
Data frame of training data set, MISO (multiple input single output) type
}
\item{testDF}{
Data frame of testing data set, MISO (multiple input single output) type
}
\item{installReqPckg}{
If TRUE prior to calculations it installs all required packages, please be advised to be logged as root (admin) user
}
\item{preprocessData}{
If TRUE data preprocessing is performed prior to modeling
}
\item{with.labels}{
If TRUE header of the input files are read
}
\item{classPred}{
If TRUE classification models are applied. Please be advised that importance is scaled according to F-measure regardless impCalcMet settings.
}
\item{regPred}{
If TRUE regression models are applied
}
\item{skel_outfile}{
Skeleton output file, e.g. skel_outfile=c("_myoutput_")
}
\item{impCalcMet}{
Variable importance calculation scaling according to RMSE and MSE, for both please enter impCalcMet="RMSE&MSE"
}
\item{myTimeLimit}{
Time limit in seconds for single model development
}
\item{Used.funcRegPred}{
Vector of regression models to be used, for all available models please enter Used.funcRegPred="all"
}
\item{Used.funcClassPred}{
Vector of classification models to be used, for all available models please enter Used.funcClassPred="all"
}
\item{no.cores}{
Number of cores to be used for modeling, if NULL all available cores are used, should be numeric type or NULL
}
\item{method}{
Method passed to fitControl of caret package
}
\item{returnResamp}{
Returned resampling method passed to fitControl of caret package
}
\item{missData}{
Handling of missing data values. Possible values: "delRow" - delete observations with missing values, "delCol" - delete attributes with missing values, "meanCol" - replace missing values with column mean.
}
\item{supress.output}{
If TRUE, the output of the modeling phase produced by caret functions is suppressed; only information on which model is currently being calculated and the resulting variable importance is shown.
}
\item{saveModel}{
Logical value [TRUE/FALSE] if trained model should be embedded in final model.
}
\item{lvlScale}{
Logical value [TRUE/FALSE] indicating if additional scaling should be applied. For more information please refer to impCalc().
}
\item{\dots}{
Additional arguments, preferably passed to fitControl of caret package
}
}
\value{
\item{$ModelPred}{List of outputs from caret model fitting}
\item{$VarImp}{Data frames of variable importance and corresponding trained models}
\item{$PPlabels}{Data frame of resulting preprocessed data set with original input numbers and names}
\item{$PPTrainDF}{Training data set after preprocessing}
\item{$PPTestDF}{Testing data set after preprocessing}
\item{$VarImp$model}{Trained models}
}
\references{
Kuhn M. (2008) Building Predictive Models in R Using the caret Package \emph{Journal of Statistical Software} \bold{28(5)} \url{http://www.jstatsoft.org/}.
}
\note{
Be advised when using fscaret function as it requires hard disk operations for saving fitted models and data frames. Files are written in R temp session folder, for more details see tempdir(), getwd() and setwd()
}
\author{
Jakub Szlek and Aleksander Mendyk
}
\examples{
if((Sys.info()['sysname'])!="SunOS"){
library(fscaret)
# Load data sets
data(dataset.train)
data(dataset.test)
requiredPackages <- c("R.utils", "gsubfn", "ipred", "caret", "parallel", "MASS")
if(.Platform$OS.type=="windows"){
myFirstRES <- fscaret(dataset.train, dataset.test, installReqPckg=FALSE,
preprocessData=FALSE, with.labels=TRUE, classPred=FALSE,
regPred=TRUE, skel_outfile=NULL,
impCalcMet="RMSE&MSE", myTimeLimit=4,
Used.funcRegPred=c("lm"), Used.funcClassPred=NULL,
no.cores=1, method="boot", returnResamp="all",
supress.output=TRUE,saveModel=FALSE)
} else {
myCores <- 2
myFirstRES <- fscaret(dataset.train, dataset.test, installReqPckg=FALSE,
preprocessData=FALSE, with.labels=TRUE, classPred=FALSE,
regPred=TRUE, skel_outfile=NULL,
impCalcMet="RMSE&MSE", myTimeLimit=4,
Used.funcRegPred=c("lm","ppr"), Used.funcClassPred=NULL,
no.cores=myCores, method="boot", returnResamp="all",
supress.output=TRUE,saveModel=FALSE)
}
# Results
myFirstRES
}
}
\keyword{methods}
\keyword{iteration}
\keyword{optimize}
\keyword{array}
|
/man/fscaret.Rd
|
no_license
|
cran/fscaret
|
R
| false
| false
| 5,022
|
rd
|
\name{fscaret}
\alias{fscaret}
\title{
feature selection caret
}
\description{
Main function for fast feature selection. It utilizes other functions, such as regPredImp or impCalc, to obtain results in a list of data frames.
}
\usage{
fscaret(trainDF, testDF, installReqPckg = FALSE, preprocessData = FALSE,
with.labels = TRUE, classPred = FALSE, regPred = TRUE, skel_outfile = NULL,
impCalcMet = "RMSE&MSE", myTimeLimit = 24 * 60 * 60, Used.funcRegPred = NULL,
Used.funcClassPred = NULL, no.cores = NULL, method = "boot", returnResamp = "all",
missData=NULL, supress.output=FALSE, saveModel=FALSE, lvlScale=FALSE, ...)
}
\arguments{
\item{trainDF}{
Data frame of training data set, MISO (multiple input single output) type
}
\item{testDF}{
Data frame of testing data set, MISO (multiple input single output) type
}
\item{installReqPckg}{
If TRUE prior to calculations it installs all required packages, please be advised to be logged as root (admin) user
}
\item{preprocessData}{
If TRUE data preprocessing is performed prior to modeling
}
\item{with.labels}{
If TRUE header of the input files are read
}
\item{classPred}{
If TRUE classification models are applied. Please be advised that importance is scaled according to F-measure regardless impCalcMet settings.
}
\item{regPred}{
If TRUE regression models are applied
}
\item{skel_outfile}{
Skeleton output file, e.g. skel_outfile=c("_myoutput_")
}
\item{impCalcMet}{
Variable importance calculation scaling according to RMSE and MSE, for both please enter impCalcMet="RMSE&MSE"
}
\item{myTimeLimit}{
Time limit in seconds for single model development
}
\item{Used.funcRegPred}{
Vector of regression models to be used, for all available models please enter Used.funcRegPred="all"
}
\item{Used.funcClassPred}{
Vector of classification models to be used, for all available models please enter Used.funcClassPred="all"
}
\item{no.cores}{
Number of cores to be used for modeling, if NULL all available cores are used, should be numeric type or NULL
}
\item{method}{
Method passed to fitControl of caret package
}
\item{returnResamp}{
Returned resampling method passed to fitControl of caret package
}
\item{missData}{
Handling of missing data values. Possible values: "delRow" - delete observations with missing values, "delCol" - delete attributes with missing values, "meanCol" - replace missing values with column mean.
}
\item{supress.output}{
If TRUE, the output of the modeling phase produced by caret functions is suppressed; only information on which model is currently being calculated and the resulting variable importance is shown.
}
\item{saveModel}{
Logical value [TRUE/FALSE] if trained model should be embedded in final model.
}
\item{lvlScale}{
Logical value [TRUE/FALSE] indicating if additional scaling should be applied. For more information please refer to impCalc().
}
\item{\dots}{
Additional arguments, preferably passed to fitControl of caret package
}
}
\value{
\item{$ModelPred}{List of outputs from caret model fitting}
\item{$VarImp}{Data frames of variable importance and corresponding trained models}
\item{$PPlabels}{Data frame of resulting preprocessed data set with original input numbers and names}
\item{$PPTrainDF}{Training data set after preprocessing}
\item{$PPTestDF}{Testing data set after preprocessing}
\item{$VarImp$model}{Trained models}
}
\references{
Kuhn M. (2008) Building Predictive Models in R Using the caret Package \emph{Journal of Statistical Software} \bold{28(5)} \url{http://www.jstatsoft.org/}.
}
\note{
Be advised when using fscaret function as it requires hard disk operations for saving fitted models and data frames. Files are written in R temp session folder, for more details see tempdir(), getwd() and setwd()
}
\author{
Jakub Szlek and Aleksander Mendyk
}
\examples{
if((Sys.info()['sysname'])!="SunOS"){
library(fscaret)
# Load data sets
data(dataset.train)
data(dataset.test)
requiredPackages <- c("R.utils", "gsubfn", "ipred", "caret", "parallel", "MASS")
if(.Platform$OS.type=="windows"){
myFirstRES <- fscaret(dataset.train, dataset.test, installReqPckg=FALSE,
preprocessData=FALSE, with.labels=TRUE, classPred=FALSE,
regPred=TRUE, skel_outfile=NULL,
impCalcMet="RMSE&MSE", myTimeLimit=4,
Used.funcRegPred=c("lm"), Used.funcClassPred=NULL,
no.cores=1, method="boot", returnResamp="all",
supress.output=TRUE,saveModel=FALSE)
} else {
myCores <- 2
myFirstRES <- fscaret(dataset.train, dataset.test, installReqPckg=FALSE,
preprocessData=FALSE, with.labels=TRUE, classPred=FALSE,
regPred=TRUE, skel_outfile=NULL,
impCalcMet="RMSE&MSE", myTimeLimit=4,
Used.funcRegPred=c("lm","ppr"), Used.funcClassPred=NULL,
no.cores=myCores, method="boot", returnResamp="all",
supress.output=TRUE,saveModel=FALSE)
}
# Results
myFirstRES
}
}
\keyword{methods}
\keyword{iteration}
\keyword{optimize}
\keyword{array}
|
library(testthat)
library(splines)
# addNonlinearity() on a glm: should pick the ns() spline flexibility (2-7
# df) that minimizes AIC when the relationship is nonlinear, and return the
# plain linear model unchanged when it is linear.
# NOTE: the data-setup statement order matters -- the sample()/rnorm() calls
# draw from the RNG stream seeded by set.seed(123), so do not reorder.
test_that("Check regular glm", {
  n <- 100
  set.seed(123)
  # Predictor spans 1.5 sine periods; sex is a binary covariate.
  nl_ds <- data.frame(
    x = sample(seq(
      from = 0,
      to = pi * 3,
      length.out = n
    ),
    size = n,
    replace = TRUE
    ),
    sex = factor(sample(c("Male", "Female"),
      size = n,
      replace = TRUE
    ))
  )
  # Nonlinear outcome: sine signal + sex effect + noise.
  nl_ds$y <-
    sin(nl_ds$x) * 2 +
    (nl_ds$sex == "Male") * 1 +
    rnorm(n = n, mean = 0, sd = .5)
  # Linear counterpart built on the same predictors.
  l_ds <- nl_ds
  l_ds$y <-
    nl_ds$x * 2 +
    (nl_ds$sex == "Male") * 1 +
    rnorm(n = n, mean = 0, sd = .5)
  # Reference AICs: each candidate ns() df fitted manually.
  vals <- sapply(2:7, function(x) {
    AIC(glm(sprintf(
      "y ~ ns(x, %d) + sex",
      x
    ), data = nl_ds))
  })
  # Nonlinear data: the selected model's AIC must equal the best manual AIC.
  expect_equivalent(
    AIC(addNonlinearity(glm(y ~ x + sex, data = nl_ds),
      min_fn = AIC,
      flex_param = 2:7,
      variable = "x", spline_fn = "ns",
      workers = FALSE
    )),
    min(vals)
  )
  # Linear data: the original model should come back unchanged.
  expect_equivalent(
    addNonlinearity(glm(y ~ x + sex, data = l_ds),
      min_fn = AIC,
      flex_param = 2:7,
      variable = "x", spline_fn = "ns",
      workers = FALSE
    ),
    glm(y ~ x + sex, data = l_ds)
  )
})
test_that("Check regular lm", {
  # Same set-up as the glm test, exercised through lm() instead.
  # The RNG draws happen in the same order as the original so the
  # simulated data are identical: sample(x), sample(sex), rnorm x2.
  n_obs <- 100
  set.seed(123)
  x_grid <- seq(from = 0, to = pi * 3, length.out = n_obs)
  sim_df <- data.frame(
    x = sample(x_grid, size = n_obs, replace = TRUE),
    sex = factor(sample(c("Male", "Female"), size = n_obs, replace = TRUE))
  )
  sim_df$y <- sin(sim_df$x) * 2 +
    (sim_df$sex == "Male") * 1 +
    rnorm(n = n_obs, mean = 0, sd = .5)
  linear_df <- sim_df
  linear_df$y <- sim_df$x * 2 +
    (sim_df$sex == "Male") * 1 +
    rnorm(n = n_obs, mean = 0, sd = .5)
  # Reference AICs for natural-spline fits with 2..7 degrees of freedom.
  spline_aics <- vapply(2:7, function(k) {
    AIC(lm(sprintf("y ~ ns(x, %d) + sex", k), data = sim_df))
  }, numeric(1))
  # addNonlinearity() should pick the spline flexibility with minimal AIC.
  expect_equivalent(
    AIC(addNonlinearity(lm(y ~ x + sex, data = sim_df),
      min_fn = AIC,
      flex_param = 2:7,
      variable = "x", spline_fn = "ns",
      workers = FALSE
    )),
    min(spline_aics)
  )
  # For genuinely linear data the original model should come back unchanged.
  expect_equivalent(
    addNonlinearity(lm(y ~ x + sex, data = linear_df),
      min_fn = AIC,
      flex_param = 2:7,
      variable = "x", spline_fn = "ns",
      workers = FALSE
    ),
    lm(y ~ x + sex, data = linear_df)
  )
})
test_that("That rms-functions work", {
  # Same simulation set-up as the glm/lm tests above: x on [0, 3*pi],
  # a binary sex factor, one nonlinear (sine) outcome and one linear one.
  n <- 100
  set.seed(123)
  nl_ds <- data.frame(
    x = sample(seq(
      from = 0,
      to = pi * 3,
      length.out = n
    ),
    size = n,
    replace = TRUE
    ),
    sex = factor(sample(c("Male", "Female"),
      size = n,
      replace = TRUE
    ))
  )
  nl_ds$y <-
    sin(nl_ds$x) * 2 +
    (nl_ds$sex == "Male") * 1 +
    rnorm(n = n, mean = 0, sd = .5)
  l_ds <- nl_ds
  l_ds$y <-
    nl_ds$x * 2 +
    (nl_ds$sex == "Male") * 1 +
    rnorm(n = n, mean = 0, sd = .5)
  # rms must be attached for ols()/rcs().
  library(rms)
  # Reference AICs for restricted cubic splines with 3..7 knots
  # (3:7 rather than 2:7 here because rcs() needs at least 3 knots).
  vals <- sapply(3:7, function(x) {
    AIC(ols(as.formula(sprintf("y ~ rcs(x, %d) + sex", x)),
      data = nl_ds
    ))
  })
  # The selected rcs flexibility should minimise AIC.
  expect_equivalent(
    AIC(addNonlinearity(ols(y ~ x + sex, data = nl_ds),
      min_fn = AIC,
      flex_param = 3:7,
      variable = "x",
      spline_fn = "rcs",
      workers = FALSE
    )),
    min(vals)
  )
  # Using spline_fn = "ns" together with an ols() model is expected to error.
  expect_error(AIC(addNonlinearity(ols(y ~ x + sex, data = nl_ds),
    min_fn = AIC,
    flex_param = 3:7,
    variable = "x",
    spline_fn = "ns",
    workers = FALSE
  )))
  # Linear data: the original ols fit should be returned unchanged.
  expect_equivalent(
    addNonlinearity(ols(y ~ x + sex, data = l_ds),
      min_fn = AIC,
      flex_param = 3:7,
      variable = "x",
      spline_fn = "rcs",
      workers = FALSE
    ),
    ols(y ~ x + sex, data = l_ds)
  )
})
|
/tests/testthat/test-addNonlinearity.R
|
no_license
|
gforge/Greg
|
R
| false
| false
| 3,479
|
r
|
library(testthat)
library(splines)
test_that("Check regular glm", {
  # Simulate a covariate x on [0, 3*pi] and a binary sex factor, then
  # build one outcome with a sinusoidal (nonlinear) x-effect and one
  # with a purely linear x-effect. RNG call order matters for the
  # expectations below, so the draws happen in the same sequence:
  # sample(x), sample(sex), rnorm(nonlinear y), rnorm(linear y).
  n_obs <- 100
  set.seed(123)
  x_grid <- seq(from = 0, to = pi * 3, length.out = n_obs)
  sim_df <- data.frame(
    x = sample(x_grid, size = n_obs, replace = TRUE),
    sex = factor(sample(c("Male", "Female"), size = n_obs, replace = TRUE))
  )
  sim_df$y <- sin(sim_df$x) * 2 +
    (sim_df$sex == "Male") * 1 +
    rnorm(n = n_obs, mean = 0, sd = .5)
  linear_df <- sim_df
  linear_df$y <- sim_df$x * 2 +
    (sim_df$sex == "Male") * 1 +
    rnorm(n = n_obs, mean = 0, sd = .5)
  # Reference AICs for natural-spline fits with 2..7 degrees of freedom.
  spline_aics <- vapply(2:7, function(k) {
    AIC(glm(sprintf("y ~ ns(x, %d) + sex", k), data = sim_df))
  }, numeric(1))
  # addNonlinearity() should pick the spline flexibility with minimal AIC.
  expect_equivalent(
    AIC(addNonlinearity(glm(y ~ x + sex, data = sim_df),
      min_fn = AIC,
      flex_param = 2:7,
      variable = "x", spline_fn = "ns",
      workers = FALSE
    )),
    min(spline_aics)
  )
  # For genuinely linear data the original model should come back unchanged.
  expect_equivalent(
    addNonlinearity(glm(y ~ x + sex, data = linear_df),
      min_fn = AIC,
      flex_param = 2:7,
      variable = "x", spline_fn = "ns",
      workers = FALSE
    ),
    glm(y ~ x + sex, data = linear_df)
  )
})
test_that("Check regular lm", {
  # Same set-up as the glm test, exercised through lm() instead.
  # The RNG draws happen in the same order as the original so the
  # simulated data are identical: sample(x), sample(sex), rnorm x2.
  n_obs <- 100
  set.seed(123)
  x_grid <- seq(from = 0, to = pi * 3, length.out = n_obs)
  sim_df <- data.frame(
    x = sample(x_grid, size = n_obs, replace = TRUE),
    sex = factor(sample(c("Male", "Female"), size = n_obs, replace = TRUE))
  )
  sim_df$y <- sin(sim_df$x) * 2 +
    (sim_df$sex == "Male") * 1 +
    rnorm(n = n_obs, mean = 0, sd = .5)
  linear_df <- sim_df
  linear_df$y <- sim_df$x * 2 +
    (sim_df$sex == "Male") * 1 +
    rnorm(n = n_obs, mean = 0, sd = .5)
  # Reference AICs for natural-spline fits with 2..7 degrees of freedom.
  spline_aics <- vapply(2:7, function(k) {
    AIC(lm(sprintf("y ~ ns(x, %d) + sex", k), data = sim_df))
  }, numeric(1))
  # addNonlinearity() should pick the spline flexibility with minimal AIC.
  expect_equivalent(
    AIC(addNonlinearity(lm(y ~ x + sex, data = sim_df),
      min_fn = AIC,
      flex_param = 2:7,
      variable = "x", spline_fn = "ns",
      workers = FALSE
    )),
    min(spline_aics)
  )
  # For genuinely linear data the original model should come back unchanged.
  expect_equivalent(
    addNonlinearity(lm(y ~ x + sex, data = linear_df),
      min_fn = AIC,
      flex_param = 2:7,
      variable = "x", spline_fn = "ns",
      workers = FALSE
    ),
    lm(y ~ x + sex, data = linear_df)
  )
})
test_that("That rms-functions work", {
  # Same simulation set-up as the glm/lm tests above: x on [0, 3*pi],
  # a binary sex factor, one nonlinear (sine) outcome and one linear one.
  n <- 100
  set.seed(123)
  nl_ds <- data.frame(
    x = sample(seq(
      from = 0,
      to = pi * 3,
      length.out = n
    ),
    size = n,
    replace = TRUE
    ),
    sex = factor(sample(c("Male", "Female"),
      size = n,
      replace = TRUE
    ))
  )
  nl_ds$y <-
    sin(nl_ds$x) * 2 +
    (nl_ds$sex == "Male") * 1 +
    rnorm(n = n, mean = 0, sd = .5)
  l_ds <- nl_ds
  l_ds$y <-
    nl_ds$x * 2 +
    (nl_ds$sex == "Male") * 1 +
    rnorm(n = n, mean = 0, sd = .5)
  # rms must be attached for ols()/rcs().
  library(rms)
  # Reference AICs for restricted cubic splines with 3..7 knots
  # (3:7 rather than 2:7 here because rcs() needs at least 3 knots).
  vals <- sapply(3:7, function(x) {
    AIC(ols(as.formula(sprintf("y ~ rcs(x, %d) + sex", x)),
      data = nl_ds
    ))
  })
  # The selected rcs flexibility should minimise AIC.
  expect_equivalent(
    AIC(addNonlinearity(ols(y ~ x + sex, data = nl_ds),
      min_fn = AIC,
      flex_param = 3:7,
      variable = "x",
      spline_fn = "rcs",
      workers = FALSE
    )),
    min(vals)
  )
  # Using spline_fn = "ns" together with an ols() model is expected to error.
  expect_error(AIC(addNonlinearity(ols(y ~ x + sex, data = nl_ds),
    min_fn = AIC,
    flex_param = 3:7,
    variable = "x",
    spline_fn = "ns",
    workers = FALSE
  )))
  # Linear data: the original ols fit should be returned unchanged.
  expect_equivalent(
    addNonlinearity(ols(y ~ x + sex, data = l_ds),
      min_fn = AIC,
      flex_param = 3:7,
      variable = "x",
      spline_fn = "rcs",
      workers = FALSE
    ),
    ols(y ~ x + sex, data = l_ds)
  )
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculations.R
\name{rarefy_obs}
\alias{rarefy_obs}
\title{Calculate rarefied observation counts}
\usage{
rarefy_obs(obj, dataset, sample_size = NULL, cols = NULL,
other_cols = FALSE, out_names = NULL)
}
\arguments{
\item{obj}{A \code{\link[taxa]{taxmap}} object}
\item{dataset}{The name of a table in \code{obj$data}.}
\item{sample_size}{The sample size counts will be rarefied to. This can be
either a single integer or a vector of integers of equal length to the
number of columns.}
\item{cols}{The columns in \code{dataset} to use. By
default, all numeric columns are used. Takes one of the following inputs:
\describe{
\item{TRUE/FALSE:}{All/No columns will be used.}
\item{Character vector:}{The names of columns to use} \item{Numeric vector:}{The indexes of
columns to use}
\item{Vector of TRUE/FALSE of length equal to the number of columns:}{Use the columns corresponding to \code{TRUE} values.} }}
\item{other_cols}{Preserve in the output non-target columns present in the
input data. New columns will always be on the end. The "taxon_id" column
will be preserved in the front. Takes one of the following inputs:
\describe{
\item{NULL:}{No columns will be added back, not even the taxon id column.}
\item{TRUE/FALSE:}{All/None of the non-target columns will be preserved.}
\item{Character vector:}{The names of columns to preserve}
\item{Numeric vector:}{The indexes of columns to preserve}
\item{Vector of TRUE/FALSE of length equal to the number of columns:}{Preserve the columns corresponding to \code{TRUE} values.}}}
\item{out_names}{The names of count columns in the output. Must be the same
length as \code{cols} (or \code{unique(groups)}, if \code{groups} is used).}
}
\value{
A tibble
}
\description{
For a given table in a \code{\link[taxa]{taxmap}} object, rarefy counts to a constant total. This
is a wrapper around \code{\link[vegan]{rrarefy}} that automatically detects
which columns are numeric and handles the reformatting needed to use tibbles.
}
\examples{
\dontrun{
# Parse dataset for examples
x = parse_tax_data(hmp_otus, class_cols = "lineage", class_sep = ";",
class_key = c(tax_rank = "info", tax_name = "taxon_name"),
class_regex = "^(.+)__(.+)$")
# Rarefy all numeric columns
rarefy_obs(x, "tax_data")
# Rarefy a subset of columns
rarefy_obs(x, "tax_data", cols = c("700035949", "700097855", "700100489"))
rarefy_obs(x, "tax_data", cols = 4:6)
rarefy_obs(x, "tax_data", cols = startsWith(colnames(x$data$tax_data), "70001"))
# Including all other columns in output
rarefy_obs(x, "tax_data", other_cols = TRUE)
# Including specific columns in output
rarefy_obs(x, "tax_data", cols = c("700035949", "700097855", "700100489"),
other_cols = 2:3)
# Rename output columns
rarefy_obs(x, "tax_data", cols = c("700035949", "700097855", "700100489"),
out_names = c("a", "b", "c"))
}
}
\seealso{
Other calculations: \code{\link{calc_group_mean}},
\code{\link{calc_group_median}},
\code{\link{calc_group_rsd}},
\code{\link{calc_group_stat}},
\code{\link{calc_n_samples}},
\code{\link{calc_obs_props}},
\code{\link{calc_prop_samples}},
\code{\link{calc_taxon_abund}},
\code{\link{compare_groups}},
\code{\link{counts_to_presence}},
\code{\link{zero_low_counts}}
}
|
/man/rarefy_obs.Rd
|
permissive
|
agronomist/metacoder
|
R
| false
| true
| 3,398
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculations.R
\name{rarefy_obs}
\alias{rarefy_obs}
\title{Calculate rarefied observation counts}
\usage{
rarefy_obs(obj, dataset, sample_size = NULL, cols = NULL,
other_cols = FALSE, out_names = NULL)
}
\arguments{
\item{obj}{A \code{\link[taxa]{taxmap}} object}
\item{dataset}{The name of a table in \code{obj$data}.}
\item{sample_size}{The sample size counts will be rarefied to. This can be
either a single integer or a vector of integers of equal length to the
number of columns.}
\item{cols}{The columns in \code{dataset} to use. By
default, all numeric columns are used. Takes one of the following inputs:
\describe{
\item{TRUE/FALSE:}{All/No columns will be used.}
\item{Character vector:}{The names of columns to use} \item{Numeric vector:}{The indexes of
columns to use}
\item{Vector of TRUE/FALSE of length equal to the number of columns:}{Use the columns corresponding to \code{TRUE} values.} }}
\item{other_cols}{Preserve in the output non-target columns present in the
input data. New columns will always be on the end. The "taxon_id" column
will be preserved in the front. Takes one of the following inputs:
\describe{
\item{NULL:}{No columns will be added back, not even the taxon id column.}
\item{TRUE/FALSE:}{All/None of the non-target columns will be preserved.}
\item{Character vector:}{The names of columns to preserve}
\item{Numeric vector:}{The indexes of columns to preserve}
\item{Vector of TRUE/FALSE of length equal to the number of columns:}{Preserve the columns corresponding to \code{TRUE} values.}}}
\item{out_names}{The names of count columns in the output. Must be the same
length as \code{cols} (or \code{unique(groups)}, if \code{groups} is used).}
}
\value{
A tibble
}
\description{
For a given table in a \code{\link[taxa]{taxmap}} object, rarefy counts to a constant total. This
is a wrapper around \code{\link[vegan]{rrarefy}} that automatically detects
which columns are numeric and handles the reformatting needed to use tibbles.
}
\examples{
\dontrun{
# Parse dataset for examples
x = parse_tax_data(hmp_otus, class_cols = "lineage", class_sep = ";",
class_key = c(tax_rank = "info", tax_name = "taxon_name"),
class_regex = "^(.+)__(.+)$")
# Rarefy all numeric columns
rarefy_obs(x, "tax_data")
# Rarefy a subset of columns
rarefy_obs(x, "tax_data", cols = c("700035949", "700097855", "700100489"))
rarefy_obs(x, "tax_data", cols = 4:6)
rarefy_obs(x, "tax_data", cols = startsWith(colnames(x$data$tax_data), "70001"))
# Including all other columns in output
rarefy_obs(x, "tax_data", other_cols = TRUE)
# Including specific columns in output
rarefy_obs(x, "tax_data", cols = c("700035949", "700097855", "700100489"),
other_cols = 2:3)
# Rename output columns
rarefy_obs(x, "tax_data", cols = c("700035949", "700097855", "700100489"),
out_names = c("a", "b", "c"))
}
}
\seealso{
Other calculations: \code{\link{calc_group_mean}},
\code{\link{calc_group_median}},
\code{\link{calc_group_rsd}},
\code{\link{calc_group_stat}},
\code{\link{calc_n_samples}},
\code{\link{calc_obs_props}},
\code{\link{calc_prop_samples}},
\code{\link{calc_taxon_abund}},
\code{\link{compare_groups}},
\code{\link{counts_to_presence}},
\code{\link{zero_low_counts}}
}
|
###DEMO for text data analysis with R###
# lessons curated by Noushin Nabavi, PhD (adapted from Datacamp lessons for text analysis by Julia Silge)
# Load the packages used throughout the demo.
# FIX(review): the original script called read_csv() and ggplot() without
# ever loading readr/ggplot2, so it failed when run top-to-bottom; load
# them explicitly here.
library(dplyr)
library(tidytext)
library(tidyr)
library(readr)   # read_csv()
library(ggplot2) # ggplot() and friends used below
# can use 4 lexicons according to need for data analysis
# search using ??get_sentiments(): "afinn", "bing", "nrc", "loughran"
## e.g.: get_sentiments("bing")
# get sentiment_data (airline tweet sentiment CSV)
url <- "https://d1p17r2m4rzlbo.cloudfront.net/wp-content/uploads/2016/03/Airline-Sentiment-2-w-AA.csv"
sentiment_data <- read_csv(url)
# check built-in sentiments: share of tweets per sentiment label
percents <- sentiment_data %>%
  count(airline_sentiment) %>%
  rename(total_count = n) %>%
  mutate(percent = total_count / sum(total_count) * 100) %>%
  # Arrange by percent
  arrange(percent)
# tokenize text comments into one word per row and count word frequency
sentiment_data %>%
  unnest_tokens(word, text) %>%
  count(word, sort = TRUE)
# visualizing airline sentiments
# note: get_sentiments isn't plotted here
# FIX(review): the original assigned the whole pipeline -- which ended in
# a ggplot object -- to `tidy` and then passed that ggplot to lm(), which
# fails. Keep `tidy` as the counted data frame, print the plot
# separately, and model the data frame afterwards.
tidy <- sentiment_data %>%
  unnest_tokens(word, text) %>%
  left_join(percents, by = "airline_sentiment") %>%
  # Implement sentiment analysis with the "nrc" lexicon
  inner_join(get_sentiments("nrc")) %>%
  # Find how many sentiment words each song has
  count(word, airline_sentiment, sort = TRUE)
tidy %>%
  ggplot(aes(as.factor(airline_sentiment), n)) +
  # Make a boxplot
  geom_boxplot()
# modeling sentiments: word counts as a function of sentiment label
summary(lm(n ~ airline_sentiment, data = tidy))
# compare group_by() with ungroup(): mean judgement confidence per
# negative reason, computed with and without an explicit ungroup()
negative_sents1 <- sentiment_data %>%
  # Filter to only choose the words associated with sadness
  filter(airline_sentiment == "negative") %>%
  # Group by word
  group_by(negativereason) %>%
  # Use the summarize verb to find the mean frequency
  summarize(airline_sentiment_conf = mean(`airline_sentiment:confidence`)) %>%
  # Arrange to sort in order of descending frequency
  arrange(desc(airline_sentiment_conf))
# ungroup() variant
negative_sents2 <- sentiment_data %>%
  # Filter to only choose the words associated with sadness
  filter(airline_sentiment == "negative") %>%
  # Group by word
  group_by(negativereason) %>%
  # Use the summarize verb to find the mean frequency
  summarize(airline_sentiment_conf = mean(`airline_sentiment:confidence`)) %>%
  # Arrange to sort in order of descending frequency
  arrange(desc(airline_sentiment_conf)) %>%
  ungroup()
# compare plots1 and 2 with one another
# FIX(review): require() only returns FALSE when a package is missing;
# library() is the right call for a hard dependency.
library(gridExtra)
plot1 <- negative_sents1 %>%
  top_n(20) %>%
  mutate(word = reorder(negativereason, airline_sentiment_conf)) %>%
  ggplot(aes(negativereason, airline_sentiment_conf)) +
  geom_col() +
  coord_flip()
plot2 <- negative_sents2 %>%
  top_n(20) %>%
  mutate(word = reorder(negativereason, airline_sentiment_conf)) %>%
  ggplot(aes(negativereason, airline_sentiment_conf)) +
  geom_point() +
  coord_flip()
grid.arrange(plot1, plot2, ncol = 2)
#-------------------------------------------------------------------------------
# Sentiment changes through a text
# NOTE(review): tidy_shakespeare is not defined anywhere in this script;
# this chunk appears to be copied from another lesson and will error
# unless that object is loaded first -- confirm before running.
tidy_shakespeare %>%
  # Implement sentiment analysis using "bing" lexicon
  inner_join(get_sentiments("bing")) %>%
  # Count using four arguments
  count(title, type, index = linenumber %/% 70, sentiment)
#-------------------------------------------------------------------------------
# Calculating net sentiments (positive minus negative count per word)
tidy_sentiment_data <- sentiment_data %>%
  unnest_tokens(word, text) %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, airline_sentiment) %>%
  # Spread sentiment and n across multiple columns
  spread(sentiment, n, fill = 0) %>%
  # Use mutate to find net sentiment
  mutate(sentiment = positive - negative) %>%
  # Put index on x-axis, sentiment on y-axis, and map comedy/tragedy to fill
  ggplot(aes(word, sentiment, fill = airline_sentiment)) +
  # Make a bar chart with geom_col()
  geom_col() +
  # Separate panels with facet_wrap()
  facet_wrap(~ airline_sentiment, scales = "free_x")
#-------------------------------------------------------------------------------
# Visualizing sentiment over time
# Load the lubridate package
library(lubridate)
# sentiment change over time: percentage share of each sentiment label
# per judgement timestamp, drawn as one line per label
sentiment_data %>%
  # Define date for judgement
  mutate(date = mdy_hm(`_last_judgment_at`)) %>%
  #mutate(date = floor_date(date, unit = "6 months")) %>%
  # Group by date
  group_by(date) %>%
  #ungroup() %>%
  count(airline_sentiment) %>%
  rename(total_count = n) %>%
  mutate(percent = total_count / sum(total_count) * 100) %>%
  # Arrange by percent
  arrange(percent) %>%
  na.omit() %>%
  # Implement sentiment analysis using the NRC lexicon
  #inner_join(get_sentiments("nrc"))
  ggplot(aes(date, percent, color = airline_sentiment)) +
  geom_line(size = 1.5) +
  geom_smooth(method = "lm", se = FALSE, lty = 2) +
  expand_limits(y = 0)
# same computation again, but faceted into one panel per sentiment label
sentiment_data %>%
  # Define date for judgement
  mutate(date = mdy_hm(`_last_judgment_at`)) %>%
  #mutate(date = floor_date(date, unit = "6 months")) %>%
  # Group by date
  group_by(date) %>%
  #ungroup() %>%
  count(airline_sentiment) %>%
  rename(total_count = n) %>%
  mutate(percent = total_count / sum(total_count) * 100) %>%
  # Arrange by percent
  arrange(percent) %>%
  na.omit() %>%
  # Implement sentiment analysis using the NRC lexicon
  #inner_join(get_sentiments("nrc"))
  ggplot(aes(date, percent, color = airline_sentiment)) +
  # Make facets by word
  facet_wrap(~airline_sentiment) +
  geom_line(size = 1.5, show.legend = FALSE) +
  expand_limits(y = 0)
|
/Lessons/10. TEXT ANALYSIS/text_analysis_examples.R
|
no_license
|
NoushinN/stem-ed
|
R
| false
| false
| 5,407
|
r
|
###DEMO for text data analysis with R###
# lessons curated by Noushin Nabavi, PhD (adapted from Datacamp lessons for text analysis by Julia Silge)
# Load the packages used throughout the demo.
# FIX(review): the original script called read_csv() and ggplot() without
# ever loading readr/ggplot2, so it failed when run top-to-bottom; load
# them explicitly here.
library(dplyr)
library(tidytext)
library(tidyr)
library(readr)   # read_csv()
library(ggplot2) # ggplot() and friends used below
# can use 4 lexicons according to need for data analysis
# search using ??get_sentiments(): "afinn", "bing", "nrc", "loughran"
## e.g.: get_sentiments("bing")
# get sentiment_data (airline tweet sentiment CSV)
url <- "https://d1p17r2m4rzlbo.cloudfront.net/wp-content/uploads/2016/03/Airline-Sentiment-2-w-AA.csv"
sentiment_data <- read_csv(url)
# check built-in sentiments: share of tweets per sentiment label
percents <- sentiment_data %>%
  count(airline_sentiment) %>%
  rename(total_count = n) %>%
  mutate(percent = total_count / sum(total_count) * 100) %>%
  # Arrange by percent
  arrange(percent)
# tokenize text comments into one word per row and count word frequency
sentiment_data %>%
  unnest_tokens(word, text) %>%
  count(word, sort = TRUE)
# visualizing airline sentiments
# note: get_sentiments isn't plotted here
# FIX(review): the original assigned the whole pipeline -- which ended in
# a ggplot object -- to `tidy` and then passed that ggplot to lm(), which
# fails. Keep `tidy` as the counted data frame, print the plot
# separately, and model the data frame afterwards.
tidy <- sentiment_data %>%
  unnest_tokens(word, text) %>%
  left_join(percents, by = "airline_sentiment") %>%
  # Implement sentiment analysis with the "nrc" lexicon
  inner_join(get_sentiments("nrc")) %>%
  # Find how many sentiment words each song has
  count(word, airline_sentiment, sort = TRUE)
tidy %>%
  ggplot(aes(as.factor(airline_sentiment), n)) +
  # Make a boxplot
  geom_boxplot()
# modeling sentiments: word counts as a function of sentiment label
summary(lm(n ~ airline_sentiment, data = tidy))
# compare group_by() with ungroup(): mean judgement confidence per
# negative reason, computed with and without an explicit ungroup()
negative_sents1 <- sentiment_data %>%
  # Filter to only choose the words associated with sadness
  filter(airline_sentiment == "negative") %>%
  # Group by word
  group_by(negativereason) %>%
  # Use the summarize verb to find the mean frequency
  summarize(airline_sentiment_conf = mean(`airline_sentiment:confidence`)) %>%
  # Arrange to sort in order of descending frequency
  arrange(desc(airline_sentiment_conf))
# ungroup() variant
negative_sents2 <- sentiment_data %>%
  # Filter to only choose the words associated with sadness
  filter(airline_sentiment == "negative") %>%
  # Group by word
  group_by(negativereason) %>%
  # Use the summarize verb to find the mean frequency
  summarize(airline_sentiment_conf = mean(`airline_sentiment:confidence`)) %>%
  # Arrange to sort in order of descending frequency
  arrange(desc(airline_sentiment_conf)) %>%
  ungroup()
# compare plots1 and 2 with one another
# FIX(review): require() only returns FALSE when a package is missing;
# library() is the right call for a hard dependency.
library(gridExtra)
plot1 <- negative_sents1 %>%
  top_n(20) %>%
  mutate(word = reorder(negativereason, airline_sentiment_conf)) %>%
  ggplot(aes(negativereason, airline_sentiment_conf)) +
  geom_col() +
  coord_flip()
plot2 <- negative_sents2 %>%
  top_n(20) %>%
  mutate(word = reorder(negativereason, airline_sentiment_conf)) %>%
  ggplot(aes(negativereason, airline_sentiment_conf)) +
  geom_point() +
  coord_flip()
grid.arrange(plot1, plot2, ncol = 2)
#-------------------------------------------------------------------------------
# Sentiment changes through a text
# NOTE(review): tidy_shakespeare is not defined anywhere in this script;
# this chunk appears to be copied from another lesson and will error
# unless that object is loaded first -- confirm before running.
tidy_shakespeare %>%
  # Implement sentiment analysis using "bing" lexicon
  inner_join(get_sentiments("bing")) %>%
  # Count using four arguments
  count(title, type, index = linenumber %/% 70, sentiment)
#-------------------------------------------------------------------------------
# Calculating net sentiments (positive minus negative count per word)
tidy_sentiment_data <- sentiment_data %>%
  unnest_tokens(word, text) %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, airline_sentiment) %>%
  # Spread sentiment and n across multiple columns
  spread(sentiment, n, fill = 0) %>%
  # Use mutate to find net sentiment
  mutate(sentiment = positive - negative) %>%
  # Put index on x-axis, sentiment on y-axis, and map comedy/tragedy to fill
  ggplot(aes(word, sentiment, fill = airline_sentiment)) +
  # Make a bar chart with geom_col()
  geom_col() +
  # Separate panels with facet_wrap()
  facet_wrap(~ airline_sentiment, scales = "free_x")
#-------------------------------------------------------------------------------
# Visualizing sentiment over time
# Load the lubridate package
library(lubridate)
# sentiment change over time: percentage share of each sentiment label
# per judgement timestamp, drawn as one line per label
sentiment_data %>%
  # Define date for judgement
  mutate(date = mdy_hm(`_last_judgment_at`)) %>%
  #mutate(date = floor_date(date, unit = "6 months")) %>%
  # Group by date
  group_by(date) %>%
  #ungroup() %>%
  count(airline_sentiment) %>%
  rename(total_count = n) %>%
  mutate(percent = total_count / sum(total_count) * 100) %>%
  # Arrange by percent
  arrange(percent) %>%
  na.omit() %>%
  # Implement sentiment analysis using the NRC lexicon
  #inner_join(get_sentiments("nrc"))
  ggplot(aes(date, percent, color = airline_sentiment)) +
  geom_line(size = 1.5) +
  geom_smooth(method = "lm", se = FALSE, lty = 2) +
  expand_limits(y = 0)
# same computation again, but faceted into one panel per sentiment label
sentiment_data %>%
  # Define date for judgement
  mutate(date = mdy_hm(`_last_judgment_at`)) %>%
  #mutate(date = floor_date(date, unit = "6 months")) %>%
  # Group by date
  group_by(date) %>%
  #ungroup() %>%
  count(airline_sentiment) %>%
  rename(total_count = n) %>%
  mutate(percent = total_count / sum(total_count) * 100) %>%
  # Arrange by percent
  arrange(percent) %>%
  na.omit() %>%
  # Implement sentiment analysis using the NRC lexicon
  #inner_join(get_sentiments("nrc"))
  ggplot(aes(date, percent, color = airline_sentiment)) +
  # Make facets by word
  facet_wrap(~airline_sentiment) +
  geom_line(size = 1.5, show.legend = FALSE) +
  expand_limits(y = 0)
|
# Contact-process style stochastic simulation on N sites.
# For each birth rate beta, start with 10 occupied sites and run a
# Gillespie-style event loop until the population goes extinct,
# recording a (beta, time, population) row after every event.
#
# FIX(review): the original initialised `out <- NA`, which produced a
# bogus all-NA first row in the output file; it also grew `out` with
# rbind() inside the loop (O(n^2)), defined unused variables T/outcome/
# count (T = 5 additionally shadowed the TRUE shorthand), shadowed
# base::sum with a variable named `sum`, and relied on partial matching
# for write.table's `col.name` argument. All fixed below; the RNG call
# sequence is unchanged.
N <- 100
beta_values <- c(0.5, 1, 2) # try different beta values and simulate the growth curve
rows <- list() # one row per event; combined once at the end
for (j in seq_along(beta_values)) {
  beta <- beta_values[j]
  t <- 0
  sites <- rep(0, N)
  sites[1:10] <- 1 # seed the first 10 sites as occupied
  rows[[length(rows) + 1]] <- c(beta, t, sum(sites))
  pop <- sum(sites)
  while (pop > 0) {
    # Exponential waiting time to the next event (total rate (beta+1)*N)
    u <- runif(1, 0, 1)
    lambda <- (beta + 1) * N
    t <- t - log(u) / lambda # the next update time
    i <- sample(1:N, 1) # choose a site at random
    if (sites[i] == 1) {
      u <- runif(1, 0, 1)
      u <- u * (beta + 1)
      if (u < 1) {
        # individual at sites[i] dies (probability 1 / (beta + 1))
        sites[i] <- 0
      } else {
        # otherwise attempt a birth onto another randomly chosen site
        i <- sample(1:N, 1)
        if (sites[i] == 0) { # give birth only if the target site is empty
          sites[i] <- 1
        }
      }
    }
    pop <- sum(sites)
    rows[[length(rows) + 1]] <- c(beta, t, pop)
  }
}
out <- do.call(rbind, rows)
write.table(out, file = "J:\\out2.txt", quote = FALSE, row.names = FALSE, col.names = FALSE)
|
/APM541/R_v2.R
|
no_license
|
wduncan21/Classes
|
R
| false
| false
| 1,157
|
r
|
# Contact-process style stochastic simulation on N sites.
# For each birth rate beta, start with 10 occupied sites and run a
# Gillespie-style event loop until the population goes extinct,
# recording a (beta, time, population) row after every event.
#
# FIX(review): the original initialised `out <- NA`, which produced a
# bogus all-NA first row in the output file; it also grew `out` with
# rbind() inside the loop (O(n^2)), defined unused variables T/outcome/
# count (T = 5 additionally shadowed the TRUE shorthand), shadowed
# base::sum with a variable named `sum`, and relied on partial matching
# for write.table's `col.name` argument. All fixed below; the RNG call
# sequence is unchanged.
N <- 100
beta_values <- c(0.5, 1, 2) # try different beta values and simulate the growth curve
rows <- list() # one row per event; combined once at the end
for (j in seq_along(beta_values)) {
  beta <- beta_values[j]
  t <- 0
  sites <- rep(0, N)
  sites[1:10] <- 1 # seed the first 10 sites as occupied
  rows[[length(rows) + 1]] <- c(beta, t, sum(sites))
  pop <- sum(sites)
  while (pop > 0) {
    # Exponential waiting time to the next event (total rate (beta+1)*N)
    u <- runif(1, 0, 1)
    lambda <- (beta + 1) * N
    t <- t - log(u) / lambda # the next update time
    i <- sample(1:N, 1) # choose a site at random
    if (sites[i] == 1) {
      u <- runif(1, 0, 1)
      u <- u * (beta + 1)
      if (u < 1) {
        # individual at sites[i] dies (probability 1 / (beta + 1))
        sites[i] <- 0
      } else {
        # otherwise attempt a birth onto another randomly chosen site
        i <- sample(1:N, 1)
        if (sites[i] == 0) { # give birth only if the target site is empty
          sites[i] <- 1
        }
      }
    }
    pop <- sum(sites)
    rows[[length(rows) + 1]] <- c(beta, t, pop)
  }
}
out <- do.call(rbind, rows)
write.table(out, file = "J:\\out2.txt", quote = FALSE, row.names = FALSE, col.names = FALSE)
|
library(dplyr)
library(vcd)
library(vcdExtra)
# library(gam)
library(car)
library(effects)
expit <- function(x) exp(x)/ (1 + exp(x))
expit_prob <- function(x) c(expit(x), 1 - expit(x))
# Draw one Bernoulli outcome per logit and return the draws as a factor.
# Each element of lgt is turned into c(p, 1 - p) weights via expit_prob()
# and a single TRUE/FALSE is sampled with those weights.
# vapply() replaces sapply() for a type-stable logical result; the RNG
# call sequence (one sample() per element, in order) is unchanged.
random_binary_from_logits <- function(lgt) {
  factor(vapply(lgt, function(x) {
    sample(c(TRUE, FALSE),
      size = 1,
      prob = expit_prob(x)
    )
  }, logical(1)))
}
# Mosaic plot of faculty x hqual x grad x year with Friendly shading.
# Raw contingency tables get "deviance" residuals; anything else
# (e.g. a fitted model object) gets standardized residuals.
my_mosaic <- function(x) {
  # FIX(review): inherits() replaces class(x) == "table", which yields a
  # length > 1 condition (and a warning/error) for objects carrying more
  # than one class attribute.
  rtype <- if (inherits(x, "table")) {
    "deviance"
  } else {
    "rstandard"
  }
  mosaic(x,
    residuals_type = rtype,
    formula = ~ faculty + hqual + grad + year,
    gp = shading_Friendly2,
    rot_labels = c(0, -45, -45, 90),
    rot_varnames = c(0, -90, 0, 90),
    offset_labels = c(0, 0.5, 0, 0),
    offset_varnames = c(0, 1, 0, 0.5)
  )
}
# Plot a correspondence-analysis result and overlay segments from the
# origin to each row/column point so the directions are easier to read.
# NOTE(review): assumes plot(x) invisibly returns the row/column
# coordinates as $rows/$cols matrices (as plot.ca from the ca package
# does) -- confirm that x is a ca object before relying on this.
my_caplot <- function(x) {
  # Generate the plot
  res.ca <- plot(x)
  # add some segments from the origin to make things clearer
  # red solid lines: column points
  segments(0, 0, res.ca$cols[,1]
    , res.ca$cols[,2]
    , col = "red", lwd = 1)
  # blue dotted lines: row points
  segments(0, 0, res.ca$rows[,1]
    , res.ca$rows[,2]
    , col = "blue", lwd = 1.5, lty = 3)
}
# Categorical level definitions used when simulating the student data set.
year_intake <- c(2014, 2015) # intake cohort years
faculty_business <- c("fin", "law"
  , "mgmt", "hsp"
  , "hr") # faculty codes within the business school
nat <- c("dom", "int") # presumably domestic / international -- confirm
fin <- c("self", "spons") # presumably self-funded / sponsored -- confirm
genders <- c("F", "M")
quals <- c("dip", "dip-other", "hs", "hs-equiv") # highest prior qualification
acad_data <- function() {
set.seed(2024)
pois_parms <- expand.grid(year = year_intake
, faculty = faculty_business
, natmix = nat
, finance = fin
, gender = genders
, hqual = quals)
reps <- 2 # 2 per year
pois_parms <- pois_parms %>%
# main effects
mutate(lamb = ifelse(year == year_intake[1], 250, 300)
, lamb = ifelse(gender == genders[1]
, lamb * 0.92
, lamb * 0.90)
, lamb = ifelse(natmix == nat[1]
, lamb
, lamb * 0.15)
, lamb = ifelse(finance == fin[1]
, lamb
, lamb * 0.1)
# joint effects nat fin
, lamb = ifelse(natmix == nat[2] & finance == fin[2]
, lamb
, lamb * 0.7)
# joint effects nat year
, lamb = ifelse(natmix == nat[2] & year == year_intake[2]
, lamb * 0.7
, lamb)
# main effects
, lamb = ifelse(faculty == faculty_business[2]
, lamb * 0.8
, lamb)
, lamb = ifelse(faculty == faculty_business[3]
, lamb * 0.9
, lamb)
, lamb = ifelse(faculty == faculty_business[4]
, lamb * 0.3
, lamb)
, lamb = ifelse(faculty == faculty_business[5]
, lamb * 0.25
, lamb)
# joint year faculty
, lamb = ifelse(faculty == faculty_business[1] &
year == year_intake[1]
, lamb * 0.8
, lamb)
, lamb = ifelse(faculty == faculty_business[2] &
year == year_intake[1]
, lamb * 0.75
, lamb)
, lamb = ifelse(faculty == faculty_business[3] &
year == year_intake[1]
, lamb * 1.2
, lamb)
, lamb = ifelse(faculty == faculty_business[4] &
year == 1.1
, lamb * 1.1
, lamb)
# main effect
, lamb = ifelse(hqual == quals[2]
, lamb * 0.25
, lamb)
, lamb = ifelse(hqual == quals[3]
, lamb * 0.7
, lamb)
, lamb = ifelse(hqual == quals[4]
, lamb * 0.4
, lamb)
# joint effects quals and gender
# gender qual
, lamb = ifelse(gender == genders[1] &
hqual == quals[1]
, lamb * 0.8
, lamb)
, lamb = ifelse(gender == genders[1] &
hqual == quals[2]
, lamb * 0.7
, lamb)
, lamb = ifelse(gender == genders[2] &
faculty == faculty_business[1]
, lamb * 0.8
, lamb)
, lamb = ifelse(gender == genders[1] &
faculty == faculty_business[3]
, lamb * 0.8
, lamb)
, lamb = ifelse(gender == genders[2] &
faculty == faculty_business[5]
, lamb * 0.7
, lamb)
# joint effects quals and faculty
, lamb = ifelse(faculty == faculty_business[1] &
hqual == quals[1]
, lamb * 0.75
, lamb)
, lamb = ifelse(faculty == faculty_business[1] &
hqual == quals[2]
, lamb * 0.5
, lamb)
, lamb = ifelse(faculty == faculty_business[1] &
hqual == quals[3]
, lamb * 1.2
, lamb)
, lamb = ifelse(faculty == faculty_business[2] &
hqual == quals[3]
, lamb
, lamb * 0.9)
, lamb = ifelse(faculty == faculty_business[2] &
hqual == quals[4]
, lamb
, lamb * 0.8)
, lamb = ifelse(faculty == faculty_business[3] &
hqual == quals[1]
, lamb * 1.2
, lamb)
, lamb = ifelse(faculty == faculty_business[3] &
hqual == quals[2]
, lamb * 1.1
, lamb)
, lamb = ifelse(faculty == faculty_business[3] &
hqual == quals[3]
, lamb * 0.7
, lamb)
, lamb = ifelse(faculty == faculty_business[3] &
hqual == quals[4]
, lamb * 0.8
, lamb)
, lamb = ifelse(faculty == faculty_business[4] &
hqual == quals[1]
, lamb * 1.2
, lamb)
, lamb = ifelse(faculty == faculty_business[4] &
hqual == quals[2]
, lamb * 1.1
, lamb)
, lamb = ifelse(faculty == faculty_business[4] &
hqual == quals[3]
, lamb * 0.7
, lamb)
, lamb = ifelse(faculty == faculty_business[4] &
hqual == quals[4]
, lamb * 0.5
, lamb)
, lamb = ifelse(faculty == faculty_business[5] &
hqual == quals[1]
, lamb * 0.95
, lamb)
, lamb = ifelse(faculty == faculty_business[5] &
hqual == quals[2]
, lamb * 0.9
, lamb)
, lamb = ifelse(faculty == faculty_business[5] &
hqual == quals[3]
, lamb * 0.75
, lamb)
, lamb = ifelse(faculty == faculty_business[5] &
hqual == quals[4]
, lamb * 0.7
, lamb)
# joint effects year and quals and faculty
, lamb = ifelse(faculty == faculty_business[1] &
hqual == quals[1] &
year == year_intake[1]
, lamb * 0.8
, lamb)
, lamb = ifelse(faculty == faculty_business[1] &
hqual == quals[2] &
year == year_intake[1]
, lamb * 0.85
, lamb)
, lamb = ifelse(faculty == faculty_business[1] &
hqual == quals[3] &
year == year_intake[1]
, lamb * 1.2
, lamb)
, lamb = ifelse(faculty == faculty_business[2] &
hqual == quals[1] &
year == year_intake[1]
, lamb * 0.95
, lamb)
, lamb = ifelse(faculty == faculty_business[2] &
hqual == quals[2] &
year == year_intake[1]
, lamb * 0.9
, lamb)
# joint effects natmix, finance and qual
, lamb = ifelse(natmix == nat[2] &
hqual == quals[1]
, lamb * 0.6
, lamb)
, lamb = ifelse(natmix == nat[2] &
hqual == quals[2]
, lamb * 0.2
, lamb)
, lamb = ifelse(natmix == nat[2] &
hqual == quals[4]
, lamb * 1.1
, lamb)
, lamb = ifelse(finance == fin[2] &
hqual == quals[1]
, lamb * 0.3
, lamb)
, lamb = ifelse(finance == fin[2] &
hqual == quals[2]
, lamb * 0.1
, lamb)
, lamb = ifelse(finance == fin[2] &
hqual == quals[4]
, lamb * 1.2
, lamb)
, lamb = ifelse(natmix == nat[2] &
faculty == faculty_business[1]
, lamb * 0.7
, lamb)
, lamb = ifelse(natmix == nat[2] &
faculty == faculty_business[2]
, lamb * 0.75
, lamb)
, lamb = ifelse(natmix == nat[2] &
faculty == faculty_business[3]
, lamb * 1.3
, lamb)
, lamb = ifelse(finance == fin[2] &
hqual %in% quals[3:4] &
faculty %in% faculty_business[1:2]
, lamb * 1.3
, lamb)
, lamb = ifelse(finance == fin[1] &
hqual %in% quals[3:4] &
faculty %in% faculty_business[1:2]
, lamb * 1.1
, lamb)
, lamb = ifelse(finance == fin[2] &
hqual %in% quals[3:4] &
faculty %in% faculty_business[3]
, lamb * 1.4
, lamb)
, lamb = ifelse(natmix == nat[2] &
hqual %in% quals[2] &
faculty %in% faculty_business[4:5]
, lamb * 0.7
, lamb)
, lamb = ifelse(natmix == nat[1] &
hqual %in% quals[1:2] &
faculty %in% faculty_business[3:4]
, lamb * 1.4
, lamb)
)
students <- 1:nrow(pois_parms) # minimum 1
for (i in 1:nrow(pois_parms)) {
students <- c(students
, rep(i, sum(rpois(reps
, pois_parms$lamb[i]))))
}
Freq <- table(students)
students <- cbind(pois_parms, Freq)
students <- expand.dft(students)
n <- nrow(students)
students <- students %>%
mutate(year = factor(year)
, logit_abs_rate = -3.2 +
0.1 * (finance == fin[1]) +
-0.2 * (finance == fin[2]) +
0.1 * (natmix == nat[1]) +
-0.2 * (natmix == nat[2]) +
0.2 * (hqual == quals[1]) +
0.25 * (hqual == quals[2]) +
0.075 * (hqual == quals[4]) +
-0.05 * (gender == genders[1]) +
0.15 * (gender == genders[2]) +
0.15 * (faculty == faculty_business[3]) +
0.2 * (faculty == faculty_business[4]) +
0.05 * (faculty == faculty_business[5]) +
0.2 * (finance == fin[1] & natmix == nat[1]) +
0.1 * (hqual %in% quals[1:2] & gender == genders[2]) +
0.05 * (faculty == faculty_business[3] & gender == genders[2]) +
0.075 * (faculty == faculty_business[4] & gender == genders[2]) +
0.05 * (faculty == faculty_business[2] & gender == genders[2] & finance == fin[1]) +
-0.05 * (faculty == faculty_business[5] & gender == genders[1] & finance == fin[1]) +
rnorm(n, sd = 0.15) +
sample(c(TRUE, FALSE) # create some outliers
, size = n
, prob = expit_prob(-5.5) # 0.005, 0.995
, replace = TRUE) * runif(n, 0, 2.5) +
(sample(c(TRUE, FALSE) # create some outliers, based on gender (more females had caring at home)
, size = n
, prob = expit_prob(-4.5) # 0.01, 0.99
, replace = TRUE) & gender == genders[1]) * runif(n, 0, 1)
, abs_rate = expit(logit_abs_rate)
, logit_t1_success = 2 +
(logit_abs_rate + abs(min(logit_abs_rate))) * -0.2 +
0.05 * (gender == genders[1]) +
0.22 * (faculty == faculty_business[3]) +
0.15 * (faculty == faculty_business[5]) +
0.05 * (faculty == faculty_business[5] & gender == genders[1]) +
0.05 * (hqual == quals[3]) +
-0.05 * (hqual == quals[2]) +
0.05 * (hqual == quals[4]) +
0.1 * (finance == fin[1] & natmix == nat[2]) +
0.1 * (finance == fin[2] & natmix == nat[1]) +
rnorm(n, sd = 0.15) +
sample(c(TRUE, FALSE) # create some outliers
, size = n
, prob = c(0.01, 0.99)
, replace = TRUE) * runif(n, 0, 0.5)
, t1_success = random_binary_from_logits(logit_t1_success)
, logit_withdraw = -3.5 +
(logit_abs_rate + abs(min(logit_abs_rate))) * 0.25 +
0.3 * (t1_success == FALSE) +
0.15 * (natmix == nat[2]) +
0.2 * (gender == genders[2]) +
0.05 * (faculty == faculty_business[2]) +
0.1 * (faculty == faculty_business[3]) +
0.15 * (faculty == faculty_business[1]) +
0.05 * (faculty == faculty_business[5] & gender == genders[2]) +
0.05 * (hqual == quals[1]) +
0.1 * (hqual == quals[2]) +
0.1 * (finance == fin[1] & natmix == nat[2]) +
rnorm(n, sd = 0.15) +
sample(c(TRUE, FALSE) # create some outliers
, size = n
, prob = c(0.01, 0.99)
, replace = TRUE) * runif(n, 0, 2)
, withdraw = random_binary_from_logits(logit_withdraw)
, logit_defer = -3 +
(logit_abs_rate + abs(min(logit_abs_rate))) * 0.25 +
0.1 * (t1_success == FALSE) +
0.2 * (natmix == nat[2]) +
-0.15 * (gender == genders[1]) +
-0.1 * (faculty == faculty_business[2]) +
-0.15 * (faculty == faculty_business[1]) +
-0.1 * (faculty == faculty_business[5] & gender == genders[1]) +
-0.05 * (hqual == quals[1]) +
-0.075 * (hqual == quals[2]) +
-0.1 * (finance == fin[1] & natmix == nat[2]) +
-2 * (year == year_intake[1]) + # most of the earlier year defferals have passed through by now
rnorm(n, sd = 0.15) +
sample(c(TRUE, FALSE) # create some outliers
, size = n
, prob = expit_prob(-3.5) # 0.02, 0.98
, replace = TRUE) * runif(n, -1, 1) +
(sample(c(TRUE, FALSE) # create some outliers, based on gender (more females had caring at home)
, size = n
, prob = expit_prob(-3.5) # 0.02, 0.98
, replace = TRUE) & gender == genders[1]) * runif(n, 0, 1)
, defer = random_binary_from_logits(logit_defer)
, logit_outcome = expit(1.1) + # 65% baseline
(logit_abs_rate + abs(min(logit_abs_rate))) * -0.25 +
0.125 * (t1_success == TRUE) +
-0.125 * (t1_success == FALSE) +
0.1 * (natmix == nat[2]) +
-0.05 * (natmix == nat[1]) +
0.025 * (gender == genders[1]) +
-0.025 * (gender == genders[2]) +
0.15 * (faculty == faculty_business[4]) +
0.1 * (faculty == faculty_business[3]) +
-0.05 * (faculty == faculty_business[2]) +
-0.1 * (faculty == faculty_business[1]) +
0.05 * (faculty == faculty_business[5] & gender == genders[2]) +
-0.1 * (faculty == faculty_business[1] & gender == genders[2]) +
0.075 * (hqual == quals[3]) +
0.05 * (hqual == quals[4]) +
-0.05 * (hqual == quals[2]) +
0.1 * (finance == fin[2]) +
0.05 * (natmix == nat[2]) +
-0.1 * (finance == fin[1]) +
-0.1 * (natmix == nat[1]) +
0.05 * (finance == fin[2] & natmix == nat[2]) +
0.05 * ((faculty == faculty_business[4] | faculty == faculty_business[3]) & gender == genders[1]) +
-0.075 * (faculty == faculty_business[2] & hqual == quals[1]) +
-0.1 * (faculty == faculty_business[2] & hqual == quals[2]) +
-0.1 * (faculty == faculty_business[1] & hqual %in% quals[1:2]) +
0.15 * (faculty == faculty_business[1] & hqual == quals[3]) +
0.1 * (faculty == faculty_business[1] & hqual == quals[4]) +
0.025 * (faculty == faculty_business[5] & gender == genders[1] & finance == fin[1]) +
rnorm(n, sd = 0.15) +
sample(c(TRUE, FALSE) # create some outliers
, size = n
, prob = c(0.01, 0.99)
, replace = TRUE) * runif(n, -2, 2)
, outcome = factor(case_when(withdraw == TRUE ~ "wthdr"
# , defer == TRUE ~ "defer"
, expit(logit_outcome) > 0.70 ~ "dist"
, expit(logit_outcome) > 0.63 ~ "merit"
, expit(logit_outcome) > 0.50 ~ "pass"
, TRUE ~ "fail"
))
, grad = factor(ifelse(outcome %in% c("pass", "merit", "dist")
, TRUE, FALSE))
)
return(students)
}
# Generate the simulated student records, then restrict the analysis data set
# to the finance, law and management faculties.
students <- acad_data()
sts <- students[students$faculty %in% faculty_business[1:3], ]
# re-factor to drop the now-unused faculty levels
sts$faculty <- factor(sts$faculty)
# keep only the modelling variables
sts <- sts[, c("year", "faculty", "hqual"
               , "natmix", "finance", "gender"
               , "t1_success", "abs_rate"
               , "outcome", "grad")]
|
/acad_data.R
|
no_license
|
julianhatwell/DDAR
|
R
| false
| false
| 20,398
|
r
|
library(dplyr)
library(vcd)
library(vcdExtra)
# library(gam)
library(car)
library(effects)
expit <- function(x) exp(x)/ (1 + exp(x))
expit_prob <- function(x) c(expit(x), 1 - expit(x))
# Draw one TRUE/FALSE per element of lgt, with P(TRUE) = expit(lgt[i]),
# and return the draws as a factor.
random_binary_from_logits <- function(lgt) {
  draws <- vapply(lgt, function(x) {
    sample(c(TRUE, FALSE)
           , size = 1
           , prob = expit_prob(x))
  }, logical(1))
  factor(draws)
}
# Friendly-shaded mosaic plot of faculty x hqual x grad x year.
# Accepts either a contingency table (plotted with deviance residuals) or a
# fitted model object (plotted with standardized residuals).
my_mosaic <- function(x) {
  # inherits() is the robust class test; class(x) == "table" fails when the
  # object carries more than one class attribute.
  rtype <- if (inherits(x, "table")) {"deviance" }
  else {"rstandard"}
  mosaic(x
         , residuals_type = rtype
         , formula = ~ faculty + hqual + grad + year
         , gp = shading_Friendly2
         , rot_labels = c(0, -45, -45, 90)
         , rot_varnames = c(0, -90, 0, 90)
         , offset_labels = c(0, 0.5, 0, 0)
         , offset_varnames = c(0, 1, 0, 0.5))
}
# Plot a correspondence analysis solution and add guide segments from the
# origin to each column point (red, solid) and row point (blue, dotted).
my_caplot <- function(x) {
  # plot() returns the plotted coordinates invisibly
  ca.map <- plot(x)
  # segments from the origin to the column points
  segments(0, 0, ca.map$cols[, 1]
           , ca.map$cols[, 2]
           , col = "red", lwd = 1)
  # segments from the origin to the row points
  segments(0, 0, ca.map$rows[, 1]
           , ca.map$rows[, 2]
           , col = "blue", lwd = 1.5, lty = 3)
}
# Simulation design levels (shared by acad_data() and the analysis below)
year_intake <- c(2014, 2015)  # two intake cohorts
faculty_business <- c("fin", "law"
                      , "mgmt", "hsp"
                      , "hr")  # business-school faculties
nat <- c("dom", "int")         # nationality mix: domestic / international
fin <- c("self", "spons")      # funding: self-funded / sponsored
genders <- c("F", "M")
quals <- c("dip", "dip-other", "hs", "hs-equiv")  # highest prior qualification
# Simulate a synthetic student-records data set for categorical data analysis.
# Cell counts are drawn from Poisson distributions whose rates (lamb) encode
# main and joint effects of year, faculty, nationality mix, finance, gender
# and highest qualification; individual-level outcomes (absence rate, term-1
# success, withdrawal, deferral, final outcome, graduation) are then generated
# on the logit scale with Gaussian noise and injected outliers.
# Reproducible via set.seed(2024); returns a data frame with one row per
# simulated student. Requires dplyr and vcdExtra::expand.dft.
acad_data <- function() {
  set.seed(2024)
  # one Poisson rate per design cell
  pois_parms <- expand.grid(year = year_intake
                            , faculty = faculty_business
                            , natmix = nat
                            , finance = fin
                            , gender = genders
                            , hqual = quals)
  reps <- 2 # 2 per year
  pois_parms <- pois_parms %>%
    # main effects
    mutate(lamb = ifelse(year == year_intake[1], 250, 300)
           , lamb = ifelse(gender == genders[1]
                           , lamb * 0.92
                           , lamb * 0.90)
           , lamb = ifelse(natmix == nat[1]
                           , lamb
                           , lamb * 0.15)
           , lamb = ifelse(finance == fin[1]
                           , lamb
                           , lamb * 0.1)
           # joint effects nat fin
           , lamb = ifelse(natmix == nat[2] & finance == fin[2]
                           , lamb
                           , lamb * 0.7)
           # joint effects nat year
           , lamb = ifelse(natmix == nat[2] & year == year_intake[2]
                           , lamb * 0.7
                           , lamb)
           # main effects
           , lamb = ifelse(faculty == faculty_business[2]
                           , lamb * 0.8
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[3]
                           , lamb * 0.9
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[4]
                           , lamb * 0.3
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[5]
                           , lamb * 0.25
                           , lamb)
           # joint year faculty
           , lamb = ifelse(faculty == faculty_business[1] &
                             year == year_intake[1]
                           , lamb * 0.8
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[2] &
                             year == year_intake[1]
                           , lamb * 0.75
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[3] &
                             year == year_intake[1]
                           , lamb * 1.2
                           , lamb)
           # fix: was `year == 1.1`, which is never TRUE (year is 2014/2015),
           # so this joint effect was silently dropped; the intended
           # condition, matching the pattern of the blocks above, is
           # year_intake[1]
           , lamb = ifelse(faculty == faculty_business[4] &
                             year == year_intake[1]
                           , lamb * 1.1
                           , lamb)
           # main effect
           , lamb = ifelse(hqual == quals[2]
                           , lamb * 0.25
                           , lamb)
           , lamb = ifelse(hqual == quals[3]
                           , lamb * 0.7
                           , lamb)
           , lamb = ifelse(hqual == quals[4]
                           , lamb * 0.4
                           , lamb)
           # joint effects quals and gender
           # gender qual
           , lamb = ifelse(gender == genders[1] &
                             hqual == quals[1]
                           , lamb * 0.8
                           , lamb)
           , lamb = ifelse(gender == genders[1] &
                             hqual == quals[2]
                           , lamb * 0.7
                           , lamb)
           , lamb = ifelse(gender == genders[2] &
                             faculty == faculty_business[1]
                           , lamb * 0.8
                           , lamb)
           , lamb = ifelse(gender == genders[1] &
                             faculty == faculty_business[3]
                           , lamb * 0.8
                           , lamb)
           , lamb = ifelse(gender == genders[2] &
                             faculty == faculty_business[5]
                           , lamb * 0.7
                           , lamb)
           # joint effects quals and faculty
           , lamb = ifelse(faculty == faculty_business[1] &
                             hqual == quals[1]
                           , lamb * 0.75
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[1] &
                             hqual == quals[2]
                           , lamb * 0.5
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[1] &
                             hqual == quals[3]
                           , lamb * 1.2
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[2] &
                             hqual == quals[3]
                           , lamb
                           , lamb * 0.9)
           , lamb = ifelse(faculty == faculty_business[2] &
                             hqual == quals[4]
                           , lamb
                           , lamb * 0.8)
           , lamb = ifelse(faculty == faculty_business[3] &
                             hqual == quals[1]
                           , lamb * 1.2
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[3] &
                             hqual == quals[2]
                           , lamb * 1.1
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[3] &
                             hqual == quals[3]
                           , lamb * 0.7
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[3] &
                             hqual == quals[4]
                           , lamb * 0.8
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[4] &
                             hqual == quals[1]
                           , lamb * 1.2
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[4] &
                             hqual == quals[2]
                           , lamb * 1.1
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[4] &
                             hqual == quals[3]
                           , lamb * 0.7
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[4] &
                             hqual == quals[4]
                           , lamb * 0.5
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[5] &
                             hqual == quals[1]
                           , lamb * 0.95
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[5] &
                             hqual == quals[2]
                           , lamb * 0.9
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[5] &
                             hqual == quals[3]
                           , lamb * 0.75
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[5] &
                             hqual == quals[4]
                           , lamb * 0.7
                           , lamb)
           # joint effects year and quals and faculty
           , lamb = ifelse(faculty == faculty_business[1] &
                             hqual == quals[1] &
                             year == year_intake[1]
                           , lamb * 0.8
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[1] &
                             hqual == quals[2] &
                             year == year_intake[1]
                           , lamb * 0.85
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[1] &
                             hqual == quals[3] &
                             year == year_intake[1]
                           , lamb * 1.2
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[2] &
                             hqual == quals[1] &
                             year == year_intake[1]
                           , lamb * 0.95
                           , lamb)
           , lamb = ifelse(faculty == faculty_business[2] &
                             hqual == quals[2] &
                             year == year_intake[1]
                           , lamb * 0.9
                           , lamb)
           # joint effects natmix, finance and qual
           , lamb = ifelse(natmix == nat[2] &
                             hqual == quals[1]
                           , lamb * 0.6
                           , lamb)
           , lamb = ifelse(natmix == nat[2] &
                             hqual == quals[2]
                           , lamb * 0.2
                           , lamb)
           , lamb = ifelse(natmix == nat[2] &
                             hqual == quals[4]
                           , lamb * 1.1
                           , lamb)
           , lamb = ifelse(finance == fin[2] &
                             hqual == quals[1]
                           , lamb * 0.3
                           , lamb)
           , lamb = ifelse(finance == fin[2] &
                             hqual == quals[2]
                           , lamb * 0.1
                           , lamb)
           , lamb = ifelse(finance == fin[2] &
                             hqual == quals[4]
                           , lamb * 1.2
                           , lamb)
           , lamb = ifelse(natmix == nat[2] &
                             faculty == faculty_business[1]
                           , lamb * 0.7
                           , lamb)
           , lamb = ifelse(natmix == nat[2] &
                             faculty == faculty_business[2]
                           , lamb * 0.75
                           , lamb)
           , lamb = ifelse(natmix == nat[2] &
                             faculty == faculty_business[3]
                           , lamb * 1.3
                           , lamb)
           , lamb = ifelse(finance == fin[2] &
                             hqual %in% quals[3:4] &
                             faculty %in% faculty_business[1:2]
                           , lamb * 1.3
                           , lamb)
           , lamb = ifelse(finance == fin[1] &
                             hqual %in% quals[3:4] &
                             faculty %in% faculty_business[1:2]
                           , lamb * 1.1
                           , lamb)
           , lamb = ifelse(finance == fin[2] &
                             hqual %in% quals[3:4] &
                             faculty %in% faculty_business[3]
                           , lamb * 1.4
                           , lamb)
           , lamb = ifelse(natmix == nat[2] &
                             hqual %in% quals[2] &
                             faculty %in% faculty_business[4:5]
                           , lamb * 0.7
                           , lamb)
           , lamb = ifelse(natmix == nat[1] &
                             hqual %in% quals[1:2] &
                             faculty %in% faculty_business[3:4]
                           , lamb * 1.4
                           , lamb)
    )
  # Draw the cell counts: each design cell is seeded with one student
  # (minimum 1), then receives `reps` Poisson draws.
  students <- seq_len(nrow(pois_parms)) # minimum 1
  for (i in seq_len(nrow(pois_parms))) {
    students <- c(students
                  , rep(i, sum(rpois(reps
                                     , pois_parms$lamb[i]))))
  }
  Freq <- table(students)
  students <- cbind(pois_parms, Freq)
  # expand frequency table to one row per student (vcdExtra)
  students <- expand.dft(students)
  n <- nrow(students)
  # Individual-level responses, each built as a logit-scale linear predictor
  # plus noise and a sprinkling of outliers.
  students <- students %>%
    mutate(year = factor(year)
           , logit_abs_rate = -3.2 +
             0.1 * (finance == fin[1]) +
             -0.2 * (finance == fin[2]) +
             0.1 * (natmix == nat[1]) +
             -0.2 * (natmix == nat[2]) +
             0.2 * (hqual == quals[1]) +
             0.25 * (hqual == quals[2]) +
             0.075 * (hqual == quals[4]) +
             -0.05 * (gender == genders[1]) +
             0.15 * (gender == genders[2]) +
             0.15 * (faculty == faculty_business[3]) +
             0.2 * (faculty == faculty_business[4]) +
             0.05 * (faculty == faculty_business[5]) +
             0.2 * (finance == fin[1] & natmix == nat[1]) +
             0.1 * (hqual %in% quals[1:2] & gender == genders[2]) +
             0.05 * (faculty == faculty_business[3] & gender == genders[2]) +
             0.075 * (faculty == faculty_business[4] & gender == genders[2]) +
             0.05 * (faculty == faculty_business[2] & gender == genders[2] & finance == fin[1]) +
             -0.05 * (faculty == faculty_business[5] & gender == genders[1] & finance == fin[1]) +
             rnorm(n, sd = 0.15) +
             sample(c(TRUE, FALSE) # create some outliers
                    , size = n
                    , prob = expit_prob(-5.5) # 0.005, 0.995
                    , replace = TRUE) * runif(n, 0, 2.5) +
             (sample(c(TRUE, FALSE) # create some outliers, based on gender (more females had caring at home)
                     , size = n
                     , prob = expit_prob(-4.5) # 0.01, 0.99
                     , replace = TRUE) & gender == genders[1]) * runif(n, 0, 1)
           , abs_rate = expit(logit_abs_rate)
           , logit_t1_success = 2 +
             (logit_abs_rate + abs(min(logit_abs_rate))) * -0.2 +
             0.05 * (gender == genders[1]) +
             0.22 * (faculty == faculty_business[3]) +
             0.15 * (faculty == faculty_business[5]) +
             0.05 * (faculty == faculty_business[5] & gender == genders[1]) +
             0.05 * (hqual == quals[3]) +
             -0.05 * (hqual == quals[2]) +
             0.05 * (hqual == quals[4]) +
             0.1 * (finance == fin[1] & natmix == nat[2]) +
             0.1 * (finance == fin[2] & natmix == nat[1]) +
             rnorm(n, sd = 0.15) +
             sample(c(TRUE, FALSE) # create some outliers
                    , size = n
                    , prob = c(0.01, 0.99)
                    , replace = TRUE) * runif(n, 0, 0.5)
           , t1_success = random_binary_from_logits(logit_t1_success)
           , logit_withdraw = -3.5 +
             (logit_abs_rate + abs(min(logit_abs_rate))) * 0.25 +
             0.3 * (t1_success == FALSE) +
             0.15 * (natmix == nat[2]) +
             0.2 * (gender == genders[2]) +
             0.05 * (faculty == faculty_business[2]) +
             0.1 * (faculty == faculty_business[3]) +
             0.15 * (faculty == faculty_business[1]) +
             0.05 * (faculty == faculty_business[5] & gender == genders[2]) +
             0.05 * (hqual == quals[1]) +
             0.1 * (hqual == quals[2]) +
             0.1 * (finance == fin[1] & natmix == nat[2]) +
             rnorm(n, sd = 0.15) +
             sample(c(TRUE, FALSE) # create some outliers
                    , size = n
                    , prob = c(0.01, 0.99)
                    , replace = TRUE) * runif(n, 0, 2)
           , withdraw = random_binary_from_logits(logit_withdraw)
           , logit_defer = -3 +
             (logit_abs_rate + abs(min(logit_abs_rate))) * 0.25 +
             0.1 * (t1_success == FALSE) +
             0.2 * (natmix == nat[2]) +
             -0.15 * (gender == genders[1]) +
             -0.1 * (faculty == faculty_business[2]) +
             -0.15 * (faculty == faculty_business[1]) +
             -0.1 * (faculty == faculty_business[5] & gender == genders[1]) +
             -0.05 * (hqual == quals[1]) +
             -0.075 * (hqual == quals[2]) +
             -0.1 * (finance == fin[1] & natmix == nat[2]) +
             -2 * (year == year_intake[1]) + # most of the earlier year deferrals have passed through by now
             rnorm(n, sd = 0.15) +
             sample(c(TRUE, FALSE) # create some outliers
                    , size = n
                    , prob = expit_prob(-3.5) # 0.02, 0.98
                    , replace = TRUE) * runif(n, -1, 1) +
             (sample(c(TRUE, FALSE) # create some outliers, based on gender (more females had caring at home)
                     , size = n
                     , prob = expit_prob(-3.5) # 0.02, 0.98
                     , replace = TRUE) & gender == genders[1]) * runif(n, 0, 1)
           , defer = random_binary_from_logits(logit_defer)
           # NOTE(review): expit(1.1) ~= 0.75 is used here as a logit-scale
           # intercept; the original "65% baseline" note does not quite match
           # (possibly logit(0.65) was intended) — confirm before reuse.
           , logit_outcome = expit(1.1) + # 65% baseline
             (logit_abs_rate + abs(min(logit_abs_rate))) * -0.25 +
             0.125 * (t1_success == TRUE) +
             -0.125 * (t1_success == FALSE) +
             0.1 * (natmix == nat[2]) +
             -0.05 * (natmix == nat[1]) +
             0.025 * (gender == genders[1]) +
             -0.025 * (gender == genders[2]) +
             0.15 * (faculty == faculty_business[4]) +
             0.1 * (faculty == faculty_business[3]) +
             -0.05 * (faculty == faculty_business[2]) +
             -0.1 * (faculty == faculty_business[1]) +
             0.05 * (faculty == faculty_business[5] & gender == genders[2]) +
             -0.1 * (faculty == faculty_business[1] & gender == genders[2]) +
             0.075 * (hqual == quals[3]) +
             0.05 * (hqual == quals[4]) +
             -0.05 * (hqual == quals[2]) +
             0.1 * (finance == fin[2]) +
             0.05 * (natmix == nat[2]) +
             -0.1 * (finance == fin[1]) +
             -0.1 * (natmix == nat[1]) +
             0.05 * (finance == fin[2] & natmix == nat[2]) +
             0.05 * ((faculty == faculty_business[4] | faculty == faculty_business[3]) & gender == genders[1]) +
             -0.075 * (faculty == faculty_business[2] & hqual == quals[1]) +
             -0.1 * (faculty == faculty_business[2] & hqual == quals[2]) +
             -0.1 * (faculty == faculty_business[1] & hqual %in% quals[1:2]) +
             0.15 * (faculty == faculty_business[1] & hqual == quals[3]) +
             0.1 * (faculty == faculty_business[1] & hqual == quals[4]) +
             0.025 * (faculty == faculty_business[5] & gender == genders[1] & finance == fin[1]) +
             rnorm(n, sd = 0.15) +
             sample(c(TRUE, FALSE) # create some outliers
                    , size = n
                    , prob = c(0.01, 0.99)
                    , replace = TRUE) * runif(n, -2, 2)
           # withdrawal overrides the graded outcome; deferral is currently
           # recorded but not mapped to a separate outcome level
           , outcome = factor(case_when(withdraw == TRUE ~ "wthdr"
                                        # , defer == TRUE ~ "defer"
                                        , expit(logit_outcome) > 0.70 ~ "dist"
                                        , expit(logit_outcome) > 0.63 ~ "merit"
                                        , expit(logit_outcome) > 0.50 ~ "pass"
                                        , TRUE ~ "fail"
           ))
           , grad = factor(ifelse(outcome %in% c("pass", "merit", "dist")
                                  , TRUE, FALSE))
    )
  return(students)
}
# Generate the simulated student records, then restrict the analysis data set
# to the finance, law and management faculties.
students <- acad_data()
sts <- students[students$faculty %in% faculty_business[1:3], ]
# re-factor to drop the now-unused faculty levels
sts$faculty <- factor(sts$faculty)
# keep only the modelling variables
sts <- sts[, c("year", "faculty", "hqual"
               , "natmix", "finance", "gender"
               , "t1_success", "abs_rate"
               , "outcome", "grad")]
|
# Ad-hoc exercise of OhdsiRTools helpers against a live WebAPI instance.
# The WebAPI endpoint is read from the `baseUrl` environment variable; these
# calls perform network I/O and are intended for interactive testing only.
insertCohortDefinitionInPackage(definitionId = 5021,
                                name = "Test",
                                baseUrl = Sys.getenv("baseUrl"))
# WebAPI functions -----------------------------------------------------------
getCohortDefinitionName(baseUrl = Sys.getenv("baseUrl"), definitionId = 5021)
getConceptSetName(baseUrl = Sys.getenv("baseUrl"), setId = 12)
getPriorityVocabKey(baseUrl = Sys.getenv("baseUrl"))
getConceptSetConceptIds(baseUrl = Sys.getenv("baseUrl"), setId = 12)
# R environment snapshot ------------------------------------------------------
# snapshot <- read.csv("c:/temp/rEnvironmentSnapshot.csv")
# restoreEnvironment(snapshot)
restoreEnvironmentFromPackageOnGithub("OHDSI/Legend")
# Build an renv lock file for the Eumaeus package's dependency tree
createRenvLockFile(rootPackage = "Eumaeus",
                   includeRootPackage = FALSE,
                   additionalRequiredPackages = "keyring")
|
/extras/TestCode.R
|
permissive
|
anthonysena/OhdsiRTools
|
R
| false
| false
| 899
|
r
|
# Ad-hoc exercise of OhdsiRTools helpers against a live WebAPI instance.
# The WebAPI endpoint is read from the `baseUrl` environment variable; these
# calls perform network I/O and are intended for interactive testing only.
insertCohortDefinitionInPackage(definitionId = 5021,
                                name = "Test",
                                baseUrl = Sys.getenv("baseUrl"))
# WebAPI functions -----------------------------------------------------------
getCohortDefinitionName(baseUrl = Sys.getenv("baseUrl"), definitionId = 5021)
getConceptSetName(baseUrl = Sys.getenv("baseUrl"), setId = 12)
getPriorityVocabKey(baseUrl = Sys.getenv("baseUrl"))
getConceptSetConceptIds(baseUrl = Sys.getenv("baseUrl"), setId = 12)
# R environment snapshot ------------------------------------------------------
# snapshot <- read.csv("c:/temp/rEnvironmentSnapshot.csv")
# restoreEnvironment(snapshot)
restoreEnvironmentFromPackageOnGithub("OHDSI/Legend")
# Build an renv lock file for the Eumaeus package's dependency tree
createRenvLockFile(rootPackage = "Eumaeus",
                   includeRootPackage = FALSE,
                   additionalRequiredPackages = "keyring")
|
# Monthly time-series modelling of the Train_SU63ISt traffic-count data:
# aggregate hourly counts to monthly totals, fit a seasonal ARIMA on the log
# scale, and compare one year of predictions against the observed values.
PATH <- "D:/09_analytics_new_start/06_time_series_problem/"
setwd(PATH)
data_path <- paste0(PATH, "data/Train_SU63ISt.csv")
data <- read.csv(data_path, stringsAsFactors = FALSE)
head(data$Datetime)
library(lubridate)
data$Datetime <- dmy_hm(data$Datetime)  # parse "dd-mm-yyyy hh:mm" timestamps
head(data)
class(data$Datetime)
library(xts)
data.xts <- xts(x = data$Count, order.by = data$Datetime)
head(data)
data.xts.monthly <- apply.monthly(data.xts, sum)  # hourly -> monthly totals
head(data.xts.monthly)
# Split the data
# Time-series data is split sequentially (by date), not at random as in a
# traditional train/test split.
n_train <- floor(0.8 * length(data.xts.monthly))
data.xts.train <- data.xts.monthly[1:n_train]
data.xts.test <- data.xts.monthly[(n_train + 1):length(data.xts.monthly)]
# Many functions work best with base R, so convert the xts objects to ts,
# carrying the (year, month, day) of each series' endpoints across.
data.xts.start <- c(year(start(data.xts.train)), month(start(data.xts.train)), day(start(data.xts.train)))
data.xts.end <- c(year(end(data.xts.train)), month(end(data.xts.train)), day(end(data.xts.train)))
data.ts.train <- ts(as.numeric(data.xts.train), start = data.xts.start, end = data.xts.end, frequency = 12)
data.xts.start <- c(year(start(data.xts.test)), month(start(data.xts.test)), day(start(data.xts.test)))
data.xts.end <- c(year(end(data.xts.test)), month(end(data.xts.test)), day(end(data.xts.test)))
data.ts.test <- ts(as.numeric(data.xts.test), start = data.xts.start, end = data.xts.end, frequency = 12)
# par(mar = c(1, 1, 1, 1))
plot(data.ts.train)
abline(reg = lm(data.ts.train ~ time(data.ts.train)))  # overall trend line
boxplot(data.ts.train ~ cycle(data.ts.train))          # seasonality by month
start(data.ts.train)
end(data.ts.train)
library(tseries)
# ARIMA: AR (auto-regression, p) / I (integration, d) / MA (moving average, q)
acf(data.ts.train)
acf(diff(log(data.ts.train)))   # suggests q (MA order)
pacf(diff(log(data.ts.train)))  # suggests p (AR order)
plot(diff(log(data.ts.train)))
fit <- arima(log(data.ts.train), c(0, 1, 1), seasonal = list(order = c(0, 1, 1), period = 12))
pred <- predict(fit, n.ahead = 1 * 12)
# Back-transform from the log scale. Fix: exp() replaces 2.718^x — 2.718 is a
# truncated value of e, so the previous back-transform was slightly off.
predt <- exp(pred$pred)
# Fix: predictions must be back-transformed with exp(), not multiplied by
# 2.718, and the line-type argument is `lty` (previously the typo
# `ity = c(1.3)`, which was silently swallowed by `...`).
ts.plot(data.ts.train, exp(pred$pred), log = "y", lty = c(1, 3))
# Testing our model: refit on a shortened window and predict the final year
start(data.ts.train)
end(data.ts.train)
datawide <- ts(data.ts.train, frequency = 12, start = c(2012, 8), end = c(2014, 3))
fit <- arima(log(datawide), c(0, 1, 1), seasonal = list(order = c(0, 1, 1), period = 12))
pred <- predict(fit, n.ahead = 1 * 12)
predt <- exp(pred$pred)
data1 <- head(predt, 12)
predict_2014 <- round(data1, digits = 0)
original_2014 <- tail(data.ts.train, 12)
predict_2014
original_2014
|
/03_time_series_problem/code.R
|
no_license
|
shubamsharma/Data-Analytics
|
R
| false
| false
| 2,523
|
r
|
# Monthly time-series modelling of the Train_SU63ISt traffic-count data:
# aggregate hourly counts to monthly totals, fit a seasonal ARIMA on the log
# scale, and compare one year of predictions against the observed values.
PATH <- "D:/09_analytics_new_start/06_time_series_problem/"
setwd(PATH)
data_path <- paste0(PATH, "data/Train_SU63ISt.csv")
data <- read.csv(data_path, stringsAsFactors = FALSE)
head(data$Datetime)
library(lubridate)
data$Datetime <- dmy_hm(data$Datetime)  # parse "dd-mm-yyyy hh:mm" timestamps
head(data)
class(data$Datetime)
library(xts)
data.xts <- xts(x = data$Count, order.by = data$Datetime)
head(data)
data.xts.monthly <- apply.monthly(data.xts, sum)  # hourly -> monthly totals
head(data.xts.monthly)
# Split the data
# Time-series data is split sequentially (by date), not at random as in a
# traditional train/test split.
n_train <- floor(0.8 * length(data.xts.monthly))
data.xts.train <- data.xts.monthly[1:n_train]
data.xts.test <- data.xts.monthly[(n_train + 1):length(data.xts.monthly)]
# Many functions work best with base R, so convert the xts objects to ts,
# carrying the (year, month, day) of each series' endpoints across.
data.xts.start <- c(year(start(data.xts.train)), month(start(data.xts.train)), day(start(data.xts.train)))
data.xts.end <- c(year(end(data.xts.train)), month(end(data.xts.train)), day(end(data.xts.train)))
data.ts.train <- ts(as.numeric(data.xts.train), start = data.xts.start, end = data.xts.end, frequency = 12)
data.xts.start <- c(year(start(data.xts.test)), month(start(data.xts.test)), day(start(data.xts.test)))
data.xts.end <- c(year(end(data.xts.test)), month(end(data.xts.test)), day(end(data.xts.test)))
data.ts.test <- ts(as.numeric(data.xts.test), start = data.xts.start, end = data.xts.end, frequency = 12)
# par(mar = c(1, 1, 1, 1))
plot(data.ts.train)
abline(reg = lm(data.ts.train ~ time(data.ts.train)))  # overall trend line
boxplot(data.ts.train ~ cycle(data.ts.train))          # seasonality by month
start(data.ts.train)
end(data.ts.train)
library(tseries)
# ARIMA: AR (auto-regression, p) / I (integration, d) / MA (moving average, q)
acf(data.ts.train)
acf(diff(log(data.ts.train)))   # suggests q (MA order)
pacf(diff(log(data.ts.train)))  # suggests p (AR order)
plot(diff(log(data.ts.train)))
fit <- arima(log(data.ts.train), c(0, 1, 1), seasonal = list(order = c(0, 1, 1), period = 12))
pred <- predict(fit, n.ahead = 1 * 12)
# Back-transform from the log scale. Fix: exp() replaces 2.718^x — 2.718 is a
# truncated value of e, so the previous back-transform was slightly off.
predt <- exp(pred$pred)
# Fix: predictions must be back-transformed with exp(), not multiplied by
# 2.718, and the line-type argument is `lty` (previously the typo
# `ity = c(1.3)`, which was silently swallowed by `...`).
ts.plot(data.ts.train, exp(pred$pred), log = "y", lty = c(1, 3))
# Testing our model: refit on a shortened window and predict the final year
start(data.ts.train)
end(data.ts.train)
datawide <- ts(data.ts.train, frequency = 12, start = c(2012, 8), end = c(2014, 3))
fit <- arima(log(datawide), c(0, 1, 1), seasonal = list(order = c(0, 1, 1), period = 12))
pred <- predict(fit, n.ahead = 1 * 12)
predt <- exp(pred$pred)
data1 <- head(predt, 12)
predict_2014 <- round(data1, digits = 0)
original_2014 <- tail(data.ts.train, 12)
predict_2014
original_2014
|
# Parse the raw Cherry Blossom race-results text files into data frames.
# NOTE(review): setwd() to an absolute path makes this script machine-specific;
# consider project-relative paths.
setwd("C:/MyGitRepos/cherry-blossom-run/Data")
# The commented block below is the exploratory prototype that the functions
# further down generalise.
# els <- readLines("MenTxt/2012.txt")
# eqIndex <- grep("^===", els)
# spacerRow <- els[eqIndex]
# headerRow <- els[eqIndex - 1]
# body <- els[-(1:eqIndex)]
#
# headerRow <- tolower(headerRow)
# ageStart <- regexpr("ag", headerRow)
# age <- substr(body, start = ageStart, stop = ageStart + 1)
# blankLocs <- gregexpr(" ", spacerRow)
# searchLocs <- c(0, blankLocs[[1]])
# Values <- mapply(substr, list(body), start = searchLocs[-length(searchLocs) + 1], stop = searchLocs[-1] - 1)
# Locate the column boundaries implied by the "===" spacer row.
# Returns the blank positions, bracketed by 0 at the front and — when the row
# does not already end in a blank — by one past the final character.
findColLocs <- function(spacerRow) {
  blanks <- gregexpr(" ", spacerRow)[[1]]
  n <- nchar(spacerRow)
  endsInBlank <- substring(spacerRow, n, n) == " "
  if (endsInBlank) {
    c(0, blanks)
  } else {
    c(0, blanks, n + 1)
  }
}
# For each short column name, find its (start, stop) character positions in
# the fixed-width results table, using the lower-cased header row and the
# blank locations from the spacer row. Names not found in the header yield
# c(NA, NA). Returns a 2-row matrix with one column per name.
selectCols <- function(colNames, headerRow, searchLocs) {
  locate <- function(nm) {
    hit <- regexpr(nm, headerRow)[[1]]
    if (hit == -1) {
      return(c(NA, NA))
    }
    # index of the column interval the header label falls inside
    i <- sum(hit >= searchLocs)
    c(searchLocs[i] + 1, searchLocs[i + 1])
  }
  sapply(colNames, locate)
}
# searchLocs <- findColLocs(spacerRow)
# ageLoc <- selectCols("ag", headerRow, searchLocs)
# ages <- mapply(substr, list(body), start = ageLoc[1,], stop = ageLoc[2,])
#
# shortColNames <- c("name", "home", "ag", "gun", "net", "time")
#
# locCols <- selectCols(shortColNames, headerRow, searchLocs)
# Values <- mapply(substr, list(body), start = locCols[1,], stop = locCols[2,])
# class(Values)
# colnames(Values) <- shortColNames
# Extract the requested columns from one year's raw results file.
# `file`: character vector of lines (from readLines); `varNames`: short labels
# searched for in the header row; `sex`/`year` select special handling for
# files with known formatting quirks (women 2001, both 2006 files).
# Returns (invisibly) a character matrix with one column per variable.
extractVariables <- function(file, varNames = c("name", "home", "ag", "gun", "net", "time"),
                             sex, year) {
  #Find the index of the footer row
  footIndex <- grep("^[[:blank:]]*[#|*]", file)
  #Find the index of rows that are completely blank
  blankIndex <- grep("^[[:blank:]]*$", file)
  if(sex == "W" & year == 2001){
    #women's file for 2001 does not contain spacer or header rows
    # so hard-coded (start, stop) positions are used for each variable
    body <- file[-c(footIndex, blankIndex)]
    locCols<-matrix(c(13, 34, 38, 56, 35, 37, 65, 72, 57, 64, NA, NA), nrow = 2)
    colnames(locCols) <- varNames
  } else {
    #Find the index of the row with equal signs
    eqIndex <- grep("^===", file)
    #Extract the two key rows and the data (fix men 2006 spacer row)
    spacerRow <- file[eqIndex]
    headerRow <- tolower(file[eqIndex - 1])
    if (year == 2006){
      # 2006 files run the hometown and net-time columns together; insert a
      # blank into the spacer row so the two columns can be separated
      locNetTime <- regexpr("net", headerRow)
      spacerRow <- paste(substr(spacerRow, 1, locNetTime - 2),
                         substr(spacerRow, locNetTime, nchar(spacerRow)), "")
    }
    body <- file[-c(1:eqIndex, footIndex, blankIndex)]
    #Obtain the starting and ending positions of variables
    searchLocs <- findColLocs(spacerRow)
    locCols <- selectCols(varNames, headerRow, searchLocs)
  }
  # substr each body line at each variable's (start, stop) positions
  Values <- mapply(substr, list(body), start = locCols[1,], stop = locCols[2,])
  colnames(Values) <- varNames
  invisible(Values)
}
# Read all fourteen years of raw results for each sex into lists of text
# lines, keyed by year.
mfilenames <- paste("MenTxt/", 1999:2012, ".txt", sep="")
menFiles <- lapply(mfilenames, readLines)
names(menFiles) <- 1999:2012
wfilenames <- paste("WomenTxt/", 1999:2012, ".txt", sep="")
womenFiles <- lapply(wfilenames, readLines)
names(womenFiles) <- 1999:2012
#menResMat <- lapply(menFiles, extractVariables)
#womenResMat <- lapply(womenFiles, extractVariables)
# Extract the raw character matrices, passing sex/year so per-file quirks
# (women 2001, both 2006 files) can be handled.
menResMat <- mapply(extractVariables, menFiles, sex = "M", year = 1999:2012)
womenResMat <- mapply(extractVariables, womenFiles, sex = "W", year = 1999:2012)
# Sanity check: number of rows extracted per year
sapply(menResMat, nrow)
sapply(womenResMat, nrow)
##Check ages
#Men
# Coerce ages to numeric and inspect the distribution by year
age <- as.numeric(menResMat$`2012`[, "ag"])
age <- sapply(menResMat, function(x) as.numeric(x[ , "ag"]))
boxplot(age, ylab = "Age", xlab = "Year")
# Count NA ages per year to locate problem files
sapply(age, function(x) sum(is.na(x)))
age2001 <- age[["2001"]]
grep("^===", menFiles[["2001"]])
# offset of 5: the header/spacer rows precede the data body in the 2001 file
badAgeIndex <- which(is.na(age2001)) + 5
menFiles[["2001"]][badAgeIndex]
blanks <- grep("^[[:blank:]]*$", menFiles[["2001"]])
# Implausibly small ages
which(age2001 < 5)
menFiles[["2001"]][which(age2001 < 5) + 5]
# Prototype the time conversion on the 2012 men's results
charTime <- menResMat[["2012"]][, "time"]
timePieces <- strsplit(charTime, ":")
timePieces[[1]]
tail(timePieces, 1)
timePieces <- sapply(timePieces, as.numeric)
runTime <- sapply(timePieces,
                  function(x){
                    if (length(x) == 2) x[1] + x[2]/60
                    else 60*x[1] + x[2] + x[3]/60
                  })
summary(runTime)
#Women
# Same NA-age diagnostics for the women's files; the offset added to each
# bad index is the position of the "===" spacer row in that raw file.
age <- as.numeric(womenResMat$`2012`[, "ag"])
age <- sapply(womenResMat, function(x) as.numeric(x[ , "ag"]))
boxplot(age, ylab = "Age", xlab = "Year")
sapply(age, function(x) sum(is.na(x)))
age1999 <- age$`1999`
badAgeIndex <- which(is.na(age1999)) + grep("^===", womenFiles[["1999"]])
womenFiles$`1999`[badAgeIndex]
age2002 <- age$`2002`
badAgeIndex <- which(is.na(age2002)) + grep("^===", womenFiles[["2002"]])
womenFiles$`2002`[badAgeIndex]
age2005 <- age$`2005`
badAgeIndex <- which(is.na(age2005)) + grep("^===", womenFiles[["2005"]])
womenFiles$`2005`[badAgeIndex]
# Inspect suspiciously low ages
age2001 <- age$`2001`
min(age2001)
zeroAgeIndex <- which(age2001 == 0)
womenFiles$`2001`[zeroAgeIndex + 3]
age2009 <- age$`2009`
min(age2009, na.rm = T)
ageSevenIndex <- which(age2009 == 7) + grep("^===", womenFiles[["2009"]])
womenFiles$`2009`[ageSevenIndex]
# Convert character race times ("h:mm:ss" or "mm:ss") into minutes.
# Malformed values (e.g. a trailing colon "1:30:", or no colon at all)
# become NA. Vectorised over `charTime`.
convertTime <- function(charTime){
  # lapply, not sapply: sapply collapses the pieces into a matrix whenever
  # every time happens to have the same number of fields, which then made
  # mapply() iterate over single numbers and return all-NA results.
  timePieces <- lapply(strsplit(charTime, ":"), as.numeric)
  # Number of colons in each raw string. A well-formed time has one more
  # field than colons, so nbrTimePieces <= nbrColons flags a malformed value
  # (strsplit drops a trailing empty field, so "1:30:" gives 2 pieces but
  # 2 colons).
  nbrColons <- vapply(charTime,
                      function(x) length(gregexpr(":", x)[[1]]),
                      integer(1), USE.NAMES = FALSE)
  mapply(function(pieces, colons){
    nbrTimePieces <- length(pieces)
    if (nbrTimePieces <= colons) {
      NA_real_
    } else if (nbrTimePieces == 2) {
      pieces[1] + pieces[2]/60
    } else {
      60*pieces[1] + pieces[2] + pieces[3]/60
    }
  },
  timePieces,
  nbrColons)
}
# Build a clean data frame for one year/sex from the raw character matrix
# produced by extractVariables(). Prefers net time, then gun time, then the
# generic time column. Returns (invisibly) a data frame with year, sex, name,
# home, age and runTime (minutes).
# NOTE(review): rows whose time fails to parse (NA runTime) are printed but
# NOT removed from the returned data frame, despite the comment below —
# confirm whether dropping them was intended.
createDF <- function(Res, year, sex){
  #Determine which time to use
  # (only the first row is checked; assumes a column is either all-present
  # or all-absent for a given year — TODO confirm)
  useTime <- if(!is.na(Res[1, "net"])) {
    Res[, "net"]
  } else if(!is.na(Res[1, "gun"])) {
    Res[, "gun"]
  } else {
    Res[, "time"]}
  #Remove # and * and blanks from time
  useTime <- gsub("[#\\*[:blank:]]", "", useTime)
  #Drop rows with no time
  Res <- Res[useTime != "", ]
  runTime <- convertTime(useTime[useTime != ""])
  #convertTime returns NA for invalid run times; drop these records and print
  #message about record(s) dropped
  if(sum(is.na(runTime)) > 0){
    print(paste("Dropping the following records in year", year, "for",
                ifelse(sex == "M", "Men", "Women"),
                "due to invalid times", sep = " "))
    print(Res[is.na(runTime), ])
  }
  Results <- data.frame(year = rep(year, nrow(Res)),
                        sex = rep(sex, nrow(Res)),
                        name = Res[ , "name"],
                        home = Res[ , "home"],
                        age = as.numeric(Res[ , "ag"]),
                        runTime = runTime,
                        stringsAsFactors = F)
  invisible(Results)
}
# Build per-year data frames and check for unparseable times
menDF <- mapply(createDF, menResMat, year = 1999:2012, sex = "M", SIMPLIFY = F)
womenDF <- mapply(createDF, womenResMat, year = 1999:2012, sex = "W", SIMPLIFY = F)
#check NA values for runTime
sapply(menDF, function(x) sum(is.na(x$runTime)))
sapply(womenDF, function(x) sum(is.na(x$runTime)))
#Check for why there are so many NA's in women's 2006 file for runtime
#It's because hometown spacer row is not separated from net time, just like in men's 2006 file
#We can fix this in the extractVariables function
fileWomen2006 <- womenFiles$`2006`
head(fileWomen2006, 30)
# Combine all years into one data frame per sex and persist for analysis
cbMen <- do.call(rbind, menDF)
save(cbMen, file = "cbMen.rda")
cbWomen <- do.call(rbind, womenDF)
save(cbWomen, file = "cbWomen.rda")
|
/readTxt.R
|
no_license
|
Tubbz-alt/cherry-blossom-run
|
R
| false
| false
| 8,460
|
r
|
# Parse the raw Cherry Blossom race-results text files into data frames.
# NOTE(review): setwd() to an absolute path makes this script machine-specific;
# consider project-relative paths.
setwd("C:/MyGitRepos/cherry-blossom-run/Data")
# The commented block below is the exploratory prototype that the functions
# further down generalise.
# els <- readLines("MenTxt/2012.txt")
# eqIndex <- grep("^===", els)
# spacerRow <- els[eqIndex]
# headerRow <- els[eqIndex - 1]
# body <- els[-(1:eqIndex)]
#
# headerRow <- tolower(headerRow)
# ageStart <- regexpr("ag", headerRow)
# age <- substr(body, start = ageStart, stop = ageStart + 1)
# blankLocs <- gregexpr(" ", spacerRow)
# searchLocs <- c(0, blankLocs[[1]])
# Values <- mapply(substr, list(body), start = searchLocs[-length(searchLocs) + 1], stop = searchLocs[-1] - 1)
# Locate the column boundaries implied by the "===" spacer row.
# Returns the blank positions, bracketed by 0 at the front and — when the row
# does not already end in a blank — by one past the final character.
findColLocs <- function(spacerRow) {
  blanks <- gregexpr(" ", spacerRow)[[1]]
  n <- nchar(spacerRow)
  endsInBlank <- substring(spacerRow, n, n) == " "
  if (endsInBlank) {
    c(0, blanks)
  } else {
    c(0, blanks, n + 1)
  }
}
# For each short column name, find its (start, stop) character positions in
# the fixed-width results table, using the lower-cased header row and the
# blank locations from the spacer row. Names not found in the header yield
# c(NA, NA). Returns a 2-row matrix with one column per name.
selectCols <- function(colNames, headerRow, searchLocs) {
  locate <- function(nm) {
    hit <- regexpr(nm, headerRow)[[1]]
    if (hit == -1) {
      return(c(NA, NA))
    }
    # index of the column interval the header label falls inside
    i <- sum(hit >= searchLocs)
    c(searchLocs[i] + 1, searchLocs[i + 1])
  }
  sapply(colNames, locate)
}
# searchLocs <- findColLocs(spacerRow)
# ageLoc <- selectCols("ag", headerRow, searchLocs)
# ages <- mapply(substr, list(body), start = ageLoc[1,], stop = ageLoc[2,])
#
# shortColNames <- c("name", "home", "ag", "gun", "net", "time")
#
# locCols <- selectCols(shortColNames, headerRow, searchLocs)
# Values <- mapply(substr, list(body), start = locCols[1,], stop = locCols[2,])
# class(Values)
# colnames(Values) <- shortColNames
# Extract the requested columns from one year's raw results file.
# `file`: character vector of lines (from readLines); `varNames`: short labels
# searched for in the header row; `sex`/`year` select special handling for
# files with known formatting quirks (women 2001, both 2006 files).
# Returns (invisibly) a character matrix with one column per variable.
extractVariables <- function(file, varNames = c("name", "home", "ag", "gun", "net", "time"),
                             sex, year) {
  #Find the index of the footer row
  footIndex <- grep("^[[:blank:]]*[#|*]", file)
  #Find the index of rows that are completely blank
  blankIndex <- grep("^[[:blank:]]*$", file)
  if(sex == "W" & year == 2001){
    #women's file for 2001 does not contain spacer or header rows
    # so hard-coded (start, stop) positions are used for each variable
    body <- file[-c(footIndex, blankIndex)]
    locCols<-matrix(c(13, 34, 38, 56, 35, 37, 65, 72, 57, 64, NA, NA), nrow = 2)
    colnames(locCols) <- varNames
  } else {
    #Find the index of the row with equal signs
    eqIndex <- grep("^===", file)
    #Extract the two key rows and the data (fix men 2006 spacer row)
    spacerRow <- file[eqIndex]
    headerRow <- tolower(file[eqIndex - 1])
    if (year == 2006){
      # 2006 files run the hometown and net-time columns together; insert a
      # blank into the spacer row so the two columns can be separated
      locNetTime <- regexpr("net", headerRow)
      spacerRow <- paste(substr(spacerRow, 1, locNetTime - 2),
                         substr(spacerRow, locNetTime, nchar(spacerRow)), "")
    }
    body <- file[-c(1:eqIndex, footIndex, blankIndex)]
    #Obtain the starting and ending positions of variables
    searchLocs <- findColLocs(spacerRow)
    locCols <- selectCols(varNames, headerRow, searchLocs)
  }
  # substr each body line at each variable's (start, stop) positions
  Values <- mapply(substr, list(body), start = locCols[1,], stop = locCols[2,])
  colnames(Values) <- varNames
  invisible(Values)
}
# Read every year's raw results file (1999-2012) for each sex into a
# year-named list of character vectors.
mfilenames <- paste("MenTxt/", 1999:2012, ".txt", sep="")
menFiles <- lapply(mfilenames, readLines)
names(menFiles) <- 1999:2012
wfilenames <- paste("WomenTxt/", 1999:2012, ".txt", sep="")
womenFiles <- lapply(wfilenames, readLines)
names(womenFiles) <- 1999:2012
# Earlier version, before sex/year special-casing was needed:
#menResMat <- lapply(menFiles, extractVariables)
#womenResMat <- lapply(womenFiles, extractVariables)
menResMat <- mapply(extractVariables, menFiles, sex = "M", year = 1999:2012)
womenResMat <- mapply(extractVariables, womenFiles, sex = "W", year = 1999:2012)
# Sanity check: number of result rows extracted per year.
sapply(menResMat, nrow)
sapply(womenResMat, nrow)
##Check ages
#Men
age <- as.numeric(menResMat$`2012`[, "ag"])
age <- sapply(menResMat, function(x) as.numeric(x[ , "ag"]))
boxplot(age, ylab = "Age", xlab = "Year")
# Count of unparseable (NA) ages per year.
sapply(age, function(x) sum(is.na(x)))
# Inspect the raw 2001 lines behind the NA ages (offset by the 5 header
# lines removed during extraction).
age2001 <- age[["2001"]]
grep("^===", menFiles[["2001"]])
badAgeIndex <- which(is.na(age2001)) + 5
menFiles[["2001"]][badAgeIndex]
blanks <- grep("^[[:blank:]]*$", menFiles[["2001"]])
# Implausibly small ages.
which(age2001 < 5)
menFiles[["2001"]][which(age2001 < 5) + 5]
# Prototype of the time conversion on the 2012 men's results.
charTime <- menResMat[["2012"]][, "time"]
timePieces <- strsplit(charTime, ":")
timePieces[[1]]
tail(timePieces, 1)
timePieces <- sapply(timePieces, as.numeric)
runTime <- sapply(timePieces,
                  function(x){
                    if (length(x) == 2) x[1] + x[2]/60
                    else 60*x[1] + x[2] + x[3]/60
                  })
summary(runTime)
#Women
age <- as.numeric(womenResMat$`2012`[, "ag"])
age <- sapply(womenResMat, function(x) as.numeric(x[ , "ag"]))
boxplot(age, ylab = "Age", xlab = "Year")
sapply(age, function(x) sum(is.na(x)))
# Inspect raw lines behind NA ages in the years that have them (offset by
# the position of the "===" spacer row).
age1999 <- age$`1999`
badAgeIndex <- which(is.na(age1999)) + grep("^===", womenFiles[["1999"]])
womenFiles$`1999`[badAgeIndex]
age2002 <- age$`2002`
badAgeIndex <- which(is.na(age2002)) + grep("^===", womenFiles[["2002"]])
womenFiles$`2002`[badAgeIndex]
age2005 <- age$`2005`
badAgeIndex <- which(is.na(age2005)) + grep("^===", womenFiles[["2005"]])
womenFiles$`2005`[badAgeIndex]
# Implausible minimum ages.
age2001 <- age$`2001`
min(age2001)
zeroAgeIndex <- which(age2001 == 0)
womenFiles$`2001`[zeroAgeIndex + 3]
age2009 <- age$`2009`
min(age2009, na.rm = T)
ageSevenIndex <- which(age2009 == 7) + grep("^===", womenFiles[["2009"]])
womenFiles$`2009`[ageSevenIndex]
convertTime <- function(charTime){
  # Convert character race times ("h:mm:ss" or "mm:ss") to numeric minutes.
  # Malformed times (e.g. a trailing colon, as in "1:30:") yield NA.
  #
  # Fix: the original used sapply(), which silently simplifies the list of
  # time pieces to a matrix whenever every time has the same number of
  # pieces; mapply() then iterated over individual numbers instead of
  # per-time vectors and returned all NA. lapply() keeps the list shape.
  timePieces <- strsplit(charTime, ":")
  timePieces <- lapply(timePieces, as.numeric)
  # Number of ':' separators per time. A well-formed time has one more
  # piece than separators, so pieces <= separators flags a malformed value
  # (strsplit drops the empty piece after a trailing colon).
  nbrColons <- vapply(charTime,
                      function(x) length(gregexpr(":", x)[[1]]),
                      integer(1), USE.NAMES = FALSE)
  mapply(function(pieces, colons) {
    n <- length(pieces)
    if (n <= colons) {
      NA_real_
    } else if (n == 2) {
      pieces[1] + pieces[2]/60
    } else {
      60*pieces[1] + pieces[2] + pieces[3]/60
    }
  }, timePieces, nbrColons)
}
# Assemble one year's extracted character matrix into a data frame with
# year, sex, name, home, numeric age, and run time in minutes.
# Time preference: net time when present, else gun time, else "time".
createDF <- function(Res, year, sex){
  #Determine which time to use
  useTime <- if(!is.na(Res[1, "net"])) {
    Res[, "net"]
  } else if(!is.na(Res[1, "gun"])) {
    Res[, "gun"]
  } else {
    Res[, "time"]}
  #Remove # and * and blanks from time
  useTime <- gsub("[#\\*[:blank:]]", "", useTime)
  #Drop rows with no time
  Res <- Res[useTime != "", ]
  runTime <- convertTime(useTime[useTime != ""])
  #convertTime returns NA for invalid run times; drop these records and print
  #message about record(s) dropped
  # NOTE(review): despite the message, rows with NA runTime are only
  # reported here, not removed -- they remain in the returned data frame.
  if(sum(is.na(runTime)) > 0){
    print(paste("Dropping the following records in year", year, "for",
                ifelse(sex == "M", "Men", "Women"),
                "due to invalid times", sep = " "))
    print(Res[is.na(runTime), ])
  }
  Results <- data.frame(year = rep(year, nrow(Res)),
                        sex = rep(sex, nrow(Res)),
                        name = Res[ , "name"],
                        home = Res[ , "home"],
                        age = as.numeric(Res[ , "ag"]),
                        runTime = runTime,
                        stringsAsFactors = F)
  # Return invisibly; callers assign the result.
  invisible(Results)
}
# Build per-year data frames for each sex.
menDF <- mapply(createDF, menResMat, year = 1999:2012, sex = "M", SIMPLIFY = F)
womenDF <- mapply(createDF, womenResMat, year = 1999:2012, sex = "W", SIMPLIFY = F)
#check NA values for runTime
sapply(menDF, function(x) sum(is.na(x$runTime)))
sapply(womenDF, function(x) sum(is.na(x$runTime)))
#Check for why there are so many NA's in women's 2006 file for runtime
#It's because hometown spacer row is not separated from net time, just like in men's 2006 file
#We can fix this in the extractVariables function
fileWomen2006 <- womenFiles$`2006`
head(fileWomen2006, 30)
# Stack all years into a single data frame per sex and save to disk.
cbMen <- do.call(rbind, menDF)
save(cbMen, file = "cbMen.rda")
cbWomen <- do.call(rbind, womenDF)
save(cbWomen, file = "cbWomen.rda")
|
# Forecast Amtrak monthly ridership (120 months) with trend/seasonality
# regression models, compare them by hold-out RMSE, refit the winner on
# the full series, and model the remaining autocorrelation with an AR(1).
#
# Fixes relative to the original script:
#  * log_rider creation was commented out although the exponential and
#    multiplicative models regress on it (the script errored).
#  * the "multiplicative seasonality + linear trend" section had been
#    collapsed onto a single comment line, so rmse_multi_add_sea was
#    referenced in the comparison table but never defined.
#  * the linear-trend model was fit at the bottom of the file although
#    rmse_linear is used in the comparison table; it now comes first.
#  * rmse_Add_sea_Linear was computed but missing from the table.
if (!requireNamespace("Metrics", quietly = TRUE)) install.packages("Metrics")
library(Metrics)
Amtrak <- read.csv('D:\\Data science classes\\Assignment R\\Amtrak.csv') # read the Amtrak data
View(Amtrak) # Seasonality: 12 months
plot(Amtrak$Ridership, type = "l")
# One-hot month indicator columns (12 dummies; models drop Dec as baseline).
X <- data.frame(outer(rep(month.abb, length = 120), month.abb, "==") + 0)
colnames(X) <- month.abb # Assigning month names
View(X)
trakdata <- cbind(Amtrak, X)
View(trakdata)
trakdata["t"] <- 1:120                                  # linear time index
trakdata["log_rider"] <- log(trakdata["Ridership"])     # response on log scale
trakdata["t_square"] <- trakdata["t"] * trakdata["t"]   # quadratic trend term
## Data partition: first 108 months train, last 12 months hold-out
train <- trakdata[1:108, ]
test <- trakdata[109:120, ]
######################### Linear trend ################################
linear_model <- lm(Ridership ~ t, data = train)
linear_pred <- data.frame(predict(linear_model, interval = 'predict', newdata = test))
rmse_linear <- rmse(test$Ridership, linear_pred$fit)
rmse_linear
######################### Exponential #################################
expo_model <- lm(log_rider ~ t, data = train)
expo_pred <- data.frame(predict(expo_model, interval = 'predict', newdata = test))
rmse_expo <- rmse(test$Ridership, exp(expo_pred$fit))   # back-transform from log scale
rmse_expo
######################### Quadratic ###################################
Quad_model <- lm(Ridership ~ t + t_square, data = train)
Quad_pred <- data.frame(predict(Quad_model, interval = 'predict', newdata = test))
rmse_Quad <- rmse(test$Ridership, Quad_pred$fit)
rmse_Quad
######################### Additive Seasonality ########################
sea_add_model <- lm(Ridership ~ Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = train)
sea_add_pred <- data.frame(predict(sea_add_model, newdata = test, interval = 'predict'))
rmse_sea_add <- rmse(test$Ridership, sea_add_pred$fit)
rmse_sea_add
######################## Additive Seasonality with Linear trend #######
Add_sea_Linear_model <- lm(Ridership ~ t+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = train)
Add_sea_Linear_pred <- data.frame(predict(Add_sea_Linear_model, interval = 'predict', newdata = test))
rmse_Add_sea_Linear <- rmse(test$Ridership, Add_sea_Linear_pred$fit)
rmse_Add_sea_Linear
######################## Additive Seasonality with Quadratic trend ####
Add_sea_Quad_model <- lm(Ridership ~ t+t_square+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = train)
Add_sea_Quad_pred <- data.frame(predict(Add_sea_Quad_model, interval = 'predict', newdata = test))
rmse_Add_sea_Quad <- rmse(test$Ridership, Add_sea_Quad_pred$fit)
rmse_Add_sea_Quad
######################## Multiplicative Seasonality ###################
multi_sea_model <- lm(log_rider ~ Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = train)
multi_sea_pred <- data.frame(predict(multi_sea_model, newdata = test, interval = 'predict'))
rmse_multi_sea <- rmse(test$Ridership, exp(multi_sea_pred$fit))
rmse_multi_sea
######################## Multiplicative Seasonality with Linear trend #
# (restored: this section had been collapsed onto a single comment line)
multi_add_sea_model <- lm(log_rider ~ t+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = train)
multi_add_sea_pred <- data.frame(predict(multi_add_sea_model, newdata = test, interval = 'predict'))
rmse_multi_add_sea <- rmse(test$Ridership, exp(multi_add_sea_pred$fit))
rmse_multi_add_sea
# Compare all candidate models by hold-out RMSE.
table_rmse <- data.frame(
  model = c("rmse_linear", "rmse_expo", "rmse_Quad", "rmse_sea_add",
            "rmse_Add_sea_Linear", "rmse_Add_sea_Quad",
            "rmse_multi_sea", "rmse_multi_add_sea"),
  RMSE = c(rmse_linear, rmse_expo, rmse_Quad, rmse_sea_add,
           rmse_Add_sea_Linear, rmse_Add_sea_Quad,
           rmse_multi_sea, rmse_multi_add_sea))
View(table_rmse)
# Refit on the entire series: additive seasonality with quadratic trend
# has the lowest RMSE.
new_model <- lm(Ridership ~ t+t_square+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = trakdata)
# Residual diagnostics: check remaining autocorrelation.
resid <- residuals(new_model)
acf(resid, lag.max = 10)
# By the principle of parsimony use lag 1, even though several lags are
# significant: fit an AR(1) to the regression residuals.
k <- arima(resid, order = c(1, 0, 0))
pred_res <- predict(k, n.ahead = 12)   # reuse the fitted AR(1); no need to refit
str(pred_res)
pred_res$pred
acf(k$residuals)   # should now resemble white noise
#write.csv(trakdata,file="trakdata.csv",col.names = F,row.names = F)
####################### Predicting new data ###########################
library(readxl)
test_data <- read_excel(file.choose(), 1) # load Predict_new.xlsx
View(test_data)
# Regression forecast plus the AR(1) forecast of the residuals.
pred_new <- data.frame(predict(new_model, newdata = test_data, interval = 'predict'))
View(pred_new)
pred_new$fit <- pred_new$fit + pred_res$pred
View(pred_new)
|
/Forecasting.R
|
no_license
|
Amit1608/Datascience-Rcodes
|
R
| false
| false
| 4,606
|
r
|
install.packages("Metrics")
library(Metrics)
Amtrak<-read.csv('D:\\Data science classes\\Assignment R\\Amtrak.csv') # read the Amtrack data
View(Amtrak) # Seasonality 12 months
plot(Amtrak$Ridership,type="l")
# So creating 11 dummy variables
X<- data.frame(outer(rep(month.abb,length = 120), month.abb,"==") + 0 )# Creating dummies for 12 months
colnames(X)<-month.abb # Assigning month names
View(X)
trakdata<-cbind(Amtrak,X)
View(trakdata)
trakdata["t"]<- 1:120
View(trakdata)
#trakdata["log_rider"]<-log(trakdata["Ridership"])
trakdata["t_square"]<-trakdata["t"]*trakdata["t"]
#attach(trakdata)
##Data Partition
train<-trakdata[1:108,]
test<-trakdata[109:120,]
######################### Exponential #################################
expo_model<-lm(log_rider~t,data=train)
expo_pred<-data.frame(predict(expo_model,interval='predict',newdata=test))
rmse_expo<-rmse(test$Ridership,exp(expo_pred$fit))
rmse_expo
######################### Quadratic ####################################
Quad_model<-lm(Ridership~t+t_square,data=train)
Quad_pred<-data.frame(predict(Quad_model,interval='predict',newdata=test))
rmse_Quad<-rmse(test$Ridership,Quad_pred$fit)
rmse_Quad
######################### Additive Seasonality #########################
sea_add_model<-lm(Ridership~Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=train)
sea_add_pred<-data.frame(predict(sea_add_model,newdata=test,interval='predict'))
rmse_sea_add<-rmse(test$Ridership,sea_add_pred$fit)
rmse_sea_add
######################## Additive Seasonality with Linear #################
Add_sea_Linear_model<-lm(Ridership~t+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=train)
Add_sea_Linear_pred<-data.frame(predict(Add_sea_Linear_model,interval='predict',newdata=test))
rmse_Add_sea_Linear<-rmse(test$Ridership,Add_sea_Linear_pred$fit)
rmse_Add_sea_Linear
######################## Additive Seasonality with Quadratic #################
Add_sea_Quad_model<-lm(Ridership~t+t_square+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=train)
Add_sea_Quad_pred<-data.frame(predict(Add_sea_Quad_model,interval='predict',newdata=test))
rmse_Add_sea_Quad<-rmse(test$Ridership,Add_sea_Quad_pred$fit)
rmse_Add_sea_Quad
######################## Multiplicative Seasonality #########################
multi_sea_model<-lm(log_rider~Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data = train)
multi_sea_pred<-data.frame(predict(multi_sea_model,newdata=test,interval='predict'))
rmse_multi_sea<-rmse(test$Ridership,exp(multi_sea_pred$fit))
######################## Multiplicative Seasonality Linear trend ##########################multi_add_sea_model<-lm(log_rider~t+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data = train)multi_add_sea_pred<-data.frame(predict(multi_add_sea_model,newdata=test,interval='predict'))rmse_multi_add_sea<-rmse(test$Ridership,exp(multi_add_sea_pred$fit))rmse_multi_add_sea
# Preparing table on model and it's RMSE values
table_rmse<-data.frame('Model'=c("rmse_linear","rmse_expo","rmse_Quad","rmse_sea_add","rmse_Add_sea_Quad","rmse_multi_sea","rmse_multi_add_sea"),'RMSE'=c(rmse_linear,rmse_expo,rmse_Quad,rmse_sea_add,rmse_Add_sea_Quad,rmse_multi_sea,rmse_multi_add_sea))
colnames(table_rmse)<-c("model","RMSE")
View(table_rmse)
# Use entire data : Additive seasonality with Quadratic has least RMSE value
new_model <- lm(Ridership~t+t_square+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=trakdata)
# Getting residuals
resid <- residuals(new_model)
acf(resid,lag.max = 10)
# By principal of parcimony we will consider lag - 1 as we have so
# many significant lags
# Building Autoregressive model on residuals consider lag-1
k <- arima(resid, order=c(1,0,0))
pred_res<- predict(arima(resid,order=c(1,0,0)),n.ahead = 12)
str(pred_res)
pred_res$pred
acf(k$residuals)
#write.csv(trakdata,file="trakdata.csv",col.names = F,row.names = F)
####################### Predicting new data #############################
library(readxl)
test_data<-read_excel(file.choose(),1) #Load Predict_new.xlsx
View(test_data)
#test_data<-Predict_new
pred_new<-data.frame(predict(new_model,newdata=test_data,interval = 'predict'))
View(pred_new)
#pred_re<-pred_res$pred[1:12]
pred_new$fit <- pred_new$fit+pred_res$pred
View(pred_new)
########################### LINEAR MODEL #############################
linear_model<-lm(Ridership~t,data=train)
linear_pred<-data.frame(predict(linear_model,interval='predict',newdata =test))
rmse_linear<-rmse(test$Ridership,linear_pred$fit)
rmse_linear
|
#############################################
## ConvertSupport
ConvertSupport <- function (fromGrid, toGrid, mu = NULL, Cov = NULL, phi = NULL)
{
  # Re-express a mean curve (mu), covariance surface (Cov), or eigenfunction
  # (phi), observed on fromGrid, on the new support toGrid. Exactly one of
  # mu/Cov/phi should be supplied; they are checked in that order.
  # Snap the endpoints of toGrid onto fromGrid when they differ only by
  # floating-point noise, so interpolation stays within range.
  tol <- 3 * .Machine$double.eps * max(abs(fromGrid))
  nTo <- length(toGrid)
  nFrom <- length(fromGrid)
  if (abs(toGrid[1] - fromGrid[1]) < tol)
    toGrid[1] <- fromGrid[1]
  if (abs(toGrid[nTo] - fromGrid[nFrom]) < tol)
    toGrid[nTo] <- fromGrid[nFrom]
  if (!is.null(mu))
    return(mapX1d(fromGrid, mu, toGrid))
  if (!is.null(Cov)) {
    grids <- pracma::meshgrid(toGrid)
    interpolated <- interp2lin(fromGrid, fromGrid, Cov, grids$X, grids$Y)
    covMat <- matrix(interpolated, nrow = nTo)
    # Enforce exact symmetry lost to interpolation round-off.
    return((covMat + t(covMat)) / 2)
  }
  if (!is.null(phi))
    return(mapX1d(fromGrid, phi, toGrid))
}
|
/R_Functions/convertSupport.r
|
no_license
|
stefanrameseder/BiddingCurves
|
R
| false
| false
| 924
|
r
|
#############################################
## ConvertSupport
ConvertSupport <- function (fromGrid, toGrid, mu = NULL, Cov = NULL, phi = NULL)
{
# Input:
# - fromGrid: which grid should be started at?
# - toGrid:
buff <- .Machine$double.eps * max(abs(fromGrid)) * 3
if (abs(toGrid[1] - fromGrid[1]) < buff)
toGrid[1] <- fromGrid[1]
if (abs(toGrid[length(toGrid)] - fromGrid[length(fromGrid)]) <
buff)
toGrid[length(toGrid)] <- fromGrid[length(fromGrid)]
if (!is.null(mu)) {
return(mapX1d(fromGrid, mu, toGrid))
}
else if (!is.null(Cov)) {
gd <- pracma::meshgrid(toGrid)
ret <- matrix(interp2lin(fromGrid, fromGrid, Cov, gd$X,
gd$Y), nrow = length(toGrid))
ret <- 0.5 * (ret + t(ret))
return(ret)
}
else if (!is.null(phi)) {
return(mapX1d(fromGrid, phi, toGrid))
}
}
|
##' Adaptive permutation test one-sample problems
##'
##' @title One-sample adaptive permutation test
##' @template onesample_sims
##' @param test_statistic Function that computes the test statistic
##' @param perms Maximum number of permutations to use when computing permutation p-values and conditional error rates
##' @author Florian Klinglmueller
##' @export
adaptive_permtest_os <- function(x,n1,n,ne,test_statistic,perms=50000,alpha=0.025){
    if(ne>n){
        # Extended trial: split into stage 1, pre-planned stage 2, and the
        # extension.
        xs <- split(x,rep(1:3,c(n1,n-n1,ne-n)))
        gs <- split(sign(x)>0,rep(1:3,c(n1,n-n1,ne-n)))
    } else {
        # No extension: two stages, plus an empty third stage so the
        # indexing below is uniform.
        xs <- split(x,rep(1:2,c(n1,ne-n1)))
        gs <- split(sign(x)>0,rep(1:2,c(n1,ne-n1)))
        gs[[3]] <- xs[[3]] <- numeric(0)
    }
    # Conditional error rate given stage 1, then the second-stage
    # permutation p-value; reject when the p-value does not exceed the CER.
    A <- permutation_CER(xs[[1]],gs[[1]],xs[[2]],test_statistic,one_sample=TRUE,restricted=FALSE,B=perms,alpha=alpha)
    q <- perm_test(xs[[2]],xs[[3]],gs[[2]],gs[[3]],test_statistic,restricted=FALSE,B=perms)
    A>=q
}
##' Adaptive t-test as described in Timmesfeld et al. (2007)
##'
##' Warning the current implementation seems to be numerically instable
##' @title Adaptive t-test
##' @param mpfr Whether to use high precision numbers from \code{Rmpfr}
##' @template onesample_sims
##' @author Florian Klinglmueller
##' @export
adaptive_ttest_os <- function(x,n1,n,ne,alpha=0.025,mpfr=FALSE) {
    # Adaptive one-sample t-test (Timmesfeld et al., 2007): compares the
    # second-stage t-test p-value against the conditional error level of
    # the pre-planned test given the first-stage data.
    if(n == ne){
        # No sample-size adaptation: fall back to the fixed-sample t-test.
        # Fix: compare against `alpha`, not a hard-coded 0.025 that
        # silently ignored the alpha argument.
        return(alpha >= t.test(x,alternative='greater')$p.value)
    }
    xs <- split(x,rep(1:2,c(n1,ne-n1)))
    V1 <- sum(xs[[1]])       # first-stage sum
    U <- sum(xs[[1]]^2)      # first-stage sum of squares
    tU <- sum(xs[[2]]^2)     # second-stage sum of squares
    # Conditional error level of the pre-planned test given stage 1.
    A <- clev(tU,U,V1,ne-n1,n,n1,alpha=alpha,mpfr=mpfr)
    A >= t.test(xs[[2]],alternative='greater')$p.value
}
##' Adaptive combination test of stage-wise t-tests using the inverse normal combination function.
##'
##' @title Inverse normal adaptive t-test
##' @template onesample_sims
##' @author Florian Klinglmueller
##' @export
adaptive_invnormtest_os <- function(x,n1,n,ne,alpha=0.025){
    # Inverse-normal combination of stage-wise one-sample t-tests: weight
    # the stage z-scores by sqrt of the pre-planned information fractions
    # n1/n and (n-n1)/n, and reject when the combined upper-tail p-value
    # is at most alpha.
    # Fix: the original piped through magrittr's %>%, which is never
    # loaded anywhere in this file; rewritten in base R.
    xs <- split(x,rep(1:2,c(n1,ne-n1)))
    p1 <- t.test(xs[[1]],alternative='greater')$p.value
    p2 <- t.test(xs[[2]],alternative='greater')$p.value
    z <- sum(sqrt(c(n1,n-n1)/n) * qnorm(c(p1,p2), lower.tail = FALSE))
    alpha >= pnorm(z, lower.tail = FALSE)
}
##' Non-parametric combination of stage-wise test statistics. Combines stage-wise permutation p-values using some combination function; performs the test using the joint conditional permutation distribution of stage wise permutation p-values.
##'
##' @title adptive NPC test
##' @template onesample_sims
##' @param test_statistic Function that computes the test test statistic
##' @param combination_function Function to combine stage-wise (permutation) p-values
##' @param perms Maximum number of permutations to use when computing permutation p-values and conditional error rates
##' @author Florian Klinglmueller
##' @export
adaptive_npcombtest_os <- function(x,n1,n,ne,test_statistic,combination_function=inverse_normal,perms=50000,alpha=0.025) {
    xs <- split(x,rep(1:2,c(n1,ne-n1)))
    gs <- split(sign(x)>0,rep(1:2,c(n1,ne-n1)))
    # Joint conditional permutation distribution over both stages.
    G <- omega(gs[[1]],gs[[2]],restricted=FALSE,B=perms)
    rB <- ncol(G)
    # Stage-wise permutation p-values for every permutation in G (column 1
    # corresponds to the observed assignment).
    p1 <- 1-(rank(test_statistic(xs[[1]],G[1:n1,]))/(rB+1))
    p2 <- 1-(rank(test_statistic(xs[[2]],G[(n1+1):ne,]))/(rB+1))
    # Combine with pre-planned weights n1/n and (n-n1)/n, then compare the
    # observed combination against its permutation distribution.
    ct <- combination_function(p1,p2,n1/n,(n-n1)/n)
    sum(ct[1]>=ct)/length(ct) <= alpha
}
# NOTE(review): duplicate definition -- an identical adaptive_invnormtest_2s
# appears again later in this file, and the later one wins when sourced.
# Despite the two-sample signature, y, m1, m, and me are never used: the
# body is a copy of the one-sample inverse-normal test applied to x only.
# Presumably a placeholder -- TODO: implement the actual two-sample test.
adaptive_invnormtest_2s <- function(x,y,n1,n,ne,m1,m,me,alpha=0.025){
    xs <- split(x,rep(1:2,c(n1,ne-n1)))
    p1 <- t.test(xs[[1]],alternative='greater')$p.value
    p2 <- t.test(xs[[2]],alternative='greater')$p.value
    alpha >= {sqrt(c(n1,n-n1)/n) * qnorm(c(p1,p2),lower=F)} %>% sum() %>% pnorm(lower=FALSE)
}
##' Compare adaptive test procedures for general two-sample cases
##'
##' \code{rdist} needs to take the number of samples to return as its first argument.
##'
##' @title Compare two-sample adaptive tests
##' @param n1 first stage sample size (control group)
##' @param n preplanned total sample size (control group)
adaptive_permtest_2s <- function(x,y,n1,n,ne,m1,m,me,test_statistic,perms=50000,alpha=0.025){
    # Two-sample adaptive permutation test: x = controls, y = treatments;
    # (n1, n, ne) and (m1, m, me) are the first-stage, pre-planned, and
    # extended sample sizes per arm.
    if(ne>n){
        xs <- split(x,rep(1:3,c(n1,n-n1,ne-n)))
    } else {
        if(me>m) stop('Stages with controls only not supported')
        xs <- split(x,rep(1:2,c(n1,ne-n1)))
    }
    if(me>m){
        ys <- split(y,rep(1:3,c(m1,m-m1,me-m)))
    } else {
        if(ne>n) stop('Stages with treatments only not supported')
        ys <- split(y,rep(1:2,c(m1,me-m1)))
    }
    # Per-stage group labels: 0 = control (x), 1 = treatment (y).
    # Fix: the original called length(xs[[i]], ys[[i]]) -- length() takes a
    # single argument, so this was a runtime error.
    gs <- lapply(seq_along(xs), function(i) rep(0:1, c(length(xs[[i]]), length(ys[[i]]))))
    xs <- lapply(seq_along(xs), function(i) c(xs[[i]], ys[[i]]))
    # Without an extension there is no third stage; pad with empty stages
    # so the xs[[3]]/gs[[3]] accesses below are valid (mirrors
    # adaptive_permtest_os).
    if (length(xs) < 3) {
        xs[[3]] <- numeric(0)
        gs[[3]] <- numeric(0)
    }
    # Conditional error rate given stage 1, then the second-stage
    # permutation p-value; reject when the p-value does not exceed the CER.
    A <- permutation_CER(xs[[1]],gs[[1]],xs[[2]],test_statistic,B=perms,alpha=alpha)
    q <- perm_test(xs[[2]],xs[[3]],gs[[2]],gs[[3]],test_statistic,B=perms)
    A>=q
}
# NOTE(review): this duplicates (and, when sourced, overrides) the earlier
# identical adaptive_invnormtest_2s. y, m1, m, and me are never used -- the
# body is the one-sample inverse-normal test applied to x only. Also relies
# on magrittr's %>%, which is not loaded in this file. TODO: deduplicate
# and implement the real two-sample test.
adaptive_invnormtest_2s <- function(x,y,n1,n,ne,m1,m,me,alpha=0.025){
    xs <- split(x,rep(1:2,c(n1,ne-n1)))
    p1 <- t.test(xs[[1]],alternative='greater')$p.value
    p2 <- t.test(xs[[2]],alternative='greater')$p.value
    alpha >= {sqrt(c(n1,n-n1)/n) * qnorm(c(p1,p2),lower=F)} %>% sum() %>% pnorm(lower=FALSE)
}
|
/R/simulation.R
|
no_license
|
livioivil/resamplingMCP
|
R
| false
| false
| 5,069
|
r
|
##' Adaptive permutation test one-sample problems
##'
##' @title One-sample adaptive permutation test
##' @template onesample_sims
##' @param combination_function Function to combine stage-wise (permutation) p-values
##' @param perms Maximum number of permutations to use when computing permutation p-values and conditional error rates
##' @author Florian Klinglmueller
##' @export
adaptive_permtest_os <- function(x,n1,n,ne,test_statistic,perms=50000,alpha=0.025){
if(ne>n){
xs <- split(x,rep(1:3,c(n1,n-n1,ne-n)))
gs <- split(sign(x)>0,rep(1:3,c(n1,n-n1,ne-n)))
} else {
xs <- split(x,rep(1:2,c(n1,ne-n1)))
gs <- split(sign(x)>0,rep(1:2,c(n1,ne-n1)))
gs[[3]] <- xs[[3]] <- numeric(0)
}
A <- permutation_CER(xs[[1]],gs[[1]],xs[[2]],test_statistic,one_sample=TRUE,restricted=FALSE,B=perms,alpha=alpha)
q <- perm_test(xs[[2]],xs[[3]],gs[[2]],gs[[3]],test_statistic,restricted=FALSE,B=perms)
A>=q
}
##' Adaptive t-test as described in Timmesfeld et al. (2007)
##'
##' Warning the current implementation seems to be numerically instable
##' @title Adaptive t-test
##' @param mpfr Whether to use high precision numbers from \code{Rmpfr}
##' @template onesample_sims
##' @author Florian Klinglmueller
##' @export
adaptive_ttest_os <- function(x,n1,n,ne,alpha=0.025,mpfr=FALSE) {
if(n == ne){
return(0.025 >= t.test(x,alternative='greater')$p.value)
}
xs <- split(x,rep(1:2,c(n1,ne-n1)))
V1 <- sum(xs[[1]])
U <- sum(xs[[1]]^2)
tU <- sum(xs[[2]]^2)
A <- clev(tU,U,V1,ne-n1,n,n1,alpha=alpha,mpfr=mpfr)
A >= t.test(xs[[2]],alternative='greater')$p.value
}
##' Adaptive combination test of stage-wise t-tests using the inverse normal combination function.
##'
##' @title Inverse normal adaptive t-test
##' @template onesample_sims
##' @author Florian Klinglmueller
##' @export
adaptive_invnormtest_os <- function(x,n1,n,ne,alpha=0.025){
xs <- split(x,rep(1:2,c(n1,ne-n1)))
p1 <- t.test(xs[[1]],alternative='greater')$p.value
p2 <- t.test(xs[[2]],alternative='greater')$p.value
alpha >= {sqrt(c(n1,n-n1)/n) * qnorm(c(p1,p2),lower=F)} %>% sum() %>% pnorm(lower=FALSE)
}
##' Non-parametric combination of stage-wise test statistics. Combines stage-wise permutation p-values using some combination function; performs the test using the joint conditional permutation distribution of stage wise permutation p-values.
##'
##' @title adptive NPC test
##' @template onesample_sims
##' @param test_statistic Function that computes the test test statistic
##' @param combination_function Function to combine stage-wise (permutation) p-values
##' @param perms Maximum number of permutations to use when computing permutation p-values and conditional error rates
##' @author Florian Klinglmueller
##' @export
adaptive_npcombtest_os <- function(x,n1,n,ne,test_statistic,combination_function=inverse_normal,perms=50000,alpha=0.025) {
xs <- split(x,rep(1:2,c(n1,ne-n1)))
gs <- split(sign(x)>0,rep(1:2,c(n1,ne-n1)))
G <- omega(gs[[1]],gs[[2]],restricted=FALSE,B=perms)
rB <- ncol(G)
p1 <- 1-(rank(test_statistic(xs[[1]],G[1:n1,]))/(rB+1))
p2 <- 1-(rank(test_statistic(xs[[2]],G[(n1+1):ne,]))/(rB+1))
ct <- combination_function(p1,p2,n1/n,(n-n1)/n)
sum(ct[1]>=ct)/length(ct) <= alpha
}
adaptive_invnormtest_2s <- function(x,y,n1,n,ne,m1,m,me,alpha=0.025){
xs <- split(x,rep(1:2,c(n1,ne-n1)))
p1 <- t.test(xs[[1]],alternative='greater')$p.value
p2 <- t.test(xs[[2]],alternative='greater')$p.value
alpha >= {sqrt(c(n1,n-n1)/n) * qnorm(c(p1,p2),lower=F)} %>% sum() %>% pnorm(lower=FALSE)
}
##' Compare adaptive test procedures for general two-sample cases
##'
##' \code{rdist} needs to take the number of samples to return as its first argument.
##'
##' @title Compare two-sample adaptive tests
##' @param n1 first stage sample size (control group)
##' @param n preplanned total sample size (control group)
adaptive_permtest_2s <- function(x,y,n1,n,ne,m1,m,me,test_statistic,perms=50000,alpha=0.025){
if(ne>n){
xs <- split(x,rep(1:3,c(n1,n-n1,ne-n)))
} else {
if(me>m) stop('Stages with controls only not supported')
xs <- split(x,rep(1:2,c(n1,ne-n1)))
}
if(me>m){
ys <- split(y,rep(1:3,c(m1,m-m1,me-m)))
} else {
if(ne>n) stop('Stages with treatments only not supported')
ys <- split(y,rep(1:2,c(m1,me-m1)))
}
gs <- lapply(1:length(xs),function(i) rep(0:1,c(length(xs[[i]],ys[[i]]))))
xs <- lapply(1:length(xs),function(i) c(xs[[i]],ys[[i]]))
A <- permutation_CER(xs[[1]],gs[[1]],xs[[2]],test_statistic,B=perms,alpha=alpha)
q <- perm_test(xs[[2]],xs[[3]],gs[[2]],gs[[3]],test_statistic,B=perms)
A>=q
}
adaptive_invnormtest_2s <- function(x,y,n1,n,ne,m1,m,me,alpha=0.025){
xs <- split(x,rep(1:2,c(n1,ne-n1)))
p1 <- t.test(xs[[1]],alternative='greater')$p.value
p2 <- t.test(xs[[2]],alternative='greater')$p.value
alpha >= {sqrt(c(n1,n-n1)/n) * qnorm(c(p1,p2),lower=F)} %>% sum() %>% pnorm(lower=FALSE)
}
|
# Example for coala's sumstat_four_gamete(): simulate 2 loci for 5
# individuals with mutation and recombination, then report the
# four-gamete-condition summary statistic.
library(coala)
### Name: sumstat_four_gamete
### Title: Summary Statistic: Four-Gamete-Condition
### Aliases: sumstat_four_gamete
### ** Examples
model <- coal_model(5, 2) +
  feat_mutation(50) +
  feat_recombination(10) +
  sumstat_four_gamete()
stats <- simulate(model)
print(stats$four_gamete)
|
/data/genthat_extracted_code/coala/examples/sumstat_four_gamete.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 301
|
r
|
library(coala)
### Name: sumstat_four_gamete
### Title: Summary Statistic: Four-Gamete-Condition
### Aliases: sumstat_four_gamete
### ** Examples
model <- coal_model(5, 2) +
feat_mutation(50) +
feat_recombination(10) +
sumstat_four_gamete()
stats <- simulate(model)
print(stats$four_gamete)
|
# Least-squares regression (HackerRank Day 8): given (math, statistics)
# score pairs on stdin, fit Y ~ X and predict the statistics score for a
# math score of 80, printed to three decimals on stdout.
input <- file("stdin")
data <- strsplit(readLines(input, warn = FALSE), split = " ")
# Math aptitude scores: first field of every line.
X <- vapply(data, function(pair) as.numeric(pair[1]), numeric(1))
# Statistics scores: second field of every line.
Y <- vapply(data, function(pair) as.numeric(pair[2]), numeric(1))
# Fit the simple linear regression and predict at X = 80.
model <- lm(Y ~ X)
prediction <- predict(object = model, newdata = data.frame(X = 80))
cat(round(prediction, 3))
|
/Day 8 - Least Square Regression Line.R
|
no_license
|
EirikEspe/10-Days-of-Statistics
|
R
| false
| false
| 637
|
r
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Get input
input <- file("stdin")
data <- strsplit(readLines(input, warn = FALSE), split = " ")
# Specify the input data for math aptitude scores(X)
X <- rep(0, length(data))
for (i in 1:length(data)){
X[i] <- as.numeric(data[[i]][1])
}
# Specify the data for statistics scores(Y)
Y <- rep(0, length(data))
for (i in 1:length(data)){
Y[i] <- as.numeric(data[[i]][2])
}
# Create the linear model
model <- lm(Y ~ X)
# Create prediction
prediction <- predict(object = model, newdata = data.frame(X = 80))
# Print the predicted value
cat(round(prediction, 3))
|
# Lattice plotting examples: conditioned scatterplots and custom panel
# functions, using the airquality dataset.
library(lattice)
library(datasets)
## General form: xyplot(y ~ x | f * g, data)
xyplot(Ozone ~ Wind, data = airquality)
## First convert Month to a factor variable:
airquality <- transform(airquality, Month=factor(Month))
# One panel per month, laid out as a single row of five.
xyplot(Ozone ~ Wind | Month, data = airquality, layout = c(5,1))
## store a trellis object
# Lattice functions return a "trellis" object; nothing is drawn until it
# is printed.
p <- xyplot(Ozone ~ Wind, data = airquality)
print(p)
#Lattice panel functions:
# Two groups of 50 points with different x-y relationships.
set.seed(10)
x <- rnorm(100)
f <- rep(0:1, each=50)
y <- x + f - f * x + rnorm(100, sd=0.5)
f <- factor(f, labels=c("Group 1", "Group 2"))
xyplot(y~x | f, layout=c(2,1))
# Custom panel: add a dashed horizontal line at the median of y.
xyplot(y ~ x | f, panel=function(x,y,...) {
  panel.xyplot(x,y,...)
  panel.abline(h=median(y), lty=2)
})
# Custom panel: overlay a least-squares regression line.
xyplot(y ~ x | f, panel=function(x,y,...) {
  panel.xyplot(x,y,...)
  panel.lmline(x,y,col=2)
})
|
/Lattice.R
|
no_license
|
aperelson/EDA_Week2
|
R
| false
| false
| 772
|
r
|
# Demonstrates lattice conditioning plots and custom panel functions
# using the built-in airquality data set and simulated grouped data.
library(lattice)
library(datasets)
# Formula template: xyplot(y ~ x | f * g, data)
# Plain scatterplot of ozone versus wind speed.
xyplot(Ozone ~ Wind, data = airquality)
# Recode Month as a factor so it can serve as a conditioning variable.
airquality <- transform(airquality, Month = factor(Month))
# One panel per month, laid out in a single row of five.
xyplot(Ozone ~ Wind | Month, data = airquality, layout = c(5, 1))
# A lattice call returns a trellis object; drawing happens on print().
p <- xyplot(Ozone ~ Wind, data = airquality)
print(p)
# --- Panel functions ---
# Simulate two groups whose relationship between y and x differs
# (seed fixed so the plots are reproducible).
set.seed(10)
x <- rnorm(100)
f <- rep(0:1, each = 50)
y <- x + f - f * x + rnorm(100, sd = 0.5)
f <- factor(f, labels = c("Group 1", "Group 2"))
# Default panel: one scatterplot per group, side by side.
xyplot(y ~ x | f, layout = c(2, 1))
# Custom panel adding a dashed horizontal line at each panel's median y.
xyplot(y ~ x | f, panel = function(x, y, ...) {
  panel.xyplot(x, y, ...)
  panel.abline(h = median(y), lty = 2)
})
# Custom panel overlaying a red (col = 2) least-squares line.
xyplot(y ~ x | f, panel = function(x, y, ...) {
  panel.xyplot(x, y, ...)
  panel.lmline(x, y, col = 2)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exptab.R
\name{exptab}
\alias{exptab}
\title{Esporta una o piu' tabelle in un unico file csv (standard italiano)}
\usage{
exptab(tab, file, dids = names(tab), aggiungi = FALSE, ...)
}
\arguments{
\item{tab}{lista degli oggetti (table); se la lista è con nomi, questi saranno
utilizzati come didascalie}
\item{file}{nome del file di esportazione (con estensione csv)}
\item{dids}{vettore carattere con le didascalie delle tabelle;
per default, è costituito dal nome degli elementi della lista}
\item{aggiungi}{aggiunge le tabelle ad un file esistente (come append), logico}
\item{...}{altri argomenti ereditati \code{\link[utils]{write.table}}}
}
\value{
file in formato csv con le tabelle
}
\description{
Esportazione semplificata di più tabelle in formato csv in uno stesso file
con \code{\link[utils]{write.table}}, e
secondo lo standard Excel in lingua italiana:
sep = ";",
dec = "," (separatore di decimali = virgola), NA = "" (celle vuote),
row.names = TRUE,
col.names = NA (per le colonne nel file csv).
}
\examples{
data("MYSLID")
tab1 = tabfreq(MYSLID$Genere)
tab2 = tabcont(MYSLID$Lingua, MYSLID$Genere)
# una tabella
exptab(list(tab1), file = "tabelle.csv")
# tabelle diverse con didascalie
exptab(list(tab1, tab2), file = "tabelle.csv",
dids = c("Genere", "Lingua parlata per Genere"),
aggiungi = TRUE)
}
|
/man/exptab.Rd
|
no_license
|
cran/LabRS
|
R
| false
| true
| 1,459
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exptab.R
\name{exptab}
\alias{exptab}
\title{Esporta una o piu' tabelle in un unico file csv (standard italiano)}
\usage{
exptab(tab, file, dids = names(tab), aggiungi = FALSE, ...)
}
\arguments{
\item{tab}{lista degli oggetti (table); se la lista è con nomi, questi saranno
utilizzati come didascalie}
\item{file}{nome del file di esportazione (con estensione csv)}
\item{dids}{vettore carattere con le didascalie delle tabelle;
per default, è costituito dal nome degli elementi della lista}
\item{aggiungi}{aggiunge le tabelle ad un file esistente (come append), logico}
\item{...}{altri argomenti ereditati \code{\link[utils]{write.table}}}
}
\value{
file in formato csv con le tabelle
}
\description{
Esportazione semplificata di più tabelle in formato csv in uno stesso file
con \code{\link[utils]{write.table}}, e
secondo lo standard Excel in lingua italiana:
sep = ";",
dec = "," (separatore di decimali = virgola), NA = "" (celle vuote),
row.names = TRUE,
col.names = NA (per le colonne nel file csv).
}
\examples{
data("MYSLID")
tab1 = tabfreq(MYSLID$Genere)
tab2 = tabcont(MYSLID$Lingua, MYSLID$Genere)
# una tabella
exptab(list(tab1), file = "tabelle.csv")
# tabelle diverse con didascalie
exptab(list(tab1, tab2), file = "tabelle.csv",
dids = c("Genere", "Lingua parlata per Genere"),
aggiungi = TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alluvial.b.R
\name{alluvialClass}
\alias{alluvialClass}
\title{Alluvial Plot}
\value{
Alluvial Plot
}
\description{
Alluvial Plot
Alluvial Plot
}
\section{Super classes}{
\code{\link[jmvcore:Analysis]{jmvcore::Analysis}} -> \code{\link[ClinicoPath:alluvialBase]{ClinicoPath::alluvialBase}} -> \code{alluvialClass}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-clone}{\code{alluvialClass$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".createImage">}\href{../../jmvcore/html/Analysis.html#method-.createImage}{\code{jmvcore::Analysis$.createImage()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".createImages">}\href{../../jmvcore/html/Analysis.html#method-.createImages}{\code{jmvcore::Analysis$.createImages()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".createPlotObject">}\href{../../jmvcore/html/Analysis.html#method-.createPlotObject}{\code{jmvcore::Analysis$.createPlotObject()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".load">}\href{../../jmvcore/html/Analysis.html#method-.load}{\code{jmvcore::Analysis$.load()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".render">}\href{../../jmvcore/html/Analysis.html#method-.render}{\code{jmvcore::Analysis$.render()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".save">}\href{../../jmvcore/html/Analysis.html#method-.save}{\code{jmvcore::Analysis$.save()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".savePart">}\href{../../jmvcore/html/Analysis.html#method-.savePart}{\code{jmvcore::Analysis$.savePart()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setCheckpoint">}\href{../../jmvcore/html/Analysis.html#method-.setCheckpoint}{\code{jmvcore::Analysis$.setCheckpoint()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setParent">}\href{../../jmvcore/html/Analysis.html#method-.setParent}{\code{jmvcore::Analysis$.setParent()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setReadDatasetHeaderSource">}\href{../../jmvcore/html/Analysis.html#method-.setReadDatasetHeaderSource}{\code{jmvcore::Analysis$.setReadDatasetHeaderSource()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setReadDatasetSource">}\href{../../jmvcore/html/Analysis.html#method-.setReadDatasetSource}{\code{jmvcore::Analysis$.setReadDatasetSource()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setResourcesPathSource">}\href{../../jmvcore/html/Analysis.html#method-.setResourcesPathSource}{\code{jmvcore::Analysis$.setResourcesPathSource()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setStatePathSource">}\href{../../jmvcore/html/Analysis.html#method-.setStatePathSource}{\code{jmvcore::Analysis$.setStatePathSource()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="addAddon">}\href{../../jmvcore/html/Analysis.html#method-addAddon}{\code{jmvcore::Analysis$addAddon()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="asProtoBuf">}\href{../../jmvcore/html/Analysis.html#method-asProtoBuf}{\code{jmvcore::Analysis$asProtoBuf()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="asSource">}\href{../../jmvcore/html/Analysis.html#method-asSource}{\code{jmvcore::Analysis$asSource()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="check">}\href{../../jmvcore/html/Analysis.html#method-check}{\code{jmvcore::Analysis$check()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="init">}\href{../../jmvcore/html/Analysis.html#method-init}{\code{jmvcore::Analysis$init()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="optionsChangedHandler">}\href{../../jmvcore/html/Analysis.html#method-optionsChangedHandler}{\code{jmvcore::Analysis$optionsChangedHandler()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="postInit">}\href{../../jmvcore/html/Analysis.html#method-postInit}{\code{jmvcore::Analysis$postInit()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="print">}\href{../../jmvcore/html/Analysis.html#method-print}{\code{jmvcore::Analysis$print()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="readDataset">}\href{../../jmvcore/html/Analysis.html#method-readDataset}{\code{jmvcore::Analysis$readDataset()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="run">}\href{../../jmvcore/html/Analysis.html#method-run}{\code{jmvcore::Analysis$run()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="serialize">}\href{../../jmvcore/html/Analysis.html#method-serialize}{\code{jmvcore::Analysis$serialize()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="setError">}\href{../../jmvcore/html/Analysis.html#method-setError}{\code{jmvcore::Analysis$setError()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="setStatus">}\href{../../jmvcore/html/Analysis.html#method-setStatus}{\code{jmvcore::Analysis$setStatus()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="ClinicoPath" data-topic="alluvialBase" data-id="initialize">}\href{../../ClinicoPath/html/alluvialBase.html#method-initialize}{\code{ClinicoPath::alluvialBase$initialize()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{alluvialClass$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
/man/alluvialClass.Rd
|
no_license
|
sbalci/ClinicoPath
|
R
| false
| true
| 6,806
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alluvial.b.R
\name{alluvialClass}
\alias{alluvialClass}
\title{Alluvial Plot}
\value{
Alluvial Plot
}
\description{
Alluvial Plot
Alluvial Plot
}
\section{Super classes}{
\code{\link[jmvcore:Analysis]{jmvcore::Analysis}} -> \code{\link[ClinicoPath:alluvialBase]{ClinicoPath::alluvialBase}} -> \code{alluvialClass}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-clone}{\code{alluvialClass$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".createImage">}\href{../../jmvcore/html/Analysis.html#method-.createImage}{\code{jmvcore::Analysis$.createImage()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".createImages">}\href{../../jmvcore/html/Analysis.html#method-.createImages}{\code{jmvcore::Analysis$.createImages()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".createPlotObject">}\href{../../jmvcore/html/Analysis.html#method-.createPlotObject}{\code{jmvcore::Analysis$.createPlotObject()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".load">}\href{../../jmvcore/html/Analysis.html#method-.load}{\code{jmvcore::Analysis$.load()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".render">}\href{../../jmvcore/html/Analysis.html#method-.render}{\code{jmvcore::Analysis$.render()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".save">}\href{../../jmvcore/html/Analysis.html#method-.save}{\code{jmvcore::Analysis$.save()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".savePart">}\href{../../jmvcore/html/Analysis.html#method-.savePart}{\code{jmvcore::Analysis$.savePart()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setCheckpoint">}\href{../../jmvcore/html/Analysis.html#method-.setCheckpoint}{\code{jmvcore::Analysis$.setCheckpoint()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setParent">}\href{../../jmvcore/html/Analysis.html#method-.setParent}{\code{jmvcore::Analysis$.setParent()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setReadDatasetHeaderSource">}\href{../../jmvcore/html/Analysis.html#method-.setReadDatasetHeaderSource}{\code{jmvcore::Analysis$.setReadDatasetHeaderSource()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setReadDatasetSource">}\href{../../jmvcore/html/Analysis.html#method-.setReadDatasetSource}{\code{jmvcore::Analysis$.setReadDatasetSource()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setResourcesPathSource">}\href{../../jmvcore/html/Analysis.html#method-.setResourcesPathSource}{\code{jmvcore::Analysis$.setResourcesPathSource()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id=".setStatePathSource">}\href{../../jmvcore/html/Analysis.html#method-.setStatePathSource}{\code{jmvcore::Analysis$.setStatePathSource()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="addAddon">}\href{../../jmvcore/html/Analysis.html#method-addAddon}{\code{jmvcore::Analysis$addAddon()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="asProtoBuf">}\href{../../jmvcore/html/Analysis.html#method-asProtoBuf}{\code{jmvcore::Analysis$asProtoBuf()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="asSource">}\href{../../jmvcore/html/Analysis.html#method-asSource}{\code{jmvcore::Analysis$asSource()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="check">}\href{../../jmvcore/html/Analysis.html#method-check}{\code{jmvcore::Analysis$check()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="init">}\href{../../jmvcore/html/Analysis.html#method-init}{\code{jmvcore::Analysis$init()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="optionsChangedHandler">}\href{../../jmvcore/html/Analysis.html#method-optionsChangedHandler}{\code{jmvcore::Analysis$optionsChangedHandler()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="postInit">}\href{../../jmvcore/html/Analysis.html#method-postInit}{\code{jmvcore::Analysis$postInit()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="print">}\href{../../jmvcore/html/Analysis.html#method-print}{\code{jmvcore::Analysis$print()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="readDataset">}\href{../../jmvcore/html/Analysis.html#method-readDataset}{\code{jmvcore::Analysis$readDataset()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="run">}\href{../../jmvcore/html/Analysis.html#method-run}{\code{jmvcore::Analysis$run()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="serialize">}\href{../../jmvcore/html/Analysis.html#method-serialize}{\code{jmvcore::Analysis$serialize()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="setError">}\href{../../jmvcore/html/Analysis.html#method-setError}{\code{jmvcore::Analysis$setError()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="jmvcore" data-topic="Analysis" data-id="setStatus">}\href{../../jmvcore/html/Analysis.html#method-setStatus}{\code{jmvcore::Analysis$setStatus()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="ClinicoPath" data-topic="alluvialBase" data-id="initialize">}\href{../../ClinicoPath/html/alluvialBase.html#method-initialize}{\code{ClinicoPath::alluvialBase$initialize()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{alluvialClass$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
#Summarize_off-target_editing_functs.R
# conda_environment: crispresso_downstream_env
# last modified: 2020_08_17 Anne Shen
# For use with CRISPResso version 2.0.40
#
### Dependencies:
# library(tidyverse)
# library(tidyselect)
# library(gtable)
# library(scales)
# library(gridExtra)
# library(grid)
# library(effsize)
# library(extrafont)
#
# font_import()
# loadfonts()
#
#
### Options:
# options(scipen=999) #turn off scientific notation
#
#
### Functions:
# summarize_off_targets()
# remove_guideseq_from_cols()
# get_formatted_summary()
# get_group_summary_table()
# get_table_by_guide()
# pair_mock_v_edited_samples_by_donor()
# get_stat_comparison_sample_tb()
# get_ttest_table()
# save_editing_results()
# check_plot_aesthetics()
# get_default_palette()
# order_alpha()
# get_aes_scale_values_for_dotplot()
# get_heatmap_legend_values()
# make_compiled_OT_editing_dotplot()
# make_compiled_summary_OT_editing_boxplot()
# make_compiled_OT_coverage_heatmap()
# make_composite_grobPlot()
# save_composite_plot()
######################################## FUNCTIONS ##################################################
###### summarize_off_targets() ##################################################################
# Function that generates all off-target analyses with the following steps:
# 1. format collapsed summary tables & read in metadata/reference tables
# 2. generate & format editing frequency & data tables
# 3. perform statistical tests (Mock v. Edited for each donor)
# 4. generate aesthetics for editing dotplot
# 5. generate all plots (editing dotplot, read coverage heatmap, composite plot)
#
# CALLS HELPERS: remove_guideseq_from_cols(), get_formatted_summary(), pair_mock_v_edited_samples_by_donor(),
# get_ttest_table(), save_editing_results(), check_plot_aesthetics(), order_alpha(),
# get_aes_scale_values_for_dotplot(), make_compiled_OT_editing_dotplot(),
# make_compiled_OT_coverage_heatmap(), make_composite_grobPlot(), save_composite_plot()
# CALLED IN: CRISPResso2_downstream.R
#
# ARGUMENTS:
# mode = run mode (ex. "collapse_BE_OT", "BE_OT", "OT_only" etc.)
# ref_seq_csv = csv name for reading in ref_seq_tb containing all reference, guide, and PAM sequences
# by off-target
# ot_sample_csv = name of ot_sample_csv file (contains donor, condition, CRISPResso_dir_name, sample_name)
# percent_freq_cutoff = percent cutoff for alleles in alleles summary tables (generated by "collapse"
# mode)
# conversion_nuc_from = (if BE mode) nucleotide to be edited by base editor
# conversion_nuc_to = (if BE mode) base editing result
# sort_by_pval = sort off-targets in composite plots by t-test value (if applicable) instead of off-target
# alphanumeric name
# scale_size_by_editing_freq = logical, whether or not to separate geom_points by size according to read coverage
# in the editing scatterplot
# low_coverage = the upper read count cutoff for "low-coverage" amplicons/samples
# high_coverage = the lower read count cutoff for "high-coverage" amplicons/samples
#
# OUTPUT:
# 1. saves ot_ttest_tb as "CRISPResso_OTs_ttest.csv"
# 2. saves edited_summary_tb as "CRISPResso_OT_editing_summary.csv"
# 3. saves off-target composite figures as .pdf and .png
summarize_off_targets <- function(mode, ref_seq_csv, ot_sample_csv, percent_freq_cutoff,
conversion_nuc_from, conversion_nuc_to, sort_by_pval,
scale_size_by_editing_freq, low_coverage, high_coverage){
#test code:
# saved_wd <- getwd()
# setwd("/Users/anneshen/Documents/local_working/local_Jing_BE/2020_1620_BE_NatureMed/20191206_1620_input_rhAMPSeq_integrated3")
# mode<- "BE_OT"
# ref_seq_csv <- "1620_ref_seqs_tb.csv"
# ot_sample_csv<- "2019_1620_BE_rhAMPSeq_samples.csv"
# percent_freq_cutoff <- 0
# conversion_nuc_from <- "C"
# conversion_nuc_to <- "T"
# sort_by_pval <- TRUE
# scale_size_by_editing_freq <- TRUE
# low_coverage <- 1000
# high_coverage <- 10000
#
# setwd("/Users/anneshen/Documents/local_working/IND_off_target/2020_CRISPResso2/20200722_DE_1450_rhAMPSeq")
# mode<- "OT_only"
# ref_seq_csv <- "202006_1450_0000_ref_seqs.csv"
# ot_sample_csv<- "202006_DE_1450_rhAMPSeq_samples.csv"
# percent_freq_cutoff <- 0
# scale_size_by_editing_freq <- TRUE
# low_coverage <- 1000
# high_coverage <- 10000
cat("Off_target_summary_log\n",
paste(Sys.time(), "\n", sep = ""),
paste(getwd(), "\n", sep = ""),
"ref_seq_csv: ", ref_seq_csv, "\n",
"ot_sample_csv: ", ot_sample_csv, "\n",
"percent_freq_cutoff: ", percent_freq_cutoff, "\n",
"analysis_mode: ", mode, "\n",
"sort_by_pval: ", sort_by_pval, "\n",
"scale_size_by_editing_freq: ", scale_size_by_editing_freq, "\n",
"low_coverage: ", low_coverage, "\n",
"high_coverage: ", high_coverage, "\n",
"\n")
#set date for saving figures
date <- format(Sys.Date(), "%Y%m%d")
##### 1. format collapsed summary tables & read in metadata/reference tables #####
#get all summary file names
if(grepl("BE", mode)){
#if running OT analysis on base editing data, use BE summary tables
conversion <- paste(conversion_nuc_from, "to", conversion_nuc_to, ".csv", sep = "")
summary_file_suffix <- paste("BE_summary_", conversion, sep = "")
list_summary_files <- list.files(pattern = summary_file_suffix)
}else{
#get collapsed file suffix
summary_file_suffix <- paste("collapsed_", percent_freq_cutoff, ".csv", sep = "")
#if not running OT analysis on base editing data, use collapsed allele tables
list_summary_files <- list.files(pattern = summary_file_suffix)
}
#read in ref_seq_tb linking off-target names and sequences
ref_seq_tb <- read.csv(ref_seq_csv, stringsAsFactors = FALSE) %>%
filter(grepl("OT", ot_id)) %>%
rename(off_target = ot_id) %>%
filter(!duplicated(off_target))
#Remove guide name from list_summary_files file columns (only need to do once) & re-saves .csv file.
# Also separates guides on the same amplicon.
# Non-fruitful function
remove_guideseq_from_cols(list_summary_files, ref_seq_tb, ref_seq_csv)
#read in sample table
ot_samples_tb <- read.csv(ot_sample_csv, stringsAsFactors = FALSE)
#get ot_samples_tb row indexes for mock & experimental samples
mock_samples_idx <- grep("mock", ot_samples_tb$condition, ignore.case = TRUE)
trt_samples_idx <- grep("mock", ot_samples_tb$condition, ignore.case = TRUE, invert = TRUE)
##### 2. generate & format editing frequency & data tables #####
#get editing tables from mock_sample collapsed/filtered allele tables
all_mock_tb <- get_formatted_summary(ot_samples_tb, mock_samples_idx, summary_file_suffix,
ref_seq_tb, condition = "control")
#get editing tables from experimental_sample collapsed/filtered allele tables
all_trt_tb <- get_formatted_summary(ot_samples_tb, trt_samples_idx, summary_file_suffix,
ref_seq_tb, condition = "edited")
#bind all data tables together by columns
all_samples_from_file <- rbind(all_mock_tb, all_trt_tb)
#generate all_samples table (complete table including all off-target and all samples)
unique_samples_idx <- unique(all_samples_from_file$sample)
unique_samples <- unique_samples_idx[which(!is.na(unique_samples_idx))]
n_control <- length(mock_samples_idx)
n_edited <- length(trt_samples_idx)
n_samples <- n_control + n_edited
all_samples <- data.frame(off_target = rep(ref_seq_tb$off_target, times = n_samples, each = 2),
amplicon_sequence = rep(ref_seq_tb$amplicon_sequence, times = n_samples, each = 2),
guide_sequence = rep(ref_seq_tb$guide_sequence, times = n_samples, each = 2),
pam = rep(ref_seq_tb$pam, times = n_samples, each = 2),
indel = rep(c("Unedited", "Edited"), nrow(ref_seq_tb) * n_samples),
sample = rep(c(unique_samples), times = 1, each = nrow(ref_seq_tb) * 2),
condition = rep(c(rep("control", n_control), rep("edited", n_edited)), times = 1,
each = nrow(ref_seq_tb) * 2))
#join all_samples & all_samples_from_file to get table with all off-targets and samples represented
# (NAs for frequencies and reads of samples that were not analyzed in CRISPResso2)
all_samples_tb <- left_join(all_samples, all_samples_from_file, by = names(all_samples)) %>%
mutate(group = paste(condition, sample, sep = " "))
#generate name_seq column (OT name + guide sequence + PAM) with standardized spacing
max_nseq_len <- max(str_length(all_samples_tb $off_target) +
str_length(all_samples_tb $guide_sequence))
all_samples_tb <- all_samples_tb %>%
mutate(padding = as.numeric(max_nseq_len - str_length(off_target) - str_length(guide_sequence)))
all_samples_tb$spaces <- str_dup(rep(" ", nrow(all_samples_tb )), all_samples_tb $padding)
all_samples_tb$name_seq <- paste(paste(all_samples_tb $off_target,
" ",
all_samples_tb $spaces,
all_samples_tb $guide, sep = ""),
all_samples_tb $pam, sep = " ")
#remove unnecessary columns
all_samples_tb <- all_samples_tb %>%
select(-c("padding", "spaces"))
#select only off-targets & samples that were represented in CRISPResso2 analysis
# (will not include off-targets with no samples in final composite figure)
all_samples_tb <- all_samples_tb[complete.cases(all_samples_tb), ]
#select data representing editing frequency
edited_summary_tb <- all_samples_tb %>%
filter(indel == "Edited") %>%
select(-amplicon_sequence)
##### 3. perform statistical tests (Mock v. Edited for each donor) #####
### statistical test
# Compare % edited in Mock v. Edited samples
paired_stats_tb <- pair_mock_v_edited_samples_by_donor(ot_sample_csv)
#get list of off-targets
off_targets <- unique(edited_summary_tb$off_target)
#generate table of Edited v. Mock editing frequency results
ot_ttest_tb <- get_ttest_table(off_targets, edited_summary_tb, paired_stats_tb)
sig_ots <- ot_ttest_tb$off_target[which(ot_ttest_tb$significant)] %>% droplevels()
#save OT ttest statistics as csv
write.csv(ot_ttest_tb, paste(date, "CRISPResso_OTs_ttest.csv", sep = "_"),
row.names = FALSE, quote = FALSE)
#add asterisks (*) to name_seq of significant off-targets
for(n in seq(1, nrow(edited_summary_tb))){
edited_summary_tb$name_seq <- as.character(edited_summary_tb$name_seq)
if(edited_summary_tb$off_target[n] %in% sig_ots){
edited_summary_tb$name_seq[n] <- paste(edited_summary_tb$name_seq[n],
"* ",
sep = " ")
}else{
edited_summary_tb$name_seq[n] <- paste(edited_summary_tb$name_seq[n],
" ",
sep = " ")
}
}
#format editing results and save as csv file (non-fruitful function)
save_editing_results(date, edited_summary_tb)
##### 4. set 0% editing frequency to power of 10 below lowest frequency (for plotting purposes) #####
#find the lowest editing % frequency in all the samples, then go a power of 10 below that minimum
# for the "0% editing" setting on the composite % editing scatterplot
min_edited_log_freq <- min(edited_summary_tb$log_freq[which(edited_summary_tb$log_freq > -Inf)])
power10_floor <- floor(min_edited_log_freq) - 1
#set all 0% editing to 10^power10_floor (for log10 transformation)
zero_freq_row_idx <- which(edited_summary_tb$frequency == 0)
edited_summary_tb$frequency[zero_freq_row_idx] <- 10^power10_floor
edited_summary_tb$log_freq[zero_freq_row_idx] <- power10_floor
##### 5. generate aesthetics for editing dotplot #####
#get aesthetics columns from ot_samples_tb to join with edited_summary_tb
aes_tb <- select(ot_samples_tb, c("condition", "sample_name", grep("R_", names(ot_samples_tb), value = TRUE)))
#check that R_color, R_fill, and R_shape aesthetics exist & generate defaults if non-existent
aes_tb <- check_plot_aesthetics(aes_tb)
#join aes_tb with edited_summary_tb to match aesthetics scale values with samples for plotting
full_plotting_tb <- full_join(edited_summary_tb, select(aes_tb, -condition), by = c("sample" = "sample_name")) %>%
order_alpha("off_target", decreasing_bool = FALSE)
#get list of dotplot aesthetics scale values (color, fill, shape)
aes_val_list <- get_aes_scale_values_for_dotplot(full_plotting_tb)
#generate counts total counts table
full_total_counts_tb <- all_samples_tb %>%
filter(indel == "Unedited" ) %>%
mutate(total_reads = ceiling((reads * 100) / frequency ))
##### 6. calculate number of OTs per plot and aesthetics sizes #####
#use the following data frame to calculate the number of plots to generate (between 20-60 OTs per plot)
n_targets <- length(unique(full_plotting_tb$off_target))
ots_per_plot_df <- data.frame(ots_per_plot = seq(20, 50, 10)) %>%
mutate(min_n_plots = n_targets %/% ots_per_plot,
ots_rem = n_targets %% ots_per_plot,
diff_ots_per_plot = ifelse(ots_rem > 0, ots_per_plot - ots_rem, 0))
#choose to plot the least number of plots with the most even distribution of off-targets among plots
min_idx <- max(which(ots_per_plot_df$diff_ots_per_plot == min(ots_per_plot_df$diff_ots_per_plot)))
n_ots_per_plot <- ots_per_plot_df$ots_per_plot[min_idx]
unique_ots <- levels(full_plotting_tb$off_target)
#adjust font sizes according to number of off-targets displayed per plot
guide_font_size <- 10 - (n_ots_per_plot %/% 30)
tile_font_size <- 2.75 - (n_ots_per_plot %/% 30)*0.25
pointsize <- 1.5 - (n_ots_per_plot %/% 30)*0.25
group_font_size <- 8 - (n_ots_per_plot %/% 10)*0.25
#adjust heatmap and dotplot width to match sample number
n_plot_samples <- length(unique(full_plotting_tb$sample))
heatmap_width <- n_plot_samples * 0.5 + 2
dotplot_width <- n_plot_samples * 0.05 + 8
cplot_height <- n_plot_samples * 0.1 + 8.5
#Generate legend values for all figures. Calculate the best legend label increments such that:
# -there are at >= 5 labels
# -the labels encompass 10^(max_log10 - 1) to at least the max_reads
legend_breaks <- get_heatmap_legend_values(full_total_counts_tb$total_reads)
#generate editing % frequency scatterplot scale (to go from 10^power10_floor --> 100)
# (power10_floor = log10(lowest editing % frequency) -1)
editing_freq_scale <- mapply(function(x) 10^x, seq(power10_floor, 2))
#add total_reads column to plotting_tb
full_plotting_tb <- full_join(full_plotting_tb, full_total_counts_tb[c("off_target", "group", "total_reads")],
by = c("off_target", "group"))
#if sort_by_pval, order full_plotting_tb rows by p-value (increasing) so that off-targets are divided into
# composite plots in the correct order
if(sort_by_pval){
full_plotting_tb <- full_join(full_plotting_tb, ot_ttest_tb[c("off_target", "ttest_p_value")],
by = "off_target")
full_plotting_tb <- full_plotting_tb[order(full_plotting_tb$ttest_p_value),]
#set unique_ots order to reflect p_value order, not alphanumeric order
unique_ots <- as.character(ot_ttest_tb[order(ot_ttest_tb$ttest_p_value),]$off_target)
}
#add geom_point size column (R_point_size) if scale_size_by_editing_freq == TRUE & generate aesthetics list
if(scale_size_by_editing_freq){
#set medium and high-coverage points
mid_coverage_size <- pointsize + 0.6
high_coverage_size <- pointsize + 1.5
full_plotting_tb$R_point_size <- as.factor(ifelse(full_plotting_tb$total_reads < low_coverage, pointsize,
ifelse(full_plotting_tb$total_reads <= high_coverage, mid_coverage_size,
high_coverage_size)))
#generate aesthetics list
size_range <- c(pointsize, high_coverage_size)
size_val <- c(pointsize, mid_coverage_size, high_coverage_size)
names(size_val) <- as.factor(c(pointsize, mid_coverage_size, high_coverage_size))
size_lab <- c(paste("<", low_coverage, " reads", sep = ""),
paste(low_coverage,"-", high_coverage, " reads", sep = ""),
paste(">", high_coverage, " reads", sep = ""))
names(size_lab) <- as.factor(c(pointsize, mid_coverage_size, high_coverage_size))
aes_val_list$size_scale_val <- list("Read\nCoverage", size_val, size_lab)
}else{
full_plotting_tb$R_point_size <- pointsize
aes_val_list$size_scale_val <- NULL
}
##### 7. generate all plots (editing dotplot, read coverage heatmap, composite plot) #####
#initialize while loop iterator
ots_plotted <- 0
#initialize plot number tracker
n_plots <- 0
#initialize composite plot list for pdf printing
composite_plot_list <- list()
#initialize composite summary plot list for pdf printing
composite_summary_plot_list <- list()
#generate the plots
while(ots_plotted < n_targets){
#increase plot number iterator
n_plots <- n_plots + 1
#calculate the number of off-targets to include the current plot
n_ots_this_plot <- min(n_targets - ots_plotted, n_ots_per_plot)
#filter full_plotting_tb by the number of ots
sub_plotting_tb <- full_plotting_tb %>%
filter(off_target %in% unique_ots[(ots_plotted + 1):(ots_plotted + n_ots_this_plot)])
#HERE
#if sort_by_pval is true, order_apha by pval. Otherwise, order_alpha by name_seq (in heatmap and scatterplots/boxplots)
if(sort_by_pval){
ordered_plotting_tb <- sub_plotting_tb #%>%
#order_by_pval("name_seq")
ordered_plotting_tb$name_seq <- factor(ordered_plotting_tb$name_seq,
levels = unique(ordered_plotting_tb$name_seq))
}else{
ordered_plotting_tb <- sub_plotting_tb %>%
order_alpha("name_seq", decreasing_bool = FALSE)
}
#get the total counts for the plotted off_targets
# total_counts <- full_total_counts_tb %>%
# filter(off_target %in% unique_ots[(ots_plotted + 1):(ots_plotted + n_ots_this_plot)]) %>%
# order_alpha("name_seq", decreasing_bool = FALSE)
#order the sample names so that samples are listed by donor
heatmap_sample_order <- ot_samples_tb$sample_name[str_order(ot_samples_tb$donor, numeric = TRUE)]
#heatmap_group_idx <- mapply(function(x) which(total_counts$sample == x )[1] , heatmap_sample_order)
#total_counts$group <- factor(total_counts$group, levels = total_counts$group[heatmap_group_idx])
heatmap_group_idx <- mapply(function(x) which(ordered_plotting_tb$sample == x )[1] , heatmap_sample_order)
ordered_plotting_tb$group <- factor(ordered_plotting_tb$group, levels = ordered_plotting_tb$group[heatmap_group_idx])
#filter full_plotting_tb to include only off-targets in the current plot
# plotting_tb <- full_plotting_tb %>%
# filter(off_target %in% unique_ots[(ots_plotted + 1):(ots_plotted + n_ots_this_plot)]) %>%
# order_alpha("name_seq", decreasing_bool = FALSE)
#### generate coverage heatmap
heatmap <- make_compiled_OT_coverage_heatmap(ordered_plotting_tb, guide_font_size,
group_font_size, tile_font_size,
legend_breaks)
#total_counts,
#### generate editing dotplot
dotplot <- make_compiled_OT_editing_dotplot(ordered_plotting_tb, scale_size_by_editing_freq,
aes_val_list$fill_scale_val,
aes_val_list$color_scale_val,
aes_val_list$shape_scale_val,
aes_val_list$size_scale_val,
guide_font_size, editing_freq_scale)
#plotting_tb,
summary_dotplot <- make_compiled_summary_OT_editing_boxplot(ordered_plotting_tb,
guide_font_size,
editing_freq_scale)
#plotting_tb,
#make and save composite plots
composite_plot <- make_composite_grobPlot(heatmap, dotplot, heatmap_width, dotplot_width)
summary_composite_plot <- make_composite_grobPlot(heatmap, summary_dotplot, heatmap_width, dotplot_width)
#save composite plots as individual png files
png_plot_name <- paste(date, "off_targets_Rplot", n_plots , sep = "_")
save_composite_plot(png_plot_name, composite_plot, heatmap_width + dotplot_width + 1, cplot_height + 1)
#save summary plots
png_summary_plot_name <- paste(date, "off_targets_summary_Rplot", n_plots , sep = "_")
save_composite_plot(png_summary_plot_name, summary_composite_plot,
heatmap_width + dotplot_width + 1, cplot_height + 1)
#add composite plot to list for printing in pdf
composite_plot_list[[n_plots]] <- composite_plot
#add composite summary plot to list for printing in pdf
composite_summary_plot_list[[n_plots]] <- summary_composite_plot
#record plotting in run log
cat("targets plotted in ", png_plot_name, ": ", ots_plotted+1, " - ", ots_plotted + n_ots_this_plot, "\n")
#increase iterators
ots_plotted <- ots_plotted + n_ots_this_plot
}
#printing composite plots in single .pdf file
pdf(file = paste(date, "off_targets_Rplot.pdf", sep = "_"), onefile = TRUE,
width = heatmap_width + dotplot_width + 1, height = cplot_height + 1)
#print composite plots
for(i in 1:length(composite_plot_list)){
print(composite_plot_list[[i]])
print(composite_summary_plot_list[[i]])
}
#disconnect device for pdf printing
dev.off()
#end off-target analysis log
cat("\n")
}
###### remove_guideseq_from_cols() ##################################################################
# Reads through the collapsed files (generated either in "collapse" or "BE" mode) in list_summary_files
# and removes the guide sequences from the column headers. Also resolves cases where multiple guides
# were found in the same amplicon; this function renames the column headers by guide, not amplicon.
# Immediately throws an error if the guide_sequences in ref_seq_tb are not unique. (Repeated guides will
# cause errors in table joining later in the function.)
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# list_summary_files = list of collapsed or BE allele summary files
# ref_seq_tb = table containing all reference, guide, and PAM sequences by off-target
# ref_seq_csv= csv name for reading in ref_seq_tb
#
# OUTPUT: none
# Strips "__<guide-sequence>" suffixes from the sample column headers of each collapsed/BE
# allele summary file, renaming every such column to its off-target ID via ref_seq_tb.
# Files are REWRITTEN IN PLACE; a file is only processed while at least one header still
# carries a "__<ATCG...>" guide suffix, so re-running this function is a no-op.
# NOTE(review): ref_seq_csv is accepted but never used in this function body -- confirm
# whether it is kept only for interface symmetry before removing it.
remove_guideseq_from_cols <- function(list_summary_files, ref_seq_tb, ref_seq_csv){
# (commented-out interactive-debugging scaffolding with hard-coded local paths removed)
#remove guide name from list_summary_files file columns (only need to do once)
# Also separates guides on the same amplicon.
for(file in list_summary_files){
temp <- read.csv(file, stringsAsFactors = FALSE)
#remove guide sequence from column name if not removed already
# (a guide suffix is "__" followed by 6 or more A/T/C/G bases)
if(any(grepl("__[ATCG]{6,}", names(temp)))){
# split each "<sample>__<guide>" header into its sample and guide parts
renaming_df <- data.frame(sample_names = grep("__", names(temp), value = TRUE))%>%
  separate(col = sample_names, into = c("sample", "guide"), sep = "__", remove = FALSE)
#order of sample_names is retained
# map each guide sequence to its off-target ID(s); keep only rows whose off-target ID
# occurs inside the sample header (NOTE(review): str_detect treats off_target as a
# regex pattern -- confirm IDs contain no regex metacharacters), then append "_reads"
# to the IDs of read-count columns
joined_df <- left_join(renaming_df, ref_seq_tb, by = c("guide" = "aligned_guide_seq")) %>%
  mutate(filter = str_detect(sample, off_target)) %>%
  filter(filter) %>%
  transform(off_target = ifelse(grepl("read", sample), paste(off_target, "reads", sep = "_"), off_target)) %>%
  select(-filter)
#rename the columns of temp to be the off_target name
# (relies on joined_df rows staying in the same order as temp's "__" headers)
names(temp)[grep("__", names(temp))] <- as.character(joined_df$off_target)
write.csv(temp, file = file, row.names = FALSE)
}
}
}
###### get_formatted_summary() ##################################################################
# For either all the control or edited samples, reads all the allele frequency tables (generated either
# during the "collapse" or "BE" modes), summarizes the Edited v. Unedited allele frequencies for each
# target within each sample, and formats all the data within one data frame.
#
# CALLS HELPERS: get_group_summary_table()
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# ot_samples_tb = sample metadata table containing donor, condition, CRISPResso_dir_name, and sample_name
# ot_sample_idx = vector of desired samples' row indexes in ot_samples_tb
# summary_file_suffix = suffix of all allele collapsed summary tables (depends on whether
# "collapsed" or "BE" mode was used to generate most recent summary table)
# master_guide_tb = table containing all reference, guide, and PAM sequences by off-target
# condition = desired condition to samples in this table (either "control" or "edited")
#
# OUTPUT:
# summary_and_seq_tb = full data table containing Edited/Unedited frequencies of every off-target
# of every CRISPResso2 run/sample
get_formatted_summary <- function(ot_samples_tb, ot_sample_idx, summary_file_suffix,
                                  master_guide_tb, condition){
#loop through all the summary files
# NOTE(review): seq(1, length(ot_sample_idx)) yields c(1, 0) when ot_sample_idx is
# empty; callers are assumed to pass at least one sample index -- confirm.
for(n in seq(1, length(ot_sample_idx))){
#get idx of CRISPResso2 sample name in ot_samples_tb
idx <-ot_sample_idx[n]
#get the sample file names & read in summary tables
# file name = "<CRISPResso_dir_name>_<summary_file_suffix>"
summary_file_name <- paste(ot_samples_tb$CRISPResso_dir_name[idx], summary_file_suffix, sep = "_")
summary_tb_raw <- read.csv(summary_file_name, stringsAsFactors = FALSE)
#get the sample name of the CRISPResso2 run
sample_name <- ot_samples_tb$sample_name[idx]
#for all off-targets in the CRISPResso2 run, obtain "Edited" and "Unedited" total allele frequencies
summary_tb <- get_group_summary_table(summary_tb_raw, sample_name)
#gather "<off_target>_reads" columns into long-format off_target and reads columns
# (gather()/vars_select() are superseded tidyr/tidyselect APIs, kept as-is)
summary_table_reads <-summary_tb %>%
select(vars_select(names(summary_tb), -matches("[0-9]$"))) %>%
gather(key = "off_target", value = "reads",
       vars_select(names(summary_tb), contains("_reads")))
# strip the "_reads" suffix so read counts join back onto frequencies by off_target
summary_table_reads$off_target <- gsub("_reads", "", summary_table_reads$off_target)
#gather off-target frequency columns into long-format off_target and frequency columns
# NOTE(review): contains("_O") presumably matches off-target ID columns
# (e.g. names containing "_OT") -- confirm against the actual naming scheme.
summary_table_freqs <- summary_tb %>%
select(vars_select(names(summary_tb), -ends_with("_reads"))) %>%
gather(key = "off_target", value = "frequency",
       vars_select(grep("_reads", names(summary_tb), invert = TRUE, value = TRUE),
                   contains("_O")))
# one row per (off_target, indel, sample) carrying both frequency and read count
summary_table <-full_join(summary_table_freqs, summary_table_reads,
                          by =c("off_target", "indel", "sample"))
# accumulate rows across samples (sample counts are small, so rbind growth is fine)
if(n == 1){
all_summary_table <- summary_table
}else{
all_summary_table <- rbind(all_summary_table, summary_table)
}
}
#add off-target sequence to summary table
summary_and_seq_tb <- left_join(master_guide_tb, all_summary_table, by = "off_target")
#log10 transform frequency (for log-scale plotting downstream)
summary_and_seq_tb$log_freq <- log10(summary_and_seq_tb$frequency)
#order table by off_target
summary_and_seq_tb <- summary_and_seq_tb[order(summary_and_seq_tb$off_target),]
#add "condition" column to indicate whether this was an edited or control sample
summary_and_seq_tb$condition <- rep(condition, nrow(summary_and_seq_tb))
return(summary_and_seq_tb)
}
###### get_group_summary_table() ##################################################################
# For all off-targets in a CRISPResso2 run, obtains "Edited" and "Unedited" total allele frequencies.
#
# CALLS HELPERS: NA
# CALLED IN: get_formatted_summary()
#
# ARGUMENTS:
# pool_tb = collapsed allele table generated by "collapse" mode (read in from file name)
# sample_name = name of user-input CRISPResso2 run sample from ot_sample_csv file
#
# OUTPUT:
# pool_summary = table containing % edited and unedited alleles for all off-targets in CRISPResso run
get_group_summary_table <- function(pool_tb, sample_name){
#vector of fixed pool_tb headers
# (metadata columns shared by every collapsed allele table; all remaining columns are
# assumed to be per-off-target frequency or read-count columns)
fixed_col_names <- c("Aligned_Sequence", "Reference_Sequence", "Unedited", "n_deleted",
"n_inserted", "n_mutated", "indel")
#remove "X" from beginning of off-target names (column headers) if they begin with numbers or
# special symbols
# (read.csv prepends "X" to such names via make.names)
names(pool_tb) <- gsub("^X", "", names(pool_tb))
#get table with total Unedited frequency across all off-targets within CRISPResso sample/run
# one-row data frame: the "Unedited" row's per-off-target values + indel/sample labels
unedited <- pool_tb %>%
filter(indel == "Unedited") %>%
select(which(! names(pool_tb) %in% fixed_col_names)) %>%
mutate(indel = "Unedited", sample = sample_name)
#get table with total Edited frequency across all off-targets within CRISPResso sample/run
# named numeric vector: column sums over all non-"Unedited" rows
edited <- pool_tb %>%
filter(indel != "Unedited") %>%
select(which(! names(pool_tb) %in% fixed_col_names))%>%
colSums(na.rm = TRUE)
#bind edited & unedited data together
# NOTE(review): `edited` lacks the indel/sample fields that `unedited` carries; this
# rbind() of a shorter named vector onto a data frame is assumed to align by name --
# confirm it succeeds with the production column set.
pool_summary <- rbind(unedited, edited)
#rename edited indel and sample appropriately
pool_summary$indel[2] <- "Edited"
pool_summary$sample[2] <- sample_name
return(pool_summary)
}
###### pair_mock_v_edited_samples_by_donor() ##################################################################
# Reads in ot_sample_csv table and adds a column complete_for_stats indicated whether each donor has both
# edited and mock CRIPSResso2 samples (ready for statistics). Each row contains mock & edited sample pairings
# by donor (if a donor has 1 mock and 2 edited samples, the mock will be repeated in 2 rows and paired
# with a different edited sample in each row).
#
# CALLS HELPERS: get_stat_comparison_sample_tb()
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# ot_sample_csv = name of ot_sample_csv file (contains donor, condition, CRISPResso_dir_name, sample_name)
#
# OUTPUT:
# donor_paired_tb = table containing "mock" and "edited" sample columns in which rows contain paired
# samples by donor (sets of samples appropriate for statistical analyses)
pair_mock_v_edited_samples_by_donor <- function(ot_sample_csv){
# Read sample metadata (adds a complete_for_stats flag; the table is NOT filtered --
# donors lacking a mock or an edited sample simply contribute zero pairings below)
stat_comp_tb <- get_stat_comparison_sample_tb(ot_sample_csv)
# all donors present in the metadata
unique_donors <- unique(stat_comp_tb$donor)
# accumulator for the donor/mock/edited pairings
donor_paired_tb <- data.frame(donor = as.character(c()), mock = as.character(c()),
                              edited = as.character(c()), stringsAsFactors = FALSE)
# FIX: match "mock" case-insensitively, consistent with get_stat_comparison_sample_tb()
# (the previous case-sensitive grepl() silently dropped samples labeled "Mock"/"MOCK")
is_mock_row <- grepl("mock", stat_comp_tb$condition, ignore.case = TRUE)
#for each donor, pair every mock sample with every edited sample
for(donor in unique_donors){
  # split this donor's samples into mock vs. edited (treated) samples
  is_donor_row <- stat_comp_tb$donor == donor
  donor_mock_samples <- stat_comp_tb[is_donor_row & is_mock_row, ]$sample_name
  donor_trt_samples <- stat_comp_tb[is_donor_row & !is_mock_row, ]$sample_name
  n_mock_samples <- length(donor_mock_samples)
  n_trt_samples <- length(donor_trt_samples)
  # cross every mock with every edited sample (a mock may appear in several rows);
  # if either side is empty, this produces zero rows and the donor is skipped
  donor_paired_samples <- data.frame(donor = as.character(rep(donor, times = n_mock_samples*n_trt_samples)),
                                     mock = as.character(rep(donor_mock_samples,
                                                             each = n_trt_samples)),
                                     edited = as.character(rep(donor_trt_samples,
                                                               times = n_mock_samples)),
                                     stringsAsFactors = FALSE)
  donor_paired_tb <- rbind(donor_paired_tb, donor_paired_samples)
}
# unique per-row sample label: "<donor>_<row number>"
donor_paired_tb <- donor_paired_tb %>%
  mutate(sample = paste(donor, row.names(donor_paired_tb), sep = "_"))
return(donor_paired_tb)
}
###### get_stat_comparison_sample_tb() ##################################################################
# Reads in ot_sample_csv table and adds a column complete_for_stats indicated whether each donor has both
# edited and mock CRIPSResso2 samples (ready for statistics).
#
# CALLS HELPERS: NA
# CALLED IN: pair_mock_v_edited_samples_by_donor()
#
# ARGUMENTS:
# ot_sample_csv = name of ot_sample_csv file (contains donor, condition, CRISPResso_dir_name, sample_name)
#
# OUTPUT:
# sample_metadata = sample metadata table containing complete_for_stats column
get_stat_comparison_sample_tb <- function(ot_sample_csv){
# Load sample metadata (donor, condition, CRISPResso_dir_name, sample_name)
sample_metadata <- read.csv(file = ot_sample_csv, stringsAsFactors = FALSE)
# flag the mock/control rows (case-insensitive match on the condition label)
is_mock <- grepl("mock", sample_metadata$condition, ignore.case = TRUE)
# donors with at least one mock sample
donors_with_mock <- sample_metadata$donor[is_mock]
# donors with at least one edited (non-mock) sample
donors_with_edited <- unique(sample_metadata$donor[!is_mock])
# a donor is ready for statistics when it has both conditions
donors_complete <- intersect(donors_with_mock, donors_with_edited)
# annotate each row with whether its donor is complete for statistical comparison
sample_metadata$complete_for_stats <- sample_metadata$donor %in% donors_complete
return(sample_metadata)
}
###### get_ttest_table() ##################################################################
#Takes a list of unique off-target names and edited summary table and performs a t-test
# comparing the mean editing frequency between Edited and Mock samples. Because all mock & edited samples
# are paired by donor, all Mocks are averaged (and all Edited samples are averaged) for t.test calculations.
# This does mean that some samples are represented twice (ex. Mock1 is represented twice if both Edited1
# and Edited2 are from the same donor and thus paired with Mock1.)
# Returns a table of off-targets and their corresponding t-test p-values, as well as whether the
# difference between Edited and Mock is significant.
# (NOTE: statistical comparison is ONLY performed for donors with AT LEAST one mock and one edited sample.
# Comparisons are skipped where there is only one Edited or Mock sample available for the target across
# donors.)
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# off_targets = list of off-targets for statistical comparison
# edited_summary_tb = summary table of off-target editing frequencies
# paired_stats_tb = table storing mock-edited sample pairings for each donor appropriate for statistical
# comparisons
#
# OUTPUT:
# ots_pval_tb = table showing median & mean editing frequencies for mock and edited samples as well as
# the p-value of the one-tailed t.test (alpha = 0.05) comparison and effect size
# (ordered by off-target)
get_ttest_table <- function(off_targets, edited_summary_tb, paired_stats_tb){
n_targets <- length(off_targets)
# Preallocate per-target result vectors (the previous version grew each vector with
# c() inside the loop, which is O(n^2) in the number of targets)
p_vals <- rep(NA_real_, n_targets)          # one-tailed t-test p-value
d_vals <- rep(NA_real_, n_targets)          # Cohen's d effect size
control_median <- rep(NA_real_, n_targets)
edited_median <- rep(NA_real_, n_targets)
control_mean <- rep(NA_real_, n_targets)
edited_mean <- rep(NA_real_, n_targets)
control_sd <- rep(NA_real_, n_targets)
edited_sd <- rep(NA_real_, n_targets)
# Drop incomplete rows ONCE outside the loop (this filter is loop-invariant; it was
# previously recomputed for every off-target)
complete_tb <- edited_summary_tb %>%
  filter(complete.cases(edited_summary_tb))
# seq_len() (not seq(1, n)) so an empty off_targets vector skips the loop cleanly
for(n in seq_len(n_targets)){
  # rows for the current off-target
  ot_only_tb <- complete_tb %>%
    filter(off_target == off_targets[n])
  # editing frequencies of edited vs. mock samples, per the donor pairings
  edited_freq <- ot_only_tb[ot_only_tb$sample %in% paired_stats_tb$edited, ]$frequency
  control_freq <- ot_only_tb[ot_only_tb$sample %in% paired_stats_tb$mock, ]$frequency
  # per-group summary statistics
  edited_median[n] <- median(edited_freq, na.rm = TRUE)
  control_median[n] <- median(control_freq, na.rm = TRUE)
  edited_mean[n] <- mean(edited_freq, na.rm = TRUE)
  control_mean[n] <- mean(control_freq, na.rm = TRUE)
  edited_sd[n] <- sd(edited_freq, na.rm = TRUE)
  control_sd[n] <- sd(control_freq, na.rm = TRUE)
  # a t-test needs some between-group variation and >= 2 samples per group;
  # otherwise the p-value and effect size stay NA
  if(any(edited_freq != control_freq) && length(edited_freq) > 1 && length(control_freq) > 1){
    # one-tailed: tests whether edited frequency exceeds control frequency
    p_vals[n] <- t.test(edited_freq, control_freq,
                        alternative = "greater", paired = FALSE)$p.value
    # Cohen's d with the simple (equal-n) pooled standard deviation
    pooled_sd <- sqrt((edited_sd[n]^2 + control_sd[n]^2) / 2)
    d_vals[n] <- (edited_mean[n] - control_mean[n]) / pooled_sd
  }
}
#generate data frame with off_targets, the t-test p-value associated with it,
# and boolean (significant or not, alpha = 0.05)
ots_pval_tb <- data.frame(off_target = off_targets,
                          edited_median = edited_median,
                          control_median = control_median,
                          edited_mean = edited_mean,
                          control_mean = control_mean,
                          edited_sd = edited_sd,
                          control_sd = control_sd,
                          ttest_p_value = p_vals,
                          eff_size = d_vals) %>%
  mutate(significant = ttest_p_value < 0.05)
# return ordered by off-target name
return(ots_pval_tb[order(ots_pval_tb$off_target),])
}
###### save_editing_results() ##################################################################
# Formats and saves off-target editing frequency table
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# date = date in YYYYMMDD format
# edited_summary_tb = summary table of off-target editing frequencies to be formatted and saved
#
# OUTPUT: none
save_editing_results <- function(date, edited_summary_tb){
# Rename the frequency column for the exported table
formatted_tb <- rename(edited_summary_tb, editing_freq = frequency)
# Sort rows by target name/sequence in reverse alphanumeric order
formatted_tb <- order_alpha(formatted_tb, "name_seq", decreasing_bool = TRUE)
# Drop plotting-only helper columns before export
formatted_tb <- select(formatted_tb, -c(log_freq, indel, name_seq))
# Write "<date>_CRISPResso_OT_editing_summary.csv" to the working directory
out_csv <- paste(date, "CRISPResso_OT_editing_summary.csv", sep = "_")
write.csv(formatted_tb, out_csv, row.names = FALSE)
}
###### check_plot_aesthetics() ##################################################################
# Check whether aesthetics (color, fill, shape) values have been entered by the user for editing dotplot
# generation. If not, fill in table with default aesthetics scale values.
#
# CALLS HELPERS: get_default_palette()
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# aes_tb = data frame storing aesthetics (color, fill, shape) to be used for the dotplot
#
# OUTPUT:
# aes_tb = aes_tb with default color, fill, and shape values (unless user had entered values)
check_plot_aesthetics <- function(aes_tb){
# Fill in any aesthetic column (R_color, R_fill, R_shape) the user left out
# of the aesthetics table, using package defaults.
# default point colors: first nrow(aes_tb) entries of the built-in palette
if(!("R_color" %in% names(aes_tb))){
  aes_tb$R_color <- get_default_palette()[1:nrow(aes_tb)]
}
# default fill mirrors the point color
if(!("R_fill" %in% names(aes_tb))){
  aes_tb$R_fill <- aes_tb$R_color
}
# default shapes: open circle (1) for mock samples, filled circle (16) otherwise
if(!("R_shape" %in% names(aes_tb))){
  aes_tb$R_shape <- ifelse(aes_tb$condition == "mock", 1, 16)
}
return(aes_tb)
}
###### get_default_palette() ##################################################################
# Returns vector of default colors for dotplot color & fill aesthetics. Colors were chosen to be
# dark and distinct, though the colors become more similar as the number of visualized samples
# increases.
#
# CALLS HELPERS: NA
# CALLED IN: check_plot_aesthetics()
#
# ARGUMENTS: none
#
# OUTPUT:
# default_palette = vector storing 30 default colors for dotplot color & fill aesthetics
get_default_palette <- function(){
# 30 default colors for dotplot color/fill aesthetics, arranged in 5 rows of 6
# (each row cycles red, blue, green, orange/yellow, purple, brown). Colors are
# dark and distinct, growing more similar as more samples are plotted.
row1 <- c("firebrick", "cornflowerblue", "olivedrab", "goldenrod", "mediumpurple", "tan4")
row2 <- c("tomato3", "steelblue1", "seagreen3", "darkorange", "darkorchid", "burlywood4")
row3 <- c("indianred1", "royalblue3", "palegreen4", "coral", "purple1", "peru")
row4 <- c("firebrick1", "deepskyblue", "darkseagreen", "chocolate1", "palevioletred1", "tan")
row5 <- c("darkred", "slateblue3", "springgreen", "darkgoldenrod", "plum", "wheat4")
c(row1, row2, row3, row4, row5)
}
###### order_alpha() ##################################################################
# Generate alphabetical levels for a specific column (for plotting purposes)
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# data_tb = any data frame
# colname = data_tb column name to save as factor with sorted levels
# decreasing_bool = logical indicating whether the alphanumeric sort is decreasing
#
# OUTPUT:
# ordered_tb = returns the data_tb with the colname values saved as factors with alphanumeric sorted
# levels
order_alpha <- function(data_tb, colname, decreasing_bool){
# Natural-sort the rows on the requested column (numeric = TRUE so e.g. "OT2"
# sorts before "OT10"), then freeze that order as ordered factor levels so
# downstream ggplot axes follow the sorted order.
row_order <- str_order(data_tb[[colname]], decreasing = decreasing_bool,
                       numeric = TRUE)
sorted_tb <- data_tb[row_order, ]
sorted_tb[[colname]] <- factor(sorted_tb[[colname]],
                               levels = unique(sorted_tb[[colname]]), ordered = TRUE)
return(sorted_tb)
}
#HERE
###### order_by_pval() ##################################################################
# Converts a column to a factor whose level order matches the current row order
# (rows are expected to be pre-sorted by p-value upstream), for plotting purposes
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
#   data_tb = any data frame with ttest_p_value column
#   colname = data_tb column name to save as factor with levels in row order
#
# OUTPUT:
#   ordered_tb = returns the data_tb with the colname values saved as factors whose
#                levels follow the current (p-value-sorted) row order
order_by_pval <- function(data_tb, colname){
# Convert the column to a factor whose level order matches the current row order
# (rows are expected to be pre-sorted by p-value upstream), so plots preserve it.
col_vals <- data_tb[[colname]]
data_tb[[colname]] <- factor(col_vals, levels = unique(col_vals))
return(data_tb)
}
###### get_aes_scale_values_for_dotplot() ##################################################################
#Takes a data frame containing all information necessary to plot the editing frequency dotplot and
# generates the list of aesthetics (color, fill, shape) to pass to scale_manual when generating dotplot.
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# plotting_tb = data frame containing editing frequencies and target sequences to be plotted in dotplot
#
# OUTPUT:
# aes_val_list = list of dotplot aesthetics scale values (color, fill, shape)
get_aes_scale_values_for_dotplot <- function(plotting_tb){
# Builds one list("Group", <named values>) per aesthetic for scale_*_manual() in the
# editing dotplot. plotting_tb must carry group, R_color, R_fill and R_shape columns.
# The original triplicated the same 4 lines per aesthetic; factored into a helper.
# name the aesthetic values by group, keeping the first value seen per group
make_scale <- function(values){
  names(values) <- plotting_tb$group
  list("Group", values[!duplicated(names(values))])
}
#save all scale value lists in aes_val_list
aes_val_list <- list("color_scale_val" = make_scale(plotting_tb$R_color),
                     "fill_scale_val" = make_scale(plotting_tb$R_fill),
                     "shape_scale_val" = make_scale(plotting_tb$R_shape))
return(aes_val_list)
}
###### get_heatmap_legend_values() ##################################################################
# Computes read-coverage legend break values for the heatmap color scale. Builds a
# candidate grid of (legend maximum, label step) pairs and keeps the coarsest step
# whose labels still satisfy the constraints documented inline below.
#
# ARGUMENTS:
#   total_read_vector = vector of total read counts across all targets/samples
#
# OUTPUT:
#   legend_breaks = numeric vector of legend break values (0 plus evenly spaced breaks)
get_heatmap_legend_values <- function(total_read_vector){
#calculate legend breaks and values for read coverage legend
max_reads <- max(total_read_vector, na.rm = TRUE)
# order of magnitude of the read maximum
max_log10 <- round(log10(max_reads), digits = 0)
# legend upper bound: max_reads rounded at its leading digit, plus one magnitude
max_legend_val <- round(max_reads, digits = -max_log10) + 10^max_log10
# full candidate break grid, spaced one magnitude below max_log10
legend_breaks_full <- seq(from = 0, to = max_legend_val, by = 10^(max_log10 - 1))
# NOTE(review): despite the name, these are the first two INDICES into
# legend_breaks_full whose values exceed max_reads, not break values themselves.
smallest_max_legend_vals <- which(legend_breaks_full > max_reads)[1:2]
#calculate the best legend label increments such that:
# -there are at >= 5 labels
# -the labels encompass 10^(max_log10 - 1) to at least the max_reads
# candidate (max index, step) pairs; the step ("factor") ranges over 1..11
max_legend_val_idx <- data.frame(max_legend_val = rep(smallest_max_legend_vals, each = 11),
                                 factor = rep(seq(1,11), times = length(smallest_max_legend_vals))) %>%
  mutate(divisible_by = max_legend_val%%factor == 0)
# NOTE(review): divisible_by is computed but never used below -- confirm intent.
# last_idx = last break index reachable from index 2 with the given step
max_legend_val_idx$last_idx <- mapply(function(max_legend_val, factor) tail( seq(2, max_legend_val, by = factor), 1),
                                      max_legend_val_idx$max_legend_val, max_legend_val_idx$factor)
# n_labels = number of labels produced by the given step
max_legend_val_idx$n_labels <- mapply(function(max_legend_val, factor) length(seq(2, max_legend_val, by = factor)),
                                      max_legend_val_idx$max_legend_val, max_legend_val_idx$factor)
# keep candidates with an acceptable label count that still cover max_reads,
# then prefer the coarsest step and, among those, the smallest legend maximum
max_legend_val_idx <- max_legend_val_idx %>%
  mutate(pass_n_labels = ifelse((n_labels == last_idx) | (n_labels >= 4 & n_labels <= 8), TRUE, FALSE)) %>%
  filter(last_idx >= smallest_max_legend_vals[1]) %>%
  filter( pass_n_labels ) %>%
  filter(factor == max(factor)) %>%
  filter(max_legend_val == min(max_legend_val))
# final breaks: 0 plus every step-th break up to the chosen maximum index
legend_breaks <- legend_breaks_full[c(1, seq(2, max_legend_val_idx$max_legend_val,
                                             by = max_legend_val_idx$factor))]
return(legend_breaks)
}
###### make_compiled_OT_editing_dotplot() ##################################################################
#Takes a summary table with all samples, all off-targets, and all editing outcomes and
# generates editing summary dotplot. Also takes fill and color parameters for ggplot2. (The off-targets
# are in the same order as the dotplot so that they are aligned when displayed together.)
# Returns a dotplot that is meant to be placed in a composite graph with a coverage heatmap.
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# summary_tb = data frame containing read counts for each target and sample
# scale_size_by_editing_freq = logical, whether or not to separate geom_points by size according to read coverage
# in the editing scatterplot
# fill = list containing c(name, values) for scale_fill_manual
# color = list containing c(name, values) for scale_color_manual
# shape = list containing c(name, values) for scale_shape_manual
# size = list containing aesthetic values for size of scatterplot/dotplot points
# guide_font_size = font size of guides/targets
# editing_freq_scale = editing frequency log10 scale
#
# OUTPUT:
# dotplot = dotplot of editing frequency for each target across samples
make_compiled_OT_editing_dotplot <- function(summary_tb, scale_size_by_editing_freq,
                                             fill, color, shape, size,
                                             guide_font_size, editing_freq_scale){
# base mapping: one column of jittered points per target (flipped below)
dotplot <- ggplot(data = summary_tb,
                  aes(x = name_seq,
                      y = frequency))
#generate jittered dotplot depending on whether scale_size_by_editing_freq == TRUE
if(scale_size_by_editing_freq){
# point size encodes read-coverage tier (R_point_size factor) via a manual scale
dotplot <- dotplot +
  geom_jitter(aes(fill = group,
                  color = group,
                  shape = group,
                  size = R_point_size),
              width = 0.1,
              height = 0) +
  scale_size_manual(name = size[[1]],
                    values = size[[2]],
                    labels = size[[3]],
                    guide = guide_legend(ncol = 1))
}else{
# fixed point size for all samples; suppress the size legend
# (guides(size = FALSE) is the pre-ggplot2-3.3 idiom; newer code uses "none")
dotplot <- dotplot +
  geom_jitter(aes(fill = group,
                  color = group,
                  shape = group),
              size = summary_tb$R_point_size[1],
              width = 0.1,
              height = 0) +
  guides(size = FALSE)
}
# shared styling: log10 frequency axis (bottom tick relabeled "0"), flipped so
# targets run down the y-axis in the same order as the companion heatmap,
# monospaced target labels so aligned sequences line up
dotplot <- dotplot +
  xlab("") +
  ylab("% Editing Frequency\n") +
  scale_y_continuous(position = "right", trans="log10",
                     limits = c(editing_freq_scale[1], 100),
                     breaks = editing_freq_scale,
                     labels = c(0, editing_freq_scale[2:(length(editing_freq_scale)-2)], 10, 100)) +
  coord_flip() +
  scale_x_discrete(limits = rev(levels(summary_tb$name_seq)),
                   labels = rev(levels(summary_tb$name_seq)),
                   breaks = rev(levels(summary_tb$name_seq))) +
  scale_fill_manual(name = fill[[1]],
                    values = fill[[2]],
                    guide = guide_legend(ncol = 1)) +
  scale_color_manual(name = color[[1]],
                     values = color[[2]],
                     guide = guide_legend(ncol = 1)) +
  scale_shape_manual(name = shape[[1]],
                     values = shape[[2]],
                     guide = guide_legend(ncol = 1)) +
  theme_classic() +
  theme(axis.text.y = element_text(family = "Courier" , size = guide_font_size))
return(dotplot)
}
###### make_compiled_summary_OT_editing_boxplot() ##################################################################
#Takes a summary table with all samples, all off-targets, and all editing outcomes and
# generates editing summary boxplot displaying the statistics of the mock and edited mean/median.
# Also takes fill and color parameters for ggplot2. (The off-targets are in the same order as the dotplot so
# that they are aligned when displayed together.)
# Returns a dotplot that is meant to be placed in a composite graph with a coverage heatmap.
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# summary_tb = data frame containing read counts for each target and sample
# scale_size_by_editing_freq = logical, whether or not to separate geom_points by size according to read coverage
# in the editing scatterplot
# fill = list containing c(name, values) for scale_fill_manual
# color = list containing c(name, values) for scale_color_manual
# shape = list containing c(name, values) for scale_shape_manual
# size = list containing aesthetic values for size of scatterplot/dotplot points
# guide_font_size = font size of guides/targets
# editing_freq_scale = editing frequency log10 scale
#
# OUTPUT:
# dotplot = dotplot of editing frequency for each target across samples
make_compiled_summary_OT_editing_boxplot <- function(summary_tb,
                                                     #fill, color, shape, size,
                                                     guide_font_size, editing_freq_scale){
#Add "mean" for displaying mean in the legend
# (constant column; aes(shape = mean) below maps to it so a legend entry is created)
summary_tb <- summary_tb %>%
  mutate(mean = "mean")
# boxplots of editing frequency per target, dodged by condition (control vs edited)
boxplot <- ggplot(data = summary_tb,
                  aes(x = name_seq,
                      y = frequency,
                      fill = factor(condition))) +
  geom_boxplot(position = position_dodge(0.5),
               alpha = 0.5,
               width = 0.5,
               color = "gray60") +
  # overlay the group mean as a black point (shape 18 = diamond, via scale below)
  # NOTE(review): fun.y was deprecated in ggplot2 3.3 in favor of fun -- confirm
  # the pinned ggplot2 version before updating
  stat_summary(fun.y = mean,
               geom = "point",
               size = 2,
               color = "black",
               show.legend = FALSE,
               position = position_dodge(0.5),
               aes(shape = mean)) +
  # invisible points (alpha = 0) exist solely to generate the shape legend entry;
  # override.aes at the bottom makes the legend key itself visible
  geom_point(alpha = 0,
             fill = "black",
             aes(shape = mean)) +
  xlab("") +
  ylab("% Editing Frequency\n") +
  # log10 frequency axis; bottom tick relabeled "0"
  scale_y_continuous(position = "right", trans="log10",
                     limits = c(editing_freq_scale[1], 100),
                     breaks = editing_freq_scale,
                     labels = c(0, editing_freq_scale[2:(length(editing_freq_scale)-2)], 10, 100)) +
  # flipped so targets run down the y-axis, matching the companion heatmap order
  coord_flip() +
  scale_x_discrete(limits = rev(levels(summary_tb$name_seq)),
                   labels = rev(levels(summary_tb$name_seq)),
                   breaks = rev(levels(summary_tb$name_seq))) +
  scale_fill_manual(name = "",
                    values = c("control" = "lightskyblue",
                               "edited" = "lightcoral"), #"mean" = "black"
                    labels = c("control" = "Control/Mock",
                               "edited" = "Edited"), #"mean" = "Mean % Editing"
                    guide = guide_legend(ncol = 1)) +
  scale_shape_manual(name = "",
                     values = c("mean" = 18),
                     labels = c("mean" = "Mean % Editing")) +
  theme_classic() +
  # monospaced target labels so aligned sequences line up
  theme(axis.text.y = element_text(family = "Courier" , size = guide_font_size)) +
  guides(shape=guide_legend(title=NULL, override.aes = list(alpha = 1)))
return(boxplot)
}
###### make_compiled_OT_coverage_heatmap() ##################################################################
#Takes a summary table with all samples, all off-targets, and all editing outcomes and generates read
# coverage heatmap. (The off-targets are in the same order as the dotplot so that they are aligned when
# displayed together)
# Returns a heatmap that is meant to be placed in a composite graph with an editing dotplot.
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
#  summary_tb = data frame containing read counts for each target and sample
#  guide_font_size = font size of guides/targets (not displayed in figure, mainly for formatting)
#  group_font_size = font size of groups/samples across top of heatmap
#  tile_font_size = font size of read counts displayed inside heatmap tiles
#  legend_breaks = numeric breaks used for the read-coverage fill legend
#
# OUTPUT:
# heatmap = heatmap of read coverage for each target across samples
make_compiled_OT_coverage_heatmap <- function(summary_tb, guide_font_size,
                                              group_font_size, tile_font_size,
                                              legend_breaks){
  #Read-coverage heatmap: one tile per (off-target, sample group), labeled with the
  # comma-formatted, rounded-up total read count ("NA" counts are rendered blank).
  base_plot <- ggplot(data = summary_tb,
                      aes(x = name_seq,
                          y = group))
  #tile + label layers
  tile_layers <- base_plot +
    geom_tile(aes(fill = total_reads)) +
    geom_text(aes(label = gsub("NA", "",
                               format(ceiling(total_reads),
                                      trim = FALSE, accuracy = 1, big.mark = ","))),
              size = tile_font_size,
              family = "Courier") +
    xlab("") +
    ylab("Total Reads per Sample\n")
  #scales and theme: targets run down the (flipped) x-axis, sample groups across
  # the top; spaces in group labels are turned into line breaks
  coverage_heatmap <- tile_layers +
    scale_x_discrete(limits = rev(unique(summary_tb$name_seq)),
                     breaks = c(),
                     expand = c(0, 0)) +
    scale_y_discrete(position = "right",
                     limits = levels(summary_tb$group),
                     labels = c(gsub("[ ]{1}", "\n", levels(summary_tb$group))),
                     expand = c(0, 0)) +
    scale_fill_gradientn(name = "",
                         colours = c("grey95", "skyblue", "cornflowerblue"),
                         limits = c(legend_breaks[1], legend_breaks[length(legend_breaks)]),
                         breaks = legend_breaks,
                         labels = c("", as.character(comma(legend_breaks[2:length(legend_breaks)]))),
                         na.value = "white") +
    coord_flip() +
    theme_classic() +
    theme(legend.position = "left",
          axis.text.y = element_text(family = "Courier" , size = guide_font_size),
          axis.text.x = element_text(colour = "grey50", size = group_font_size))
  return(coverage_heatmap)
}
###### make_composite_grobPlot() ##################################################################
### Generates a composite plot showing coverage as a heatmap on the left and % editing as
### a dotplot on the right.
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# heatmap = ggplot heatmap object to be plotted
# dotplot = ggplot dotplot object to be plotted
# heatmap_width = width of heatmap relative to dotplot_width
# dotplot_width = width of dotplot relative to heatmap_width
#
# OUTPUT:
# composite_grob = composite plot as grob (and displays the grob object)
make_composite_grobPlot <- function(heatmap, dotplot, heatmap_width, dotplot_width){
  #Compose the coverage heatmap (left) and editing plot (right) into one figure,
  # horizontally aligned on their top and bottom axes.
  #
  # ARGUMENTS:
  #  heatmap = ggplot heatmap object (left panel)
  #  dotplot = ggplot dotplot/boxplot object (right panel)
  #  heatmap_width/dotplot_width = relative panel widths
  #
  # OUTPUT:
  #  composite_grob = the composed plot object
  #start a fresh graphics page before composing
  grid.newpage()
  #(removed unused ggplotGrob() conversions; plot_grid() takes the ggplot objects
  # directly. A bare expression inside a function body is not auto-printed, so the
  # former trailing "composite_grob" line was a no-op and has been removed too.)
  composite_grob <- plot_grid(heatmap, dotplot, ncol = 2, align = "h", axis = "bt",
                              rel_widths = c(heatmap_width, dotplot_width))
  return(composite_grob)
}
###### save_composite_plot() ##################################################################
# Takes file_name (without extension), composite plot object, and width/height of saved plot (in inches)
# and saves the plot as .pdf and png
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# file_name = file name of saved plots
# composite_plot = composite plot object to be plotted
# plot_width_in = width of saved plots in inches
# plot_height_in = height of saved plots in inches
#
# OUTPUT: none
save_composite_plot <- function(file_name, composite_plot, plot_width_in, plot_height_in){
  #Save the composite plot as a png; file_name is supplied without an extension.
  # (The former per-plot pdf export was dead, commented-out code and has been
  # removed; a combined pdf is produced separately by the caller via pdf()/print().)
  ggsave(paste0(file_name, ".png"), plot = composite_plot,
         width = plot_width_in, height = plot_height_in,
         units = "in")
}
|
/crispresso_downstream/Summarize_off-target_editing_functs.R
|
no_license
|
ashen931/crispresso_downstream
|
R
| false
| false
| 59,805
|
r
|
#Summarize_off-target_editing_functs.R
# conda_environment: crispresso_downstream_env
# last modified: 2020_08_17 Anne Shen
# For use with CRISPResso version 2.0.40
#
### Dependencies:
# library(tidyverse)
# library(tidyselect)
# library(gtable)
# library(scales)
# library(gridExtra)
# library(grid)
# library(effsize)
# library(extrafont)
#
# font_import()
# loadfonts()
#
#
### Options:
# options(scipen=999) #turn off scientific notation
#
#
### Functions:
# summarize_off_targets()
# remove_guideseq_from_cols()
# get_formatted_summary()
# get_group_summary_table()
# get_table_by_guide()
# pair_mock_v_edited_samples_by_donor()
# get_stat_comparison_sample_tb()
# get_ttest_table()
# save_editing_results()
# check_plot_aesthetics()
# get_default_palette()
# order_alpha()
# get_aes_scale_values_for_dotplot()
# get_heatmap_legend_values()
# make_compiled_OT_editing_dotplot()
# make_compiled_summary_OT_editing_boxplot()
# make_compiled_OT_coverage_heatmap()
# make_composite_grobPlot()
# save_composite_plot()
######################################## FUNCTIONS ##################################################
###### summarize_off_targets() ##################################################################
# Function that generates all off-target analyses with the following steps:
# 1. format collapsed summary tables & read in metadata/reference tables
# 2. generate & format editing frequency & data tables
# 3. perform statistical tests (Mock v. Edited for each donor)
# 4. generate aesthetics for editing dotplot
# 5. generate all plots (editing dotplot, read coverage heatmap, composite plot)
#
# CALLS HELPERS: remove_guideseq_from_cols(), get_formatted_summary(), pair_mock_v_edited_samples_by_donor(),
# get_ttest_table(), save_editing_results(), check_plot_aesthetics(), order_alpha(),
# get_aes_scale_values_for_dotplot(), make_compiled_OT_editing_dotplot(),
# make_compiled_OT_coverage_heatmap(), make_composite_grobPlot(), save_composite_plot()
# CALLED IN: CRISPResso2_downstream.R
#
# ARGUMENTS:
# mode = run mode (ex. "collapse_BE_OT", "BE_OT", "OT_only" etc.)
# ref_seq_csv = csv name for reading in ref_seq_tb containing all reference, guide, and PAM sequences
# by off-target
# ot_sample_csv = name of ot_sample_csv file (contains donor, condition, CRISPResso_dir_name, sample_name)
# percent_freq_cutoff = percent cutoff for alleles in alleles summary tables (generated by "collapse"
# mode)
# conversion_nuc_from = (if BE mode) nucleotide to be edited by base editor
# conversion_nuc_to = (if BE mode) base editing result
# sort_by_pval = sort off-targets in composite plots by t-test value (if applicable) instead of off-target
# alphanumeric name
# scale_size_by_editing_freq = logical, whether or not to separate geom_points by size according to read coverage
# in the editing scatterplot
# low_coverage = the upper read count cutoff for "low-coverage" amplicons/samples
# high_coverage = the lower read count cutoff for "high-coverage" amplicons/samples
#
# OUTPUT:
# 1. saves ot_ttest_tb as "CRISPResso_OTs_ttest.csv"
# 2. saves edited_summary_tb as "CRISPResso_OT_editing_summary.csv"
# 3. saves off-target composite figures as .pdf and .png
summarize_off_targets <- function(mode, ref_seq_csv, ot_sample_csv, percent_freq_cutoff,
                                  conversion_nuc_from, conversion_nuc_to, sort_by_pval,
                                  scale_size_by_editing_freq, low_coverage, high_coverage){
  #Side effects: writes the t-test and editing-summary csv files, per-plot png files,
  # and a combined pdf into the current working directory; prints a run log via cat().
  #test code:
  # saved_wd <- getwd()
  # setwd("/Users/anneshen/Documents/local_working/local_Jing_BE/2020_1620_BE_NatureMed/20191206_1620_input_rhAMPSeq_integrated3")
  # mode<- "BE_OT"
  # ref_seq_csv <- "1620_ref_seqs_tb.csv"
  # ot_sample_csv<- "2019_1620_BE_rhAMPSeq_samples.csv"
  # percent_freq_cutoff <- 0
  # conversion_nuc_from <- "C"
  # conversion_nuc_to <- "T"
  # sort_by_pval <- TRUE
  # scale_size_by_editing_freq <- TRUE
  # low_coverage <- 1000
  # high_coverage <- 10000
  #
  # setwd("/Users/anneshen/Documents/local_working/IND_off_target/2020_CRISPResso2/20200722_DE_1450_rhAMPSeq")
  # mode<- "OT_only"
  # ref_seq_csv <- "202006_1450_0000_ref_seqs.csv"
  # ot_sample_csv<- "202006_DE_1450_rhAMPSeq_samples.csv"
  # percent_freq_cutoff <- 0
  # scale_size_by_editing_freq <- TRUE
  # low_coverage <- 1000
  # high_coverage <- 10000
  #echo all run parameters to the log
  cat("Off_target_summary_log\n",
      paste(Sys.time(), "\n", sep = ""),
      paste(getwd(), "\n", sep = ""),
      "ref_seq_csv: ", ref_seq_csv, "\n",
      "ot_sample_csv: ", ot_sample_csv, "\n",
      "percent_freq_cutoff: ", percent_freq_cutoff, "\n",
      "analysis_mode: ", mode, "\n",
      "sort_by_pval: ", sort_by_pval, "\n",
      "scale_size_by_editing_freq: ", scale_size_by_editing_freq, "\n",
      "low_coverage: ", low_coverage, "\n",
      "high_coverage: ", high_coverage, "\n",
      "\n")
  #set date for saving figures
  date <- format(Sys.Date(), "%Y%m%d")
  ##### 1. format collapsed summary tables & read in metadata/reference tables #####
  #get all summary file names
  if(grepl("BE", mode)){
    #if running OT analysis on base editing data, use BE summary tables
    conversion <- paste(conversion_nuc_from, "to", conversion_nuc_to, ".csv", sep = "")
    summary_file_suffix <- paste("BE_summary_", conversion, sep = "")
    list_summary_files <- list.files(pattern = summary_file_suffix)
  }else{
    #get collapsed file suffix
    summary_file_suffix <- paste("collapsed_", percent_freq_cutoff, ".csv", sep = "")
    #if not running OT analysis on base editing data, use collapsed allele tables
    list_summary_files <- list.files(pattern = summary_file_suffix)
  }
  #read in ref_seq_tb linking off-target names and sequences
  ref_seq_tb <- read.csv(ref_seq_csv, stringsAsFactors = FALSE) %>%
    filter(grepl("OT", ot_id)) %>%
    rename(off_target = ot_id) %>%
    filter(!duplicated(off_target))
  #Remove guide name from list_summary_files file columns (only need to do once) & re-saves .csv file.
  # Also separates guides on the same amplicon.
  # Non-fruitful function
  remove_guideseq_from_cols(list_summary_files, ref_seq_tb, ref_seq_csv)
  #read in sample table
  ot_samples_tb <- read.csv(ot_sample_csv, stringsAsFactors = FALSE)
  #get ot_samples_tb row indexes for mock & experimental samples
  mock_samples_idx <- grep("mock", ot_samples_tb$condition, ignore.case = TRUE)
  trt_samples_idx <- grep("mock", ot_samples_tb$condition, ignore.case = TRUE, invert = TRUE)
  ##### 2. generate & format editing frequency & data tables #####
  #get editing tables from mock_sample collapsed/filtered allele tables
  all_mock_tb <- get_formatted_summary(ot_samples_tb, mock_samples_idx, summary_file_suffix,
                                       ref_seq_tb, condition = "control")
  #get editing tables from experimental_sample collapsed/filtered allele tables
  all_trt_tb <- get_formatted_summary(ot_samples_tb, trt_samples_idx, summary_file_suffix,
                                      ref_seq_tb, condition = "edited")
  #bind all data tables together by columns
  all_samples_from_file <- rbind(all_mock_tb, all_trt_tb)
  #generate all_samples table (complete table including all off-target and all samples)
  unique_samples_idx <- unique(all_samples_from_file$sample)
  unique_samples <- unique_samples_idx[which(!is.na(unique_samples_idx))]
  n_control <- length(mock_samples_idx)
  n_edited <- length(trt_samples_idx)
  n_samples <- n_control + n_edited
  #NOTE(review): each target contributes two rows per sample ("Unedited"/"Edited"),
  # hence each = 2 throughout this scaffold table
  all_samples <- data.frame(off_target = rep(ref_seq_tb$off_target, times = n_samples, each = 2),
                            amplicon_sequence = rep(ref_seq_tb$amplicon_sequence, times = n_samples, each = 2),
                            guide_sequence = rep(ref_seq_tb$guide_sequence, times = n_samples, each = 2),
                            pam = rep(ref_seq_tb$pam, times = n_samples, each = 2),
                            indel = rep(c("Unedited", "Edited"), nrow(ref_seq_tb) * n_samples),
                            sample = rep(c(unique_samples), times = 1, each = nrow(ref_seq_tb) * 2),
                            condition = rep(c(rep("control", n_control), rep("edited", n_edited)), times = 1,
                                            each = nrow(ref_seq_tb) * 2))
  #join all_samples & all_samples_from_file to get table with all off-targets and samples represented
  # (NAs for frequencies and reads of samples that were not analyzed in CRISPResso2)
  all_samples_tb <- left_join(all_samples, all_samples_from_file, by = names(all_samples)) %>%
    mutate(group = paste(condition, sample, sep = " "))
  #generate name_seq column (OT name + guide sequence + PAM) with standardized spacing
  max_nseq_len <- max(str_length(all_samples_tb $off_target) +
                        str_length(all_samples_tb $guide_sequence))
  all_samples_tb <- all_samples_tb %>%
    mutate(padding = as.numeric(max_nseq_len - str_length(off_target) - str_length(guide_sequence)))
  all_samples_tb$spaces <- str_dup(rep(" ", nrow(all_samples_tb )), all_samples_tb $padding)
  all_samples_tb$name_seq <- paste(paste(all_samples_tb $off_target,
                                         " ",
                                         all_samples_tb $spaces,
                                         all_samples_tb $guide, sep = ""),
                                   all_samples_tb $pam, sep = " ")
  #remove unnecessary columns
  all_samples_tb <- all_samples_tb %>%
    select(-c("padding", "spaces"))
  #select only off-targets & samples that were represented in CRISPResso2 analysis
  # (will not include off-targets with no samples in final composite figure)
  all_samples_tb <- all_samples_tb[complete.cases(all_samples_tb), ]
  #select data representing editing frequency
  edited_summary_tb <- all_samples_tb %>%
    filter(indel == "Edited") %>%
    select(-amplicon_sequence)
  ##### 3. perform statistical tests (Mock v. Edited for each donor) #####
  ### statistical test
  # Compare % edited in Mock v. Edited samples
  paired_stats_tb <- pair_mock_v_edited_samples_by_donor(ot_sample_csv)
  #get list of off-targets
  off_targets <- unique(edited_summary_tb$off_target)
  #generate table of Edited v. Mock editing frequency results
  ot_ttest_tb <- get_ttest_table(off_targets, edited_summary_tb, paired_stats_tb)
  sig_ots <- ot_ttest_tb$off_target[which(ot_ttest_tb$significant)] %>% droplevels()
  #save OT ttest statistics as csv
  write.csv(ot_ttest_tb, paste(date, "CRISPResso_OTs_ttest.csv", sep = "_"),
            row.names = FALSE, quote = FALSE)
  #add asterisks (*) to name_seq of significant off-targets
  # (non-significant targets get matching trailing spaces so labels stay aligned)
  for(n in seq(1, nrow(edited_summary_tb))){
    edited_summary_tb$name_seq <- as.character(edited_summary_tb$name_seq)
    if(edited_summary_tb$off_target[n] %in% sig_ots){
      edited_summary_tb$name_seq[n] <- paste(edited_summary_tb$name_seq[n],
                                             "* ",
                                             sep = " ")
    }else{
      edited_summary_tb$name_seq[n] <- paste(edited_summary_tb$name_seq[n],
                                             "  ",
                                             sep = " ")
    }
  }
  #format editing results and save as csv file (non-fruitful function)
  save_editing_results(date, edited_summary_tb)
  ##### 4. set 0% editing frequency to power of 10 below lowest frequency (for plotting purposes) #####
  #find the lowest editing % frequency in all the samples, then go a power of 10 below that minimum
  # for the "0% editing" setting on the composite % editing scatterplot
  min_edited_log_freq <- min(edited_summary_tb$log_freq[which(edited_summary_tb$log_freq > -Inf)])
  power10_floor <- floor(min_edited_log_freq) - 1
  #set all 0% editing to 10^power10_floor (for log10 transformation)
  zero_freq_row_idx <- which(edited_summary_tb$frequency == 0)
  edited_summary_tb$frequency[zero_freq_row_idx] <- 10^power10_floor
  edited_summary_tb$log_freq[zero_freq_row_idx] <- power10_floor
  ##### 5. generate aesthetics for editing dotplot #####
  #get aesthetics columns from ot_samples_tb to join with edited_summary_tb
  aes_tb <- select(ot_samples_tb, c("condition", "sample_name", grep("R_", names(ot_samples_tb), value = TRUE)))
  #check that R_color, R_fill, and R_shape aesthetics exist & generate defaults if non-existent
  aes_tb <- check_plot_aesthetics(aes_tb)
  #join aes_tb with edited_summary_tb to match aesthetics scale values with samples for plotting
  full_plotting_tb <- full_join(edited_summary_tb, select(aes_tb, -condition), by = c("sample" = "sample_name")) %>%
    order_alpha("off_target", decreasing_bool = FALSE)
  #get list of dotplot aesthetics scale values (color, fill, shape)
  aes_val_list <- get_aes_scale_values_for_dotplot(full_plotting_tb)
  #generate counts total counts table
  # (back-calculates total reads from the Unedited rows: reads / (frequency / 100))
  full_total_counts_tb <- all_samples_tb %>%
    filter(indel == "Unedited" ) %>%
    mutate(total_reads = ceiling((reads * 100) / frequency ))
  ##### 6. calculate number of OTs per plot and aesthetics sizes #####
  #use the following data frame to calculate the number of plots to generate (between 20-60 OTs per plot)
  n_targets <- length(unique(full_plotting_tb$off_target))
  ots_per_plot_df <- data.frame(ots_per_plot = seq(20, 50, 10)) %>%
    mutate(min_n_plots = n_targets %/% ots_per_plot,
           ots_rem = n_targets %% ots_per_plot,
           diff_ots_per_plot = ifelse(ots_rem > 0, ots_per_plot - ots_rem, 0))
  #choose to plot the least number of plots with the most even distribution of off-targets among plots
  min_idx <- max(which(ots_per_plot_df$diff_ots_per_plot == min(ots_per_plot_df$diff_ots_per_plot)))
  n_ots_per_plot <- ots_per_plot_df$ots_per_plot[min_idx]
  unique_ots <- levels(full_plotting_tb$off_target)
  #adjust font sizes according to number of off-targets displayed per plot
  guide_font_size <- 10 - (n_ots_per_plot %/% 30)
  tile_font_size <- 2.75 - (n_ots_per_plot %/% 30)*0.25
  pointsize <- 1.5 - (n_ots_per_plot %/% 30)*0.25
  group_font_size <- 8 - (n_ots_per_plot %/% 10)*0.25
  #adjust heatmap and dotplot width to match sample number
  n_plot_samples <- length(unique(full_plotting_tb$sample))
  heatmap_width <- n_plot_samples * 0.5 + 2
  dotplot_width <- n_plot_samples * 0.05 + 8
  cplot_height <- n_plot_samples * 0.1 + 8.5
  #Generate legend values for all figures. Calculate the best legend label increments such that:
  # -there are at >= 5 labels
  # -the labels encompass 10^(max_log10 - 1) to at least the max_reads
  legend_breaks <- get_heatmap_legend_values(full_total_counts_tb$total_reads)
  #generate editing % frequency scatterplot scale (to go from 10^power10_floor --> 100)
  # (power10_floor = log10(lowest editing % frequency) -1)
  editing_freq_scale <- mapply(function(x) 10^x, seq(power10_floor, 2))
  #add total_reads column to plotting_tb
  full_plotting_tb <- full_join(full_plotting_tb, full_total_counts_tb[c("off_target", "group", "total_reads")],
                                by = c("off_target", "group"))
  #if sort_by_pval, order full_plotting_tb rows by p-value (increasing) so that off-targets are divided into
  # composite plots in the correct order
  if(sort_by_pval){
    full_plotting_tb <- full_join(full_plotting_tb, ot_ttest_tb[c("off_target", "ttest_p_value")],
                                  by = "off_target")
    full_plotting_tb <- full_plotting_tb[order(full_plotting_tb$ttest_p_value),]
    #set unique_ots order to reflect p_value order, not alphanumeric order
    unique_ots <- as.character(ot_ttest_tb[order(ot_ttest_tb$ttest_p_value),]$off_target)
  }
  #add geom_point size column (R_point_size) if scale_size_by_editing_freq == TRUE & generate aesthetics list
  if(scale_size_by_editing_freq){
    #set medium and high-coverage points
    mid_coverage_size <- pointsize + 0.6
    high_coverage_size <- pointsize + 1.5
    full_plotting_tb$R_point_size <- as.factor(ifelse(full_plotting_tb$total_reads < low_coverage, pointsize,
                                                      ifelse(full_plotting_tb$total_reads <= high_coverage, mid_coverage_size,
                                                             high_coverage_size)))
    #generate aesthetics list
    size_range <- c(pointsize, high_coverage_size)
    size_val <- c(pointsize, mid_coverage_size, high_coverage_size)
    names(size_val) <- as.factor(c(pointsize, mid_coverage_size, high_coverage_size))
    size_lab <- c(paste("<", low_coverage, " reads", sep = ""),
                  paste(low_coverage,"-", high_coverage, " reads", sep = ""),
                  paste(">", high_coverage, " reads", sep = ""))
    names(size_lab) <- as.factor(c(pointsize, mid_coverage_size, high_coverage_size))
    aes_val_list$size_scale_val <- list("Read\nCoverage", size_val, size_lab)
  }else{
    full_plotting_tb$R_point_size <- pointsize
    aes_val_list$size_scale_val <- NULL
  }
  ##### 7. generate all plots (editing dotplot, read coverage heatmap, composite plot) #####
  #initialize while loop iterator
  ots_plotted <- 0
  #initialize plot number tracker
  n_plots <- 0
  #initialize composite plot list for pdf printing
  composite_plot_list <- list()
  #initialize composite summary plot list for pdf printing
  composite_summary_plot_list <- list()
  #generate the plots
  while(ots_plotted < n_targets){
    #increase plot number iterator
    n_plots <- n_plots + 1
    #calculate the number of off-targets to include the current plot
    n_ots_this_plot <- min(n_targets - ots_plotted, n_ots_per_plot)
    #filter full_plotting_tb by the number of ots
    sub_plotting_tb <- full_plotting_tb %>%
      filter(off_target %in% unique_ots[(ots_plotted + 1):(ots_plotted + n_ots_this_plot)])
    #HERE
    #if sort_by_pval is true, order_apha by pval. Otherwise, order_alpha by name_seq (in heatmap and scatterplots/boxplots)
    if(sort_by_pval){
      ordered_plotting_tb <- sub_plotting_tb #%>%
      #order_by_pval("name_seq")
      ordered_plotting_tb$name_seq <- factor(ordered_plotting_tb$name_seq,
                                             levels = unique(ordered_plotting_tb$name_seq))
    }else{
      ordered_plotting_tb <- sub_plotting_tb %>%
        order_alpha("name_seq", decreasing_bool = FALSE)
    }
    #get the total counts for the plotted off_targets
    # total_counts <- full_total_counts_tb %>%
    #   filter(off_target %in% unique_ots[(ots_plotted + 1):(ots_plotted + n_ots_this_plot)]) %>%
    #   order_alpha("name_seq", decreasing_bool = FALSE)
    #order the sample names so that samples are listed by donor
    heatmap_sample_order <- ot_samples_tb$sample_name[str_order(ot_samples_tb$donor, numeric = TRUE)]
    #heatmap_group_idx <- mapply(function(x) which(total_counts$sample == x )[1] , heatmap_sample_order)
    #total_counts$group <- factor(total_counts$group, levels = total_counts$group[heatmap_group_idx])
    #NOTE(review): takes the first row per sample to define group factor levels —
    # assumes every sample in ot_samples_tb appears in ordered_plotting_tb; verify
    heatmap_group_idx <- mapply(function(x) which(ordered_plotting_tb$sample == x )[1] , heatmap_sample_order)
    ordered_plotting_tb$group <- factor(ordered_plotting_tb$group, levels = ordered_plotting_tb$group[heatmap_group_idx])
    #filter full_plotting_tb to include only off-targets in the current plot
    # plotting_tb <- full_plotting_tb %>%
    #   filter(off_target %in% unique_ots[(ots_plotted + 1):(ots_plotted + n_ots_this_plot)]) %>%
    #   order_alpha("name_seq", decreasing_bool = FALSE)
    #### generate coverage heatmap
    heatmap <- make_compiled_OT_coverage_heatmap(ordered_plotting_tb, guide_font_size,
                                                 group_font_size, tile_font_size,
                                                 legend_breaks)
    #total_counts,
    #### generate editing dotplot
    dotplot <- make_compiled_OT_editing_dotplot(ordered_plotting_tb, scale_size_by_editing_freq,
                                                aes_val_list$fill_scale_val,
                                                aes_val_list$color_scale_val,
                                                aes_val_list$shape_scale_val,
                                                aes_val_list$size_scale_val,
                                                guide_font_size, editing_freq_scale)
    #plotting_tb,
    summary_dotplot <- make_compiled_summary_OT_editing_boxplot(ordered_plotting_tb,
                                                                guide_font_size,
                                                                editing_freq_scale)
    #plotting_tb,
    #make and save composite plots
    composite_plot <- make_composite_grobPlot(heatmap, dotplot, heatmap_width, dotplot_width)
    summary_composite_plot <- make_composite_grobPlot(heatmap, summary_dotplot, heatmap_width, dotplot_width)
    #save composite plots as individual png files
    png_plot_name <- paste(date, "off_targets_Rplot", n_plots , sep = "_")
    save_composite_plot(png_plot_name, composite_plot, heatmap_width + dotplot_width + 1, cplot_height + 1)
    #save summary plots
    png_summary_plot_name <- paste(date, "off_targets_summary_Rplot", n_plots , sep = "_")
    save_composite_plot(png_summary_plot_name, summary_composite_plot,
                        heatmap_width + dotplot_width + 1, cplot_height + 1)
    #add composite plot to list for printing in pdf
    composite_plot_list[[n_plots]] <- composite_plot
    #add composite summary plot to list for printing in pdf
    composite_summary_plot_list[[n_plots]] <- summary_composite_plot
    #record plotting in run log
    cat("targets plotted in ", png_plot_name, ": ", ots_plotted+1, " - ", ots_plotted + n_ots_this_plot, "\n")
    #increase iterators
    ots_plotted <- ots_plotted + n_ots_this_plot
  }
  #printing composite plots in single .pdf file
  pdf(file = paste(date, "off_targets_Rplot.pdf", sep = "_"), onefile = TRUE,
      width = heatmap_width + dotplot_width + 1, height = cplot_height + 1)
  #print composite plots
  for(i in 1:length(composite_plot_list)){
    print(composite_plot_list[[i]])
    print(composite_summary_plot_list[[i]])
  }
  #disconnect device for pdf printing
  dev.off()
  #end off-target analysis log
  cat("\n")
}
###### remove_guideseq_from_cols() ##################################################################
# Reads through the collapsed files (generated either in "collapse" or "BE" mode) in list_summary_files
# and removes the guide sequences from the column headers. Also resolves cases where multiple guides
# were found in the same amplicon; this function renames the column headers by guide, not amplicon.
# Immediately throws an error if the guide_sequences in ref_seq_tb are not unique. (Repeated guides will
# cause errors in table joining later in the function.)
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# list_summary_files = list of collapsed or BE allele summary files
# ref_seq_tb = table containing all reference, guide, and PAM sequences by off-target
# ref_seq_csv= csv name for reading in ref_seq_tb
#
# OUTPUT: none
remove_guideseq_from_cols <- function(list_summary_files, ref_seq_tb, ref_seq_csv){
  #Rewrite the column headers of each collapsed/BE summary csv in place:
  # "<sample>__<guideseq>" columns are renamed to the matching off-target name,
  # with read-count columns tagged by a "_reads" suffix.
  #NOTE(review): ref_seq_csv is accepted but not used in this implementation.
  for(summary_file in list_summary_files){
    allele_tb <- read.csv(summary_file, stringsAsFactors = FALSE)
    #skip files whose headers were already cleaned on a previous run
    if(!any(grepl("__[ATCG]{6,}", names(allele_tb)))){
      next
    }
    #split each "<sample>__<guide>" header into its two components
    # (row order of header_tb matches the order returned by grep("__", ...))
    header_tb <- data.frame(sample_names = grep("__", names(allele_tb), value = TRUE))%>%
      separate(col = sample_names, into = c("sample", "guide"), sep = "__", remove = FALSE)
    #map each guide sequence to its off-target name; keep only rows where the
    # off-target id actually occurs in the original column name (resolves amplicons
    # shared by multiple guides), and tag read-count columns with "_reads"
    renamed_tb <- left_join(header_tb, ref_seq_tb, by = c("guide" = "aligned_guide_seq")) %>%
      mutate(filter = str_detect(sample, off_target)) %>%
      filter(filter) %>%
      transform(off_target = ifelse(grepl("read", sample), paste(off_target, "reads", sep = "_"), off_target)) %>%
      select(-filter)
    #apply the new names and overwrite the csv on disk
    names(allele_tb)[grep("__", names(allele_tb))] <- as.character(renamed_tb$off_target)
    write.csv(allele_tb, file = summary_file, row.names = FALSE)
  }
}
###### get_formatted_summary() ##################################################################
# For either all the control or edited samples, reads all the allele frequency tables (generated either
# during the "collapse" or "BE" modes), summarizes the Edited v. Unedited allele frequencies for each
# target within each sample, and formats all the data within one data frame.
#
# CALLS HELPERS: get_group_summary_table()
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# ot_samples_tb = sample metadata table containing donor, condition, CRISPResso_dir_name, and sample_name
# ot_sample_idx = vector of desired samples' row indexes in ot_samples_tb
# summary_file_suffix = suffix of all allele collapsed summary tables (depends on whether
# "collapsed" or "BE" mode was used to generate most recent summary table)
# master_guide_tb = table containing all reference, guide, and PAM sequences by off-target
# condition = desired condition to samples in this table (either "control" or "edited")
#
# OUTPUT:
# summary_and_seq_tb = full data table containing Edited/Unedited frequencies of every off-target
# of every CRISPResso2 run/sample
get_formatted_summary <- function(ot_samples_tb, ot_sample_idx, summary_file_suffix,
                                  master_guide_tb, condition){
  #Collect each sample's long-format table in a preallocated list and bind once at
  # the end (avoids quadratic-cost rbind() growth inside the loop).
  #seq_along() also handles ot_sample_idx being empty, unlike seq(1, length(x)).
  per_sample_tables <- vector("list", length(ot_sample_idx))
  for(n in seq_along(ot_sample_idx)){
    #get idx of CRISPResso2 sample name in ot_samples_tb
    idx <- ot_sample_idx[n]
    #get the sample file names & read in summary tables
    summary_file_name <- paste(ot_samples_tb$CRISPResso_dir_name[idx], summary_file_suffix, sep = "_")
    summary_tb_raw <- read.csv(summary_file_name, stringsAsFactors = FALSE)
    #get the sample name of the CRISPResso2 run
    sample_name <- ot_samples_tb$sample_name[idx]
    #for all off-targets in the CRISPResso2 run, obtain "Edited" and "Unedited" total allele frequencies
    summary_tb <- get_group_summary_table(summary_tb_raw, sample_name)
    #gather read-count columns into long format (off_target / reads)
    summary_table_reads <- summary_tb %>%
      select(vars_select(names(summary_tb), -matches("[0-9]$"))) %>%
      gather(key = "off_target", value = "reads",
             vars_select(names(summary_tb), contains("_reads")))
    summary_table_reads$off_target <- gsub("_reads", "", summary_table_reads$off_target)
    #gather frequency columns into long format (off_target / frequency)
    summary_table_freqs <- summary_tb %>%
      select(vars_select(names(summary_tb), -ends_with("_reads"))) %>%
      gather(key = "off_target", value = "frequency",
             vars_select(grep("_reads", names(summary_tb), invert = TRUE, value = TRUE),
                         contains("_O")))
    #merge reads and frequencies for this sample
    per_sample_tables[[n]] <- full_join(summary_table_freqs, summary_table_reads,
                                        by = c("off_target", "indel", "sample"))
  }
  all_summary_table <- do.call(rbind, per_sample_tables)
  #add off-target sequence to summary table
  summary_and_seq_tb <- left_join(master_guide_tb, all_summary_table, by = "off_target")
  #log10 transform frequency
  summary_and_seq_tb$log_freq <- log10(summary_and_seq_tb$frequency)
  #order table by off_target
  summary_and_seq_tb <- summary_and_seq_tb[order(summary_and_seq_tb$off_target),]
  #add "condition" column to indicate whether this was an edited or control sample
  summary_and_seq_tb$condition <- rep(condition, nrow(summary_and_seq_tb))
  return(summary_and_seq_tb)
}
###### get_group_summary_table() ##################################################################
# For all off-targets in a CRISPResso2 run, obtains "Edited" and "Unedited" total allele frequencies.
#
# CALLS HELPERS: NA
# CALLED IN: get_formatted_summary()
#
# ARGUMENTS:
# pool_tb = collapsed allele table generated by "collapse" mode (read in from file name)
# sample_name = name of user-input CRISPResso2 run sample from ot_sample_csv file
#
# OUTPUT:
# pool_summary = table containing % edited and unedited alleles for all off-targets in CRISPResso run
get_group_summary_table <- function(pool_tb, sample_name){
  #vector of fixed pool_tb headers (metadata columns; every remaining column is an
  # off-target frequency column or a "<ot>_reads" count column)
  fixed_col_names <- c("Aligned_Sequence", "Reference_Sequence", "Unedited", "n_deleted",
                       "n_inserted", "n_mutated", "indel")
  #remove "X" from beginning of off-target names (column headers) if they begin with numbers or
  # special symbols
  names(pool_tb) <- gsub("^X", "", names(pool_tb))
  #get table with total Unedited frequency across all off-targets within CRISPResso sample/run
  #NOTE(review): assumes the collapsed table has exactly one "Unedited" row, so that
  # pool_summary below ends up with row 1 = Unedited and row 2 = Edited — verify upstream
  unedited <- pool_tb %>%
    filter(indel == "Unedited") %>%
    select(which(! names(pool_tb) %in% fixed_col_names)) %>%
    mutate(indel = "Unedited", sample = sample_name)
  #get table with total Edited frequency across all off-targets within CRISPResso sample/run
  # (colSums() returns a named numeric vector, one entry per off-target column)
  edited <- pool_tb %>%
    filter(indel != "Unedited") %>%
    select(which(! names(pool_tb) %in% fixed_col_names))%>%
    colSums(na.rm = TRUE)
  #bind edited & unedited data together
  #NOTE(review): rbind() appends the named numeric vector as a row of the data frame;
  # unedited carries two extra columns (indel, sample) that the vector lacks — confirm
  # rbind.data.frame handles this as intended before the explicit overwrite below
  pool_summary <- rbind(unedited, edited)
  #rename edited indel and sample appropriately
  pool_summary$indel[2] <- "Edited"
  pool_summary$sample[2] <- sample_name
  return(pool_summary)
}
###### pair_mock_v_edited_samples_by_donor() ##################################################################
# Reads in ot_sample_csv table and adds a column complete_for_stats indicated whether each donor has both
# edited and mock CRIPSResso2 samples (ready for statistics). Each row contains mock & edited sample pairings
# by donor (if a donor has 1 mock and 2 edited samples, the mock will be repeated in 2 rows and paired
# with a different edited sample in each row).
#
# CALLS HELPERS: get_stat_comparison_sample_tb()
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# ot_sample_csv = name of ot_sample_csv file (contains donor, condition, CRISPResso_dir_name, sample_name)
#
# OUTPUT:
# donor_paired_tb = table containing "mock" and "edited" sample columns in which rows contain paired
# samples by donor (sets of samples appropriate for statistical analyses)
pair_mock_v_edited_samples_by_donor <- function(ot_sample_csv){
  #get the sample metadata annotated with complete_for_stats
  # NOTE(review): the table is annotated, not filtered -- donors lacking one
  # condition are harmless below because they generate zero pairings (rep with
  # times/each of 0 produces empty vectors)
  stat_comp_tb <- get_stat_comparison_sample_tb(ot_sample_csv)
  #get list of unique donors for statistical comparison
  unique_donors <- unique(stat_comp_tb$donor)
  # pair donor/mock in donor_paired_tb (starts as an empty 3-column frame)
  donor_paired_tb <- data.frame(donor = as.character(c()), mock = as.character(c()), edited = as.character(c()),
                                stringsAsFactors=FALSE)
  #for each donor that has both mock and edited samples
  for(donor in unique_donors){
    #get the names of the donor's samples
    # NOTE(review): grepl() here is case-sensitive, unlike the ignore.case = TRUE
    # matching in get_stat_comparison_sample_tb() -- confirm "mock" casing is
    # consistent in the sample sheet
    donor_mock_samples <- stat_comp_tb[stat_comp_tb$donor == donor & grepl("mock", stat_comp_tb$condition),]$sample_name#[mock_samples_idx]
    donor_trt_samples <- stat_comp_tb[stat_comp_tb$donor == donor & !grepl("mock", stat_comp_tb$condition), ]$sample_name#[trt_samples_idx]
    n_mock_samples <- length(donor_mock_samples)
    n_trt_samples <- length(donor_trt_samples)
    #generate the donor-edited sample pairings (redundancy allowed): every
    # mock x edited combination for this donor appears exactly once
    donor_paired_samples <- data.frame(donor = as.character(rep(donor, times = n_mock_samples*n_trt_samples)),
                                       mock = as.character(rep(donor_mock_samples,
                                                               each = n_trt_samples)),
                                       edited = as.character(rep(donor_trt_samples,
                                                                 times = n_mock_samples)),
                                       stringsAsFactors = FALSE)
    donor_paired_tb <- rbind(donor_paired_tb, donor_paired_samples)
  }
  #add a unique per-pairing id of the form "<donor>_<row number>"
  donor_paired_tb <- donor_paired_tb %>%
    mutate(sample = paste(donor, row.names(donor_paired_tb), sep = "_"))
  return(donor_paired_tb)
}
###### get_stat_comparison_sample_tb() ##################################################################
# Reads in ot_sample_csv table and adds a column complete_for_stats indicated whether each donor has both
# edited and mock CRIPSResso2 samples (ready for statistics).
#
# CALLS HELPERS: NA
# CALLED IN: pair_mock_v_edited_samples_by_donor()
#
# ARGUMENTS:
# ot_sample_csv = name of ot_sample_csv file (contains donor, condition, CRISPResso_dir_name, sample_name)
#
# OUTPUT:
# sample_metadata = sample metadata table containing complete_for_stats column
get_stat_comparison_sample_tb <- function(ot_sample_csv){
  # Read the sample metadata sheet and flag, per row, whether that row's donor has
  # BOTH a mock and an edited sample (i.e. is usable for statistical comparison).
  #
  # ARGUMENTS:
  #   ot_sample_csv = path to the sample sheet CSV (columns include donor,
  #                   condition, sample_name)
  # OUTPUT:
  #   sample_metadata = the sheet with a logical complete_for_stats column added
  sample_metadata <- read.csv(file = ot_sample_csv, stringsAsFactors = FALSE)
  #classify each row as mock vs. edited once (case-insensitive match on "mock")
  is_mock <- grepl("mock", sample_metadata$condition, ignore.case = TRUE)
  #donors with at least one mock sample (unique() added: the original kept
  # duplicates here, which intersect() tolerated but was needless)
  donors_with_mock <- unique(sample_metadata$donor[is_mock])
  #donors with at least one edited sample
  donors_with_edited <- unique(sample_metadata$donor[!is_mock])
  #donors with both mock and edited samples
  donors_complete <- intersect(donors_with_mock, donors_with_edited)
  #%in% already yields TRUE/FALSE -- the original ifelse(x, TRUE, FALSE) was
  # redundant; plain base assignment also drops the dplyr dependency here
  sample_metadata$complete_for_stats <- sample_metadata$donor %in% donors_complete
  return(sample_metadata)
}
###### get_ttest_table() ##################################################################
#Takes a list of unique off-target names and edited summary table and performs a t-test
# comparing the mean editing frequency between Edited and Mock samples. Because all mock & edited samples
# are paired by donor, all Mocks are averaged (and all Edited samples are averaged) for t.test calculations.
# This does mean that some samples are represented twice (ex. Mock1 is represented twice if both Edited1
# and Edited2 are from the same donor and thus paired with Mock1.)
# Returns a table of off-targets and their corresponding t-test p-values, as well as whether the
# difference between Edited and Mock is significant.
# (NOTE: statistical comparison is ONLY performed for donors with AT LEAST one mock and one edited sample.
# Comparisons are skipped where there is only one Edited or Mock sample available for the target across
# donors.)
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# off_targets = list of off-targets for statistical comparison
# edited_summary_tb = summary table of off-target editing frequencies
# paired_stats_tb = table storing mock-edited sample pairings for each donor appropriate for statistical
# comparisons
#
# OUTPUT:
# ots_pval_tb = table showing median & mean editing frequencies for mock and edited samples as well as
# the p-value of the one-tailed t.test (alpha = 0.05) comparison and effect size
# (ordered by off-target)
get_ttest_table <- function(off_targets, edited_summary_tb, paired_stats_tb){
  #initalize p-value vector (vectors below are grown per iteration; acceptable
  # for the modest number of off-targets handled here)
  p_vals <- c()
  #initalize effect size vector (Cohen's d)
  d_vals <- c()
  #initalize median vectors
  control_median <- c()
  edited_median <- c()
  #initalize mean vectors
  control_mean <- c()
  edited_mean <- c()
  #initalize sd vectors
  control_sd <- c()
  edited_sd <- c()
  #calculate p-values with t-test comparing Mock v. Edited samples for each off-target
  # NOTE(review): seq(1, length(off_targets)) evaluates to c(1, 0) when off_targets
  # is empty -- callers must pass at least one off-target
  for(n in seq(1, length(off_targets))){
    #get rows that correspond with the off-target of interest (rows with any NA dropped)
    ot_only_tb <- edited_summary_tb %>%
      filter(complete.cases(edited_summary_tb)) %>%
      filter(off_target == off_targets[n])
    edited_freq <-ot_only_tb[ot_only_tb$sample %in% paired_stats_tb$edited,]$frequency
    control_freq <-ot_only_tb[ot_only_tb$sample %in% paired_stats_tb$mock,]$frequency
    #calculate median
    edited_median <- c(edited_median, median(edited_freq, na.rm = TRUE))
    control_median <- c(control_median, median(control_freq, na.rm = TRUE))
    #calculate mean
    ot_edited_mean <- mean(edited_freq, na.rm = TRUE)
    ot_control_mean <- mean(control_freq, na.rm = TRUE)
    edited_mean <- c(edited_mean, ot_edited_mean)
    control_mean <- c(control_mean, ot_control_mean)
    #calculate standard deviation
    ot_edited_sd <- sd(edited_freq, na.rm = TRUE)
    ot_control_sd <- sd(control_freq, na.rm = TRUE)
    edited_sd <- c(edited_sd, ot_edited_sd)
    control_sd <- c(control_sd, ot_control_sd)
    #check that there is sufficient variance and at least two values per group
    # (t.test errors on constant or length-1 input)
    # NOTE(review): edited_freq != control_freq recycles when the two groups have
    # different lengths -- confirm that warning is acceptable here
    if( any(edited_freq != control_freq) & (length(edited_freq) > 1) & (length(control_freq) > 1) ){
      #assuming edited_freq is larger than control_freq: one-tailed unpaired test,
      # alpha = 0.05; p-values are NOT adjusted for multiple comparisons
      p_vals <- c(p_vals,
                  t.test(edited_freq, control_freq, alternative = "greater", paired = FALSE)$p.value)
      #calculate cohen's d with an equal-weight pooled SD
      # NOTE(review): this pooling formula assumes similar group sizes -- confirm
      # it is acceptable when mock and edited sample counts differ
      pooled_sd <- sqrt(( ot_edited_sd^2 + ot_control_sd^2 ) / 2)
      cohen_d <- ( ot_edited_mean - ot_control_mean ) / pooled_sd
      d_vals <- c(d_vals, cohen_d)
    }else{
      #insufficient data/variance for this off-target: record NA statistics
      p_vals <- c(p_vals, NA)
      d_vals <- c(d_vals, NA)
    }
  }
  #generate data frame with off_targets, the t-test p-value associated with it,
  # and boolean (significant or not, at alpha = 0.05)
  ots_pval_tb <- data.frame(off_target = off_targets,
                            edited_median = edited_median,
                            control_median = control_median,
                            edited_mean = edited_mean,
                            control_mean = control_mean,
                            edited_sd = edited_sd,
                            control_sd = control_sd,
                            ttest_p_value = p_vals,
                            eff_size = d_vals) %>%
    mutate(significant = ttest_p_value < 0.05)
  #return rows sorted by off_target name
  return(ots_pval_tb[order(ots_pval_tb$off_target),])
}
###### save_editing_results() ##################################################################
# Formats and saves off-target editing frequency table
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# date = date in YYYYMMDD format
# edited_summary_tb = summary table of off-target editing frequencies to be formatted and saved
#
# OUTPUT: none
save_editing_results <- function(date, edited_summary_tb){
  # Write the off-target editing summary to
  # "<date>_CRISPResso_OT_editing_summary.csv" in the current working directory.
  #format editing results: rename the frequency column, sort rows alphanumerically
  # by name_seq (decreasing), then drop plotting-only helper columns
  edited_summary_tb_csv <- edited_summary_tb %>%
    rename(editing_freq = frequency) %>%
    order_alpha("name_seq", decreasing_bool = TRUE) %>%
    select(-c(log_freq, indel, name_seq))
  write.csv(edited_summary_tb_csv, paste(date, "CRISPResso_OT_editing_summary.csv", sep = "_"),
            row.names = FALSE)
}
###### check_plot_aesthetics() ##################################################################
# Check whether aesthetics (color, fill, shape) values have been entered by the user for editing dotplot
# generation. If not, fill in table with default aesthetics scale values.
#
# CALLS HELPERS: get_default_palette()
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# aes_tb = data frame storing aesthetics (color, fill, shape) to be used for the dotplot
#
# OUTPUT:
# aes_tb = aes_tb with default color, fill, and shape values (unless user had entered values)
check_plot_aesthetics <- function(aes_tb){
  # Fill in default dotplot aesthetics (R_color, R_fill, R_shape) for any aesthetic
  # column the user did not supply; user-provided columns are left untouched.
  #
  # ARGUMENTS:
  #   aes_tb = data frame of per-sample plot aesthetics (one row per sample); must
  #            contain a "condition" column when R_shape defaults are needed
  # OUTPUT:
  #   aes_tb with R_color, R_fill, and R_shape columns guaranteed to exist
  if(! "R_color" %in% names(aes_tb)){
    #get custom palette of 30 distinct default colors
    default_palette <- get_default_palette()
    #set default colors; rep_len() recycles the palette when there are more than
    # 30 rows (the original default_palette[1:nrow(aes_tb)] indexing produced NA
    # colors past the palette length)
    aes_tb$R_color <- rep_len(default_palette, nrow(aes_tb))
  }
  if(! "R_fill" %in% names(aes_tb)){
    #default fill matches the point color
    aes_tb$R_fill <- aes_tb$R_color
  }
  if(! "R_shape" %in% names(aes_tb)){
    #determine which samples are mock/control in aes_tb
    is_mock <- aes_tb$condition == "mock"
    #set defaults: control/mock = open circle (1), edited = filled circle (16)
    aes_tb$R_shape <- ifelse(is_mock, 1, 16)
  }
  return(aes_tb)
}
###### get_default_palette() ##################################################################
# Returns vector of default colors for dotplot color & fill aesthetics. Colors were chosen to be
# dark and distinct, though the colors become more similar as the number of visualized samples
# increases.
#
# CALLS HELPERS: NA
# CALLED IN: check_plot_aesthetics()
#
# ARGUMENTS: none
#
# OUTPUT:
# default_palette = vector storing 30 default colors for dotplot color & fill aesthetics
get_default_palette <- function(){
  # Default qualitative palette for dotplot color/fill aesthetics: 30 dark,
  # visually distinct colors. Order matters -- callers take the first n entries --
  # so the rows below preserve the original sequence (each row cycles through
  # red, blue, green, orange/yellow, purple, brown).
  palette_row_1 <- c("firebrick", "cornflowerblue", "olivedrab", "goldenrod", "mediumpurple", "tan4")
  palette_row_2 <- c("tomato3", "steelblue1", "seagreen3", "darkorange", "darkorchid", "burlywood4")
  palette_row_3 <- c("indianred1", "royalblue3", "palegreen4", "coral", "purple1", "peru")
  palette_row_4 <- c("firebrick1", "deepskyblue", "darkseagreen", "chocolate1", "palevioletred1", "tan")
  palette_row_5 <- c("darkred", "slateblue3", "springgreen", "darkgoldenrod", "plum", "wheat4")
  c(palette_row_1, palette_row_2, palette_row_3, palette_row_4, palette_row_5)
}
###### order_alpha() ##################################################################
# Generate alphabetical levels for a specific column (for plotting purposes)
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# data_tb = any data frame
# colname = data_tb column name to save as factor with sorted levels
# decreasing_bool = logical indicating whether the alphanumeric sort is decreasing
#
# OUTPUT:
# ordered_tb = returns the data_tb with the colname values saved as factors with alphanumeric sorted
# levels
order_alpha <- function(data_tb, colname, decreasing_bool){
  # Sort rows alphanumerically on the target column (numeric-aware sort via
  # stringr::str_order), then freeze that order as ordered factor levels so
  # downstream ggplot calls respect it.
  row_order <- str_order(data_tb[,colname], decreasing = decreasing_bool,
                         numeric = TRUE)
  sorted_tb <- data_tb[row_order,]
  sorted_tb[,colname] <- factor(sorted_tb[,colname],
                                levels = unique(sorted_tb[,colname]),
                                ordered = TRUE)
  return(sorted_tb)
}
###### order_by_pval() ##################################################################
# Convert a column to a factor whose levels follow the table's current row order
# (the table is expected to already be sorted, e.g. by t-test p-value), so that
# plots preserve that ordering.
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
#   data_tb = any data frame (typically pre-sorted by its ttest_p_value column)
#   colname = data_tb column name to save as a factor with levels in row order
#
# OUTPUT:
#   data_tb = the input data_tb with the colname values saved as a factor whose
#             levels preserve the table's current row order
order_by_pval <- function(data_tb, colname){
  # Freeze the column's current row order as factor levels (the table is assumed
  # to be pre-sorted, e.g. by p-value) so ggplot keeps this ordering.
  col_levels <- unique(data_tb[,colname])
  data_tb[,colname] <- factor(data_tb[,colname], levels = col_levels)
  return(data_tb)
}
###### get_aes_scale_values_for_dotplot() ##################################################################
#Takes a data frame containing all information necessary to plot the editing frequency dotplot and
# generates the list of aesthetics (color, fill, shape) to pass to scale_manual when generating dotplot.
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# plotting_tb = data frame containing editing frequencies and target sequences to be plotted in dotplot
#
# OUTPUT:
# aes_val_list = list of dotplot aesthetics scale values (color, fill, shape)
get_aes_scale_values_for_dotplot <- function(plotting_tb){
  # Build the scale_*_manual value lists (color, fill, shape) for the editing
  # dotplot from the per-row aesthetic columns of plotting_tb.
  #
  # ARGUMENTS:
  #   plotting_tb = data frame with group, R_color, R_fill, R_shape columns
  # OUTPUT:
  #   aes_val_list = list of three elements (color_scale_val, fill_scale_val,
  #                  shape_scale_val); each is list("Group", <values named by
  #                  group, one entry per unique group>)
  #
  #helper deduplicating the three copy-pasted stanzas of the original: name the
  # aesthetic values by group and keep the first occurrence per group
  build_scale <- function(values, groups){
    names(values) <- groups
    list("Group", values[!duplicated(names(values))])
  }
  aes_val_list <- list("color_scale_val" = build_scale(plotting_tb$R_color, plotting_tb$group),
                       "fill_scale_val"  = build_scale(plotting_tb$R_fill,  plotting_tb$group),
                       "shape_scale_val" = build_scale(plotting_tb$R_shape, plotting_tb$group))
  return(aes_val_list)
}
###### get_heatmap_legend_values() ##################################################################
get_heatmap_legend_values <- function(total_read_vector){
  # Compute legend break points for the read-coverage heatmap color scale.
  #
  # ARGUMENTS:
  #   total_read_vector = numeric vector of total read counts (NAs tolerated)
  # OUTPUT:
  #   legend_breaks = numeric breaks starting at 0 and covering at least
  #                   max(total_read_vector)
  #calculate legend breaks and values for read coverage legend
  max_reads <- max(total_read_vector, na.rm = TRUE)
  #order of magnitude of the maximum read count
  max_log10 <- round(log10(max_reads), digits = 0)
  max_legend_val <- round(max_reads, digits = -max_log10) + 10^max_log10
  #candidate break grid in steps of one-tenth the magnitude
  legend_breaks_full <- seq(from = 0, to = max_legend_val, by = 10^(max_log10 - 1))
  #indices of the two smallest grid points exceeding max_reads
  # NOTE(review): these are INDICES into legend_breaks_full, and the search below
  # treats them as candidate endpoint indices, not break values -- confirm intended
  smallest_max_legend_vals <- which(legend_breaks_full > max_reads)[1:2]
  #calculate the best legend label increments such that:
  # -there are at >= 5 labels
  # -the labels encompass 10^(max_log10 - 1) to at least the max_reads
  #enumerate every (candidate endpoint, step factor 1..11) combination
  max_legend_val_idx <- data.frame(max_legend_val = rep(smallest_max_legend_vals, each = 11),
                                   factor = rep(seq(1,11), times = length(smallest_max_legend_vals))) %>%
    mutate(divisible_by = max_legend_val%%factor == 0)
  #last index reachable and label count for each combination
  max_legend_val_idx$last_idx <- mapply(function(max_legend_val, factor) tail( seq(2, max_legend_val, by = factor), 1),
                                        max_legend_val_idx$max_legend_val, max_legend_val_idx$factor)
  max_legend_val_idx$n_labels <- mapply(function(max_legend_val, factor) length(seq(2, max_legend_val, by = factor)),
                                        max_legend_val_idx$max_legend_val, max_legend_val_idx$factor)
  #keep combinations that reach the required endpoint with an acceptable label
  # count, then prefer the largest step and the smallest endpoint
  max_legend_val_idx <- max_legend_val_idx %>%
    mutate(pass_n_labels = ifelse((n_labels == last_idx) | (n_labels >= 4 & n_labels <= 8), TRUE, FALSE)) %>%
    filter(last_idx >= smallest_max_legend_vals[1]) %>%
    filter( pass_n_labels ) %>%
    filter(factor == max(factor)) %>%
    filter(max_legend_val == min(max_legend_val))
  #assemble final breaks: 0 plus the selected stride through the candidate grid
  legend_breaks <- legend_breaks_full[c(1, seq(2, max_legend_val_idx$max_legend_val,
                                               by = max_legend_val_idx$factor))]
  return(legend_breaks)
}
###### make_compiled_OT_editing_dotplot() ##################################################################
#Takes a summary table with all samples, all off-targets, and all editing outcomes and
# generates editing summary dotplot. Also takes fill and color parameters for ggplot2. (The off-targets
# are in the same order as the dotplot so that they are aligned when displayed together.)
# Returns a dotplot that is meant to be placed in a composite graph with a coverage heatmap.
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# summary_tb = data frame containing read counts for each target and sample
# scale_size_by_editing_freq = logical, whether or not to separate geom_points by size according to read coverage
# in the editing scatterplot
# fill = list containing c(name, values) for scale_fill_manual
# color = list containing c(name, values) for scale_color_manual
# shape = list containing c(name, values) for scale_shape_manual
# size = list containing aesthetic values for size of scatterplot/dotplot points
# guide_font_size = font size of guides/targets
# editing_freq_scale = editing frequency log10 scale
#
# OUTPUT:
# dotplot = dotplot of editing frequency for each target across samples
make_compiled_OT_editing_dotplot <- function(summary_tb, scale_size_by_editing_freq,
                                             fill, color, shape, size,
                                             guide_font_size, editing_freq_scale){
  # Build the per-target editing-frequency jitter plot (coordinates flipped so
  # targets run down the y axis, aligned with the companion coverage heatmap).
  dotplot <- ggplot(data = summary_tb,
                    aes(x = name_seq,
                        y = frequency))
  #generate jittered dotplot depending on whether scale_size_by_editing_freq == TRUE
  if(scale_size_by_editing_freq){
    #point size mapped to the per-row R_point_size category, with a size legend
    dotplot <- dotplot +
      geom_jitter(aes(fill = group,
                      color = group,
                      shape = group,
                      size = R_point_size),
                  width = 0.1,
                  height = 0) +
      scale_size_manual(name = size[[1]],
                        values = size[[2]],
                        labels = size[[3]],
                        guide = guide_legend(ncol = 1))
  }else{
    #fixed point size taken from the first row; size legend suppressed
    # NOTE(review): guides(size = FALSE) is deprecated in ggplot2 >= 3.3.4
    # (use guides(size = "none")) -- confirm the pinned ggplot2 version
    dotplot <- dotplot +
      geom_jitter(aes(fill = group,
                      color = group,
                      shape = group),
                  size = summary_tb$R_point_size[1],
                  width = 0.1,
                  height = 0) +
      guides(size = FALSE)
  }
  dotplot <- dotplot +
    xlab("") +
    ylab("% Editing Frequency\n") +
    #log10 y axis; the first break acts as a pseudo-zero floor labeled "0",
    # and the final two breaks are labeled 10 and 100
    scale_y_continuous(position = "right", trans="log10",
                       limits = c(editing_freq_scale[1], 100),
                       breaks = editing_freq_scale,
                       labels = c(0, editing_freq_scale[2:(length(editing_freq_scale)-2)], 10, 100)) +
    coord_flip() +
    #reverse target order so the first target appears at the top after coord_flip
    scale_x_discrete(limits = rev(levels(summary_tb$name_seq)),
                     labels = rev(levels(summary_tb$name_seq)),
                     breaks = rev(levels(summary_tb$name_seq))) +
    scale_fill_manual(name = fill[[1]],
                      values = fill[[2]],
                      guide = guide_legend(ncol = 1)) +
    scale_color_manual(name = color[[1]],
                       values = color[[2]],
                       guide = guide_legend(ncol = 1)) +
    scale_shape_manual(name = shape[[1]],
                       values = shape[[2]],
                       guide = guide_legend(ncol = 1)) +
    theme_classic() +
    #monospace axis labels so the guide/target sequences align vertically
    theme(axis.text.y = element_text(family = "Courier" , size = guide_font_size))
  return(dotplot)
}
###### make_compiled_summary_OT_editing_boxplot() ##################################################################
#Takes a summary table with all samples, all off-targets, and all editing outcomes and
# generates editing summary boxplot displaying the statistics of the mock and edited mean/median.
# Also takes fill and color parameters for ggplot2. (The off-targets are in the same order as the dotplot so
# that they are aligned when displayed together.)
# Returns a dotplot that is meant to be placed in a composite graph with a coverage heatmap.
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# summary_tb = data frame containing read counts for each target and sample
# scale_size_by_editing_freq = logical, whether or not to separate geom_points by size according to read coverage
# in the editing scatterplot
# fill = list containing c(name, values) for scale_fill_manual
# color = list containing c(name, values) for scale_color_manual
# shape = list containing c(name, values) for scale_shape_manual
# size = list containing aesthetic values for size of scatterplot/dotplot points
# guide_font_size = font size of guides/targets
# editing_freq_scale = editing frequency log10 scale
#
# OUTPUT:
#   boxplot = boxplot of editing frequencies (control/mock vs. edited, with mean markers) for each target across samples
make_compiled_summary_OT_editing_boxplot <- function(summary_tb,
                                                     #fill, color, shape, size,
                                                     guide_font_size, editing_freq_scale){
  # Build the per-target mock-vs-edited boxplot summary (coordinates flipped,
  # aligned with the companion coverage heatmap).
  #Add "mean" column so the mean marker can get its own shape legend entry
  summary_tb <- summary_tb %>%
    mutate(mean = "mean")
  boxplot <- ggplot(data = summary_tb,
                    aes(x = name_seq,
                        y = frequency,
                        fill = factor(condition))) +
    geom_boxplot(position = position_dodge(0.5),
                 alpha = 0.5,
                 width = 0.5,
                 color = "gray60") +
    #overlay a point at the group mean
    # NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (use fun =) -- confirm
    # the pinned ggplot2 version
    stat_summary(fun.y = mean,
                 geom = "point",
                 size = 2,
                 color = "black",
                 show.legend = FALSE,
                 position = position_dodge(0.5),
                 aes(shape = mean)) +
    #invisible layer (alpha = 0) solely to force the mean's shape legend entry
    geom_point(alpha = 0,
               fill = "black",
               aes(shape = mean)) +
    xlab("") +
    ylab("% Editing Frequency\n") +
    #log10 y axis; the first break is a pseudo-zero floor labeled "0"
    scale_y_continuous(position = "right", trans="log10",
                       limits = c(editing_freq_scale[1], 100),
                       breaks = editing_freq_scale,
                       labels = c(0, editing_freq_scale[2:(length(editing_freq_scale)-2)], 10, 100)) +
    coord_flip() +
    #reverse target order so the first target appears at the top after coord_flip
    scale_x_discrete(limits = rev(levels(summary_tb$name_seq)),
                     labels = rev(levels(summary_tb$name_seq)),
                     breaks = rev(levels(summary_tb$name_seq))) +
    scale_fill_manual(name = "",
                      values = c("control" = "lightskyblue",
                                 "edited" = "lightcoral"), #"mean" = "black"
                      labels = c("control" = "Control/Mock",
                                 "edited" = "Edited"), #"mean" = "Mean % Editing"
                      guide = guide_legend(ncol = 1)) +
    scale_shape_manual(name = "",
                       values = c("mean" = 18),
                       labels = c("mean" = "Mean % Editing")) +
    theme_classic() +
    theme(axis.text.y = element_text(family = "Courier" , size = guide_font_size)) +
    #restore full opacity for the mean marker shown in the shape legend
    guides(shape=guide_legend(title=NULL, override.aes = list(alpha = 1)))
  return(boxplot)
}
###### make_compiled_OT_coverage_heatmap() ##################################################################
#Takes a summary table with all samples, all off-targets, and all editing outcomes and generates read
# coverage heatmap. (The off-targets are in the same order as the dotplot so that they are aligned when
# displayed together)
# Returns a heatmap that is meant to be placed in a composite graph with an editing dotplot.
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# summary_tb = data frame containing read counts for each target and sample
# guide_font_size = font size of guides/targets (not displayed in figure, mainly for formatting)
# group_font_size = font size of groups/samples across top of heatmap
# tile_font_size = font size of read counts displayed inside heatmap tiles
#
# OUTPUT:
# heatmap = heatmap of read coverage for each target across samples
make_compiled_OT_coverage_heatmap <- function(summary_tb, guide_font_size,
                                              group_font_size, tile_font_size,
                                              legend_breaks){
  #generate heatmap of total reads per target (rows, after coord_flip) per sample
  heatmap <- ggplot(data = summary_tb,
                    aes(x = name_seq,
                        y = group)) +
    geom_tile(aes(fill = total_reads)) +
    #print the read count inside each tile; "NA" text for missing counts is blanked
    # NOTE(review): `accuracy` is not a base format() argument (it belongs to
    # scales::number) -- confirm it has the intended effect here
    geom_text(aes(label = gsub("NA", "",
                               format(ceiling(total_reads),
                                      trim = FALSE, accuracy = 1, big.mark = ","))),
              size = tile_font_size,
              family = "Courier") +
    xlab("") +
    ylab("Total Reads per Sample\n") +
    #target axis: reversed to match the dotplot; labels suppressed (breaks = c())
    scale_x_discrete(limits = rev(unique(summary_tb$name_seq)),
                     breaks = c(),
                     expand = c(0, 0)) +
    #sample axis along the top; single spaces in group names become line breaks
    scale_y_discrete(position = "right",
                     limits = levels(summary_tb$group),
                     labels = c(gsub("[ ]{1}", "\n", levels(summary_tb$group))),
                     expand = c(0, 0)) +
    #read-coverage gradient; first break unlabeled, the rest comma-formatted
    scale_fill_gradientn(name = "",
                         colours = c("grey95", "skyblue", "cornflowerblue"),
                         limits = c(legend_breaks[1], legend_breaks[length(legend_breaks)]),
                         breaks = legend_breaks,
                         labels = c("", as.character(comma(legend_breaks[2:length(legend_breaks)]))),
                         na.value = "white") +
    coord_flip() +
    theme_classic() +
    theme(legend.position = "left",
          axis.text.y = element_text(family = "Courier" , size = guide_font_size),
          axis.text.x = element_text(colour = "grey50", size = group_font_size))
  return(heatmap)
}
###### make_composite_grobPlot() ##################################################################
### Generates a composite plot showing coverage as a heatmap on the left and % editing as
### a dotplot on the right.
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# heatmap = ggplot heatmap object to be plotted
# dotplot = ggplot dotplot object to be plotted
# heatmap_width = width of heatmap relative to dotplot_width
# dotplot_width = width of dotplot relative to heatmap_width
#
# OUTPUT:
# composite_grob = composite plot as grob (and displays the grob object)
make_composite_grobPlot <- function(heatmap, dotplot, heatmap_width, dotplot_width){
  # Compose the coverage heatmap (left) and editing dotplot (right) side by side
  # with aligned axes and the requested relative widths.
  #
  # ARGUMENTS:
  #   heatmap / dotplot = ggplot objects to combine
  #   heatmap_width / dotplot_width = relative widths of the two panels
  # OUTPUT:
  #   composite_grob = the combined plot object (from cowplot::plot_grid)
  #start a fresh graphics page before composing
  grid.newpage()
  composite_grob <- plot_grid(heatmap, dotplot, ncol = 2, align = "h", axis = "bt",
                              rel_widths = c(heatmap_width, dotplot_width))
  # NOTE(review): the original also computed ggplotGrob() for both inputs but never
  # used the results, and evaluated `composite_grob` bare -- a no-op inside a
  # function (bare expressions only auto-print at top level); both removed here.
  return(composite_grob)
}
###### save_composite_plot() ##################################################################
# Takes file_name (without extension), composite plot object, and width/height of saved plot (in inches)
# and saves the plot as .pdf and png
#
# CALLS HELPERS: NA
# CALLED IN: summarize_off_targets()
#
# ARGUMENTS:
# file_name = file name of saved plots
# composite_plot = composite plot object to be plotted
# plot_width_in = width of saved plots in inches
# plot_height_in = height of saved plots in inches
#
# OUTPUT: none
save_composite_plot <- function(file_name, composite_plot, plot_width_in, plot_height_in){
  # Save the composite plot to "<file_name>.png" at the given size in inches.
  #save composite plot as individual pdf
  # ggsave(paste0(file_name, ".pdf"), plot = composite_plot,
  #        width = plot_width_in, height = plot_height_in,
  #        units = "in")
  #save composite plot as png; paste0() replaces the original paste(sep = "")
  ggsave(paste0(file_name, ".png"), plot = composite_plot,
         width = plot_width_in, height = plot_height_in,
         units = "in")
}
|
# -*- R -*-
## Primary citation entry for the dtw package (Journal of Statistical Software, 2009).
bibentry(bibtype = "Article",
         header = "To cite dtw in publications use:",
         title = "Computing and Visualizing Dynamic Time Warping Alignments in {R}: The {dtw} Package",
         author = as.person("Toni Giorgino"),
         journal = "Journal of Statistical Software",
         year = "2009",
         volume = "31",
         number = "7",
         pages = "1--24",
         doi = "10.18637/jss.v031.i07"
         ## textVersion = paste("Toni Giorgino (2009).",
         ##  "Computing and Visualizing Dynamic Time Warping Alignments in R: The dtw Package.",
         ##  "Journal of Statistical Software, 31(7), 1-24.",
         ##  "URL https://www.jstatsoft.org/v31/i07/"),
)
## Secondary citation entry (partial matching / normalization strategies).
bibentry(bibtype = "Article",
         header = "For partial matching and normalization strategies also cite:",
         title = "Matching Incomplete Time Series with Dynamic Time Warping: An Algorithm and an Application to Post-Stroke Rehabilitation",
         ## personList() was deprecated in R 4.4.0; person objects are combined
         ## with c() instead
         author = c(
             as.person("Paolo Tormene"),
             as.person("Toni Giorgino"),
             as.person("Silvana Quaglini"),
             as.person("Mario Stefanelli")),
         journal = "Artificial Intelligence in Medicine",
         year = "2008",
         volume = "45",
         number = "1",
         pages = "11--34",
         doi = "10.1016/j.artmed.2008.11.007"
         ## url = "http://dx.doi.org/10.1016/j.artmed.2008.11.007",
         ## textVersion = paste("Paolo Tormene, Toni Giorgino, Silvana Quaglini, Mario Stefanelli (2008).",
         ##  "Matching Incomplete Time Series with Dynamic Time Warping: An Algorithm and an Application to Post-Stroke Rehabilitation.",
         ##  "Artificial Intelligence in Medicine, 45(1), 11-34.",
         ##  "doi:10.1016/j.artmed.2008.11.007"),
)
|
/inst/CITATION
|
no_license
|
cran/dtw
|
R
| false
| false
| 2,012
|
# -*- R -*-
## Primary citation entry for the dtw package (Journal of Statistical Software, 2009).
bibentry(bibtype = "Article",
         header = "To cite dtw in publications use:",
         title = "Computing and Visualizing Dynamic Time Warping Alignments in {R}: The {dtw} Package",
         author = as.person("Toni Giorgino"),
         journal = "Journal of Statistical Software",
         year = "2009",
         volume = "31",
         number = "7",
         pages = "1--24",
         doi = "10.18637/jss.v031.i07"
         ## textVersion = paste("Toni Giorgino (2009).",
         ##  "Computing and Visualizing Dynamic Time Warping Alignments in R: The dtw Package.",
         ##  "Journal of Statistical Software, 31(7), 1-24.",
         ##  "URL https://www.jstatsoft.org/v31/i07/"),
)
## Secondary citation entry (partial matching / normalization strategies).
bibentry(bibtype = "Article",
         header = "For partial matching and normalization strategies also cite:",
         title = "Matching Incomplete Time Series with Dynamic Time Warping: An Algorithm and an Application to Post-Stroke Rehabilitation",
         ## personList() was deprecated in R 4.4.0; person objects are combined
         ## with c() instead
         author = c(
             as.person("Paolo Tormene"),
             as.person("Toni Giorgino"),
             as.person("Silvana Quaglini"),
             as.person("Mario Stefanelli")),
         journal = "Artificial Intelligence in Medicine",
         year = "2008",
         volume = "45",
         number = "1",
         pages = "11--34",
         doi = "10.1016/j.artmed.2008.11.007"
         ## url = "http://dx.doi.org/10.1016/j.artmed.2008.11.007",
         ## textVersion = paste("Paolo Tormene, Toni Giorgino, Silvana Quaglini, Mario Stefanelli (2008).",
         ##  "Matching Incomplete Time Series with Dynamic Time Warping: An Algorithm and an Application to Post-Stroke Rehabilitation.",
         ##  "Artificial Intelligence in Medicine, 45(1), 11-34.",
         ##  "doi:10.1016/j.artmed.2008.11.007"),
)
|
|
## Put comments here that give an overall description of what your
## functions do
#Programming Assignment 2 to understand lexical scoping and get more practice with
#defining functions. The code below is based on the sample code given for the makeVector
#and cachemean functions.
#the curly braces in makeCacheMatrix help me, as a beginner in R, understand the scope of
#individual functions that are then passed to the List function. The post "assign 2 how
#i run the code to see if I understand it?" and blog post "https://asitarrives.wordpress.
#com/2014/10/18/understanding-lexical-scoping-in-r-great-guidance-for-community-ta-
#in-coursera/" were very helpful for figuring this out and understanding what the
#code was doing.
## Write a short comment describing this function: makeCacheMatrix is a function that accepts
#the matrix to be inversed, and then sets up four functions for storing and retrieving the
#original matrix (set and get) and the inverse matrix (setInverse and getInverse). When
#called, by typing makeCacheMatrix(m), where "m" is a defined matrix object,
#it makes the inv null and puts the matrix in a variable x using the double assignment
#so that it is available outside the set function. The setInverse and getInverse are
#used by the cacheSolve function to "cache" the inverse of the matrix and be able to
#recall it.
makeCacheMatrix <- function(x = matrix()){
  # Cache holder for a matrix and its lazily computed inverse. Returns a list of
  # accessor closures that share the enclosing environment's x and inv.
  inv <- NULL
  set <- function(y) {
    # replace the stored matrix and invalidate any cached inverse
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  # cacheSolve() uses these two to store and retrieve the computed inverse
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function: cachesolve accepts the matrix, checks to
#see if it has been "solved" (i.e. inverse calculated) by looking to see if getInverse
#has a value. If so, it retrieves the cached result. If "null", it retrieves the
#matrix with get(), solves it, (inv<-solve(data, ....)), and passes the result to the
#setInverse(inv) function so that next time it can just be retrieved, and then returns
#the result (but only when it is the first time solving the matrix)
cacheSolve <- function(x, ...){
  ## Return the inverse of the matrix wrapped by a makeCacheMatrix object `x`,
  ## computing it at most once: a cached inverse is reused, otherwise solve()
  ## is called and the result stored via x$setInverse().
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setInverse(fresh)
  fresh
}
|
/cachematrix.R
|
no_license
|
PKMarcom/ProgrammingAssignment2
|
R
| false
| false
| 2,955
|
r
|
## Put comments here that give an overall description of what your
## functions do
#Programming Assignment 2 to understand lexical scoping and get more practice with
#defining functions. The code below is based on the sample code given for the makeVector
#and cachemean functions.
#the curly braces in makeCacheMatrix help me, as a beginner in R, understand the scope of
#individual functions that are then passed to the List function. The post "assign 2 how
#i run the code to see if I understand it?" and blog post "https://asitarrives.wordpress.
#com/2014/10/18/understanding-lexical-scoping-in-r-great-guidance-for-community-ta-
#in-coursera/" were very helpful for figuring this out and understanding what the
#code was doing.
## Write a short comment describing this function: makeCacheMatrix is a function that accepts
#the matrix to be inversed, and then sets up four functions for storing and retrieving the
#original matrix (set and get) and the inverse matrix (setInverse and getInverse). When
#called, by typing makeCacheMatrix(m), where "m" is a defined matrix object,
#it makes the inv null and puts the matrix in a variable x using the double assignment
#so that it is available outside the set function. The setInverse and getInverse are
#used by the cacheSolve function to "cache" the inverse of the matrix and be able to
#recall it.
# Construct a "cache matrix": a list of four closures sharing one environment
# that stores the matrix `x` and (once computed by cacheSolve) its inverse.
makeCacheMatrix <- function(x = matrix()){
inv <- NULL
set <- function(y) {
x <<- y # store the new matrix in the shared environment
inv <<- NULL # invalidate any previously cached inverse
}
get <- function() {
x
}
setInverse <- function(inverse) {
inv <<- inverse # cache the inverse in the shared environment
}
getInverse <- function() {
inv # NULL until setInverse() has been called
}
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Write a short comment describing this function: cachesolve accepts the matrix, checks to
#see if it has been "solved" (i.e. inverse calculated) by looking to see if getInverse
#has a value. If so, it retrieves the cached result. If "null", it retrieves the
#matrix with get(), solves it, (inv<-solve(data, ....)), and passes the result to the
#setInverse(inv) function so that next time it can just be retrieved, and then returns
#the result (but only when it is the first time solving the matrix)
# Return the inverse of the special "matrix" `x` (built by makeCacheMatrix),
# computing it only on a cache miss; later calls return the cached result.
# `...` is forwarded to solve().
cacheSolve <- function(x, ...){
inv <- x$getInverse ()
if(!is.null(inv)) {
message("getting cached data") # cache hit: skip recomputation
return(inv)
}
data <- x$get()
inv <- solve(data, ...) # solve() with a single matrix argument inverts it
x$setInverse(inv)
inv ## Return a matrix that is the inverse of 'x'
}
|
# Factorial: read an integer and print its factorial via an accumulating loop.
num <- as.integer(readline(prompt = "Enter the number"))
fact <- 1
for (i in 1:num) {
  fact <- fact * i
}
print(fact)
#-------------------------------------------
# Read a number and print its multiplication table (1x .. 10x).
mult <- as.integer(readline("Enter a number"))
for (i in 1:10) {
  # Bug fix: the original called `pri(mult)` -- `pri` does not exist, so the
  # loop errored at runtime. Print each multiple of the entered number.
  print(mult * i)
}
#-----------------------------------------
# Example 1: iterate a character vector by index (seq_along is safe even for
# zero-length vectors, unlike 1:length(var1)).
var1 <- c("a", "b", "c", "d")
for (i in seq_along(var1)) {
  print(var1[i])
}
# Example 2: visit every cell of a matrix, row by row.
# Note: 1:6 is recycled to fill the 3x3 matrix (R emits a warning for this).
mat1 <- matrix(1:6, 3, 3)
for (i in seq_len(nrow(mat1))) {
  for (j in seq_len(ncol(mat1))) {
    # Bug fix: the original `mat1[i.j]` referenced a non-existent object
    # named `i.j`; matrix indexing needs a comma: mat1[i, j].
    print(mat1[i, j])
  }
}
# Fibonacci series ------------------------
# Print the first `user_input` Fibonacci numbers, starting 0, 1, 1, 2, ...
user_input=as.integer(readline(prompt = "Enter a number"))
n1=0 # F(0)
n2=1 # F(1)
count=2 # two terms are printed unconditionally in the else branch
if(user_input==1)
{
print("Fibo Series")
print(n1)
}else
{
print("Fibo Series")
print(n1)
print(n2)
while(count<user_input)
{
n3=n1+n2 # next term is the sum of the previous two
print(n3)
n1=n2
n2=n3
count=count+1
}
}
# Sum of natural numbers ----------------------
# Read n and print 1 + 2 + ... + n; negative input is rejected with a message.
input1 <- as.integer(readline(prompt = "Enter a number"))
total <- 0
if (input1 < 0) {
  print("Please enter a positive")
} else {
  while (input1 > 0) {
    total <- total + input1
    input1 <- input1 - 1
  }
  print("Sum of natural numbers")
  print(total)
}
# Sum of digits -------------------------
# Repeatedly strip the last digit (n %% 10) and drop it (n %/% 10).
input2 <- as.integer(readline(prompt = "Enter a number"))
sum_of_digit <- 0
while (input2 > 0) {
  sum_of_digit <- sum_of_digit + input2 %% 10
  # Bug fix: the original used `/ 10` (floating-point division), which left a
  # fractional value, corrupted the digit sum with fractional remainders and
  # kept the loop spinning for hundreds of iterations until the value
  # underflowed to zero. Integer division %/% is required here.
  input2 <- input2 %/% 10
}
print(as.integer(sum_of_digit))
# function ------------------
# Return the (elementwise) sum of two numbers.
add <- function(no1, no2) no1 + no2
add(1, 14)
#----------------------------------------------------
# ifelse() demo: condition is FALSE here (3 < 2), so 0 is returned.
ifelse(sqrt(9)<2,sqrt(9),0)
#---------------------------------
# Condition is TRUE here (10 > 9), so sqrt(100) = 10 is returned.
ifelse(sqrt(100)>9,sqrt(100),0)
#--------------------------------
x=12
if(is.numeric(x))
{
y=x*2 # doubles x when it is numeric
}
print(y)
#-------------------------------
z=6
if(z<0)
{
y=z*3
}else
{
y=z*5 # taken: z is positive, so y becomes 30
}
print(y)
#------------------------------
x=15
y=3
if(is.numeric(x))
# NOTE(review): `is.numeric(y)%%y!=0` looks garbled -- is.numeric(y) is TRUE
# (coerced to 1), so this evaluates 1 %% 3 != 0, which is TRUE here. The
# author likely intended a divisibility test such as `x %% y == 0`; confirm
# the intent before changing behavior.
if(is.numeric(y)%%y!=0)
z=x/y
print(z)
#---------------------------
# Classify x (the 20th lowercase letter, "t") as numeric or character.
x <- letters[20]
if (is.numeric(x)) {
  print("Is numeric")
} else if (is.character(x)) {
  print("is Character")
}
#------------------------------
# Map a vowel to its ordinal position (a=1, e=2, i=3, o=4); any other
# character falls through to 5.
z <- 'i'
# Bug fix: the original first branch tested `z %in% letters`, which is TRUE
# for EVERY lowercase letter, so n was always 1 and the e/i/o branches were
# unreachable dead code. The stray `isz='a'` inside that branch shows the
# intended test was z == 'a'.
if (z == 'a') {
  n <- 1
} else if (z == 'e') {
  n <- 2
} else if (z == 'i') {
  n <- 3
} else if (z == 'o') {
  n <- 4
} else {
  n <- 5
}
print(n)
|
/Basic_Function.R
|
permissive
|
ninadsumant/R-Programming
|
R
| false
| false
| 2,295
|
r
|
num=as.integer(readline(prompt = "Enter the number"))
fact=1
for(i in 1:num)
fact=fact*i
print(fact)
#-------------------------------------------
# Read a number and print its multiplication table (1x .. 10x).
mult <- as.integer(readline("Enter a number"))
for (i in 1:10) {
  # Bug fix: the original called `pri(mult)` -- `pri` does not exist, so the
  # loop errored at runtime. Print each multiple of the entered number.
  print(mult * i)
}
#-----------------------------------------
# Example 1: iterate a character vector by index (seq_along is safe even for
# zero-length vectors, unlike 1:length(var1)).
var1 <- c("a", "b", "c", "d")
for (i in seq_along(var1)) {
  print(var1[i])
}
# Example 2: visit every cell of a matrix, row by row.
# Note: 1:6 is recycled to fill the 3x3 matrix (R emits a warning for this).
mat1 <- matrix(1:6, 3, 3)
for (i in seq_len(nrow(mat1))) {
  for (j in seq_len(ncol(mat1))) {
    # Bug fix: the original `mat1[i.j]` referenced a non-existent object
    # named `i.j`; matrix indexing needs a comma: mat1[i, j].
    print(mat1[i, j])
  }
}
#fibonnaci Series------------------------
user_input=as.integer(readline(prompt = "Enter a number"))
n1=0
n2=1
count=2
if(user_input==1)
{
print("Fibo Series")
print(n1)
}else
{
print("Fibo Series")
print(n1)
print(n2)
while(count<user_input)
{
n3=n1+n2
print(n3)
n1=n2
n2=n3
count=count+1
}
}
#Sum of NAtural numbers----------------------
input1=as.integer(readline(prompt = "Enter a number"))
sum=0
if(input1<0)
{
print("Please enter a positive")
}else
{
while(input1>0)
{
sum=sum+input1
input1=input1-1
}
print("Sum of natural numbers")
print(sum)
}
# Sum of digits -------------------------
# Repeatedly strip the last digit (n %% 10) and drop it (n %/% 10).
input2 <- as.integer(readline(prompt = "Enter a number"))
sum_of_digit <- 0
while (input2 > 0) {
  sum_of_digit <- sum_of_digit + input2 %% 10
  # Bug fix: the original used `/ 10` (floating-point division), which left a
  # fractional value, corrupted the digit sum with fractional remainders and
  # kept the loop spinning until the value underflowed to zero. Integer
  # division %/% is required here.
  input2 <- input2 %/% 10
}
print(as.integer(sum_of_digit))
#function------------------
add=function(no1,no2)
{
no1+no2
}
add(1,14)
#----------------------------------------------------
ifelse(sqrt(9)<2,sqrt(9),0)
#---------------------------------
ifelse(sqrt(100)>9,sqrt(100),0)
#--------------------------------
x=12
if(is.numeric(x))
{
y=x*2
}
print(y)
#-------------------------------
z=6
if(z<0)
{
y=z*3
}else
{
y=z*5
}
print(y)
#------------------------------
x=15
y=3
if(is.numeric(x))
if(is.numeric(y)%%y!=0)
z=x/y
print(z)
#---------------------------
x=letters[20]
if(is.numeric(x))
{
print("Is numeric")
}else if(is.character(x))
{
print("is Character")
}
#------------------------------
# Map a vowel to its ordinal position (a=1, e=2, i=3, o=4); any other
# character falls through to 5.
z <- 'i'
# Bug fix: the original first branch tested `z %in% letters`, which is TRUE
# for EVERY lowercase letter, so n was always 1 and the e/i/o branches were
# unreachable dead code. The stray `isz='a'` inside that branch shows the
# intended test was z == 'a'.
if (z == 'a') {
  n <- 1
} else if (z == 'e') {
  n <- 2
} else if (z == 'i') {
  n <- 3
} else if (z == 'o') {
  n <- 4
} else {
  n <- 5
}
print(n)
|
# Exploratory analysis of the daily-digit series, plus a Prophet forecast.
library(tidyverse)
library(lubridate)
library(feather)
library(prophet)
all_daily_digits <- read_feather("all_daily_digits.feather")
# When you get the full dataset, use different values for testing and training
dd_train <- all_daily_digits
dd_test <- all_daily_digits
# Plot all the values (with a smoother to show the long-run trend)
ggplot(all_daily_digits, aes(x = date, y = daily_digit)) +
geom_point() +
geom_smooth() +
theme_bw()
# Is there a difference between different days of the week?
# Bar chart of the mean daily_digit per weekday.
all_daily_digits %>%
mutate(wday = wday(date, label = TRUE, abbr = FALSE)) %>%
group_by(wday) %>%
summarise(daily_digit = mean(daily_digit, na.rm = TRUE)) %>%
ggplot(aes(x = wday, y = daily_digit)) +
geom_col() +
theme_bw()
# One-way ANOVA: does the mean daily_digit differ by day of week?
aov(daily_digit ~ wday(date), data = all_daily_digits) %>% broom::tidy()
# # Put multiple years on the same chart
# ggplot(all_daily_digits, aes(x = ymd(paste0(2019, "-", month(all_daily_digits$date), "-", day(all_daily_digits$date))), y = daily_digit, color = factor(year(all_daily_digits$date)))) +
# geom_point() +
# geom_smooth()
# Prophet expects exactly two columns named ds (date) and y (value).
dd_train_prophet <- set_names(dd_train, c("ds", "y"))
dd_test_prophet <- set_names(dd_test, c("ds", "y"))
model_prophet <- prophet(dd_train_prophet)
#future <- make_future_dataframe(model_prophet, periods = 365)
forecast <- predict(model_prophet, dd_test_prophet)
|
/2_eda.R
|
permissive
|
Breza/DailyDigit
|
R
| false
| false
| 1,307
|
r
|
library(tidyverse)
library(lubridate)
library(feather)
library(prophet)
all_daily_digits <- read_feather("all_daily_digits.feather")
# When you get the full dataset, use different values for testing and training
dd_train <- all_daily_digits
dd_test <- all_daily_digits
# Plot all the values
ggplot(all_daily_digits, aes(x = date, y = daily_digit)) +
geom_point() +
geom_smooth() +
theme_bw()
# Is there a difference between different days of the week?
all_daily_digits %>%
mutate(wday = wday(date, label = TRUE, abbr = FALSE)) %>%
group_by(wday) %>%
summarise(daily_digit = mean(daily_digit, na.rm = TRUE)) %>%
ggplot(aes(x = wday, y = daily_digit)) +
geom_col() +
theme_bw()
aov(daily_digit ~ wday(date), data = all_daily_digits) %>% broom::tidy()
# # Put multiple years on the same chart
# ggplot(all_daily_digits, aes(x = ymd(paste0(2019, "-", month(all_daily_digits$date), "-", day(all_daily_digits$date))), y = daily_digit, color = factor(year(all_daily_digits$date)))) +
# geom_point() +
# geom_smooth()
dd_train_prophet <- set_names(dd_train, c("ds", "y"))
dd_test_prophet <- set_names(dd_test, c("ds", "y"))
model_prophet <- prophet(dd_train_prophet)
#future <- make_future_dataframe(model_prophet, periods = 365)
forecast <- predict(model_prophet, dd_test_prophet)
|
#' Isomap Embedding
#'
#' `step_isomap` creates a *specification* of a recipe
#' step that will convert numeric data into one or more new
#' dimensions.
#'
#' @inheritParams step_center
#' @inherit step_center return
#' @param ... One or more selector functions to choose which
#' variables will be used to compute the dimensions. See
#' [selections()] for more details. For the `tidy`
#' method, these are not currently used.
#' @param role For model terms created by this step, what analysis
#' role should they be assigned?. By default, the function assumes
#' that the new dimension columns created by the original variables
#' will be used as predictors in a model.
#' @param num The number of isomap dimensions to retain as new
#' predictors. If `num` is greater than the number of columns
#' or the number of possible dimensions, a smaller value will be
#' used.
#' @param options A list of options to
#' [dimRed::Isomap()].
#' @param res The [dimRed::Isomap()] object is stored
#' here once this preprocessing step has been trained by
#' [prep.recipe()].
#' @param prefix A character string that will be the prefix to the
#' resulting new variables. See notes below.
#' @return An updated version of `recipe` with the new step
#' added to the sequence of existing steps (if any). For the
#' `tidy` method, a tibble with columns `terms` (the
#' selectors or variables selected).
#' @keywords datagen
#' @concept preprocessing isomap projection_methods
#' @export
#' @details Isomap is a form of multidimensional scaling (MDS).
#' MDS methods try to find a reduced set of dimensions such that
#' the geometric distances between the original data points are
#' preserved. This version of MDS uses nearest neighbors in the
#' data as a method for increasing the fidelity of the new
#' dimensions to the original data values.
#'
#' It is advisable to center and scale the variables prior to
#' running Isomap (`step_center` and `step_scale` can be
#' used for this purpose).
#'
#' The argument `num` controls the number of components that
#' will be retained (the original variables that are used to derive
#' the components are removed from the data). The new components
#' will have names that begin with `prefix` and a sequence of
#' numbers. The variable names are padded with zeros. For example,
#' if `num < 10`, their names will be `Isomap1` -
#' `Isomap9`. If `num = 101`, the names would be
#' `Isomap001` - `Isomap101`.
#' @references De Silva, V., and Tenenbaum, J. B. (2003). Global
#' versus local methods in nonlinear dimensionality reduction.
#' *Advances in Neural Information Processing Systems*.
#' 721-728.
#'
#' \pkg{dimRed}, a framework for dimensionality reduction,
#' https://github.com/gdkrmr
#'
#' @examples
#' data(biomass)
#'
#' biomass_tr <- biomass[biomass$dataset == "Training",]
#' biomass_te <- biomass[biomass$dataset == "Testing",]
#'
#' rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
#' data = biomass_tr)
#'
#' im_trans <- rec %>%
#' step_YeoJohnson(all_predictors()) %>%
#' step_center(all_predictors()) %>%
#' step_scale(all_predictors()) %>%
#' step_isomap(all_predictors(),
#' options = list(knn = 100),
#' num = 2)
#'
#' im_estimates <- prep(im_trans, training = biomass_tr)
#'
#' im_te <- bake(im_estimates, biomass_te)
#'
#' rng <- extendrange(c(im_te$Isomap1, im_te$Isomap2))
#' plot(im_te$Isomap1, im_te$Isomap2,
#' xlim = rng, ylim = rng)
#'
#' tidy(im_trans, number = 4)
#' tidy(im_estimates, number = 4)
#' @seealso [step_pca()] [step_kpca()]
#' [step_ica()] [recipe()] [prep.recipe()]
#' [bake.recipe()]
# User-facing constructor: append an (untrained) Isomap embedding step to a
# recipe. Full documentation is in the roxygen block above this definition.
step_isomap <-
  function(recipe,
           ...,
           role = "predictor",
           trained = FALSE,
           num = 5,
           options = list(knn = 50, .mute = c("message", "output")),
           res = NULL,
           prefix = "Isomap",
           skip = FALSE) {
    # Capture the selectors once, build the step object, then add it to the
    # recipe's step list.
    new_step <- step_isomap_new(
      terms = check_ellipses(...),
      role = role,
      trained = trained,
      num = num,
      options = options,
      res = res,
      prefix = prefix,
      skip = skip
    )
    add_step(recipe, new_step)
  }
# Internal constructor for the isomap step object; all arguments mirror
# step_isomap(), and both call sites pass every field explicitly.
step_isomap_new <-
  function(terms = NULL,
           role = "predictor",
           trained = FALSE,
           num = NULL,
           options = NULL,
           res = NULL,
           # Consistency fix: the default prefix was lowercase "isomap" while
           # the public step_isomap() documents and defaults to "Isomap".
           # Behavior is unchanged in practice because callers always pass
           # prefix explicitly.
           prefix = "Isomap",
           skip = FALSE) {
    step(
      subclass = "isomap",
      terms = terms,
      role = role,
      trained = trained,
      num = num,
      options = options,
      res = res,
      prefix = prefix,
      skip = skip
    )
  }
#' @importFrom dimRed embed dimRedData
#' @export
# Train the isomap step: fit dimRed's Isomap embedding on the selected
# columns of the training data and return a trained copy of the step with
# the fitted model stored in `res`.
prep.step_isomap <- function(x, training, info = NULL, ...) {
col_names <- terms_select(x$terms, info = info)
# Clamp the requested dimensions / neighbours to what the data can support.
x$num <- min(x$num, ncol(training))
x$options$knn <- min(x$options$knn, nrow(training))
imap <-
embed(
dimRedData(as.data.frame(training[, col_names, drop = FALSE])),
"Isomap",
knn = x$options$knn,
ndim = x$num,
.mute = x$options$.mute
)
# Rebuild the step with trained = TRUE and the fitted embedding attached.
step_isomap_new(
terms = x$terms,
role = x$role,
trained = TRUE,
num = x$num,
options = x$options,
res = imap,
prefix = x$prefix,
skip = x$skip
)
}
#' @export
# Apply a trained isomap step: project new data onto the fitted embedding,
# append the new Isomap* columns, and drop the original predictor columns.
bake.step_isomap <- function(object, newdata, ...) {
# Recover the names of the columns the embedding was fitted on from the
# closure environment of the stored dimRed apply function.
isomap_vars <- colnames(environment(object$res@apply)$indata)
comps <-
object$res@apply(
dimRedData(as.data.frame(newdata[, isomap_vars, drop = FALSE]))
)@data
comps <- comps[, 1:object$num, drop = FALSE]
# names0() zero-pads the sequence, e.g. Isomap1..Isomap9 or Isomap001...
colnames(comps) <- names0(ncol(comps), object$prefix)
newdata <- cbind(newdata, as_tibble(comps))
newdata <-
newdata[, !(colnames(newdata) %in% isomap_vars), drop = FALSE]
if (!is_tibble(newdata))
newdata <- as_tibble(newdata)
newdata
}
# Console display for the step, e.g. "Isomap approximation with carbon, ...".
print.step_isomap <-
function(x, width = max(20, options()$width - 35), ...) {
cat("Isomap approximation with ")
printer(colnames(x$res@org.data), x$terms, x$trained, width = width)
invisible(x) # print methods return their argument invisibly
}
#' @rdname step_isomap
#' @param x A `step_isomap` object
tidy.step_isomap <- function(x, ...) {
  # Trained steps report the actual columns used to fit the embedding;
  # untrained steps can only echo the character form of the selectors.
  terms_out <- if (is_trained(x)) {
    colnames(x$res@org.data)
  } else {
    sel2char(x$terms)
  }
  tibble(terms = terms_out)
}
|
/R/isomap.R
|
no_license
|
kevinwkc/recipes
|
R
| false
| false
| 6,327
|
r
|
#' Isomap Embedding
#'
#' `step_isomap` creates a *specification* of a recipe
#' step that will convert numeric data into one or more new
#' dimensions.
#'
#' @inheritParams step_center
#' @inherit step_center return
#' @param ... One or more selector functions to choose which
#' variables will be used to compute the dimensions. See
#' [selections()] for more details. For the `tidy`
#' method, these are not currently used.
#' @param role For model terms created by this step, what analysis
#' role should they be assigned?. By default, the function assumes
#' that the new dimension columns created by the original variables
#' will be used as predictors in a model.
#' @param num The number of isomap dimensions to retain as new
#' predictors. If `num` is greater than the number of columns
#' or the number of possible dimensions, a smaller value will be
#' used.
#' @param options A list of options to
#' [dimRed::Isomap()].
#' @param res The [dimRed::Isomap()] object is stored
#' here once this preprocessing step has been trained by
#' [prep.recipe()].
#' @param prefix A character string that will be the prefix to the
#' resulting new variables. See notes below.
#' @return An updated version of `recipe` with the new step
#' added to the sequence of existing steps (if any). For the
#' `tidy` method, a tibble with columns `terms` (the
#' selectors or variables selected).
#' @keywords datagen
#' @concept preprocessing isomap projection_methods
#' @export
#' @details Isomap is a form of multidimensional scaling (MDS).
#' MDS methods try to find a reduced set of dimensions such that
#' the geometric distances between the original data points are
#' preserved. This version of MDS uses nearest neighbors in the
#' data as a method for increasing the fidelity of the new
#' dimensions to the original data values.
#'
#' It is advisable to center and scale the variables prior to
#' running Isomap (`step_center` and `step_scale` can be
#' used for this purpose).
#'
#' The argument `num` controls the number of components that
#' will be retained (the original variables that are used to derive
#' the components are removed from the data). The new components
#' will have names that begin with `prefix` and a sequence of
#' numbers. The variable names are padded with zeros. For example,
#' if `num < 10`, their names will be `Isomap1` -
#' `Isomap9`. If `num = 101`, the names would be
#' `Isomap001` - `Isomap101`.
#' @references De Silva, V., and Tenenbaum, J. B. (2003). Global
#' versus local methods in nonlinear dimensionality reduction.
#' *Advances in Neural Information Processing Systems*.
#' 721-728.
#'
#' \pkg{dimRed}, a framework for dimensionality reduction,
#' https://github.com/gdkrmr
#'
#' @examples
#' data(biomass)
#'
#' biomass_tr <- biomass[biomass$dataset == "Training",]
#' biomass_te <- biomass[biomass$dataset == "Testing",]
#'
#' rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
#' data = biomass_tr)
#'
#' im_trans <- rec %>%
#' step_YeoJohnson(all_predictors()) %>%
#' step_center(all_predictors()) %>%
#' step_scale(all_predictors()) %>%
#' step_isomap(all_predictors(),
#' options = list(knn = 100),
#' num = 2)
#'
#' im_estimates <- prep(im_trans, training = biomass_tr)
#'
#' im_te <- bake(im_estimates, biomass_te)
#'
#' rng <- extendrange(c(im_te$Isomap1, im_te$Isomap2))
#' plot(im_te$Isomap1, im_te$Isomap2,
#' xlim = rng, ylim = rng)
#'
#' tidy(im_trans, number = 4)
#' tidy(im_estimates, number = 4)
#' @seealso [step_pca()] [step_kpca()]
#' [step_ica()] [recipe()] [prep.recipe()]
#' [bake.recipe()]
step_isomap <-
function(recipe,
...,
role = "predictor",
trained = FALSE,
num = 5,
options = list(knn = 50, .mute = c("message", "output")),
res = NULL,
prefix = "Isomap",
skip = FALSE) {
add_step(
recipe,
step_isomap_new(
terms = check_ellipses(...),
role = role,
trained = trained,
num = num,
options = options,
res = res,
prefix = prefix,
skip = skip
)
)
}
# Internal constructor for the isomap step object; all arguments mirror
# step_isomap(), and both call sites pass every field explicitly.
step_isomap_new <-
  function(terms = NULL,
           role = "predictor",
           trained = FALSE,
           num = NULL,
           options = NULL,
           res = NULL,
           # Consistency fix: the default prefix was lowercase "isomap" while
           # the public step_isomap() documents and defaults to "Isomap".
           # Behavior is unchanged in practice because callers always pass
           # prefix explicitly.
           prefix = "Isomap",
           skip = FALSE) {
    step(
      subclass = "isomap",
      terms = terms,
      role = role,
      trained = trained,
      num = num,
      options = options,
      res = res,
      prefix = prefix,
      skip = skip
    )
  }
#' @importFrom dimRed embed dimRedData
#' @export
prep.step_isomap <- function(x, training, info = NULL, ...) {
col_names <- terms_select(x$terms, info = info)
x$num <- min(x$num, ncol(training))
x$options$knn <- min(x$options$knn, nrow(training))
imap <-
embed(
dimRedData(as.data.frame(training[, col_names, drop = FALSE])),
"Isomap",
knn = x$options$knn,
ndim = x$num,
.mute = x$options$.mute
)
step_isomap_new(
terms = x$terms,
role = x$role,
trained = TRUE,
num = x$num,
options = x$options,
res = imap,
prefix = x$prefix,
skip = x$skip
)
}
#' @export
bake.step_isomap <- function(object, newdata, ...) {
isomap_vars <- colnames(environment(object$res@apply)$indata)
comps <-
object$res@apply(
dimRedData(as.data.frame(newdata[, isomap_vars, drop = FALSE]))
)@data
comps <- comps[, 1:object$num, drop = FALSE]
colnames(comps) <- names0(ncol(comps), object$prefix)
newdata <- cbind(newdata, as_tibble(comps))
newdata <-
newdata[, !(colnames(newdata) %in% isomap_vars), drop = FALSE]
if (!is_tibble(newdata))
newdata <- as_tibble(newdata)
newdata
}
print.step_isomap <-
function(x, width = max(20, options()$width - 35), ...) {
cat("Isomap approximation with ")
printer(colnames(x$res@org.data), x$terms, x$trained, width = width)
invisible(x)
}
#' @rdname step_isomap
#' @param x A `step_isomap` object
tidy.step_isomap <- function(x, ...) {
if (is_trained(x)) {
res <- tibble(terms = colnames(x$res@org.data))
} else {
term_names <- sel2char(x$terms)
res <- tibble(terms = term_names)
}
res
}
|
# R code to automatically calculate degree days for the JRC-MARS gridded climate data
# ================================
# Anastasia Korycinska, Defra Risk and Horizon Scanning Team
# Animal and Plant Health Directorate, Defra, UK
# ================================
# SET THE THRESHOLD TEMPERATURE FOR DEVELOPMENT (oC) IN "threshold" [code line 30]
# SET THE ACCUMULATED DAY DEGREE THRESHOLD IN "accumulated_threshold" [code line 31]
# Set the input file folder by changing the input file path [code line 35]
# Set the output file folder by changing the output file path [code line 205]
# just remember to use / as a separator and enclose the path in ""
# Required fields: GRID_NO is a location identifier
# DAY is the date in YYYYMMDD format (no separators)
# TEMPERATURE_MAX and TEMPERATURE_MIN are self-explanatory
# Accumulated day degrees by grid square for each year in the data set are generated as output
# Min, mean and maximum accumulated day degrees for each grid square across all input years also created
# The number and percentage of years out of the total years analysed which have an actual accumulated threshold greater than
# or equal to the accumulated threshold also included in the output.
# Maximum gap & max. run (max no. of consecutive years unsuitable/suitable) also calculated courtesy of Matt Upson
# The output csv file will be saved with the filename "output_for_threshold_temp-XX_accumulatedDD-YY.csv" and can be imported into ArcGIS for mapping
# Using the JRC map in ArcGIS, the "GRID_NO" field in the output can be linked to the "Grid_code" field (i.e. lat/long are not required)
# remove all pre-existing variables in the workspace
# NOTE(review): rm(list=ls()) wipes the user's entire workspace. Acceptable
# for a standalone batch script, but avoid sourcing this file interactively.
rm(list=ls())
threshold = 10 # base (development threshold) temperature, degrees C
accumulated_threshold = 500 # accumulated degree-day total required
# load files: put the desired files for analysis into one folder and include the file path for that folder below.
# Multiple files can be read at once.
#allfiles <- list.files(path = "set input file path", full.names = TRUE)
#temperature <- do.call(rbind, lapply(allfiles, read.csv, header = TRUE, sep = ";"))
library(tidyverse)
# Read the two JRC extracts, stack them, standardise the column names, and
# drop duplicate grid-square/day records (first occurrence kept).
t1 <- read_delim("../pest-risk/input/jrc-gridded-agrometeo/efsa20180420.csv",delim = ";")
temperature <-t1 %>%
bind_rows(read_delim("../pest-risk/input/jrc-gridded-agrometeo/efsa20180613.csv",delim = ";")) %>%
rename(GRID_NO=IDGRID,
TEMPERATURE_MIN=TMIN,
TEMPERATURE_MAX=TMAX) %>%
select(GRID_NO,TEMPERATURE_MIN,TEMPERATURE_MAX,DAY) %>%
distinct(GRID_NO,DAY,.keep_all = T)
rm(t1)
# add mean temp to dataset
temperature$mean <- (temperature$TEMPERATURE_MAX + temperature$TEMPERATURE_MIN)/2
# extract the text date string for year and add this to the dataset
# (DAY is YYYYMMDD, so the first four characters are the year)
temperature$year <- substr(temperature$DAY, 1, 4)
# filter years
#temperature <- temperature %>%
# filter(as.numeric(year)<2015) %>%
# filter(as.numeric(year)>1999)
# Daily degree days are computed by splitting days into four mutually
# exclusive cases relative to the threshold temperature.
# Case 1: max <= threshold -> no development, 0 degree days.
below <- temperature[temperature$TEMPERATURE_MAX <= threshold, ]
# accumulated degree days for days max temperature below threshold (= zero)
dd_below <- cbind(below, acc_dd = (below$TEMPERATURE_MAX - below$TEMPERATURE_MAX))
# Case 2: min >= threshold -> whole day counts: mean temperature - threshold.
above <- temperature[temperature$TEMPERATURE_MIN >= threshold, ]
dd_above <- cbind(above,acc_dd = ((above$TEMPERATURE_MAX + above$TEMPERATURE_MIN)*0.5)-threshold)
# Case 3: threshold lies between min and max, and the mean >= threshold.
mean_above <- temperature[temperature$mean >= threshold &
temperature$TEMPERATURE_MIN < threshold & temperature$TEMPERATURE_MAX > threshold, ]
# acc_dd = ((max - threshold)/2) - ((threshold - min)/4)
dd_mean_above <- cbind(mean_above, acc_dd = (((mean_above$TEMPERATURE_MAX-threshold)/2)
- ((threshold-mean_above$TEMPERATURE_MIN)/4)))
# Case 4: threshold lies between min and max, and the mean < threshold.
mean_below <- temperature[temperature$mean < threshold &
temperature$TEMPERATURE_MIN < threshold & temperature$TEMPERATURE_MAX > threshold,]
# acc_dd = (max - threshold)/4
dd_mean_below <- cbind(mean_below, acc_dd = ((mean_below$TEMPERATURE_MAX - threshold)/4))
remove(temperature)
# accumulated degree days, all four cases recombined into one table
dd_temperature <- rbind(dd_below, dd_above, dd_mean_above, dd_mean_below)
# accumulated degree days, summed by year and by grid location
final <- aggregate(acc_dd ~ GRID_NO + year, data = dd_temperature, sum)
# some tidying up
# reshaping the dataset: one row per grid square, one acc_dd column per year
final_tidy <- reshape(final, v.names = "acc_dd", idvar = "GRID_NO", timevar = "year", direction = "wide" )
# sort by grid code
final_sort <- final_tidy[order(final_tidy$GRID_NO),]
# year columns only (drops GRID_NO); used for the row-wise summaries below
final_data <- final_sort[, 2:ncol(final_sort)]
# Replace the default column names with year-only labels
col_labels <- unique(final[,2])
names (final_sort) <- c("GRID_NO", col_labels)
# some data analysis (row-wise summaries across the year columns)
# Minimum accumulated DD, all years
final_sort$min <- apply(final_data, 1, min, na.rm=TRUE)
# Average all years (mean)
final_sort$mean <- apply(final_data, 1, mean, na.rm=TRUE)
# Maximum accumulated DD, all years
final_sort$max <- apply(final_data, 1, max, na.rm=TRUE)
# Total number of years analysed
final_sort$no_years_analysed <- length(col_labels)
# Number of years over accumulated threshold
# Count how many entries (years) meet or exceed the accumulated degree-day
# threshold. Note: reads `accumulated_threshold` from the enclosing (global)
# environment; NA years are ignored, matching length(which(...)).
function_years_over <- function(x) {
  sum(x >= accumulated_threshold, na.rm = TRUE)
}
# Per-grid-square count of years meeting the accumulated threshold
final_sort$count_years_over <- apply(final_data, 1, function_years_over)
# Percentage of years over accumulated threshold
final_sort$percent_years_over <- (final_sort$count_years_over/final_sort$no_years_analysed)*100
# Maximum gap analysis - what is the greatest number of years threshold not reached?
# identify which years over and under threshold
function_over <- function(x) {
# Vectorised: TRUE where the year's accumulated DD reaches the threshold.
return (x >= accumulated_threshold)
}
# apply true/false to whether over or under threshold (TRUE = suitable year);
# the [,2:(ncol-6)] slice keeps only the year columns, skipping GRID_NO and
# the six summary columns added above.
gap <- apply(final_sort[,2:(ncol(final_sort)-6)], 2, function_over)
# ==================
# This section of code courtesy of Matt Upson
# ==================
# Define the max_rl function ----
# Longest run length of `val` in `x` ----
# Returns the length (as integer) of the longest run of consecutive elements
# equal to `val`; 0L when `val` never occurs. NA elements act as run breaks.
max_rl <- function(x, val) {
  if (!is.vector(x) && !is.list(x)) stop("'x' must be a vector of an atomic type")
  n <- length(x)
  # TRUE (or NA) wherever consecutive elements differ, i.e. a run boundary.
  boundary <- x[-1L] != x[-n]
  # Indices where each run ends; the final element always closes a run.
  run_ends <- c(which(boundary | is.na(boundary)), n)
  run_lens <- diff(c(0L, run_ends))
  run_vals <- x[run_ends]
  # If `val` never occurs, the longest run is 0 by definition.
  if (!(val %in% run_vals)) {
    return(0L)
  }
  as.integer(max(run_lens[run_vals == val]))
}
# Now lets wrap this up into a function that can handle dataframes.
# Row-wise longest run length of `val` across a data.frame or matrix:
# returns one integer per row (via max_rl).
max_rl_df <- function(x, val) {
  # Guard: only rectangular inputs make sense here (covers tibbles too,
  # since they inherit from data.frame).
  if (!is.data.frame(x) && !is.matrix(x)) stop("'x' must be a data.frame or matrix")
  apply(x, MARGIN = 1, FUN = function(row) max_rl(row, val))
}
# ============
# Bind the per-row run statistics: max_run = longest consecutive suitable
# years (TRUE), max_gap = longest consecutive unsuitable years (FALSE).
final_sort <- cbind(
final_sort,
max_run = max_rl_df(gap, TRUE),
max_gap = max_rl_df(gap, FALSE)
)
# Mean annual number of generations possible
final_sort$mean_annual_generation <- final_sort$mean/accumulated_threshold
# export the file: default location as specified, including the two threshold values in the filename
# NOTE(review): "set output file path" is a placeholder -- edit before running.
filename <- paste("set output file path",
threshold, "_accumulatedDD-", accumulated_threshold, "-20y.csv", sep = "")
write.csv(final_sort, file = filename)
|
/Defra_JRC-MARS-accumulatedDD.R
|
no_license
|
openefsa/DefraJrcAccumulatedDD
|
R
| false
| false
| 8,225
|
r
|
# R code to automatically calculate degree days for the JRC-MARS gridded climate data
# ================================
# Anastasia Korycinska, Defra Risk and Horizon Scanning Team
# Animal and Plant Health Directorate, Defra, UK
# ================================
# SET THE THRESHOLD TEMPERATURE FOR DEVELOPMENT (oC) IN "threshold" [code line 30]
# SET THE ACCUMULATED DAY DEGREE THRESHOLD IN "accumulated_threshold" [code line 31]
# Set the input file folder by changing the input file path [code line 35]
# Set the output file folder by changing the output file path [code line 205]
# just remember to use / as a separator and enclose the path in ""
# Required fields: GRID_NO is a location identifier
# DAY is the date in YYYYMMDD format (no separators)
# TEMPERATURE_MAX and TEMPERATURE_MIN are self-explanatory
# Accumulated day degrees by grid square for each year in the data set are generated as output
# Min, mean and maximum accumulated day degrees for each grid square across all input years also created
# The number and percentage of years out of the total years analysed which have an actual accumulated threshold greater than
# or equal to the accumulated threshold also included in the output.
# Maximum gap & max. run (max no. of consecutive years unsuitable/suitable) also calculated courtesy of Matt Upson
# The output csv file will be saved with the filename "output_for_threshold_temp-XX_accumulatedDD-YY.csv" and can be imported into ArcGIS for mapping
# Using the JRC map in ArcGIS, the "GRID_NO" field in the output can be linked to the "Grid_code" field (i.e. lat/long are not required)
# NOTE(review): rm(list=ls()) wipes the interactive workspace as a side
# effect; acceptable for a standalone script, but never source() this from
# other code.
# remove all pre-existing variables in the workspace
rm(list=ls())
# Analysis parameters: base temperature (deg C) for degree-day accumulation,
# and the accumulated degree-day total used later to classify suitability.
threshold = 10
accumulated_threshold = 500
# load files: put the desired files for analysis into one folder and include the file path for that folder below.
# Multiple files can be read at once.
#allfiles <- list.files(path = "set input file path", full.names = TRUE)
#temperature <- do.call(rbind, lapply(allfiles, read.csv, header = TRUE, sep = ";"))
library(tidyverse)
# Read the two JRC extracts, stack them, standardise column names and keep
# the first occurrence of each GRID_NO/DAY pair.
t1 <- read_delim("../pest-risk/input/jrc-gridded-agrometeo/efsa20180420.csv",delim = ";")
temperature <-t1 %>%
bind_rows(read_delim("../pest-risk/input/jrc-gridded-agrometeo/efsa20180613.csv",delim = ";")) %>%
rename(GRID_NO=IDGRID,
TEMPERATURE_MIN=TMIN,
TEMPERATURE_MAX=TMAX) %>%
select(GRID_NO,TEMPERATURE_MIN,TEMPERATURE_MAX,DAY) %>%
distinct(GRID_NO,DAY,.keep_all = T)
rm(t1)
# add mean temp to dataset
temperature$mean <- (temperature$TEMPERATURE_MAX + temperature$TEMPERATURE_MIN)/2
# extract the text date string for year and add this to the dataset
# (DAY is YYYYMMDD, so the first 4 characters are the year)
temperature$year <- substr(temperature$DAY, 1, 4)
# filter years
#temperature <- temperature %>%
#  filter(as.numeric(year)<2015) %>%
#  filter(as.numeric(year)>1999)
# Daily degree-day contribution, split into four mutually exclusive cases
# (presumably a triangle-method approximation — TODO confirm against source).
# select days where max temperature equal to or below threshold
below <- temperature[temperature$TEMPERATURE_MAX <= threshold, ]
# accumulated degree days for days max temperature below threshold (= zero)
dd_below <- cbind(below, acc_dd = (below$TEMPERATURE_MAX - below$TEMPERATURE_MAX))
# select days where min temperature is equal to or above threshold
above <- temperature[temperature$TEMPERATURE_MIN >= threshold, ]
# accumulated degree days for days min temp above threshold (= average max + min, minus the threshold)
dd_above <- cbind(above,acc_dd = ((above$TEMPERATURE_MAX + above$TEMPERATURE_MIN)*0.5)-threshold)
# select days where the mean temp is greater than or equal to threshold
mean_above <- temperature[temperature$mean >= threshold &
temperature$TEMPERATURE_MIN < threshold & temperature$TEMPERATURE_MAX > threshold, ]
# accummulated degree days where mean is over threshold (= ((max-threshold)/2) - ((threshold-min)/4)
dd_mean_above <- cbind(mean_above, acc_dd = (((mean_above$TEMPERATURE_MAX-threshold)/2)
- ((threshold-mean_above$TEMPERATURE_MIN)/4)))
# select days where the mean is less than threshold
mean_below <- temperature[temperature$mean < threshold &
temperature$TEMPERATURE_MIN < threshold & temperature$TEMPERATURE_MAX > threshold,]
# acccumulated degree days where mean less than threshold (= (max-threshold)/4)
dd_mean_below <- cbind(mean_below, acc_dd = ((mean_below$TEMPERATURE_MAX - threshold)/4))
remove(temperature)
# accumulated degree days, all temperatures recombined
dd_temperature <- rbind(dd_below, dd_above, dd_mean_above, dd_mean_below)
# accumulated degree days, by year and by location
final <- aggregate(acc_dd ~ GRID_NO + year, data = dd_temperature, sum)
# some tidying up
# reshaping the dataset: one row per GRID_NO, one "acc_dd.<year>" column per year
final_tidy <- reshape(final, v.names = "acc_dd", idvar = "GRID_NO", timevar = "year", direction = "wide" )
# sort by grid code
final_sort <- final_tidy[order(final_tidy$GRID_NO),]
# final_data holds only the per-year columns (everything after GRID_NO)
final_data <- final_sort[, 2:ncol(final_sort)]
# Replace the default column names with year-only labels
# NOTE(review): assumes unique(final[,2]) comes back in the same order as
# reshape() emitted the year columns — TODO confirm.
col_labels <- unique(final[,2])
names (final_sort) <- c("GRID_NO", col_labels)
# some data analysis
# Minimum accumulated DD, all years
final_sort$min <- apply(final_data, 1, min, na.rm=TRUE)
# Average all years (mean)
final_sort$mean <- apply(final_data, 1, mean, na.rm=TRUE)
# Maximum accumulated DD, all years
final_sort$max <- apply(final_data, 1, max, na.rm=TRUE)
# Total number of years analysed
final_sort$no_years_analysed <- length(col_labels)
# Number of years over accumulated threshold
# Count how many entries of x meet or exceed the script-level
# `accumulated_threshold`. NA entries are never counted, matching
# length(which(...)) semantics; the result is an integer.
function_years_over <- function(x) {
  sum(x >= accumulated_threshold, na.rm = TRUE)
}
# Per grid square: number of years whose accumulated DD reach the threshold.
final_sort$count_years_over <- apply(final_data, 1, function_years_over)
# Percentage of years over accumulated threshold
final_sort$percent_years_over <- (final_sort$count_years_over/final_sort$no_years_analysed)*100
# Maximum gap analysis - what is the greatest number of years threshold not reached?
# identify which years over and under threshold
# Elementwise suitability test against the script-level
# `accumulated_threshold`; returns a logical vector (NA inputs stay NA).
function_over <- function(x) x >= accumulated_threshold
# apply true/false to whether over or under threshold
# Columns 2:(ncol-6) are the per-year columns: GRID_NO is first and the six
# summary columns (min, mean, max, no_years_analysed, count_years_over,
# percent_years_over) were appended above. The result `gap` is a logical
# matrix with one row per grid square and one column per year.
gap <- apply(final_sort[,2:(ncol(final_sort)-6)], 2, function_over)
# ==================
# This section of code courtesy of Matt Upson
# ==================
# Define the max_rl function ----
# Longest run length of `val` in `x` ------------------------------------
#
# Returns the length (integer) of the longest unbroken run of `val` in the
# atomic vector `x` (e.g. the longest streak of TRUEs in a suitability
# series). Returns 0L when `val` never occurs, including for empty input.
#
# Args:
#   x:   the target vector (run-length encoded internally).
#   val: the value whose longest run length is wanted.
#
# Fix vs. previous version: when `x` contained NA, the subscript
# `lengths[values == val]` also picked up NA positions, so max() returned
# NA instead of the true run length. NA run values are now excluded.
max_rl <- function(x, val) {
  # Same input guard as base::rle() used by the original hand-rolled code.
  if (!is.vector(x) && !is.list(x)) stop("'x' must be a vector of an atomic type")
  # base::rle() performs exactly the encoding previously written by hand:
  # NA breaks a run and each NA forms its own run of length 1.
  runs <- rle(x)
  # Keep only runs whose value equals `val`; drop NA run values explicitly
  # so the subscript cannot introduce NA into max().
  keep <- !is.na(runs$values) & runs$values == val
  if (any(keep)) {
    max_val <- max(runs$lengths[keep])
  } else {
    max_val <- 0
  }
  # rle() lengths are already integer, but keep the explicit coercion so
  # downstream callers (e.g. map_int) always see an integer.
  as.integer(max_val)
}
# Now lets wrap this up into a function that can handle dataframes.
# Row-wise wrapper around max_rl(): for each row of a data.frame or matrix,
# return the longest run length of `val` in that row (integer vector, one
# entry per row).
max_rl_df <- function(x, val) {
  # Accept data.frames (incl. tibbles/data_frames) and matrices only.
  if (!is.data.frame(x) && !is.matrix(x)) stop("'x' must be a data.frame or matrix")
  # MARGIN = 1 applies max_rl to each row; `val` is forwarded via `...`.
  apply(x, 1, max_rl, val = val)
}
# ============
# Now bind the dataframe to the max length of the TRUEs.
# max_run = longest consecutive run of suitable years per grid square;
# max_gap = longest consecutive run of unsuitable years.
final_sort <- cbind(
final_sort,
max_run = max_rl_df(gap, TRUE),
max_gap = max_rl_df(gap, FALSE)
)
# Mean annual number of generations possible
final_sort$mean_annual_generation <- final_sort$mean/accumulated_threshold
# export the file: default location as specified, including the two threshold values in the filename
# NOTE(review): "set output file path" is a placeholder — edit before running.
filename <- paste("set output file path",
threshold, "_accumulatedDD-", accumulated_threshold, "-20y.csv", sep = "")
write.csv(final_sort, file = filename)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/topology.R
\name{permute}
\alias{permute}
\alias{permute.vertices}
\title{Permute the vertices of a graph}
\usage{
permute(graph, permutation)
}
\arguments{
\item{graph}{The input graph; it can be directed or undirected.}
\item{permutation}{A numeric vector giving the permutation to apply. The
first element is the new id of vertex 1, etc. Every number between one and
\code{vcount(graph)} must appear exactly once.}
}
\value{
A new graph object.
}
\description{
Create a new graph, by permuting vertex ids.
}
\details{
This function creates a new graph from the input graph by permuting its
vertices according to the specified mapping. Call this function with the
output of \code{\link{canonical_permutation}} to create the canonical form
of a graph.
\code{permute} keeps all graph, vertex and edge attributes of the graph.
}
\examples{
# Random permutation of a random graph
g <- sample_gnm(20, 50)
g2 <- permute(g, sample(vcount(g)))
graph.isomorphic(g, g2)
# Permutation keeps all attributes
g$name <- "Random graph, Gnm, 20, 50"
V(g)$name <- letters[1:vcount(g)]
E(g)$weight <- sample(1:5, ecount(g), replace=TRUE)
g2 <- permute(g, sample(vcount(g)))
graph.isomorphic(g, g2)
g2$name
V(g2)$name
E(g2)$weight
all(sort(E(g2)$weight) == sort(E(g)$weight))
}
\author{
Gabor Csardi \email{csardi.gabor@gmail.com}
}
\seealso{
\code{\link{canonical_permutation}}
}
\keyword{graphs}
|
/man/permute.Rd
|
no_license
|
davidmaciel/rigraph
|
R
| false
| false
| 1,467
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/topology.R
\name{permute}
\alias{permute}
\alias{permute.vertices}
\title{Permute the vertices of a graph}
\usage{
permute(graph, permutation)
}
\arguments{
\item{graph}{The input graph; it can be directed or undirected.}
\item{permutation}{A numeric vector giving the permutation to apply. The
first element is the new id of vertex 1, etc. Every number between one and
\code{vcount(graph)} must appear exactly once.}
}
\value{
A new graph object.
}
\description{
Create a new graph, by permuting vertex ids.
}
\details{
This function creates a new graph from the input graph by permuting its
vertices according to the specified mapping. Call this function with the
output of \code{\link{canonical_permutation}} to create the canonical form
of a graph.
\code{permute} keeps all graph, vertex and edge attributes of the graph.
}
\examples{
# Random permutation of a random graph
g <- sample_gnm(20, 50)
g2 <- permute(g, sample(vcount(g)))
graph.isomorphic(g, g2)
# Permutation keeps all attributes
g$name <- "Random graph, Gnm, 20, 50"
V(g)$name <- letters[1:vcount(g)]
E(g)$weight <- sample(1:5, ecount(g), replace=TRUE)
g2 <- permute(g, sample(vcount(g)))
graph.isomorphic(g, g2)
g2$name
V(g2)$name
E(g2)$weight
all(sort(E(g2)$weight) == sort(E(g)$weight))
}
\author{
Gabor Csardi \email{csardi.gabor@gmail.com}
}
\seealso{
\code{\link{canonical_permutation}}
}
\keyword{graphs}
|
normalize_Prots_AALength <- function(data,genes,proteins,organism){
  #normalize_Prots_AALength
  #
  #Divides each spectral-counts row by the amino-acid chain length of its
  #protein (looked up in a local uniprot_<organism>.txt table), then
  #normalizes every column by the sum of all length-scaled values weighted
  #by their molecular weights. Final units are [mmol/g protein] per entry.
  #
  # data      (data.frame) Spectral counts for genes/proteins (rows) in
  #           n conditions (columns), including all replicates.
  # genes     List of gene IDs for the dataset.
  # proteins  List of Uniprot protein IDs for the dataset.
  # organism  'kma' for K. marxianus; 'sce' for S. cerevisiae;
  #           'yli' for Y. lipolytica.
  #
  # Returns a list: [[1]] newData  - normalized dataset, restricted to
  #                                  proteins with a usable MW and sequence,
  #                 [[2]] newGenes - gene IDs matching the rows of newData,
  #                 [[3]] newProts - protein IDs matching the rows of newData.
  #
  # Usage: outputList <- normalize_Prots_AALength(data,genes,proteins,organism)
  #
  # Fix vs. previous version: the MW filter used
  # (!is.na(MWs[index]) | MWs[index]>0), which let NA / non-positive MWs
  # through and made if() fail on NA; it is now a short-circuit &&-chain.
  filename <- paste('uniprot_',organism,'.txt',sep='')
  database <- read.delim(filename, header = TRUE, sep = "\t",stringsAsFactors=FALSE, na.strings = "NA")
  MWs <- database[,6]
  SEQ <- database[,7]
  if (all(organism != 'sce')){
    #For Kma and Yli the MW query is done based on DMKU and CLIB122
    #uniprot IDs respectively.
    DB_ids <- database[,1]
    ids <- proteins
    if (all(organism == 'yli')){
      #Yli tables carry an extra column, shifting MW and sequence one right.
      MWs <- database[,7]
      SEQ <- database[,8]
    }
  }else {
    #for sce the match is done based on CENPK gene IDs
    DB_ids <- database[,1]
    ids <- genes
  }
  #Reduce dataset to those entries which have a correspondance in the
  #uniprot database
  DB_indxs <- match(ids,DB_ids)
  newData <- c()
  newGenes <- c()
  newProts <- c()
  MWs <- as.numeric(MWs)/1000 #g/mmol
  NormFactors <- c()
  for (i in seq_along(DB_indxs)){
    index <- DB_indxs[i]
    if (!is.na(index)){
      #Require a positive numeric MW AND an available sequence; && short-
      #circuits so an NA MW is skipped instead of crashing the if().
      if (!is.na(MWs[index]) && MWs[index] > 0 && !is.na(SEQ[index])){
        LengthAA <- nchar(SEQ[index])
        #Divide the protein row by its correspondant AA chain length ...
        newRow <- data[i,]/LengthAA
        newData <- rbind(newData,newRow)
        #... and accumulate the MW-weighted row for the normalization sum.
        newRow <- MWs[index]*newRow
        NormFactors <- rbind(NormFactors,newRow)
        newGenes <- rbind(newGenes,genes[i])
        newProts <- rbind(newProts,proteins[i])
      }
    }
  }
  #Per-condition normalization constants (sum over all kept proteins).
  NormFactors <- colSums(NormFactors)
  print(NormFactors)
  #Normalize dataset, brings the values back to their original order of
  #magnitude. Multiplying all columns by constants does not affect
  #fold-change calculations.
  for (j in seq_len(ncol(newData))){
    newData[,j] <- newData[,j]/NormFactors[j]
  }
  return(list(newData,newGenes,newProts))
}
|
/ComplementaryScripts/normalize_Prots_AALength.R
|
permissive
|
SysBioChalmers/OrthOmics
|
R
| false
| false
| 3,246
|
r
|
normalize_Prots_AALength <- function(data,genes,proteins,organism){
  #normalize_Prots_AALength
  #
  #Divides each spectral-counts row by the amino-acid chain length of its
  #protein (looked up in a local uniprot_<organism>.txt table), then
  #normalizes every column by the sum of all length-scaled values weighted
  #by their molecular weights. Final units are [mmol/g protein] per entry.
  #
  # data      (data.frame) Spectral counts for genes/proteins (rows) in
  #           n conditions (columns), including all replicates.
  # genes     List of gene IDs for the dataset.
  # proteins  List of Uniprot protein IDs for the dataset.
  # organism  'kma' for K. marxianus; 'sce' for S. cerevisiae;
  #           'yli' for Y. lipolytica.
  #
  # Returns a list: [[1]] newData  - normalized dataset, restricted to
  #                                  proteins with a usable MW and sequence,
  #                 [[2]] newGenes - gene IDs matching the rows of newData,
  #                 [[3]] newProts - protein IDs matching the rows of newData.
  #
  # Usage: outputList <- normalize_Prots_AALength(data,genes,proteins,organism)
  #
  # Fix vs. previous version: the MW filter used
  # (!is.na(MWs[index]) | MWs[index]>0), which let NA / non-positive MWs
  # through and made if() fail on NA; it is now a short-circuit &&-chain.
  filename <- paste('uniprot_',organism,'.txt',sep='')
  database <- read.delim(filename, header = TRUE, sep = "\t",stringsAsFactors=FALSE, na.strings = "NA")
  MWs <- database[,6]
  SEQ <- database[,7]
  if (all(organism != 'sce')){
    #For Kma and Yli the MW query is done based on DMKU and CLIB122
    #uniprot IDs respectively.
    DB_ids <- database[,1]
    ids <- proteins
    if (all(organism == 'yli')){
      #Yli tables carry an extra column, shifting MW and sequence one right.
      MWs <- database[,7]
      SEQ <- database[,8]
    }
  }else {
    #for sce the match is done based on CENPK gene IDs
    DB_ids <- database[,1]
    ids <- genes
  }
  #Reduce dataset to those entries which have a correspondance in the
  #uniprot database
  DB_indxs <- match(ids,DB_ids)
  newData <- c()
  newGenes <- c()
  newProts <- c()
  MWs <- as.numeric(MWs)/1000 #g/mmol
  NormFactors <- c()
  for (i in seq_along(DB_indxs)){
    index <- DB_indxs[i]
    if (!is.na(index)){
      #Require a positive numeric MW AND an available sequence; && short-
      #circuits so an NA MW is skipped instead of crashing the if().
      if (!is.na(MWs[index]) && MWs[index] > 0 && !is.na(SEQ[index])){
        LengthAA <- nchar(SEQ[index])
        #Divide the protein row by its correspondant AA chain length ...
        newRow <- data[i,]/LengthAA
        newData <- rbind(newData,newRow)
        #... and accumulate the MW-weighted row for the normalization sum.
        newRow <- MWs[index]*newRow
        NormFactors <- rbind(NormFactors,newRow)
        newGenes <- rbind(newGenes,genes[i])
        newProts <- rbind(newProts,proteins[i])
      }
    }
  }
  #Per-condition normalization constants (sum over all kept proteins).
  NormFactors <- colSums(NormFactors)
  print(NormFactors)
  #Normalize dataset, brings the values back to their original order of
  #magnitude. Multiplying all columns by constants does not affect
  #fold-change calculations.
  for (j in seq_len(ncol(newData))){
    newData[,j] <- newData[,j]/NormFactors[j]
  }
  return(list(newData,newGenes,newProts))
}
|
## Time Series Modeling
# Install required Libraries if necessary
# NOTE(review): installing packages at runtime is a side effect; also
# lmtest, rlist and doParallel are loaded below but are NOT in this
# install list, so a fresh machine may still fail at library() time.
list.of.packages <- c("caret", "dplyr","Boruta","mlbench",
"tidyr","fUnitRoots","FitAR","forecast",
"stringr","Metrics","tictoc","MLmetrics","h2o","opera","urca")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
#Load required libraries
library(lmtest)
library("fUnitRoots")
library(FitAR)
library("forecast")
library(caret)
library(dplyr)
library(Boruta)
library(mlbench)
library(tidyr)
library(stringr)
library(Metrics)
library(opera)
library(urca)
library(tictoc)
library(rlist)
library(MLmetrics)
library(doParallel)
library(h2o)
# Load the Kaggle COVID-19 forecasting files and parse Date columns.
train_KagCsv <- read.csv("Datasets/Week5/train.csv", na.strings = c("", "NA"))
train_Kag<- as.data.frame(train_KagCsv)
train_Kag <- train_Kag %>% mutate(Date=as.Date(Date))
test_KagCsv <- read.csv("Datasets/Week5/test.csv", na.strings = c("", "NA"))
test_Kag <- as.data.frame(test_KagCsv)
test_Kag <- test_Kag %>% mutate(Date=as.Date(Date))
sub_KagCsv <- read.csv("Datasets/Week5/submission.csv", na.strings = c("", "NA"))
sub_Kag <- as.data.frame(sub_KagCsv)
minDate <- min(train_Kag$Date)
# Clean Data
# Negative daily counts are treated as sign errors and flipped positive.
sum(train_Kag$TargetValue < 0)
train_Kag$TargetValue <- ifelse(train_Kag$TargetValue<0,abs(train_Kag$TargetValue),train_Kag$TargetValue)
sum(train_Kag$TargetValue < 0)
#Add lifting to the to all Target Values to insure it is away from zero and away from constant
# NOTE(review): rnorm() here has no set.seed(), so the lifted dataset is
# not reproducible between runs — confirm this is intended.
lift<-10
train_Kag$TargetValue <-train_Kag$TargetValue + rnorm(length(train_Kag$TargetValue),mean=lift,sd=1)
# CPR = composite location key; days = offset from first date; Wday/month
# as factors for modeling.
train_Kag_clean <- train_Kag %>%
mutate(CPR=factor(paste(County,Province_State ,Country_Region,sep = "_"))) %>%
mutate(days = as.numeric(Date-min(Date)), Wday=factor(weekdays(Date)),month=factor(as.numeric(format(Date,"%m"))))
# NOTE(review): the relabeling below assumes a fixed (locale-dependent)
# alphabetical order from weekdays() — verify in a non-English locale.
levels(train_Kag_clean$Wday) <-c(6,2,7,1,5,3,4) # Start from sunday =1
train_Kag_clean%>% head(10)%>% knitr::kable()
test_Kag_clean <- test_Kag %>%
mutate(CPR=factor(paste(County,Province_State ,Country_Region,sep = "_"))) %>%
mutate(days = as.numeric(Date-min(Date)), Wday=factor(weekdays(Date)),month=factor(as.numeric(format(Date,"%m"))))
levels(test_Kag_clean$Wday) <-c(6,2,7,1,5,3,4) # Start from sund =1
test_Kag_clean%>% head(10) %>% knitr::kable()
# plot confirmed cases for one region
# NOTE(review): the variables are named *_US but the filter selects Italy.
Conf_US <- train_Kag_clean %>% dplyr::filter(Target=="ConfirmedCases" & CPR=="NA_NA_Italy") %>%
group_by(CPR,Date) %>% summarise(TargetValue=sum(TargetValue)) %>%
select(Date,TargetValue)
par(mar = c(2, 2, 2, 2))
plot(Conf_US$Date,Conf_US$TargetValue)
# plot Fat cases for one region
Fat_US<-train_Kag_clean %>% dplyr::filter(Target=="Fatalities" & CPR=="NA_NA_Italy") %>%
group_by(CPR,Date) %>% summarise(TargetValue=sum(TargetValue)) %>%
select(Date,TargetValue)
plot(Fat_US$Date,Fat_US$TargetValue)
# Split Kaggle train Data for local train test
ForecastDate <- as.Date("2020-05-08") # one weeks forecast
StartDate <- as.Date("2020-03-01") # since there is a tall tail at the begining for most counties
# NOTE(review): both comparisons are strict, so ForecastDate itself (and
# StartDate) lands in neither split — confirm that is intended.
indx <- train_Kag_clean$Date < ForecastDate & train_Kag_clean$Date>StartDate
ts_indx <-train_Kag_clean$Date > ForecastDate
train <- train_Kag_clean[indx,]
test <- train_Kag_clean[ts_indx,]
# Split Confirmed and Fatality cases and add accumulated sum
# (accSum = running total per location, computed within each CPR group)
train_Conf <- train %>% dplyr::filter(Target=="ConfirmedCases") %>%
group_by(CPR) %>% mutate(accSum=cumsum(TargetValue))%>% ungroup()
train_Fat <- train %>% dplyr::filter(Target=="Fatalities") %>%
group_by(CPR) %>% mutate(accSum=cumsum(TargetValue))%>% ungroup()
test_Conf <- test%>% dplyr::filter(Target=="ConfirmedCases") %>%
group_by(CPR) %>% mutate(accSum=cumsum(TargetValue))%>% ungroup()
test_Fat <- test %>% dplyr::filter(Target=="Fatalities") %>%
group_by(CPR) %>% mutate(accSum=cumsum(TargetValue))%>% ungroup()
# Lets see top 10 Confirmed cases county
top10 <- train_Conf %>% group_by(CPR) %>% summarise(TargetValue=sum(TargetValue)) %>%
arrange(desc(TargetValue)) %>% dplyr::filter(str_detect(CPR,"_US\\b"))%>% head(10)
top10%>% knitr::kable()
# Lets View Bottom 10 with at least 100 Target Value
bot10 <- train_Conf%>% dplyr::filter(TargetValue > 100) %>% group_by(CPR) %>% summarise(TargetValue=sum(TargetValue)) %>%
arrange(TargetValue)
bot10%>% knitr::kable()
######################################
# Case study US Confirmed cases
train_Conf_USA <- train_Conf %>% dplyr::filter(CPR=="NA_NA_US")
#View(train_Conf_USA%>%head(10))
test_Conf_USA <- test_Conf %>% dplyr::filter(CPR=="NA_NA_US")
#View(train_Conf_USA)
train_Fat_USA <- train_Fat %>% dplyr::filter(CPR=="NA_NA_US")
test_Fat_USA <- test_Fat %>% dplyr::filter(CPR=="NA_NA_US")
# Daily ts object for the US confirmed series.
# NOTE(review): trailing comment says "weekly data" but frequency = 365.
tsTrConf<- ts(select(data.frame(train_Conf_USA),TargetValue),start=c(2020,3,1),frequency = 365) # consider weekly data
plot(tsTrConf)
#detemine stationarity of data by urkpss Test
# KPSS unit-root test wrapper around urca::ur.kpss, reformatting its
# summary output into an fHTEST object (fBasics-style).
#
# Args: x = series to test; type/lags = passed through to ur.kpss (first
# element only); use.lag = explicit lag override; doplot = legacy flag.
#
# NOTE(review): `urca::summary` is not an exported function of urca
# (summary() is a generic) — presumably `summary(urca)` was intended;
# TODO confirm this line actually runs.
# NOTE(review): because plot(urca) is commented out, `if (doplot)` now
# guards the `new("fHTEST", ...)` construction, so the function returns
# nothing (invisible NULL) when doplot is FALSE.
# NOTE(review): `description()` is not defined in this file — presumably
# it comes from fBasics (loaded via fUnitRoots); verify.
my_urkpssTest <- function (x, type, lags, use.lag, doplot) {
x <- as.vector(x)
urca <- urca::ur.kpss(x, type = type[1], lags = lags[1], use.lag = use.lag)
output = capture.output(urca::summary(urca))[-(1:4)]
output = output[-length(output)]
for (i in 1:length(output)) output[i] = paste(" ", output[i])
ans = list(name = "ur.kpss", test = urca, output = output)
if (doplot)
#plot(urca)
new("fHTEST", call = match.call(), data = list(x = x),
test = ans, title = "KPSS Unit Root Test", description = description())
}
# Run the KPSS test on the raw US confirmed-cases series.
my_urkpssTest(tsTrConf, type = c("tau"), lags = c("long"),
use.lag = NULL, doplot = TRUE)
# From the results it is clear that "Value of test-statistic" > "Critical value for a significance level of 5%"
# Thus the null hypothesis of stationarity is rejected.
# First-difference the series and inspect the autocorrelation structure.
tsstationary<-diff(tsTrConf, differences=1)
plot(tsstationary)
acf(tsTrConf,lag.max=34)
######
## Feature Selection Analysis
train_Con_dt<-select(train_Conf_USA,TargetValue,Date,days,Wday)
train_Fat_dt<-select(train_Fat_USA,TargetValue,Date,days,Wday)
# We Can Use Boruta library to rank features
set.seed(2020)
Br_Con <- Boruta(TargetValue ~., data=train_Con_dt, doTrace=2, maxRuns=50)
print(Br_Con)
plot(Br_Con,las=2,cex.axis=0.7) # las: to make lables vertical, cex.axis for scaling view
# We can see that only the Day and days features are important for estimating TargetValue in Confirmed Cases
# Now lets explore Fatality cases
set.seed(2020)
Br_Fat <- Boruta(TargetValue ~., data=train_Fat_dt, doTrace=2, maxRuns=50)
print(Br_Fat)
plot(Br_Fat,las=2,cex.axis=0.7) # las: to make lables vertical, cex.axis for scaling view
#We get approximately the same results on Fatality cases
#########################################################
#############################
## Modeleling
# Baseline: univariate auto.arima on the TargetValue column only, then a
# naive h-step forecast over the local test window; scored with RMSE and R2.
Location <- "US"
fit <- auto.arima(select(data.frame(train_Con_dt) ,TargetValue))
acc<- forecast::accuracy(fit)
acc
training_Con_RMSE<-acc[1,2]
Forc <- forecast(fit,h=nrow(test_Conf_USA))
plot(Forc)
# calculate RMSE
ForcMean<- as.numeric(Forc$mean)
Conf_RMSE<-rmse(ForcMean,test_Conf_USA$TargetValue)
Conf_R2 <-R2_Score(ForcMean,test_Conf_USA$TargetValue)
# Same procedure for the fatalities series.
fit <- auto.arima(select(data.frame(train_Fat_dt) ,TargetValue))
Forc <- forecast(fit,h=nrow(test_Fat_USA))
par(mar=c(2,2,2,2))
plot(Forc)
# calculate RMSE
ForcMean<- as.numeric(Forc$mean)
Fat_RMSE<-rmse(ForcMean,test_Fat_USA$TargetValue) #1800.502
Fat_R2 <-R2_Score(ForcMean,test_Fat_USA$TargetValue)
# Show RMSE results
# rmse_results accumulates one row per method for the comparison table.
rmse_results <- tibble(Method = "Auto-ARIMA", Conf_RMSE = Conf_RMSE, Fat_RMSE=Fat_RMSE,Conf_R2=Conf_R2,Fat_R2=Fat_R2, Location=Location)
rmse_results %>% knitr::kable()
##########################################################
## Using H2O library to perform the task in parallel way
h2o.init(nthreads = 2, #Number of threads -1 means use all cores on your machine
max_mem_size = "7G") #max mem size is the maximum memory to allocate to H2O
# Push the local frames into the H2O cluster.
train_Con_h2o <- as.h2o(train_Con_dt)
train_Fat_h2o <- as.h2o(train_Fat_dt)
test_Conf_h2o<-as.h2o(test_Conf_USA)
test_Fat_h2o <- as.h2o(test_Fat_USA)
# Predictor/response column names for this US case study.
x<-"days"
y<-"TargetValue"
nfolds<-5
# fit rf model
rf_model_Con_h2o<- h2o.randomForest(x = x,
y = y,
training_frame = train_Con_h2o,
ntrees = 50,
nfolds = nfolds,
keep_cross_validation_predictions = TRUE,
seed = 1)
Prd <- h2o.predict(rf_model_Con_h2o,newdata = test_Conf_h2o)
perf <- h2o.performance(rf_model_Con_h2o, newdata = test_Conf_h2o)
print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
############# Fiting Fatalities############
rf_model_Fat_h2o<- h2o.randomForest(x = x,
y = y,
training_frame = train_Fat_h2o,
ntrees = 50,
nfolds = nfolds,
keep_cross_validation_predictions = TRUE,
seed = 1)
Prd <- h2o.predict(rf_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(rf_model_Fat_h2o, newdata = test_Fat_h2o)
print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
# Show RMSE results
rmse_results <- rbind(rmse_results,tibble(Method = perf@algorithm, Conf_RMSE = Conf_RMSE,Conf_R2=Conf_R2, Fat_RMSE=Fat_RMSE,Fat_R2=Fat_R2, Location=Location))
rmse_results %>% knitr::kable()
###########################################################
# Train & Cross-validate a Gradient Boosting Machine
# (same train/test frames and x/y columns as the random forest above)
gbm_model_Con_h2o <- h2o.gbm(x = x,
y = y,
training_frame = train_Con_h2o,
ntrees = 10,
max_depth = 3,
min_rows = 2,
learn_rate = 0.2,
nfolds = nfolds,
keep_cross_validation_predictions = TRUE,
seed = 1)
Prd <- h2o.predict(gbm_model_Con_h2o,newdata = test_Conf_h2o)
perf <- h2o.performance(gbm_model_Con_h2o, newdata = test_Conf_h2o)
print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
###### Fit Fatalities
gbm_model_Fat_h2o <- h2o.gbm(x = x,
y = y,
training_frame = train_Fat_h2o,
ntrees = 10,
max_depth = 3,
min_rows = 2,
learn_rate = 0.2,
nfolds = nfolds,
keep_cross_validation_predictions = TRUE,
seed = 1)
Prd <- h2o.predict(gbm_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(gbm_model_Fat_h2o, newdata = test_Fat_h2o)
print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
# Show RMSE results
rmse_results <- rbind(rmse_results,tibble(Method = perf@algorithm, Conf_RMSE = Conf_RMSE,Conf_R2=Conf_R2, Fat_RMSE=Fat_RMSE,Fat_R2=Fat_R2, Location=Location))
rmse_results %>% knitr::kable()
### Generalized Linear regression
# alpha = 0.5 selects an elastic-net mix of L1/L2 penalties.
glm_model_Con_h2o <- h2o.glm(x = x,
y = y,
nfolds = nfolds,
alpha=0.5,
training_frame = train_Con_h2o,
keep_cross_validation_predictions = TRUE,
seed = 1)
Prd <- h2o.predict(glm_model_Con_h2o,newdata = test_Conf_h2o)
perf <- h2o.performance(glm_model_Con_h2o, newdata = test_Conf_h2o)
print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
# Fit Fatalities
glm_model_Fat_h2o <- h2o.glm(x = x,
y = y,
nfolds = nfolds,
alpha=0.5,
training_frame = train_Fat_h2o,
keep_cross_validation_predictions = TRUE,
seed = 1)
Prd <- h2o.predict(glm_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(glm_model_Fat_h2o, newdata = test_Fat_h2o)
print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
# Show RMSE results
rmse_results <- rbind(rmse_results,tibble(Method = perf@algorithm, Conf_RMSE = Conf_RMSE,Conf_R2=Conf_R2, Fat_RMSE=Fat_RMSE,Fat_R2=Fat_R2, Location=Location))
rmse_results %>% knitr::kable()
######################################################################
#Fit Stack ensembler
# Train a stacked ensemble using the above models
# (requires the base models' keep_cross_validation_predictions = TRUE)
ensemble_model_Con_h2o <- h2o.stackedEnsemble(x = x,
y = y,
training_frame = train_Con_h2o,
base_models = list(glm_model_Con_h2o,gbm_model_Con_h2o,rf_model_Con_h2o))
# Eval ensemble performance on a test set
Prd <- h2o.predict(ensemble_model_Con_h2o,newdata = test_Conf_h2o)
perf <- h2o.performance(ensemble_model_Con_h2o, newdata = test_Conf_h2o)
print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
###### Fit fatalities
# Train a stacked ensemble using the above models
ensemble_model_Fat_h2o <- h2o.stackedEnsemble(x = x,
y = y,
training_frame = train_Fat_h2o,
base_models = list(glm_model_Fat_h2o,gbm_model_Fat_h2o,rf_model_Fat_h2o))
# Eval ensemble performance on a test set
Prd <- h2o.predict(ensemble_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(ensemble_model_Fat_h2o, newdata = test_Fat_h2o)
print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
# Show RMSE results
rmse_results <- rbind(rmse_results,tibble(Method = perf@algorithm, Conf_RMSE = Conf_RMSE,Conf_R2=Conf_R2, Fat_RMSE=Fat_RMSE,Fat_R2=Fat_R2, Location=Location))
rmse_results %>% knitr::kable()
## Modeling different regions with different Models by selecting 3 places from the top and 3 places from the bottom
# Fit one h2o model type on both targets and score it on the test frames.
#
# Model          one of "drf", "gbm", "glm" (h2o algorithm keys).
# train/test_Con_h2o, train/test_Fat_h2o  H2OFrames for the confirmed and
#                fatalities series of a single location.
# x, y           predictor and response column names (defaults "Date",
#                "TargetValue"; note the earlier US case study used "days").
#
# Returns list(Conf_RMSE, Fat_RMSE, Conf_R2, Fat_R2).
#
# NOTE(review): the header comment below mentions caret / auto.arima, but
# only the three h2o branches are implemented; any other Model value falls
# through and errors on the undefined Conf_RMSE at the end.
# NOTE(review): the three branches are near-identical copies differing only
# in the h2o constructor — a candidate for consolidation.
fit_Model <- function(Model,train_Con_h2o,test_Con_h2o,train_Fat_h2o, test_Fat_h2o,x="Date",y="TargetValue" )
{
nfolds<-5
# This function will take a model from caret package or "auto.arima" and return a list
# contains Predictions of confirmed and Fatilities in addition to the RMSE of the predictions
if(Model=="drf"){
# fit rf model
rf_model_Con_h2o<- h2o.randomForest(x = x,
y = y,
training_frame = train_Con_h2o,
ntrees = 50,
nfolds = nfolds,
keep_cross_validation_predictions = TRUE,
seed = 1)
#Prd_Con <- h2o.predict(rf_model_Con_h2o,newdata = test_Con_h2o)
perf <- h2o.performance(rf_model_Con_h2o, newdata = test_Con_h2o)
#print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
############# Fiting Fatalities############
rf_model_Fat_h2o<- h2o.randomForest(x = x,
y = y,
training_frame = train_Fat_h2o,
ntrees = 50,
nfolds = nfolds,
keep_cross_validation_predictions = TRUE,
seed = 1)
#Prd_Fat <- h2o.predict(rf_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(rf_model_Fat_h2o, newdata = test_Fat_h2o)
#print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
}else if(Model=="gbm"){
# Train & Cross-validate a Gradient Boosting Machine
gbm_model_Con_h2o <- h2o.gbm(x = x,
y = y,
training_frame = train_Con_h2o,
ntrees = 10,
max_depth = 3,
min_rows = 2,
learn_rate = 0.2,
nfolds = nfolds,
keep_cross_validation_predictions = TRUE,
seed = 1)
#Prd_Con <- h2o.predict(gbm_model_Con_h2o,newdata = test_Con_h2o)
perf <- h2o.performance(gbm_model_Con_h2o, newdata = test_Con_h2o)
#print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
###### Fit Fatalities
gbm_model_Fat_h2o <- h2o.gbm(x = x,
y = y,
training_frame = train_Fat_h2o,
ntrees = 10,
max_depth = 3,
min_rows = 2,
learn_rate = 0.2,
nfolds = nfolds,
keep_cross_validation_predictions = TRUE,
seed = 1)
#Prd_Fat <- h2o.predict(gbm_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(gbm_model_Fat_h2o, newdata = test_Fat_h2o)
#print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
}else if(Model=="glm"){
### Generalized Linear regression
glm_model_Con_h2o <- h2o.glm(x = x,
y = y,
nfolds = nfolds,
alpha=0.5,
training_frame = train_Con_h2o,
keep_cross_validation_predictions = TRUE,
seed = 1)
#Prd_Con <- h2o.predict(glm_model_Con_h2o,newdata = test_Con_h2o)
perf <- h2o.performance(glm_model_Con_h2o, newdata = test_Con_h2o)
#print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
# Fit Fatalities
glm_model_Fat_h2o <- h2o.glm(x = x,
y = y,
nfolds = nfolds,
alpha=0.5,
training_frame = train_Fat_h2o,
keep_cross_validation_predictions = TRUE,
seed = 1)
#Prd_Fat <- h2o.predict(glm_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(glm_model_Fat_h2o, newdata = test_Fat_h2o)
#print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
}
#as.h2o(data.frame(Predicted_Confirmed=as.vector(Prd_Con),Predicted_Fatilities=as.vector(Prd_Fat))),
list(Conf_RMSE=Conf_RMSE,Fat_RMSE=Fat_RMSE,Conf_R2=Conf_R2, Fat_R2=Fat_R2)
}
# Fit every model in `Md` on every location in `pl`.
#
# For each place, the four H2OFrames are subset to that location's CPR key
# and each model is fit/evaluated via fit_Model(). Returns a nested list:
# one element per place, each holding one fit_Model() result per model.
# Defaults reference script-level globals (Mdls, tr_C_h2o, ..., places).
#
# Fix vs. previous version: the x/y arguments were accepted but ignored
# (the inner call hard-coded "Date"/"TargetValue"); they are now forwarded.
# Defaults are unchanged, so existing callers see identical behavior.
Apply_Models<-function(Md=Mdls,tr_Con=tr_C_h2o,ts_Con=ts_C_h2o,tr_Fat=tr_F_h2o,ts_Fat=ts_F_h2o,pl=places,x="Date",y="TargetValue"){
  lapply(pl,function(p){
    # Per-location subsets of the train/test frames for both targets.
    tr_Con_CPR <- tr_Con[tr_Con[,"CPR"]==p,]
    ts_Con_CPR <-ts_Con[ts_Con[,"CPR"]==p ,]
    tr_Fat_CPR <- tr_Fat[tr_Fat[,"CPR"]==p,]
    ts_Fat_CPR <- ts_Fat[ts_Fat[,"CPR"]==p,]
    lapply(Md,function(M){
      fit_Model(M,tr_Con_CPR,ts_Con_CPR,tr_Fat_CPR,ts_Fat_CPR,x=x,y=y)
    })
  })
}
#Lets compare models on diferrent locations
Mdls<- c("gbm","drf","glm") #, ,
# first lets select a sample of 10% of the place where 70% are from USA
SmpSize<-0.1*(length(unique(train_Kag_clean$CPR)))
set.seed(2020)
Sample_US <- sample(train_Conf$CPR[str_detect(train_Conf$CPR,"_US\\b")],round(SmpSize*0.7))
set.seed(2020)
Sample_NotUS<- sample(train_Conf$CPR[!str_detect(train_Conf$CPR,"_US\\b")],round(SmpSize*0.3))
set.seed(2020)
# Shuffle the combined US / non-US samples into one vector of location keys.
Sample_CPR <-sample(c(as.character(Sample_US) ,as.character(Sample_NotUS)))
Sample_CPR %>% head() %>% knitr::kable()
places <-Sample_CPR
# lets use H2O library to do it in parallel
tr_C_h2o<-as.h2o(select(train_Conf,CPR,Date, TargetValue))
ts_C_h2o<-as.h2o(select(test_Conf,CPR,Date, TargetValue))
tr_F_h2o<-as.h2o(select(train_Fat,CPR,Date, TargetValue))
ts_F_h2o<-as.h2o(select(test_Fat,CPR,Date, TargetValue))
x<-"Date"
y<-"TargetValue"
#### this will take about an hour on modest pc
tic("Total time")
# NOTE(review): the expensive run is commented out and precomputed results
# are loaded from disk instead; comment the load() to regenerate.
#results <- Apply_Models() # I had already save the results
load("Var/ML_Res2020-05-29_19-35.rda")# if you want to generate your own results please comment this line
toc()
ShowResults_tbl <- function(M=Mdls,p=places, r=results){
RMSE_results<- tibble(Model=character(),Place=character(),Conf_RMSE=numeric(),Fat_RMSE=numeric(),Conf_R2=numeric(),Fat_R2=numeric())
for(j in 1:length(p)){
for (i in 1:length(M)){
RMSE_results <- bind_rows(RMSE_results,tibble( Model = M[i],Place=p[j], Conf_RMSE=r[[j]][[i]][["Conf_RMSE"]], Fat_RMSE=r[[j]][[i]][["Fat_RMSE"]],Conf_R2 = r[[j]][[i]][["Conf_R2"]], Fat_R2=r[[j]][[i]][["Fat_R2"]] ))
}
#RMSE_results <- bind_rows(RMSE_results,tibble( Model = "---------",Place="---------",Conf_RMSE=0000000,Fat_RMSE=000000, Conf_R2 = 0000000, Fat_R2=000000 ))
}
RMSE_results
}
#Res_clean<-ShowResults_tbl() # if you want to generate your own results uncomment this tag
##### Save result to file
#current_DT<-format(Sys.time(), "%Y-%m-%d_%H-%M")
#Resfile<- paste0("Var/ML_Res",current_DT,".rda")
#save(Res_clean,results , file = Resfile)
#########################################################################
########################################################################
# Inspect the per-location scores, sorted by R2 within each place.
Res_clean %>% head() %>% knitr::kable()
Res_clean %>% select(Place,Model,Conf_R2) %>% arrange(Place,desc(Conf_R2))%>% knitr::kable()
Res_clean %>% select(Place,Model,Fat_R2) %>%arrange(Place,desc(Fat_R2))%>% knitr::kable()
# From the results one can infere that there is no model always giving the best R- score
# Lets see which model has the Highest average R2 score
Res_clean %>% select(Place,Model,Conf_R2) %>% group_by (Model)%>%
summarise(Conf_R2=sum(Conf_R2)/n())%>% arrange(desc(Conf_R2))%>% knitr::kable()
Res_clean %>% select(Place,Model,Fat_R2) %>% group_by (Model)%>%
summarise(Fat_R2=sum(Fat_R2)/n())%>% arrange(desc(Fat_R2))%>% knitr::kable()
# It is obvious that Gradient Boosting Machine Model has the best Average R-Score for both
# Confirmed cases and Fatality cases
######################### Final Training
#Now lets train Gradient Boosting Machine on the whole training set
#h2o.init(nthreads = -1,max_mem_size = "7G") #max mem size is the maximum memory to allocate to H2O
#Number of threads -1 means use all cores on your machine
## Prediction Function
# Fit a 5-fold cross-validated H2O GBM for confirmed cases and another for
# fatalities, predict on the corresponding test frames, and return:
#   [[1]] data.frame(Predicted_Confirmed, Predicted_Fatilities)
#   mae_Con / mae_Fat   - training-set mean absolute error of each model
#   RMSE_Con / RMSE_Fat - training-set RMSE of each model
# NOTE(review): the reported MAE/RMSE come from training_metrics, not from
# the test frames - they measure fit, not generalization.
Predic_gbm <- function(train_Con_h2o,test_Con_h2o,train_Fat_h2o, test_Fat_h2o,x="Date",y="TargetValue" )
{
nfolds<-5
# Train & Cross-validate a Gradient Boosting Machine
gbm_model_Con_h2o <- h2o.gbm(x = x,
                             y = y,
                             training_frame = train_Con_h2o,
                             ntrees = 10,
                             max_depth = 3,
                             min_rows = 2,
                             learn_rate = 0.2,
                             nfolds = nfolds,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
Prd_Con <-h2o.predict(gbm_model_Con_h2o,newdata = test_Con_h2o)
#Mean Absolute Error
mae_Con <-gbm_model_Con_h2o@model[["training_metrics"]]@metrics[["mae"]]
RMSE_Con <-gbm_model_Con_h2o@model[["training_metrics"]]@metrics[["RMSE"]]
###### Fit Fatalities
gbm_model_Fat_h2o <- h2o.gbm(x = x,
                             y = y,
                             training_frame = train_Fat_h2o,
                             ntrees = 10,
                             max_depth = 3,
                             min_rows = 2,
                             learn_rate = 0.2,
                             nfolds = nfolds,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
Prd_Fat <- h2o.predict(gbm_model_Fat_h2o,newdata = test_Fat_h2o)
##Mean Absolute Error
mae_Fat<-gbm_model_Fat_h2o@model[["training_metrics"]]@metrics[["mae"]]
RMSE_Fat <-gbm_model_Fat_h2o@model[["training_metrics"]]@metrics[["RMSE"]]
list(data.frame(Predicted_Confirmed=as.vector(Prd_Con),Predicted_Fatilities=as.vector(Prd_Fat)),mae_Con=mae_Con,mae_Fat=mae_Fat, RMSE_Con=RMSE_Con,RMSE_Fat=RMSE_Fat)
}
#' Run the GBM predictor for every place in `pl`.
#'
#' Subsets the four H2O frames (confirmed/fatality x train/test) to a single
#' CPR key and delegates to Predic_gbm().  Returns a list with one
#' Predic_gbm() result per place, in the order of `pl`.
Apply_CPR <- function(tr_Con = tr_C_h2o, ts_Con = ts_C_h2o,
                      tr_Fat = tr_F_h2o, ts_Fat = ts_F_h2o,
                      pl = places, x = "Date", y = "TargetValue") {
  out <- vector("list", length(pl))
  for (k in seq_along(pl)) {
    key <- pl[k]
    con_train <- tr_Con[tr_Con[, "CPR"] == key, ]
    con_test  <- ts_Con[ts_Con[, "CPR"] == key, ]
    fat_train <- tr_Fat[tr_Fat[, "CPR"] == key, ]
    fat_test  <- ts_Fat[ts_Fat[, "CPR"] == key, ]
    out[[k]] <- Predic_gbm(con_train, con_test, fat_train, fat_test,
                           x = "Date", y = "TargetValue")
  }
  out
}
# ---- Final GBM training over every location ----
# Split the full cleaned Kaggle data by target; note the test frames carry
# no TargetValue column (they are the prediction horizon).
train_Kag_Con<- train_Kag_clean %>% dplyr::filter(Target=="ConfirmedCases")
test_Kag_Con<- test_Kag_clean %>% dplyr::filter(Target=="ConfirmedCases")
train_Kag_Fat <-train_Kag_clean %>% dplyr::filter(Target=="Fatalities")
test_Kag_Fat <-test_Kag_clean %>% dplyr::filter(Target=="Fatalities")
tr_C_h2o<-as.h2o(select(train_Kag_Con,CPR,Date, TargetValue))
ts_C_h2o<-as.h2o(select(test_Kag_Con,CPR,Date))
tr_F_h2o<-as.h2o(select(train_Kag_Fat,CPR,Date, TargetValue))
ts_F_h2o<-as.h2o(select(test_Kag_Fat,CPR,Date))
x<-"Date"
y<-"TargetValue"
All_CPR<- as.vector(unique(test_Kag_clean$CPR))
places<-All_CPR
#### this will take about an hour on modest pc
tic("Total time")
#gmb_Res <- Apply_CPR() # I had already save the results
load("Var/gmb_Res2020-05-30_19-17.rda")# if you want to generate your own results please comment this line
toc() #Total time: 5543.53 sec elapsed
##### Save result to file
#current_DT<-format(Sys.time(), "%Y-%m-%d_%H-%M")
#Resfile<- paste0("Var/gmb_Res",current_DT,".rda")
#save(gmb_Res , file = Resfile)
#### Process Prediction Results
# Flatten the per-place prediction lists into one long table; mae_* are
# repeated per row so they can feed the quantile construction below.
predicted_results<- tibble(ForecastId=numeric(),CPR=character(),ConfirmedCases=numeric(),Fatalities=numeric(),mae_Con=numeric(),mae_Fat=numeric())
Predlength<-length(gmb_Res[[1]][[1]][["Predicted_Confirmed"]])
for(i in 1:length(gmb_Res)){
CPR_i<- rep(All_CPR[i],Predlength)
predicted_results <- bind_rows(predicted_results,tibble(ForecastId=i,
                                                        CPR=CPR_i,
                                                        ConfirmedCases=gmb_Res[[i]][[1]][["Predicted_Confirmed"]],
                                                        Fatalities=gmb_Res[[i]][[1]][["Predicted_Fatilities"]],
                                                        mae_Con=rep(gmb_Res[[i]][["mae_Con"]],Predlength),
                                                        mae_Fat=rep(gmb_Res[[i]][["mae_Fat"]],Predlength)))
}
predicted <- predicted_results %>% pivot_longer(c("ConfirmedCases","Fatalities"), names_to = "Target", values_to = "TargetValue") %>%
arrange(ForecastId) %>% mutate(ForecastId=seq(1:nrow(.))) %>% left_join(select(test_Kag,Date,ForecastId), by="ForecastId")
predicted <-as.data.frame(predicted)
# adjust columns to be according to the follwing structure "ForecastId", "q_0.05","q_0.5","q_0.95"
### Build the 5%/50%/95% quantiles around the point prediction, using the
### training MAE as the normal-approximation spread (q_0.5 = point forecast).
Predicted_C <- predicted %>% dplyr::filter(Target=="ConfirmedCases") %>%
select(ForecastId,mae_Con,TargetValue,Date)%>%
mutate(q_0.05=qnorm(0.05,TargetValue,mae_Con),
q_0.5=TargetValue,
q_0.95=qnorm(0.95,TargetValue,mae_Con))%>%
select(-mae_Con,-TargetValue)
Predicted_F <- predicted %>% dplyr::filter(Target=="Fatalities") %>%
select(ForecastId,mae_Fat,TargetValue,Date)%>%
mutate(q_0.05=qnorm(0.05,TargetValue,mae_Fat),
q_0.5=TargetValue,
q_0.95=qnorm(0.95,
TargetValue,mae_Fat))%>%
select(-mae_Fat,-TargetValue)
#############################################
#Evaluate using Pinball evaluation
#Pinball evaluation performance
#' Weighted pinball (quantile) loss at level `tau`.
#'
#' Negative predictions are clamped to zero before scoring.  Observations
#' above the prediction are penalized by `tau`, those below by `1 - tau`.
#' NOTE(review): the weight is applied AFTER averaging, so a vector weight
#' contributes only through its mean - confirm this matches the intended
#' competition metric before changing it.
pinball <- function(true_array, predicted_array, tau, weight){
  clamped <- pmax(predicted_array, 0)
  over <- clamped > true_array
  loss <- abs(true_array - clamped) * ifelse(over, 1 - tau, tau)
  mean(mean(loss) * weight)
}
#' Average pinball loss over the three submission quantiles.
#'
#' Scores the 0.95 quantile against `max_array`, the 0.05 quantile against
#' `min_array`, and the median against `mean_array`, then averages the three.
Avg_loss <- function(true_array, mean_array, min_array, max_array, weights){
  losses <- c(
    pinball(true_array, max_array, 0.95, weights),
    pinball(true_array, min_array, 0.05, weights),
    pinball(true_array, mean_array, 0.5, weights)
  )
  sum(losses) / 3
}
# ---- Pinball evaluation on the train/test overlap ----
# The prediction horizon overlaps the end of the training data; score the
# forecasts against the known actuals within [minDate, maxDate].
minDate<-min(test_Kag$Date)
maxDate<-max(train_Kag$Date)
# Actual values (and competition weights) from the original Kaggle training set
TargetVal_C_trained <- train_Kag %>% dplyr::filter(Target=="ConfirmedCases" & Date>= minDate)%>% .$TargetValue
TargetVal_F_trained <- train_Kag %>% dplyr::filter (Target=="Fatalities" & Date>= minDate) %>% .$TargetValue
weights_C <-train_Kag %>% dplyr::filter(Target=="ConfirmedCases" & Date>= minDate)%>% .$Weight
weights_F <-train_Kag %>% dplyr::filter(Target=="Fatalities" & Date>= minDate)%>% .$Weight
# Logical masks selecting predicted rows inside the overlap window
Prd_Dt_Msk_C<- Predicted_C[,"Date"]<= maxDate & Predicted_C[,"Date"]>= minDate
Prd_Dt_Msk_F<- Predicted_F[,"Date"]<= maxDate & Predicted_F[,"Date"]>= minDate
# Pinball results for Confirmed Cases
Pinball_Results_Con <- Avg_loss(TargetVal_C_trained,
Predicted_C[Prd_Dt_Msk_C,]$q_0.5,
Predicted_C[Prd_Dt_Msk_C,]$q_0.05,
Predicted_C[Prd_Dt_Msk_C,]$q_0.95,
weights_C)
print(paste("Pinball Result for Confirmed Cases:",Pinball_Results_Con))
# Pinball Results for Fatalities
Pinball_Results_Fat <- Avg_loss(TargetVal_F_trained,
Predicted_F[Prd_Dt_Msk_F,]$q_0.5,
Predicted_F[Prd_Dt_Msk_F,]$q_0.05,
Predicted_F[Prd_Dt_Msk_F,]$q_0.95,
weights_F)
print(paste("Pinball Result for Fatality Cases:",Pinball_Results_Fat))
######################### Plot Prediction Results
# ---- Actual vs predicted for Italy ----
# Restrict predictions to the overlap window so they can be compared with
# the known actuals for the same dates.
Italy_C_IdDate<-test_Kag %>% dplyr::filter(Country_Region=="Italy" & Target=="ConfirmedCases" & Date <=maxDate) %>% select(ForecastId,Date)
Italy_F_IdDate<-test_Kag %>% dplyr::filter(Country_Region=="Italy" & Target=="Fatalities" & Date <=maxDate) %>% select(ForecastId,Date)
Italy_C_test<-train_Kag %>% dplyr::filter(Country_Region=="Italy" & Target=="ConfirmedCases" & Date >=minDate) %>% select(TargetValue,Date)
Italy_F_test<-train_Kag %>% dplyr::filter(Country_Region=="Italy" & Target=="Fatalities"& Date >=minDate) %>% select(TargetValue,Date)
Prd_It_C<-Predicted_C%>% select(-Date)%>%inner_join(Italy_C_IdDate, by="ForecastId")
Prd_It_F<-Predicted_F%>% select(-Date)%>%inner_join(Italy_F_IdDate, by="ForecastId")
ggplot(aes(x=Date), data=Prd_It_C) +
geom_line(aes(y=q_0.5), col="blue",data=Prd_It_C) +
geom_line(aes(y=TargetValue), data =Italy_C_test, col="red" ) +
labs(colour="TargetValue",
x="Date",
y="Confirmed Cases") +
scale_color_manual(name="", values = c("red","blue")) +
scale_fill_manual(name="", values=c("red","blue"))+
ggtitle("Actual vs Prediction For Italy confirmed cases")
ggplot(aes(x=Date), data=Prd_It_F) +
geom_line(aes(y=q_0.5), col="blue",data=Prd_It_F) +
geom_line(aes(y=TargetValue), data =Italy_F_test, col="red" ) +
labs(colour="TargetValue",
x="Date",
y="Fatality Cases") +
scale_color_manual(name="", values = c("red","blue")) +
scale_fill_manual(name="", values=c("red","blue"))+
ggtitle("Actual vs Prediction For Italy Fatality cases")
#################### Replace predictions in the overlap window with the
#################### known training actuals (all three quantiles) before
#################### building the submission.
Predicted_C[Prd_Dt_Msk_C,]$q_0.5 <-TargetVal_C_trained
Predicted_C[Prd_Dt_Msk_C,]$q_0.05 <-TargetVal_C_trained
Predicted_C[Prd_Dt_Msk_C,]$q_0.95 <-TargetVal_C_trained
Predicted_F[Prd_Dt_Msk_F,]$q_0.5 <-TargetVal_F_trained
Predicted_F[Prd_Dt_Msk_F,]$q_0.05 <-TargetVal_F_trained
Predicted_F[Prd_Dt_Msk_F,]$q_0.95 <-TargetVal_F_trained
#########################################################
## Submit Results
# Submission function
# Write the Kaggle submission CSV (Output/submission<timestamp>.csv).
# test_c / test_f must each carry "ForecastId","q_0.05","q_0.5","q_0.95";
# they are stacked, re-keyed, joined against the sample submission `sub_Kag`
# (a global read at the top of the script), and pivoted back to the
# "<ForecastId>_<quantile>" long format Kaggle expects.
# Side effect: writes a file; no value is returned.
Submit <- function(test_c,test_f){
#test_c <-Predicted_C
#test_f <- Predicted_F
# expected test_x to have "ForecastId", "q_0.05","q_0.5","q_0.95" columns
test_full <- rbind.data.frame(test_c,test_f) %>% select(ForecastId,q_0.05,q_0.5,q_0.95) %>%
"colnames<-"(c("Id", "0.05","0.5","0.95")) %>% arrange(Id)
# prepare and join submission dataset
sub_temp <- sub_Kag %>% separate(ForecastId_Quantile,c("Id","q"),"_") %>%
pivot_wider(names_from=q,values_from=TargetValue ) %>% select(Id) %>% mutate(Id=as.numeric(Id)) %>%
left_join(test_full, by="Id") %>%
pivot_longer("0.05":"0.95",names_to = "Quantile",values_to = "TargetValue") %>%
mutate(Id=as.character(Id)) %>%unite("_", Id:Quantile) %>% rename("ForecastId_Quantile"="_")
current_DT<-format(Sys.time(), "%Y-%m-%d_%H-%M")
Subfile<- paste0("Output/submission",current_DT,".csv")
write.csv(sub_temp,Subfile,row.names = FALSE)
}
####################################################
# Write the final submission (Date column is dropped; Submit only needs the
# ForecastId and quantile columns) and stop the H2O cluster.
Submit(select(Predicted_C,-Date),select(Predicted_F,-Date))
#################################################
# shutdown H2o Server
h2o.shutdown(prompt = TRUE)
|
/Time_SeriesModeling_Final_H2o.R
|
no_license
|
ahmabboud/COVID-19_Cyberthreats
|
R
| false
| false
| 33,698
|
r
|
## Time Series Modeling
# Install any required packages that are missing from the local library.
list.of.packages <- c("caret", "dplyr","Boruta","mlbench",
"tidyr","fUnitRoots","FitAR","forecast",
"stringr","Metrics","tictoc","MLmetrics","h2o","opera","urca")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
#Load required libraries
library(lmtest)
library("fUnitRoots")
library(FitAR)
library("forecast")
library(caret)
library(dplyr)
library(Boruta)
library(mlbench)
library(tidyr)
library(stringr)
library(Metrics)
library(opera)
library(urca)
library(tictoc)
library(rlist)
library(MLmetrics)
library(doParallel)
library(h2o)
# ---- Load Kaggle week-5 data ----
train_KagCsv <- read.csv("Datasets/Week5/train.csv", na.strings = c("", "NA"))
train_Kag<- as.data.frame(train_KagCsv)
train_Kag <- train_Kag %>% mutate(Date=as.Date(Date))
test_KagCsv <- read.csv("Datasets/Week5/test.csv", na.strings = c("", "NA"))
test_Kag <- as.data.frame(test_KagCsv)
test_Kag <- test_Kag %>% mutate(Date=as.Date(Date))
sub_KagCsv <- read.csv("Datasets/Week5/submission.csv", na.strings = c("", "NA"))
sub_Kag <- as.data.frame(sub_KagCsv)
minDate <- min(train_Kag$Date)
# ---- Clean data ----
# Replace negative daily counts (data corrections) with their absolute value.
sum(train_Kag$TargetValue < 0)
train_Kag$TargetValue <- ifelse(train_Kag$TargetValue<0,abs(train_Kag$TargetValue),train_Kag$TargetValue)
sum(train_Kag$TargetValue < 0)
# Add a random lift (mean 10, sd 1) to every target so the series are
# bounded away from zero and never constant (helps the models/transforms).
lift<-10
train_Kag$TargetValue <-train_Kag$TargetValue + rnorm(length(train_Kag$TargetValue),mean=lift,sd=1)
# CPR = County_Province_Region composite key; add day index, weekday, month.
train_Kag_clean <- train_Kag %>%
mutate(CPR=factor(paste(County,Province_State ,Country_Region,sep = "_"))) %>%
mutate(days = as.numeric(Date-min(Date)), Wday=factor(weekdays(Date)),month=factor(as.numeric(format(Date,"%m"))))
levels(train_Kag_clean$Wday) <-c(6,2,7,1,5,3,4) # Start from sunday =1
train_Kag_clean%>% head(10)%>% knitr::kable()
test_Kag_clean <- test_Kag %>%
mutate(CPR=factor(paste(County,Province_State ,Country_Region,sep = "_"))) %>%
mutate(days = as.numeric(Date-min(Date)), Wday=factor(weekdays(Date)),month=factor(as.numeric(format(Date,"%m"))))
levels(test_Kag_clean$Wday) <-c(6,2,7,1,5,3,4) # Start from sund =1
test_Kag_clean%>% head(10) %>% knitr::kable()
# Sanity plots: daily confirmed and fatality counts for Italy.
Conf_US <- train_Kag_clean %>% dplyr::filter(Target=="ConfirmedCases" & CPR=="NA_NA_Italy") %>%
group_by(CPR,Date) %>% summarise(TargetValue=sum(TargetValue)) %>%
select(Date,TargetValue)
par(mar = c(2, 2, 2, 2))
plot(Conf_US$Date,Conf_US$TargetValue)
# plot Fat cases for one region
Fat_US<-train_Kag_clean %>% dplyr::filter(Target=="Fatalities" & CPR=="NA_NA_Italy") %>%
group_by(CPR,Date) %>% summarise(TargetValue=sum(TargetValue)) %>%
select(Date,TargetValue)
plot(Fat_US$Date,Fat_US$TargetValue)
# ---- Local train/test split from the Kaggle training data ----
ForecastDate <- as.Date("2020-05-08") # one weeks forecast
StartDate <- as.Date("2020-03-01") # since there is a tall tail at the begining for most counties
indx <- train_Kag_clean$Date < ForecastDate & train_Kag_clean$Date>StartDate
ts_indx <-train_Kag_clean$Date > ForecastDate
train <- train_Kag_clean[indx,]
test <- train_Kag_clean[ts_indx,]
# Split confirmed and fatality cases; accSum = per-place running total.
train_Conf <- train %>% dplyr::filter(Target=="ConfirmedCases") %>%
group_by(CPR) %>% mutate(accSum=cumsum(TargetValue))%>% ungroup()
train_Fat <- train %>% dplyr::filter(Target=="Fatalities") %>%
group_by(CPR) %>% mutate(accSum=cumsum(TargetValue))%>% ungroup()
test_Conf <- test%>% dplyr::filter(Target=="ConfirmedCases") %>%
group_by(CPR) %>% mutate(accSum=cumsum(TargetValue))%>% ungroup()
test_Fat <- test %>% dplyr::filter(Target=="Fatalities") %>%
group_by(CPR) %>% mutate(accSum=cumsum(TargetValue))%>% ungroup()
# Top 10 US locations by total confirmed cases
top10 <- train_Conf %>% group_by(CPR) %>% summarise(TargetValue=sum(TargetValue)) %>%
arrange(desc(TargetValue)) %>% dplyr::filter(str_detect(CPR,"_US\\b"))%>% head(10)
top10%>% knitr::kable()
# Bottom locations with at least 100 daily cases somewhere in the window
bot10 <- train_Conf%>% dplyr::filter(TargetValue > 100) %>% group_by(CPR) %>% summarise(TargetValue=sum(TargetValue)) %>%
arrange(TargetValue)
bot10%>% knitr::kable()
######################################
# ---- Case study: US national confirmed cases ----
train_Conf_USA <- train_Conf %>% dplyr::filter(CPR=="NA_NA_US")
#View(train_Conf_USA%>%head(10))
test_Conf_USA <- test_Conf %>% dplyr::filter(CPR=="NA_NA_US")
#View(train_Conf_USA)
train_Fat_USA <- train_Fat %>% dplyr::filter(CPR=="NA_NA_US")
test_Fat_USA <- test_Fat %>% dplyr::filter(CPR=="NA_NA_US")
# Build a daily time series for the US confirmed-case counts.
tsTrConf<- ts(select(data.frame(train_Conf_USA),TargetValue),start=c(2020,3,1),frequency = 365) # consider weekly data
plot(tsTrConf)
#detemine stationarity of data by urkpss Test
# Wrapper around urca::ur.kpss that repackages the summary output as an
# fUnitRoots-style "fHTEST" object (mirrors fUnitRoots::urkpssTest).
# NOTE(review): `urca::summary` relies on urca exporting its S4 summary
# methods, and `description()` comes from the fBasics/fUnitRoots namespace -
# confirm both resolve at runtime with the loaded packages.
# NOTE(review): `doplot` currently has no effect (the plot call is
# commented out) and the local variable shadows the `urca` package name.
my_urkpssTest <- function (x, type, lags, use.lag, doplot) {
x <- as.vector(x)
urca <- urca::ur.kpss(x, type = type[1], lags = lags[1], use.lag = use.lag)
output = capture.output(urca::summary(urca))[-(1:4)]
output = output[-length(output)]
for (i in 1:length(output)) output[i] = paste(" ", output[i])
ans = list(name = "ur.kpss", test = urca, output = output)
if (doplot)
#plot(urca)
new("fHTEST", call = match.call(), data = list(x = x),
test = ans, title = "KPSS Unit Root Test", description = description())
}
# Run the KPSS test on the US confirmed-case series.
my_urkpssTest(tsTrConf, type = c("tau"), lags = c("long"),
use.lag = NULL, doplot = TRUE)
# From the results it is clear that "Value of test-statistic" > "Critical value for a significance level of 5%"
# Thus the null hypothesis of stationarity is rejected.
# First-difference the series to remove the trend, then inspect the ACF.
tsstationary<-diff(tsTrConf, differences=1)
plot(tsstationary)
acf(tsTrConf,lag.max=34)
######
## ---- Feature selection with Boruta ----
train_Con_dt<-select(train_Conf_USA,TargetValue,Date,days,Wday)
train_Fat_dt<-select(train_Fat_USA,TargetValue,Date,days,Wday)
# Rank features by importance for predicting TargetValue.
set.seed(2020)
Br_Con <- Boruta(TargetValue ~., data=train_Con_dt, doTrace=2, maxRuns=50)
print(Br_Con)
plot(Br_Con,las=2,cex.axis=0.7) # las: to make lables vertical, cex.axis for scaling view
# Only Date and days come out as important for confirmed cases.
# Repeat for fatality cases.
set.seed(2020)
Br_Fat <- Boruta(TargetValue ~., data=train_Fat_dt, doTrace=2, maxRuns=50)
print(Br_Fat)
plot(Br_Fat,las=2,cex.axis=0.7) # las: to make lables vertical, cex.axis for scaling view
#We get approximately the same results on Fatality cases
#########################################################
#############################
## ---- Baseline: auto-ARIMA on the US series ----
Location <- "US"
fit <- auto.arima(select(data.frame(train_Con_dt) ,TargetValue))
acc<- forecast::accuracy(fit)
acc
training_Con_RMSE<-acc[1,2]
Forc <- forecast(fit,h=nrow(test_Conf_USA))
plot(Forc)
# Score the forecast against the held-out window (RMSE and R2).
ForcMean<- as.numeric(Forc$mean)
Conf_RMSE<-rmse(ForcMean,test_Conf_USA$TargetValue)
Conf_R2 <-R2_Score(ForcMean,test_Conf_USA$TargetValue)
fit <- auto.arima(select(data.frame(train_Fat_dt) ,TargetValue))
Forc <- forecast(fit,h=nrow(test_Fat_USA))
par(mar=c(2,2,2,2))
plot(Forc)
# calculate RMSE
ForcMean<- as.numeric(Forc$mean)
Fat_RMSE<-rmse(ForcMean,test_Fat_USA$TargetValue) #1800.502
Fat_R2 <-R2_Score(ForcMean,test_Fat_USA$TargetValue)
# Start the running comparison table with the ARIMA row.
rmse_results <- tibble(Method = "Auto-ARIMA", Conf_RMSE = Conf_RMSE, Fat_RMSE=Fat_RMSE,Conf_R2=Conf_R2,Fat_R2=Fat_R2, Location=Location)
rmse_results %>% knitr::kable()
##########################################################
## ---- H2O setup and distributed random forest ----
h2o.init(nthreads = 2, #Number of threads -1 means use all cores on your machine
max_mem_size = "7G") #max mem size is the maximum memory to allocate to H2O
train_Con_h2o <- as.h2o(train_Con_dt)
train_Fat_h2o <- as.h2o(train_Fat_dt)
test_Conf_h2o<-as.h2o(test_Conf_USA)
test_Fat_h2o <- as.h2o(test_Fat_USA)
x<-"days"
y<-"TargetValue"
nfolds<-5
# Fit a 5-fold CV random forest on the confirmed-case series.
rf_model_Con_h2o<- h2o.randomForest(x = x,
                                    y = y,
                                    training_frame = train_Con_h2o,
                                    ntrees = 50,
                                    nfolds = nfolds,
                                    keep_cross_validation_predictions = TRUE,
                                    seed = 1)
Prd <- h2o.predict(rf_model_Con_h2o,newdata = test_Conf_h2o)
perf <- h2o.performance(rf_model_Con_h2o, newdata = test_Conf_h2o)
print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
############# Fiting Fatalities############
rf_model_Fat_h2o<- h2o.randomForest(x = x,
                                    y = y,
                                    training_frame = train_Fat_h2o,
                                    ntrees = 50,
                                    nfolds = nfolds,
                                    keep_cross_validation_predictions = TRUE,
                                    seed = 1)
Prd <- h2o.predict(rf_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(rf_model_Fat_h2o, newdata = test_Fat_h2o)
print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
# Append the RF row to the comparison table.
rmse_results <- rbind(rmse_results,tibble(Method = perf@algorithm, Conf_RMSE = Conf_RMSE,Conf_R2=Conf_R2, Fat_RMSE=Fat_RMSE,Fat_R2=Fat_R2, Location=Location))
rmse_results %>% knitr::kable()
###########################################################
# ---- Gradient Boosting Machine (5-fold CV) for the US series ----
gbm_model_Con_h2o <- h2o.gbm(x = x,
                             y = y,
                             training_frame = train_Con_h2o,
                             ntrees = 10,
                             max_depth = 3,
                             min_rows = 2,
                             learn_rate = 0.2,
                             nfolds = nfolds,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
Prd <- h2o.predict(gbm_model_Con_h2o,newdata = test_Conf_h2o)
perf <- h2o.performance(gbm_model_Con_h2o, newdata = test_Conf_h2o)
print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
###### Fit Fatalities
gbm_model_Fat_h2o <- h2o.gbm(x = x,
                             y = y,
                             training_frame = train_Fat_h2o,
                             ntrees = 10,
                             max_depth = 3,
                             min_rows = 2,
                             learn_rate = 0.2,
                             nfolds = nfolds,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
Prd <- h2o.predict(gbm_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(gbm_model_Fat_h2o, newdata = test_Fat_h2o)
print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
# Append the GBM row to the comparison table.
rmse_results <- rbind(rmse_results,tibble(Method = perf@algorithm, Conf_RMSE = Conf_RMSE,Conf_R2=Conf_R2, Fat_RMSE=Fat_RMSE,Fat_R2=Fat_R2, Location=Location))
rmse_results %>% knitr::kable()
### ---- Generalized linear model (elastic net, alpha = 0.5) ----
glm_model_Con_h2o <- h2o.glm(x = x,
                             y = y,
                             nfolds = nfolds,
                             alpha=0.5,
                             training_frame = train_Con_h2o,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
Prd <- h2o.predict(glm_model_Con_h2o,newdata = test_Conf_h2o)
perf <- h2o.performance(glm_model_Con_h2o, newdata = test_Conf_h2o)
print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
# Fit Fatalities
glm_model_Fat_h2o <- h2o.glm(x = x,
                             y = y,
                             nfolds = nfolds,
                             alpha=0.5,
                             training_frame = train_Fat_h2o,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
Prd <- h2o.predict(glm_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(glm_model_Fat_h2o, newdata = test_Fat_h2o)
print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
# Append the GLM row to the comparison table.
rmse_results <- rbind(rmse_results,tibble(Method = perf@algorithm, Conf_RMSE = Conf_RMSE,Conf_R2=Conf_R2, Fat_RMSE=Fat_RMSE,Fat_R2=Fat_R2, Location=Location))
rmse_results %>% knitr::kable()
######################################################################
# ---- Stacked ensemble over GLM + GBM + RF ----
# Requires the base models to share nfolds/fold assignment and
# keep_cross_validation_predictions = TRUE (set above).
ensemble_model_Con_h2o <- h2o.stackedEnsemble(x = x,
                                              y = y,
                                              training_frame = train_Con_h2o,
                                              base_models = list(glm_model_Con_h2o,gbm_model_Con_h2o,rf_model_Con_h2o))
# Eval ensemble performance on a test set
Prd <- h2o.predict(ensemble_model_Con_h2o,newdata = test_Conf_h2o)
perf <- h2o.performance(ensemble_model_Con_h2o, newdata = test_Conf_h2o)
print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
###### Fit fatalities
ensemble_model_Fat_h2o <- h2o.stackedEnsemble(x = x,
                                              y = y,
                                              training_frame = train_Fat_h2o,
                                              base_models = list(glm_model_Fat_h2o,gbm_model_Fat_h2o,rf_model_Fat_h2o))
# Eval ensemble performance on a test set
Prd <- h2o.predict(ensemble_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(ensemble_model_Fat_h2o, newdata = test_Fat_h2o)
print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
# Append the ensemble row to the comparison table.
rmse_results <- rbind(rmse_results,tibble(Method = perf@algorithm, Conf_RMSE = Conf_RMSE,Conf_R2=Conf_R2, Fat_RMSE=Fat_RMSE,Fat_R2=Fat_R2, Location=Location))
rmse_results %>% knitr::kable()
##################################################################
## Modeling different regions with different Models by selecting 3 places from the top and 3 places from the bottom
# Fit one H2O model family ("drf", "gbm", or "glm") on both the confirmed
# and fatality H2O frames and score it on the matching test frames.
# Returns list(Conf_RMSE, Fat_RMSE, Conf_R2, Fat_R2) taken from
# h2o.performance() on the test frames.
# NOTE(review): an unrecognized `Model` string falls through every branch
# and the final list() errors on undefined metrics.
fit_Model <- function(Model,train_Con_h2o,test_Con_h2o,train_Fat_h2o, test_Fat_h2o,x="Date",y="TargetValue" )
{
nfolds<-5
# Dispatch on the model name; each branch trains with 5-fold CV.
if(Model=="drf"){
# Distributed random forest
rf_model_Con_h2o<- h2o.randomForest(x = x,
                                    y = y,
                                    training_frame = train_Con_h2o,
                                    ntrees = 50,
                                    nfolds = nfolds,
                                    keep_cross_validation_predictions = TRUE,
                                    seed = 1)
#Prd_Con <- h2o.predict(rf_model_Con_h2o,newdata = test_Con_h2o)
perf <- h2o.performance(rf_model_Con_h2o, newdata = test_Con_h2o)
#print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
############# Fiting Fatalities############
rf_model_Fat_h2o<- h2o.randomForest(x = x,
                                    y = y,
                                    training_frame = train_Fat_h2o,
                                    ntrees = 50,
                                    nfolds = nfolds,
                                    keep_cross_validation_predictions = TRUE,
                                    seed = 1)
#Prd_Fat <- h2o.predict(rf_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(rf_model_Fat_h2o, newdata = test_Fat_h2o)
#print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
}else if(Model=="gbm"){
# Gradient boosting machine
gbm_model_Con_h2o <- h2o.gbm(x = x,
                             y = y,
                             training_frame = train_Con_h2o,
                             ntrees = 10,
                             max_depth = 3,
                             min_rows = 2,
                             learn_rate = 0.2,
                             nfolds = nfolds,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
#Prd_Con <- h2o.predict(gbm_model_Con_h2o,newdata = test_Con_h2o)
perf <- h2o.performance(gbm_model_Con_h2o, newdata = test_Con_h2o)
#print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
###### Fit Fatalities
gbm_model_Fat_h2o <- h2o.gbm(x = x,
                             y = y,
                             training_frame = train_Fat_h2o,
                             ntrees = 10,
                             max_depth = 3,
                             min_rows = 2,
                             learn_rate = 0.2,
                             nfolds = nfolds,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
#Prd_Fat <- h2o.predict(gbm_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(gbm_model_Fat_h2o, newdata = test_Fat_h2o)
#print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
}else if(Model=="glm"){
### Generalized linear regression (elastic net, alpha = 0.5)
glm_model_Con_h2o <- h2o.glm(x = x,
                             y = y,
                             nfolds = nfolds,
                             alpha=0.5,
                             training_frame = train_Con_h2o,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
#Prd_Con <- h2o.predict(glm_model_Con_h2o,newdata = test_Con_h2o)
perf <- h2o.performance(glm_model_Con_h2o, newdata = test_Con_h2o)
#print(perf)
Conf_RMSE <- perf@metrics[["RMSE"]]
Conf_R2<-perf@metrics[["r2"]]
# Fit Fatalities
glm_model_Fat_h2o <- h2o.glm(x = x,
                             y = y,
                             nfolds = nfolds,
                             alpha=0.5,
                             training_frame = train_Fat_h2o,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
#Prd_Fat <- h2o.predict(glm_model_Fat_h2o,newdata = test_Fat_h2o)
perf <- h2o.performance(glm_model_Fat_h2o, newdata = test_Fat_h2o)
#print(perf)
Fat_RMSE <- perf@metrics[["RMSE"]]
Fat_R2<-perf@metrics[["r2"]]
}
#as.h2o(data.frame(Predicted_Confirmed=as.vector(Prd_Con),Predicted_Fatilities=as.vector(Prd_Fat))),
list(Conf_RMSE=Conf_RMSE,Fat_RMSE=Fat_RMSE,Conf_R2=Conf_R2, Fat_R2=Fat_R2)
}
#' Evaluate every model in `Md` on every place in `pl`.
#'
#' For each place the four H2O frames are subset once, then each model name
#' is passed to fit_Model().  Returns a nested list indexed as
#' result[[place]][[model]] with the metrics produced by fit_Model().
Apply_Models <- function(Md = Mdls, tr_Con = tr_C_h2o, ts_Con = ts_C_h2o,
                         tr_Fat = tr_F_h2o, ts_Fat = ts_F_h2o,
                         pl = places, x = "Date", y = "TargetValue") {
  out <- vector("list", length(pl))
  for (j in seq_along(pl)) {
    key <- pl[j]
    con_tr <- tr_Con[tr_Con[, "CPR"] == key, ]
    con_ts <- ts_Con[ts_Con[, "CPR"] == key, ]
    fat_tr <- tr_Fat[tr_Fat[, "CPR"] == key, ]
    fat_ts <- ts_Fat[ts_Fat[, "CPR"] == key, ]
    out[[j]] <- lapply(Md, function(mdl) {
      fit_Model(mdl, con_tr, con_ts, fat_tr, fat_ts,
                x = "Date", y = "TargetValue")
    })
  }
  out
}
# ---- Model comparison setup ----
# Compare candidate models across a sample of locations.
#Lets compare models on different locations
Mdls<- c("gbm","drf","glm") #, ,
# Draw a 10% sample of locations, stratified so that 70% come from the US.
SmpSize<-0.1*(length(unique(train_Kag_clean$CPR)))
set.seed(2020)
Sample_US <- sample(train_Conf$CPR[str_detect(train_Conf$CPR,"_US\\b")],round(SmpSize*0.7))
set.seed(2020)
Sample_NotUS<- sample(train_Conf$CPR[!str_detect(train_Conf$CPR,"_US\\b")],round(SmpSize*0.3))
set.seed(2020)
Sample_CPR <-sample(c(as.character(Sample_US) ,as.character(Sample_NotUS)))
Sample_CPR %>% head() %>% knitr::kable()
places <-Sample_CPR
# Convert the splits to H2O frames so the work runs on the H2O cluster.
tr_C_h2o<-as.h2o(select(train_Conf,CPR,Date, TargetValue))
ts_C_h2o<-as.h2o(select(test_Conf,CPR,Date, TargetValue))
tr_F_h2o<-as.h2o(select(train_Fat,CPR,Date, TargetValue))
ts_F_h2o<-as.h2o(select(test_Fat,CPR,Date, TargetValue))
x<-"Date"
y<-"TargetValue"
#### this will take about an hour on modest pc
tic("Total time")
#results <- Apply_Models() # I had already save the results
load("Var/ML_Res2020-05-29_19-35.rda")# if you want to generate your own results please comment this line
toc()
####### Show results
#' Flatten the nested model-comparison results into one tibble.
#'
#' @param M Character vector of model names (columns of the inner lists).
#' @param p Character vector of place identifiers (CPR keys).
#' @param r Nested list: r[[place]][[model]] holds the metrics produced by
#'   fit_Model() (Conf_RMSE, Fat_RMSE, Conf_R2, Fat_R2).
#' @return A tibble with one row per (Model, Place) pair.
ShowResults_tbl <- function(M=Mdls,p=places, r=results){
  # Collect one single-row tibble per (place, model) pair in a preallocated
  # list and bind once at the end.  The previous version grew the tibble with
  # bind_rows() inside the nested loops, which copies the accumulator on
  # every iteration (O(n^2)).
  rows <- vector("list", length(p) * length(M))
  k <- 0L
  for (j in seq_along(p)) {
    for (i in seq_along(M)) {
      k <- k + 1L
      rows[[k]] <- tibble(
        Model     = M[i],
        Place     = p[j],
        Conf_RMSE = r[[j]][[i]][["Conf_RMSE"]],
        Fat_RMSE  = r[[j]][[i]][["Fat_RMSE"]],
        Conf_R2   = r[[j]][[i]][["Conf_R2"]],
        Fat_R2    = r[[j]][[i]][["Fat_R2"]]
      )
    }
  }
  bind_rows(rows)
}
# ---- Inspect comparison results ----
#Res_clean<-ShowResults_tbl() # if you want to generate your own results uncomment this tag
##### Save result to file
#current_DT<-format(Sys.time(), "%Y-%m-%d_%H-%M")
#Resfile<- paste0("Var/ML_Res",current_DT,".rda")
#save(Res_clean,results , file = Resfile)
#########################################################################
########################################################################
Res_clean %>% head() %>% knitr::kable()
Res_clean %>% select(Place,Model,Conf_R2) %>% arrange(Place,desc(Conf_R2))%>% knitr::kable()
Res_clean %>% select(Place,Model,Fat_R2) %>%arrange(Place,desc(Fat_R2))%>% knitr::kable()
# From the results one can infer that there is no model always giving the best R-score.
# Rank models by their average R2 over all sampled places.
Res_clean %>% select(Place,Model,Conf_R2) %>% group_by (Model)%>%
summarise(Conf_R2=sum(Conf_R2)/n())%>% arrange(desc(Conf_R2))%>% knitr::kable()
Res_clean %>% select(Place,Model,Fat_R2) %>% group_by (Model)%>%
summarise(Fat_R2=sum(Fat_R2)/n())%>% arrange(desc(Fat_R2))%>% knitr::kable()
# The Gradient Boosting Machine has the best average R-score for both
# confirmed and fatality cases, so it is used for the final training below.
######################### Final Training
#Now lets train Gradient Boosting Machine on the whole training set
#h2o.init(nthreads = -1,max_mem_size = "7G") #max mem size is the maximum memory to allocate to H2O
#Number of threads -1 means use all cores on your machine
## Prediction Function
# Fit a 5-fold cross-validated H2O GBM for confirmed cases and another for
# fatalities, predict on the corresponding test frames, and return:
#   [[1]] data.frame(Predicted_Confirmed, Predicted_Fatilities)
#   mae_Con / mae_Fat   - training-set mean absolute error of each model
#   RMSE_Con / RMSE_Fat - training-set RMSE of each model
# NOTE(review): the reported MAE/RMSE come from training_metrics, not from
# the test frames - they measure fit, not generalization.
Predic_gbm <- function(train_Con_h2o,test_Con_h2o,train_Fat_h2o, test_Fat_h2o,x="Date",y="TargetValue" )
{
nfolds<-5
# Train & Cross-validate a Gradient Boosting Machine
gbm_model_Con_h2o <- h2o.gbm(x = x,
                             y = y,
                             training_frame = train_Con_h2o,
                             ntrees = 10,
                             max_depth = 3,
                             min_rows = 2,
                             learn_rate = 0.2,
                             nfolds = nfolds,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
Prd_Con <-h2o.predict(gbm_model_Con_h2o,newdata = test_Con_h2o)
#Mean Absolute Error
mae_Con <-gbm_model_Con_h2o@model[["training_metrics"]]@metrics[["mae"]]
RMSE_Con <-gbm_model_Con_h2o@model[["training_metrics"]]@metrics[["RMSE"]]
###### Fit Fatalities
gbm_model_Fat_h2o <- h2o.gbm(x = x,
                             y = y,
                             training_frame = train_Fat_h2o,
                             ntrees = 10,
                             max_depth = 3,
                             min_rows = 2,
                             learn_rate = 0.2,
                             nfolds = nfolds,
                             keep_cross_validation_predictions = TRUE,
                             seed = 1)
Prd_Fat <- h2o.predict(gbm_model_Fat_h2o,newdata = test_Fat_h2o)
##Mean Absolute Error
mae_Fat<-gbm_model_Fat_h2o@model[["training_metrics"]]@metrics[["mae"]]
RMSE_Fat <-gbm_model_Fat_h2o@model[["training_metrics"]]@metrics[["RMSE"]]
list(data.frame(Predicted_Confirmed=as.vector(Prd_Con),Predicted_Fatilities=as.vector(Prd_Fat)),mae_Con=mae_Con,mae_Fat=mae_Fat, RMSE_Con=RMSE_Con,RMSE_Fat=RMSE_Fat)
}
#' Run the GBM predictor for every place in `pl`.
#'
#' Subsets the four H2O frames (confirmed/fatality x train/test) to a single
#' CPR key and delegates to Predic_gbm().  Returns a list with one
#' Predic_gbm() result per place, in the order of `pl`.
Apply_CPR <- function(tr_Con = tr_C_h2o, ts_Con = ts_C_h2o,
                      tr_Fat = tr_F_h2o, ts_Fat = ts_F_h2o,
                      pl = places, x = "Date", y = "TargetValue") {
  out <- vector("list", length(pl))
  for (k in seq_along(pl)) {
    key <- pl[k]
    con_train <- tr_Con[tr_Con[, "CPR"] == key, ]
    con_test  <- ts_Con[ts_Con[, "CPR"] == key, ]
    fat_train <- tr_Fat[tr_Fat[, "CPR"] == key, ]
    fat_test  <- ts_Fat[ts_Fat[, "CPR"] == key, ]
    out[[k]] <- Predic_gbm(con_train, con_test, fat_train, fat_test,
                           x = "Date", y = "TargetValue")
  }
  out
}
# Split the cleaned Kaggle data into confirmed-case and fatality subsets
# for both the training and the test period.
train_Kag_Con<- train_Kag_clean %>% dplyr::filter(Target=="ConfirmedCases")
test_Kag_Con<- test_Kag_clean %>% dplyr::filter(Target=="ConfirmedCases")
train_Kag_Fat <-train_Kag_clean %>% dplyr::filter(Target=="Fatalities")
test_Kag_Fat <-test_Kag_clean %>% dplyr::filter(Target=="Fatalities")
# Upload only the modelling columns (place key, date, target) to the H2O
# cluster; the test frames have no target column.
tr_C_h2o<-as.h2o(select(train_Kag_Con,CPR,Date, TargetValue))
ts_C_h2o<-as.h2o(select(test_Kag_Con,CPR,Date))
tr_F_h2o<-as.h2o(select(train_Kag_Fat,CPR,Date, TargetValue))
ts_F_h2o<-as.h2o(select(test_Kag_Fat,CPR,Date))
# Predictor / response column names used by the GBM models.
x<-"Date"
y<-"TargetValue"
# One model pair is fitted per unique place (CPR) appearing in the test set.
All_CPR<- as.vector(unique(test_Kag_clean$CPR))
places<-All_CPR
#### this will take about an hour on modest pc
tic("Total time")
#gmb_Res <- Apply_CPR() # results were already saved to disk in a prior run
load("Var/gmb_Res2020-05-30_19-17.rda")# if you want to generate your own results please comment this line
toc() #Total time: 5543.53 sec elapsed
##### Save result to file
#current_DT<-format(Sys.time(), "%Y-%m-%d_%H-%M")
#Resfile<- paste0("Var/gmb_Res",current_DT,".rda")
#save(gmb_Res , file = Resfile)
#### Process Prediction Results
# Flatten the per-place model output (gmb_Res, one list element per entry
# of All_CPR) into a single tibble with one row per forecast day per place.
# Built as a list of tibbles bound once at the end, instead of growing the
# tibble with bind_rows() inside the loop (which copies the accumulator on
# every iteration, O(n^2)).
Predlength <- length(gmb_Res[[1]][[1]][["Predicted_Confirmed"]])
predicted_results <- bind_rows(lapply(seq_along(gmb_Res), function(i) {
  tibble(ForecastId     = i,
         CPR            = rep(All_CPR[i], Predlength),
         ConfirmedCases = gmb_Res[[i]][[1]][["Predicted_Confirmed"]],
         Fatalities     = gmb_Res[[i]][[1]][["Predicted_Fatilities"]],
         mae_Con        = rep(gmb_Res[[i]][["mae_Con"]], Predlength),
         mae_Fat        = rep(gmb_Res[[i]][["mae_Fat"]], Predlength))
}))
# Reshape to long format (one row per target type per day), renumber
# ForecastId sequentially, and attach the forecast Date from the test set.
predicted <- predicted_results %>%
  pivot_longer(c("ConfirmedCases","Fatalities"), names_to = "Target", values_to = "TargetValue") %>%
  arrange(ForecastId) %>%
  mutate(ForecastId = seq_len(nrow(.))) %>%  # seq_len() instead of seq(1:n) idiom
  left_join(select(test_Kag, Date, ForecastId), by = "ForecastId")
predicted <- as.data.frame(predicted)
# Adjust columns to the submission structure "ForecastId","q_0.05","q_0.5","q_0.95".
# The model prediction is used as the median (q_0.5); the 5% / 95% quantiles
# are taken from a normal distribution centred on the prediction with the
# model's training MAE as standard deviation.
Predicted_C <- predicted %>% dplyr::filter(Target == "ConfirmedCases") %>%
  select(ForecastId, mae_Con, TargetValue, Date) %>%
  mutate(q_0.05 = qnorm(0.05, TargetValue, mae_Con),
         q_0.5  = TargetValue,
         q_0.95 = qnorm(0.95, TargetValue, mae_Con)) %>%
  select(-mae_Con, -TargetValue)
Predicted_F <- predicted %>% dplyr::filter(Target == "Fatalities") %>%
  select(ForecastId, mae_Fat, TargetValue, Date) %>%
  mutate(q_0.05 = qnorm(0.05, TargetValue, mae_Fat),
         q_0.5  = TargetValue,
         q_0.95 = qnorm(0.95, TargetValue, mae_Fat)) %>%
  select(-mae_Fat, -TargetValue)
#############################################
# Evaluate using the pinball (quantile) loss.
# pinball(): mean pinball loss of `predicted_array` against `true_array`
# at quantile level `tau`, scaled by `weight`.
# NOTE(review): the loss is averaged before being multiplied by `weight`
# and averaged again, i.e. mean(loss) * mean(weight) -- confirm this is
# the intended weighting (vs. the usual mean(weight * loss)).
pinball <- function(true_array, predicted_array, tau, weight){
  # Negative predictions are clipped to zero before scoring.
  preds <- pmax(predicted_array, 0)
  gaps  <- abs(true_array - preds)
  over  <- preds > true_array  # over-prediction is penalised by (1 - tau)
  loss  <- gaps * ((1 - tau) * over + tau * (!over))
  mean(mean(loss) * weight)
}
# Avg_loss(): average of the pinball losses at the 5%, 50% and 95%
# quantile levels -- the competition's three-quantile evaluation metric.
Avg_loss <- function(true_array, mean_array, min_array, max_array, weights){
  taus  <- c(0.95, 0.05, 0.5)
  preds <- list(max_array, min_array, mean_array)
  total <- 0
  for (k in seq_along(taus)) {
    total <- total + pinball(true_array, preds[[k]], taus[k], weights)
  }
  total / 3
}
# Dates intersection between training and testing: the overlap window is
# where predictions can be scored against known values.
minDate<-min(test_Kag$Date)
maxDate<-max(train_Kag$Date)
# Actual Values from the original Train Kaggle Dataset over the overlap window.
TargetVal_C_trained <- train_Kag %>% dplyr::filter(Target=="ConfirmedCases" & Date>= minDate)%>% .$TargetValue
TargetVal_F_trained <- train_Kag %>% dplyr::filter (Target=="Fatalities" & Date>= minDate) %>% .$TargetValue
# Per-row competition weights for the same window.
weights_C <-train_Kag %>% dplyr::filter(Target=="ConfirmedCases" & Date>= minDate)%>% .$Weight
weights_F <-train_Kag %>% dplyr::filter(Target=="Fatalities" & Date>= minDate)%>% .$Weight
# prepare masks for intersection period between training and testing
Prd_Dt_Msk_C<- Predicted_C[,"Date"]<= maxDate & Predicted_C[,"Date"]>= minDate
Prd_Dt_Msk_F<- Predicted_F[,"Date"]<= maxDate & Predicted_F[,"Date"]>= minDate
# Pinball results for Confirmed Cases (median, 5% and 95% quantiles scored
# against the actual values within the overlap window).
Pinball_Results_Con <- Avg_loss(TargetVal_C_trained,
                                Predicted_C[Prd_Dt_Msk_C,]$q_0.5,
                                Predicted_C[Prd_Dt_Msk_C,]$q_0.05,
                                Predicted_C[Prd_Dt_Msk_C,]$q_0.95,
                                weights_C)
print(paste("Pinball Result for Confirmed Cases:",Pinball_Results_Con))
# Pinball Results for Fatalities
Pinball_Results_Fat <- Avg_loss(TargetVal_F_trained,
                                Predicted_F[Prd_Dt_Msk_F,]$q_0.5,
                                Predicted_F[Prd_Dt_Msk_F,]$q_0.05,
                                Predicted_F[Prd_Dt_Msk_F,]$q_0.95,
                                weights_F)
print(paste("Pinball Result for Fatality Cases:",Pinball_Results_Fat))
######################### Plot Prediction Results
# Italy Results: forecast ids/dates and the actual observed values for
# Italy within the overlap period, used to compare prediction vs. truth.
Italy_C_IdDate<-test_Kag %>% dplyr::filter(Country_Region=="Italy" & Target=="ConfirmedCases" & Date <=maxDate) %>% select(ForecastId,Date)
Italy_F_IdDate<-test_Kag %>% dplyr::filter(Country_Region=="Italy" & Target=="Fatalities" & Date <=maxDate) %>% select(ForecastId,Date)
Italy_C_test<-train_Kag %>% dplyr::filter(Country_Region=="Italy" & Target=="ConfirmedCases" & Date >=minDate) %>% select(TargetValue,Date)
Italy_F_test<-train_Kag %>% dplyr::filter(Country_Region=="Italy" & Target=="Fatalities"& Date >=minDate) %>% select(TargetValue,Date)
# Attach the forecast dates to the Italian predictions via ForecastId.
Prd_It_C<-Predicted_C%>% select(-Date)%>%inner_join(Italy_C_IdDate, by="ForecastId")
Prd_It_F<-Predicted_F%>% select(-Date)%>%inner_join(Italy_F_IdDate, by="ForecastId")
# Predicted median (blue) vs. actual (red) confirmed cases for Italy.
ggplot(aes(x=Date), data=Prd_It_C) +
geom_line(aes(y=q_0.5), col="blue",data=Prd_It_C) +
geom_line(aes(y=TargetValue), data =Italy_C_test, col="red" ) +
labs(colour="TargetValue",
x="Date",
y="Confirmed Cases") +
scale_color_manual(name="", values = c("red","blue")) +
scale_fill_manual(name="", values=c("red","blue"))+
ggtitle("Actual vs Prediction For Italy confirmed cases")
# Same comparison for Italian fatality counts.
ggplot(aes(x=Date), data=Prd_It_F) +
geom_line(aes(y=q_0.5), col="blue",data=Prd_It_F) +
geom_line(aes(y=TargetValue), data =Italy_F_test, col="red" ) +
labs(colour="TargetValue",
x="Date",
y="Fatality Cases") +
scale_color_manual(name="", values = c("red","blue")) +
scale_fill_manual(name="", values=c("red","blue"))+
ggtitle("Actual vs Prediction For Italy Fatality cases")
#################### Set training Target Values to their original known values
# For forecast dates that overlap the training period, overwrite all three
# quantiles with the actual observed values: there is no uncertainty where
# the truth is already known.
Predicted_C[Prd_Dt_Msk_C,]$q_0.5 <-TargetVal_C_trained
Predicted_C[Prd_Dt_Msk_C,]$q_0.05 <-TargetVal_C_trained
Predicted_C[Prd_Dt_Msk_C,]$q_0.95 <-TargetVal_C_trained
Predicted_F[Prd_Dt_Msk_F,]$q_0.5 <-TargetVal_F_trained
Predicted_F[Prd_Dt_Msk_F,]$q_0.05 <-TargetVal_F_trained
Predicted_F[Prd_Dt_Msk_F,]$q_0.95 <-TargetVal_F_trained
#########################################################
## Submit Results
# Submission function
# Submit(): combine the confirmed-case and fatality quantile tables, align
# them with the sample-submission row order (sub_Kag, a global read inside
# this function), and write a timestamped Kaggle submission CSV to Output/.
Submit <- function(test_c,test_f){
#test_c <-Predicted_C
#test_f <- Predicted_F
# expected test_x to have "ForecastId", "q_0.05","q_0.5","q_0.95" columns
# Stack both targets and rename the quantile columns to the submission labels.
test_full <- rbind.data.frame(test_c,test_f) %>% select(ForecastId,q_0.05,q_0.5,q_0.95) %>%
"colnames<-"(c("Id", "0.05","0.5","0.95")) %>% arrange(Id)
# prepare and join submission dataset: split the "id_quantile" keys, join
# the predictions by Id, then rebuild the long "ForecastId_Quantile" /
# "TargetValue" format expected by the competition.
sub_temp <- sub_Kag %>% separate(ForecastId_Quantile,c("Id","q"),"_") %>%
pivot_wider(names_from=q,values_from=TargetValue ) %>% select(Id) %>% mutate(Id=as.numeric(Id)) %>%
left_join(test_full, by="Id") %>%
pivot_longer("0.05":"0.95",names_to = "Quantile",values_to = "TargetValue") %>%
mutate(Id=as.character(Id)) %>%unite("_", Id:Quantile) %>% rename("ForecastId_Quantile"="_")
# Timestamped output file name, e.g. Output/submission2020-05-30_19-17.csv
current_DT<-format(Sys.time(), "%Y-%m-%d_%H-%M")
Subfile<- paste0("Output/submission",current_DT,".csv")
write.csv(sub_temp,Subfile,row.names = FALSE)
}
####################################################
# Build and write the submission file (the Date helper column is dropped first).
Submit(select(Predicted_C,-Date),select(Predicted_F,-Date))
#################################################
# shutdown H2o Server
h2o.shutdown(prompt = TRUE)
|
#----------------------------------------------------------------
# Environment Set-up
#----------------------------------------------------------------
# NOTE(review): rm(list=ls()) and setwd() reset the interactive session;
# acceptable for a one-off Kaggle script, avoid in reusable code.
rm(list=ls(all=TRUE))
gc()
options(scipen=999)
library(xgboost)
setwd("/home/rstudio/Dropbox/Public/Springleaf")
# Version tags used for the seed and the output file name.
subversion <- 1
version <- 7
#----------------------------------------------------------------
# Data
#----------------------------------------------------------------
# Loads the `train` and `test` data frames from disk.
load("Kaggle_RawData.RData")
y <- train$target
# Drop the ID column (1) and the target column (1934) from train; ID from test.
train <- train[, -c(1, 1934)]
test <- test[, -1]
# Character variables
train_char <- train[, sapply(train, is.character)]
# Columns whose values contain month tokens are treated as date columns --
# grep() coerces each column to one long string and matches on it.
# Assumes all dates fall in Jan/Feb/Mar of 201x years -- TODO confirm.
train_date <- train_char[, grep("JAN1|FEB1|MAR1", train_char), ]
train_char <- train_char[, !colnames(train_char) %in% colnames(train_date)]
# Parse the date strings and keep only the year-month ("YYYYMM") as a feature.
train_date <- sapply(train_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
train_date <- do.call(cbind.data.frame, train_date)
train_date <- sapply(train_date, function(x) format(x, "%Y%m"))
train_date <- data.frame(train_date, stringsAsFactors=FALSE)
# Write the cleaned character/date columns back into train.
train[, names(train_char)] <- train_char
train[, names(train_date)] <- train_date
# Same treatment for the test set.
test_char <- test[, sapply(test, is.character)]
test_date <- test_char[, grep("JAN1|FEB1|MAR1", test_char), ]
test_char <- test_char[, !colnames(test_char) %in% colnames(test_date)]
test_date <- sapply(test_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
test_date <- do.call(cbind.data.frame, test_date)
test_date <- sapply(test_date, function(x) format(x, "%Y%m"))
test_date <- data.frame(test_date, stringsAsFactors=FALSE)
test[, names(test_char)] <- test_char
test[, names(test_date)] <- test_date
# Label-encode every remaining character column; factor levels are built
# over train+test jointly so both sets share the same integer coding.
for(i in 1:ncol(train)) {
if(class(train[, i]) == "character") {
tmp <- as.numeric(as.factor(c(train[, i], test[, i])))
train[, i] <- head(tmp, nrow(train))
test[, i] <- tail(tmp, nrow(test))
}
}
# Hold out 15,000 rows (~10%) as a validation watchlist for xgboost.
set.seed(1948 ^ subversion)
hold <- sample(1:nrow(train), 15000) #10% training data for stopping
xgtrain <- xgb.DMatrix(as.matrix(train[-hold, ]), label = y[-hold], missing = NA)
xgval <- xgb.DMatrix(as.matrix(train[hold, ]), label = y[hold], missing = NA)
gc()
#----------------------------------------------------------------
# Model
#----------------------------------------------------------------
# GBDT hyper-parameters: binary classification scored by AUC.
param0 <- list(
"objective"  = "binary:logistic"
, "eval_metric" = "auc"
, "eta" = 0.01
, "subsample" = 0.7
, "colsample_bytree" = 0.5
, "min_child_weight" = 6
, "max_depth" = 9
, "alpha" = 4
)
watchlist <- list('val' = xgval)
# Train for a fixed 5000 rounds (early stopping deliberately disabled).
model = xgb.train(
nrounds = 5000 # increase for more results at home
, params = param0
, data = xgtrain
# , early.stop.round = 5
, watchlist = watchlist
, print.every.n = 5
)
#----------------------------------------------------------------
# Scoring
#----------------------------------------------------------------
xgtest <- xgb.DMatrix(as.matrix(test), missing = NA)
# Predict with only the first 4430 trees -- presumably the best iteration
# observed in an earlier early-stopping run; TODO confirm.
preds_out <- predict(model, xgtest, ntreelimit = 4430)
sub <- read.csv("sample_submission.csv")
sub$target <- preds_out
write.csv(sub, paste0("test_submission_", version, ".csv"), row.names=FALSE)
|
/Benchmark Scripts/WO Seed/test_submission_7.R
|
no_license
|
vikasnitk85/SpringleafMarketingesponse
|
R
| false
| false
| 3,033
|
r
|
#----------------------------------------------------------------
# Environment Set-up
#----------------------------------------------------------------
# NOTE(review): rm(list=ls()) and setwd() reset the interactive session;
# acceptable for a one-off Kaggle script, avoid in reusable code.
rm(list=ls(all=TRUE))
gc()
options(scipen=999)
library(xgboost)
setwd("/home/rstudio/Dropbox/Public/Springleaf")
# Version tags used for the seed and the output file name.
subversion <- 1
version <- 7
#----------------------------------------------------------------
# Data
#----------------------------------------------------------------
# Loads the `train` and `test` data frames from disk.
load("Kaggle_RawData.RData")
y <- train$target
# Drop the ID column (1) and the target column (1934) from train; ID from test.
train <- train[, -c(1, 1934)]
test <- test[, -1]
# Character variables
train_char <- train[, sapply(train, is.character)]
# Columns whose values contain month tokens are treated as date columns --
# grep() coerces each column to one long string and matches on it.
# Assumes all dates fall in Jan/Feb/Mar of 201x years -- TODO confirm.
train_date <- train_char[, grep("JAN1|FEB1|MAR1", train_char), ]
train_char <- train_char[, !colnames(train_char) %in% colnames(train_date)]
# Parse the date strings and keep only the year-month ("YYYYMM") as a feature.
train_date <- sapply(train_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
train_date <- do.call(cbind.data.frame, train_date)
train_date <- sapply(train_date, function(x) format(x, "%Y%m"))
train_date <- data.frame(train_date, stringsAsFactors=FALSE)
# Write the cleaned character/date columns back into train.
train[, names(train_char)] <- train_char
train[, names(train_date)] <- train_date
# Same treatment for the test set.
test_char <- test[, sapply(test, is.character)]
test_date <- test_char[, grep("JAN1|FEB1|MAR1", test_char), ]
test_char <- test_char[, !colnames(test_char) %in% colnames(test_date)]
test_date <- sapply(test_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
test_date <- do.call(cbind.data.frame, test_date)
test_date <- sapply(test_date, function(x) format(x, "%Y%m"))
test_date <- data.frame(test_date, stringsAsFactors=FALSE)
test[, names(test_char)] <- test_char
test[, names(test_date)] <- test_date
# Label-encode every remaining character column; factor levels are built
# over train+test jointly so both sets share the same integer coding.
for(i in 1:ncol(train)) {
if(class(train[, i]) == "character") {
tmp <- as.numeric(as.factor(c(train[, i], test[, i])))
train[, i] <- head(tmp, nrow(train))
test[, i] <- tail(tmp, nrow(test))
}
}
# Hold out 15,000 rows (~10%) as a validation watchlist for xgboost.
set.seed(1948 ^ subversion)
hold <- sample(1:nrow(train), 15000) #10% training data for stopping
xgtrain <- xgb.DMatrix(as.matrix(train[-hold, ]), label = y[-hold], missing = NA)
xgval <- xgb.DMatrix(as.matrix(train[hold, ]), label = y[hold], missing = NA)
gc()
#----------------------------------------------------------------
# Model
#----------------------------------------------------------------
# GBDT hyper-parameters: binary classification scored by AUC.
param0 <- list(
"objective"  = "binary:logistic"
, "eval_metric" = "auc"
, "eta" = 0.01
, "subsample" = 0.7
, "colsample_bytree" = 0.5
, "min_child_weight" = 6
, "max_depth" = 9
, "alpha" = 4
)
watchlist <- list('val' = xgval)
# Train for a fixed 5000 rounds (early stopping deliberately disabled).
model = xgb.train(
nrounds = 5000 # increase for more results at home
, params = param0
, data = xgtrain
# , early.stop.round = 5
, watchlist = watchlist
, print.every.n = 5
)
#----------------------------------------------------------------
# Scoring
#----------------------------------------------------------------
xgtest <- xgb.DMatrix(as.matrix(test), missing = NA)
# Predict with only the first 4430 trees -- presumably the best iteration
# observed in an earlier early-stopping run; TODO confirm.
preds_out <- predict(model, xgtest, ntreelimit = 4430)
sub <- read.csv("sample_submission.csv")
sub$target <- preds_out
write.csv(sub, paste0("test_submission_", version, ".csv"), row.names=FALSE)
|
\name{getSens}
\alias{getSens}
\title{Estimate $Q_{10}$ value and time varying $R_b$ from temperature and efflux time series including uncertainty.}
\description{Function to determine the temperature sensitivity ($Q_{10}$ value) and time varying
basal efflux (R$_b(i)$) from a given temperature and efflux (usually respiration) time series
according to the principle of "SCAle dependent Parameter Estimation, SCAPE" (Mahecha et al. 2010). }
\usage{getSens(temperature, respiration, sf, gettau, fborder = 30, M = -1,
nss = 0, method = "Fourier", weights = NULL, lag = NULL,
gapFilling = TRUE, doPlot = FALSE)}
\arguments{
\item{temperature}{numeric vector: temperature time series}
\item{respiration}{numeric vector: respiration time series}
\item{sf}{numeric: sampling rate, number of measurements (per day)}
\item{gettau}{numeric: function to transform the exponent in the sensitivity model}
\item{fborder}{numeric: boundary for dividing high- and low-frequency parts (in days)}
\item{M}{numeric vector: size of SSA window (in days)}
\item{nss}{numeric vector: number of surrogate samples}
\item{method}{String: method to be applied for signal decomposition (choose from "Fourier","SSA","MA","EMD","Spline")}
\item{weights}{numeric vector: optional vector of weights to be used for linear regression, points can be set to 0 for bad data points}
\item{lag}{numeric vector: optional vector of time lags between respiration and temperature signal}
\item{gapFilling}{Logical: Choose whether Gap-Filling should be applied}
\item{doPlot}{Logical: Choose whether Surrogates should be plotted}
}
\details{General Function to determine the temperature sensitivity ($S$ value) and time varying basal efflux (R$_b$) from a given temperature and efflux (usually respiration) time series.
The following general model is assumed:
Resp(i) = R_b exp(S/tau),
where $i$ is the time index. It has been shown, however, that this model is misleading when $R_b$ is varying over time which can be expected in many real world examples (e.g. Sampson et al. 2008).
If $R_b$ varies slowly, i.e. with some low frequency then the "scale dependent parameter estimation, SCAPE"
allows us to identify this oscillatory pattern. As a consequence, the estimation of $Q_{10}$ can be substantially stabilized (Mahecha et al. 2010). The model becomes
Resp(i) = R_b(i)Q_{10}^((T(i)-Tref)/gamma),
where $R_b(i)$ is the time varying "basal respiration", i.e. the respiration expected at $Tref$. The convenience function getQ10 allows extracting the $Q_{10}$ value minimizing the confounding factor of the time varying $R_b$. Four different spectral methods can be used and compared. A surrogate technique (function by courtesy of Dr. Henning Rust, written in the context of Venema et al. 2006) is applied to propagate the uncertainty due to the decomposition.
The user is strongly encouraged to use the function with caution, i.e. see critique by Graf et al. (2011).}
\value{A list with elements
$SCAPE_Q10 : the estimated Q_{10} with the SCAPE principle and the method chosen.
$Conv_Q10 : the conventional Q_{10} (assuming constant Rb)
$DAT$SCAPE_R_pred : the SCAPE prediction of respiration
$DAT$SCAPE_Rb : the basal respiration based on the SCAPE principle
$DAT$Conv_R_pred : the conventional prediction of respiration
$DAT$Conv_Rb : the conventional (constant) basal respiration}
\author{Fabian Gans, Miguel D. Mahecha, MPI BGC Jena, Germany, fgans@bgc-jena.mpg.de mmahecha@bgc-jena.mpg.de
Fabian Gans, Miguel Mahecha, Max-Planck-Institute for Biogeochemistry, Jena}
|
/man/getSens.Rd
|
no_license
|
zhuj27/RSCAPE
|
R
| false
| false
| 3,640
|
rd
|
\name{getSens}
\alias{getSens}
\title{Estimate $Q_{10}$ value and time varying $R_b$ from temperature and efflux time series including uncertainty.}
\description{Function to determine the temperature sensitivity ($Q_{10}$ value) and time varying
basal efflux (R$_b(i)$) from a given temperature and efflux (usually respiration) time series
according to the principle of "SCAle dependent Parameter Estimation, SCAPE" (Mahecha et al. 2010). }
\usage{getSens(temperature, respiration, sf, gettau, fborder = 30, M = -1,
nss = 0, method = "Fourier", weights = NULL, lag = NULL,
gapFilling = TRUE, doPlot = FALSE)}
\arguments{
\item{temperature}{numeric vector: temperature time series}
\item{respiration}{numeric vector: respiration time series}
\item{sf}{numeric: sampling rate, number of measurements (per day)}
\item{gettau}{numeric: function to transform the exponent in the sensitivity model}
\item{fborder}{numeric: boundary for dividing high- and low-frequency parts (in days)}
\item{M}{numeric vector: size of SSA window (in days)}
\item{nss}{numeric vector: number of surrogate samples}
\item{method}{String: method to be applied for signal decomposition (choose from "Fourier","SSA","MA","EMD","Spline")}
\item{weights}{numeric vector: optional vector of weights to be used for linear regression, points can be set to 0 for bad data points}
\item{lag}{numeric vector: optional vector of time lags between respiration and temperature signal}
\item{gapFilling}{Logical: Choose whether Gap-Filling should be applied}
\item{doPlot}{Logical: Choose whether Surrogates should be plotted}
}
\details{General Function to determine the temperature sensitivity ($S$ value) and time varying basal efflux (R$_b$) from a given temperature and efflux (usually respiration) time series.
The following general model is assumed:
Resp(i) = R_b exp(S/tau),
where $i$ is the time index. It has been shown, however, that this model is misleading when $R_b$ is varying over time which can be expected in many real world examples (e.g. Sampson et al. 2008).
If $R_b$ varies slowly, i.e. with some low frequency then the "scale dependent parameter estimation, SCAPE"
allows us to identify this oscillatory pattern. As a consequence, the estimation of $Q_{10}$ can be substantially stabilized (Mahecha et al. 2010). The model becomes
Resp(i) = R_b(i)Q_{10}^((T(i)-Tref)/gamma),
where $R_b(i)$ is the time varying "basal respiration", i.e. the respiration expected at $Tref$. The convenience function getQ10 allows extracting the $Q_{10}$ value minimizing the confounding factor of the time varying $R_b$. Four different spectral methods can be used and compared. A surrogate technique (function by courtesy of Dr. Henning Rust, written in the context of Venema et al. 2006) is applied to propagate the uncertainty due to the decomposition.
The user is strongly encouraged to use the function with caution, i.e. see critique by Graf et al. (2011).}
\value{A list with elements
$SCAPE_Q10 : the estimated Q_{10} with the SCAPE principle and the method chosen.
$Conv_Q10 : the conventional Q_{10} (assuming constant Rb)
$DAT$SCAPE_R_pred : the SCAPE prediction of respiration
$DAT$SCAPE_Rb : the basal respiration based on the SCAPE principle
$DAT$Conv_R_pred : the conventional prediction of respiration
$DAT$Conv_Rb : the conventional (constant) basal respiration}
\author{Fabian Gans, Miguel D. Mahecha, MPI BGC Jena, Germany, fgans@bgc-jena.mpg.de mmahecha@bgc-jena.mpg.de
Fabian Gans, Miguel Mahecha, Max-Planck-Institute for Biogeochemistry, Jena}
|
# Pairwise-correlation heatmap of the value columns of `dat`.
# `dat`, `value.cols` and `pairscols` come from common.R -- `with=F`
# suggests `dat` is a data.table; TODO confirm.
source("common.R")
library(ggplot2)
library(reshape2)
# Correlation matrix over the value columns, ignoring missing pairs.
# NOTE(review): prefer with=FALSE over the reassignable shorthand F.
cordat <- as.matrix(dat[,value.cols, with=F])
cormat <- cor(cordat, use="pairwise.complete.obs")
# Melt to long form (Var1, Var2, value) for ggplot tiling.
cordt <- melt(cormat)
# Heatmap: green = positive, red = negative; y-axis order reversed so the
# diagonal runs top-left to bottom-right.
plt <- ggplot(cordt) + aes(x=Var2, y=Var1, fill=value) + geom_tile() +
  scale_fill_gradient2(high="green", low="red") +
  scale_y_discrete(limits=rev(pairscols)) +
  theme(axis.text.x = element_text(angle=90))
plot(plt)
|
/colorpairs.R
|
no_license
|
ashiklom/trait-manuscript
|
R
| false
| false
| 403
|
r
|
# Pairwise-correlation heatmap of the value columns of `dat`.
# `dat`, `value.cols` and `pairscols` come from common.R -- `with=F`
# suggests `dat` is a data.table; TODO confirm.
source("common.R")
library(ggplot2)
library(reshape2)
# Correlation matrix over the value columns, ignoring missing pairs.
# NOTE(review): prefer with=FALSE over the reassignable shorthand F.
cordat <- as.matrix(dat[,value.cols, with=F])
cormat <- cor(cordat, use="pairwise.complete.obs")
# Melt to long form (Var1, Var2, value) for ggplot tiling.
cordt <- melt(cormat)
# Heatmap: green = positive, red = negative; y-axis order reversed so the
# diagonal runs top-left to bottom-right.
plt <- ggplot(cordt) + aes(x=Var2, y=Var1, fill=value) + geom_tile() +
  scale_fill_gradient2(high="green", low="red") +
  scale_y_discrete(limits=rev(pairscols)) +
  theme(axis.text.x = element_text(angle=90))
plot(plt)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{beta_lpdf}
\alias{beta_lpdf}
\title{Log probability density function for the beta distribution}
\usage{
beta_lpdf(x, shape1, shape2)
}
\description{
Log probability density function for the beta distribution
}
\details{
Equivalent to \code{sum(dbeta(x,shape1,shape2,log=TRUE))}. For more information see \code{\link{dbeta}}.
}
|
/man/beta_lpdf.Rd
|
permissive
|
jeff324/derp
|
R
| false
| true
| 427
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{beta_lpdf}
\alias{beta_lpdf}
\title{Log probability density function for the beta distribution}
\usage{
beta_lpdf(x, shape1, shape2)
}
\description{
Log probability density function for the beta distribution
}
\details{
Equivalent to \code{sum(dbeta(x,shape1,shape2,log=TRUE))}. For more information see \code{\link{dbeta}}.
}
|
#Script Name: colab_ca
#Author: coLAB
#Author URL: http://www.colab.uff.br
#License: GNU General Public License v2 or later
#License URL: http://www.gnu.org/licenses/gpl-2.0.html
#Reference: Script developed under the supervision of Emerson Cervi
#Description: Plot a correspondence analysis chart
##Brief explanation of correspondence analysis:
##https://www.statmethods.net/advstats/ca.html
#The first graph is the standard symmetric representation
#of a simple correspondence analysis with rows and column represented by points.
#
#https://www.statmethods.net/advstats/images/ca3.jpg
#
#Row points (column points) that are closer together have more similar
#column profiles (row profiles). Keep in mind that you can not interpret
#the distance between row and column points directly.
#Load the required packages
#install.packages(ca)
library(ca)
#Build the cross-tabulation (contingency table) of AREA x CATEGORIA
table(meme_ca_arecat$AREA, meme_ca_arecat$CATEGORIA)
#Or see the script colab_cross_table.R
#Run the correspondence analysis
ca(table(meme_ca_arecat$AREA, meme_ca_arecat$CATEGORIA))
#Plot the correspondence analysis chart
plot(ca(table(meme_ca_arecat$AREA, meme_ca_arecat$CATEGORIA)))
#Customize the chart:
#The main parameter sets the chart title
#The ylab and xlab parameters set the axis labels
#The col parameter sets the colors as RGB vectors
#The col.lab parameter sets the label colors as RGB vectors
plot(ca(table(meme_ca_arecat$AREA, meme_ca_arecat$CATEGORIA)),
     main="Décadas x Tipos",
     ylab="Dimensão 2 (6,3%)", xlab="Dimensão 1 (89,3%)",
     col = c("#433f62","#FF914B"), col.lab = c("#433f62","#FF914B"))
#Zooming into specific areas:
#The xlim and ylim parameters define the cropped region
plot(ca(table(meme_ca_arecat$AREA, meme_ca_arecat$CATEGORIA)),
     main="Décadas x Tipos", xlim = c(0.0,0.5), ylim = c(0.0,1.0),
     ylab="Dimensão 2 (6,3%)", xlab="Dimensão 1 (89,3%)",
     col = c("#433f62","#FF914B"), col.lab = c("#433f62","#FF914B"))
|
/oficina R/5 - Plotando gráficos de análise de correspondência/colab_ca.R
|
no_license
|
tsaiyijing/oficinaR
|
R
| false
| false
| 2,043
|
r
|
#Script Name: colab_ca
#Author: coLAB
#Author URL: http://www.colab.uff.br
#License: GNU General Public License v2 or later
#License URL: http://www.gnu.org/licenses/gpl-2.0.html
#Reference: Script developed under the supervision of Emerson Cervi
#Description: Plot a correspondence analysis chart
##Brief explanation of correspondence analysis:
##https://www.statmethods.net/advstats/ca.html
#The first graph is the standard symmetric representation
#of a simple correspondence analysis with rows and column represented by points.
#
#https://www.statmethods.net/advstats/images/ca3.jpg
#
#Row points (column points) that are closer together have more similar
#column profiles (row profiles). Keep in mind that you can not interpret
#the distance between row and column points directly.
#Load the required packages
#install.packages(ca)
library(ca)
#Build the cross-tabulation (contingency table) of AREA x CATEGORIA
table(meme_ca_arecat$AREA, meme_ca_arecat$CATEGORIA)
#Or see the script colab_cross_table.R
#Run the correspondence analysis
ca(table(meme_ca_arecat$AREA, meme_ca_arecat$CATEGORIA))
#Plot the correspondence analysis chart
plot(ca(table(meme_ca_arecat$AREA, meme_ca_arecat$CATEGORIA)))
#Customize the chart:
#The main parameter sets the chart title
#The ylab and xlab parameters set the axis labels
#The col parameter sets the colors as RGB vectors
#The col.lab parameter sets the label colors as RGB vectors
plot(ca(table(meme_ca_arecat$AREA, meme_ca_arecat$CATEGORIA)),
     main="Décadas x Tipos",
     ylab="Dimensão 2 (6,3%)", xlab="Dimensão 1 (89,3%)",
     col = c("#433f62","#FF914B"), col.lab = c("#433f62","#FF914B"))
#Zooming into specific areas:
#The xlim and ylim parameters define the cropped region
plot(ca(table(meme_ca_arecat$AREA, meme_ca_arecat$CATEGORIA)),
     main="Décadas x Tipos", xlim = c(0.0,0.5), ylim = c(0.0,1.0),
     ylab="Dimensão 2 (6,3%)", xlab="Dimensão 1 (89,3%)",
     col = c("#433f62","#FF914B"), col.lab = c("#433f62","#FF914B"))
|
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_energy_L2391.gas_trade_flows
#'
#' Model input for natural gas trade by LNG and regional pipeline networks.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs, a vector of output names, or (if
#' \code{command} is "MAKE") all the generated outputs: \code{L2391.NG_export_calOutput_LNG},
#' \code{L2391.NG_export_calOutput_pipeline}, \code{L2391.NG_import_calOutput_LNG},
#' \code{L2391.NG_import_calOutput_pipeline}, \code{L2391.NG_import_calOutput_statdiff},
#' \code{L2391.NG_export_calOutput_statdiff}.
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter if_else left_join mutate rename select
#' @importFrom tibble tibble
#' @author MTB August 2021
# gcamdata chunk: build calibrated natural gas trade flows split between LNG
# and regional pipeline networks. Follows the standard gcamdata three-phase
# driver protocol: DECLARE_INPUTS -> DECLARE_OUTPUTS -> MAKE.
module_energy_L2391.gas_trade_flows <- function(command, ...) {
  if(command == driver.DECLARE_INPUTS) {
    return(c(FILE = "common/GCAM_region_names",
             FILE = "energy/GCAM_region_pipeline_bloc_export",
             FILE = "energy/GCAM_region_pipeline_bloc_import",
             "L1011.ff_GrossTrade_EJ_R_Y_LNG",
             "L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
             "L1011.ff_BilatTrade_EJ_R_Y_NG_pipe",
             "L239.Production_tra",
             "L239.Production_reg_imp"))
  } else if(command == driver.DECLARE_OUTPUTS) {
    return(c("L2391.NG_export_calOutput_LNG",
             "L2391.NG_export_calOutput_pipeline",
             "L2391.NG_import_calOutput_LNG",
             "L2391.NG_import_calOutput_pipeline",
             "L2391.NG_import_calOutput_statdiff",
             "L2391.NG_export_calOutput_statdiff"))
  } else if(command == driver.MAKE) {

    all_data <- list(...)[[1]]

    year <- region <- supplysector <- subsector <- GCAM_Commodity <- GrossExp_EJ <-
      calOutputValue <- subs.share.weight <- market.name <- minicam.energy.input <-
      GrossImp_EJ <- Prod_EJ <- fuel <- technology <- primary.consumption <- PrimaryFuelCO2Coef.name <- PrimaryFuelCO2Coef <-
      production <- consumption <- GCAM_region_ID <- NULL # silence package check notes

    # ----------------------------------------
    # Load required inputs
    GCAM_region_names <- get_data(all_data, "common/GCAM_region_names", strip_attributes = TRUE)
    GCAM_region_pipeline_bloc_export <- get_data(all_data, "energy/GCAM_region_pipeline_bloc_export", strip_attributes = TRUE)
    GCAM_region_pipeline_bloc_import <- get_data(all_data, "energy/GCAM_region_pipeline_bloc_import", strip_attributes = TRUE)
    L1011.ff_GrossTrade_EJ_R_Y_LNG <- get_data(all_data, "L1011.ff_GrossTrade_EJ_R_Y_LNG", strip_attributes = TRUE)
    L1011.ff_GrossTrade_EJ_R_Y_NG_pipe <- get_data(all_data, "L1011.ff_GrossTrade_EJ_R_Y_NG_pipe", strip_attributes = TRUE)
    L1011.ff_BilatTrade_EJ_R_Y_NG_pipe <- get_data(all_data, "L1011.ff_BilatTrade_EJ_R_Y_NG_pipe", strip_attributes = TRUE)
    # Restrict the generic fossil-fuel trade calibration tables to natural gas rows
    L239.Production_tra <- get_data(all_data, "L239.Production_tra", strip_attributes = TRUE) %>%
      filter(grepl("gas", supplysector))
    L239.Production_reg_imp <- get_data(all_data, "L239.Production_reg_imp", strip_attributes = TRUE) %>%
      filter(grepl("gas", supplysector))

    # ----------------------------------------
    # Process data

    # There are a lot of pieces to balance here.  We have to maintain balance of:
    # - regional natural gas imports and exports (across carriers)
    # - global LNG imports and exports
    # - pipeline exports and imports, globally and for inter-regional pipeline networks
    # Expanded natural gas trade structure has greatest detail in terms of gas pipeline networks,
    # so first calculate import and export shares by pipeline network.
    # Second, disaggregate / balance imports data (between pipeline and LNG, and between pipeline networks)
    # because it's the more complex piece (regions can import from multiple pipeline networks).
    # Third, share out exports (between pipeline and LNG) in a manner consistent with import data.
    # Finally, re-balance regional exports with a statistical differences sector (explained below)
    # to ensure that everything is balanced out.

    # STEP 1: Pipeline shares
    # Pipeline import shares by pipeline market
    # Regions are permitted to import from multiple pipeline markets
    L1011.ff_BilatTrade_EJ_R_Y_NG_pipe %>%
      group_by(region = destination.region, year, GCAM_Commodity, GCAM_Commodity_traded, pipeline.market) %>%
      summarise(value = sum(value)) %>%
      ungroup() %>%
      group_by(region, year, GCAM_Commodity, GCAM_Commodity_traded) %>%
      mutate(share = value / sum(value)) %>%
      ungroup() %>%
      # COMPTRADE data (L1011) only extends back to 2007
      # create complete set of historical years and carry 2007 shares backwards
      # (approx_fun with rule = 2 extrapolates the nearest value to out-of-range years)
      complete(nesting(region, GCAM_Commodity, GCAM_Commodity_traded, pipeline.market), year = MODEL_BASE_YEARS) %>%
      group_by(region, GCAM_Commodity, GCAM_Commodity_traded, pipeline.market) %>%
      mutate(share = approx_fun(year, share, rule = 2)) %>%
      ungroup() %>%
      select(-value) -> L2391.NG_pipeline_import_shares

    # STEP 2: Import shares
    # Start with share between pipeline and LNG
    # Combine pipeline & LNG imports, calculate shares
    L1011.ff_GrossTrade_EJ_R_Y_LNG %>%
      left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
      bind_rows(L1011.ff_GrossTrade_EJ_R_Y_NG_pipe) %>%
      group_by(region, year, GCAM_Commodity, GCAM_Commodity_traded) %>%
      summarise(GrossImp_EJ = sum(GrossImp_EJ)) %>%
      ungroup() %>%
      group_by(region, year, GCAM_Commodity) %>%
      mutate(share = GrossImp_EJ / sum(GrossImp_EJ)) %>%
      ungroup() %>%
      # COMPTRADE data (L1011) only extends back to 2007
      # create complete set of historical years and carry 2007 shares backwards
      complete(nesting(region, GCAM_Commodity, GCAM_Commodity_traded), year = MODEL_BASE_YEARS) %>%
      group_by(region, GCAM_Commodity, GCAM_Commodity_traded) %>%
      mutate(share = approx_fun(year, share, rule = 2)) %>%
      ungroup() %>%
      select(-GrossImp_EJ) %>%
      filter(year %in% MODEL_BASE_YEARS) -> L2391.NG_import_shares

    # Partition calibrated imports by region between pipeline & LNG
    # Join calibrated gross NG trade and shares, calculate calibrated value by trade vehicle
    L2391.NG_import_shares %>%
      left_join_error_no_match(L239.Production_reg_imp %>%
                                 select(region, year, calOutputValue),
                               by = c("region", "year")) %>%
      mutate(calOutputValue = share * calOutputValue) -> L2391.NG_import_calOutput

    # Regional imports by LNG
    L2391.NG_import_calOutput %>%
      filter(GCAM_Commodity_traded == "LNG") %>%
      select(-share) -> L2391.NG_import_calOutput_LNG

    # Split out regional imports by pipeline network
    # Map in pipeline market shares to get regional pipeline exports by market
    # (Again, the default assumption is that regions are permitted to import from multiple pipeline markets)
    L2391.NG_import_calOutput %>%
      filter(GCAM_Commodity_traded == "gas pipeline") %>%
      select(-share) %>%
      # join in pipeline market info;
      # join will duplicate rows because regions are permitted to import from multiple pipeline markets
      # LJENM will throw error, so left_join is used
      left_join(GCAM_region_pipeline_bloc_import %>%
                  select(region, pipeline.market),
                by = c("region")) %>%
      # regions with no pipeline exports won't have data points in L2391.NG_pipeline_import_shares
      # this creates NAs when these tables are joined; LJENM throws an error, so left_join is used
      left_join(L2391.NG_pipeline_import_shares,
                by = c("region", "GCAM_Commodity", "GCAM_Commodity_traded",
                       "pipeline.market", "year")) %>%
      replace_na(list(share = 0)) %>%
      mutate(calOutputValue = share * calOutputValue) %>%
      select(-share) -> L2391.NG_import_calOutput_pipeline

    # Summarise global LNG imports
    # (column is named global_LNG_exp because imports are used as the target
    # that global LNG exports will be scaled to match in STEP 3)
    L2391.NG_import_calOutput_LNG %>%
      group_by(year, GCAM_Commodity, GCAM_Commodity_traded) %>%
      summarise(global_LNG_exp = sum(calOutputValue)) %>%
      ungroup() -> L2391.NG_import_calOutput_LNG_global

    # Summarise gas pipeline imports globally and by regional pipeline network
    L2391.NG_import_calOutput_pipeline %>%
      group_by(year, GCAM_Commodity, GCAM_Commodity_traded, pipeline.market) %>%
      summarise(regional_pipe_exp = sum(calOutputValue)) %>%
      ungroup() -> L2391.NG_import_calOutput_pipeline_network

    # STEP 3: Export shares
    # Start with share between pipeline and LNG
    # Combine pipeline & LNG exports, calculate shares
    L1011.ff_GrossTrade_EJ_R_Y_LNG %>%
      left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
      bind_rows(L1011.ff_GrossTrade_EJ_R_Y_NG_pipe) %>%
      group_by(region, year, GCAM_Commodity, GCAM_Commodity_traded) %>%
      summarise(GrossExp_EJ = sum(GrossExp_EJ)) %>%
      ungroup() %>%
      group_by(region, year, GCAM_Commodity) %>%
      mutate(share = GrossExp_EJ / sum(GrossExp_EJ)) %>%
      ungroup() %>%
      # COMPTRADE data (L1011) only extends back to 2007
      # create complete set of historical years and carry 2007 shares backwards
      complete(nesting(region, GCAM_Commodity, GCAM_Commodity_traded), year = MODEL_BASE_YEARS) %>%
      group_by(region, GCAM_Commodity, GCAM_Commodity_traded) %>%
      mutate(share = approx_fun(year, share, rule = 2)) %>%
      ungroup() %>%
      select(-GrossExp_EJ) %>%
      filter(year %in% MODEL_BASE_YEARS) -> L2391.NG_export_shares

    # Format traded data - exporting region is actually embedded in technology name
    L239.Production_tra %>%
      select(-region) %>%
      mutate(region = gsub(" traded natural gas", "", technology)) %>%
      select(region, year, calOutputValue) -> L2391.Production_tra

    # Partition calibrated exports by region between pipeline & LNG
    # Join calibrated gross NG trade and shares, calculate calibrated value by trade vehicle
    L2391.NG_export_shares %>%
      left_join_error_no_match(L2391.Production_tra, by = c("region", "year")) %>%
      mutate(calOutputValue = share * calOutputValue) -> L2391.NG_export_unadjusted

    # Join in import data and scale export data so import and export totals match at relevant scales
    # (global for LNG, inter-regional network for pipeline)
    # LNG
    L2391.NG_export_unadjusted %>%
      filter(GCAM_Commodity_traded == "LNG") %>%
      left_join_error_no_match(L2391.NG_import_calOutput_LNG_global,
                               by = c("GCAM_Commodity", "GCAM_Commodity_traded", "year")) %>%
      # summarize global LNG exports by year
      group_by(year, GCAM_Commodity, GCAM_Commodity_traded) %>%
      mutate(global_LNG_imp = sum(calOutputValue)) %>%
      ungroup() %>%
      # scale calOutputValue to ensure that global LNG exports = global LNG imports
      mutate(calOutputValue = calOutputValue * (global_LNG_exp / global_LNG_imp)) %>%
      # test that scaling worked
      group_by(year, GCAM_Commodity, GCAM_Commodity_traded) %>%
      mutate(global_LNG_imp = sum(calOutputValue)) %>%
      ungroup() %>%
      select(region, GCAM_Commodity, GCAM_Commodity_traded, year, calOutputValue) ->
      L2391.NG_export_calOutput_LNG

    # Pipeline
    L2391.NG_export_unadjusted %>%
      filter(GCAM_Commodity_traded == "gas pipeline") %>%
      # join in pipeline market info
      left_join_error_no_match(GCAM_region_pipeline_bloc_export, by = c("region" = "origin.region")) %>%
      left_join_error_no_match(L2391.NG_import_calOutput_pipeline_network,
                               by = c("GCAM_Commodity", "GCAM_Commodity_traded", "year", "pipeline.market")) %>%
      # summarize pipeline exports by year and network
      group_by(year, GCAM_Commodity, GCAM_Commodity_traded, pipeline.market) %>%
      mutate(regional_pipe_imp = sum(calOutputValue)) %>%
      ungroup() %>%
      # scale calOutputValue to ensure that exports = imports for each network
      mutate(calOutputValue = calOutputValue * (regional_pipe_exp / regional_pipe_imp)) %>%
      select(region, GCAM_Commodity, GCAM_Commodity_traded, year, calOutputValue, pipeline.market) ->
      L2391.NG_export_calOutput_pipeline

    # STEP 4: Check trade balances (across scales) and make final adjustments
    # Combine flows with a single market (LNG imports and exports, pipeline exports)
    L2391.NG_export_calOutput_pipeline %>%
      select(region, year, pipeline.market, calExport_pipe = calOutputValue) %>%
      left_join_error_no_match(L2391.NG_export_calOutput_LNG %>%
                                 select(region, year, calExport_LNG = calOutputValue),
                               by = c("region", "year")) %>%
      left_join_error_no_match(L2391.NG_import_calOutput_LNG %>%
                                 select(region, year, calImport_LNG = calOutputValue),
                               by = c("region", "year")) -> L2391.LNG.exp.imp_pipe.exp

    # Add in pipeline imports (which can come from multiple pipeline networks) as well as
    # region total imports and exports which we need to match
    L2391.NG_import_calOutput_pipeline %>%
      select(region, year, pipeline.market, calImport_pipe = calOutputValue) %>%
      # join will produce NAs because we don't want to duplicate LNG and pipeline export values
      # by multiple import pipelines by region. LJENM throws error; left_join is used and
      # NAs dealt with below
      left_join(L2391.LNG.exp.imp_pipe.exp,
                by = c("region", "year", "pipeline.market")) %>%
      replace_na(list(calExport_pipe = 0,
                      calExport_LNG = 0,
                      calImport_LNG = 0)) %>%
      left_join_error_no_match(L2391.Production_tra %>%
                                 select(region, year, reg_NG_exports = calOutputValue),
                               by = c("region", "year")) %>%
      left_join_error_no_match(L239.Production_reg_imp %>%
                                 select(region, year, reg_NG_imports = calOutputValue),
                               by = c("region", "year")) -> L2391.LNG_pipe

    # Check that all flows are balanced. At this point this is true for all flows except for regional exports.
    # We'll calculate all balances here anyway for debugging purposes.  We're checking:
    # reg_NG_imports: sum of LNG and pipeline imports (should match calibrated value from L239 table)
    # reg_NG_exports: sum of LNG and pipeline exports (should match calibrated value from L239 table)
    # pipe_ntwrk: balance of imports and exports for each pipeline network (imports and exports should balance)
    # global_LNG = global balance of LNG imports and exports (imports and exports should balance)
    # global_pipe: global balance of pipeline imports and exports (imports and exports should balance)
    L2391.LNG_pipe %>%
      # total NG imports and exports
      group_by(region, year) %>%
      mutate(reg_NG_imports_check = sum(calImport_pipe) + sum(calImport_LNG),
             reg_NG_imports_diff = round(reg_NG_imports - reg_NG_imports_check, energy.DIGITS_CALOUTPUT),
             reg_NG_exports_check = sum(calExport_pipe) + sum(calExport_LNG),
             reg_NG_exports_diff = round(reg_NG_exports - reg_NG_exports_check, energy.DIGITS_CALOUTPUT)) %>%
      ungroup() %>%
      # regional gas pipelines
      group_by(pipeline.market, year) %>%
      mutate(pipe_ntwrk_imp = sum(calImport_pipe),
             pipe_ntwrk_exp = sum(calExport_pipe),
             pipe_ntwrk_diff = round(pipe_ntwrk_imp - pipe_ntwrk_exp, energy.DIGITS_CALOUTPUT)) %>%
      ungroup() %>%
      # global totals by vehicle
      group_by(year) %>%
      mutate(global_LNG_imp = sum(calImport_LNG),
             global_LNG_exp = sum(calExport_LNG),
             global_LNG_diff = round(global_LNG_imp - global_LNG_exp, energy.DIGITS_CALOUTPUT),
             global_pipe_imp = sum(calImport_pipe),
             global_pipe_exp = sum(calExport_pipe),
             global_pipe_diff = round(global_pipe_imp - global_pipe_exp, energy.DIGITS_CALOUTPUT)) %>%
      ungroup() -> L2391.gas_flow_balances

    # At this point everything is balanced except for regional exports.
    # This is because GCAM's processing of the IEA data largely ignores stock changes
    # (which are balanced globally but not at the regional / pipeline network level).
    # so region X could export quantity Q to region Y, but some portion of Q increases stocks
    # in region Y, rather than being consumed.  So region X production and exports are correct,
    # region Y imports and consumption are correct, but they don't balance because of the stock changes.
    # To correct this, we'll create a "gas trade statistical differences" market where regions which
    # don't export enough (positive reg_NG_exports_diff) supply and regions which export too much
    # (negative reg_NG_exports_diff) demand.  The latter regions will have their LNG exports reduced
    # by the amount of the difference, and the global LNG market will import that amount from the
    # "gas trade statistical differences" market.
    L2391.gas_flow_balances %>%
      distinct(region, year, reg_NG_exports_diff) -> L2391.gas_export_diff

    # Regions that export more than allocated to pipeline & LNG
    L2391.gas_export_diff %>%
      filter(reg_NG_exports_diff > 0) %>%
      complete(nesting(region), year = MODEL_BASE_YEARS) %>%
      replace_na(list(reg_NG_exports_diff = 0)) %>%
      rename(calOutputValue = reg_NG_exports_diff) %>%
      mutate(sector = "gas trade statistical differences",
             subsector = "statistical differences",
             technology = paste0(region, " statistical differences"),
             minicam.energy.input = "natural gas",
             market.name = region,
             region = gcam.USA_REGION) -> L2391.NG_export_calOutput_statdiff

    # Regions that have excess LNG + pipeline exports
    L2391.gas_export_diff %>%
      filter(reg_NG_exports_diff < 0) %>%
      complete(nesting(region), year = MODEL_BASE_YEARS) %>%
      replace_na(list(reg_NG_exports_diff = 0)) %>%
      rename(value = reg_NG_exports_diff) %>%
      mutate(value = abs(value)) -> L2391.NG_import_calOutput_statdiff

    # Divide export difference for each region between LNG & pipelines according to historical shares,
    # and then adjust the corresponding calOutputValues accordingly.  This will keep any region's
    # exports from going negative and keep LNG / pipeline shares relatively constant.
    L2391.NG_import_calOutput_statdiff %>%
      left_join_error_no_match(GCAM_region_pipeline_bloc_export, by = c("region" = "origin.region")) %>%
      # L2391.NG_export_shares contains shares for LNG and pipeline
      # join will duplicate rows by each carrier
      # LJENM will error, left_join() is used
      left_join(L2391.NG_export_shares, by = c("region", "year")) %>%
      mutate(value = value * share) %>%
      select(-share) -> L2391.NG_import_calOutput_adj

    # Adjust regional export values by gas pipeline network
    L2391.NG_export_calOutput_pipeline %>%
      # not all regions are included in L2391.NG_import_calOutput_adj, so join produces NAs.
      # LJENM throws error, so left_join is used and NAs are dealt with below
      left_join(L2391.NG_import_calOutput_adj,
                by = c("region", "GCAM_Commodity", "GCAM_Commodity_traded", "year", "pipeline.market")) %>%
      replace_na(list(value = 0)) %>%
      mutate(calOutputValue = calOutputValue - value) %>%
      select(-value) -> L2391.NG_export_calOutput_pipeline

    # Adjust regional LNG export values
    L2391.NG_export_calOutput_LNG %>%
      # not all regions are included in L2391.NG_import_calOutput_adj, so join produces NAs.
      # LJENM throws error, so left_join is used and NAs are dealt with below
      left_join(L2391.NG_import_calOutput_adj %>%
                  select(-pipeline.market),
                by = c("region", "GCAM_Commodity", "GCAM_Commodity_traded", "year")) %>%
      replace_na(list(value = 0)) %>%
      mutate(calOutputValue = calOutputValue - value) %>%
      select(-value) -> L2391.NG_export_calOutput_LNG

    # Summarize regional pipeline / global LNG consumption from statistical differences sector
    L2391.NG_import_calOutput_adj %>%
      filter(GCAM_Commodity_traded == "gas pipeline") %>%
      group_by(year, pipeline.market, GCAM_Commodity, GCAM_Commodity_traded) %>%
      summarise(value = sum(value)) %>%
      ungroup() %>%
      mutate(technology = "statistical differences",
             minicam.energy.input = "gas trade statistical differences",
             market.name = gcam.USA_REGION) -> L2391.NG_import_calOutput_statdiff_pipe

    L2391.NG_import_calOutput_adj %>%
      filter(GCAM_Commodity_traded == "LNG") %>%
      group_by(year, GCAM_Commodity, GCAM_Commodity_traded) %>%
      summarise(value = sum(value)) %>%
      ungroup() %>%
      mutate(technology = "statistical differences",
             minicam.energy.input = "gas trade statistical differences",
             market.name = gcam.USA_REGION) -> L2391.NG_import_calOutput_statdiff_LNG

    L2391.NG_import_calOutput_statdiff_pipe %>%
      bind_rows(L2391.NG_import_calOutput_statdiff_LNG) %>%
      rename(calOutputValue = value) -> L2391.NG_import_calOutput_statdiff

    # # Checking global totals by year (debugging aid; intentionally left commented out)
    # L2391.NG_export_calOutput_LNG %>%
    #   group_by(year) %>%
    #   summarise(value = sum(calOutputValue)) %>%
    #   ungroup() -> L2391.NG_export_calOutput_LNG_global
    #
    # L2391.NG_export_calOutput_pipeline %>%
    #   group_by(year) %>%
    #   summarise(value = sum(calOutputValue)) %>%
    #   ungroup() -> L2391.NG_export_calOutput_pipeline_global
    #
    # L2391.NG_import_calOutput_LNG %>%
    #   group_by(year) %>%
    #   summarise(value = sum(calOutputValue)) %>%
    #   ungroup() -> L2391.NG_import_calOutput_LNG_global
    #
    # L2391.NG_import_calOutput_pipeline %>%
    #   group_by(year) %>%
    #   summarise(value = sum(calOutputValue)) %>%
    #   ungroup() -> L2391.NG_import_calOutput_pipeline_global
    #
    # L2391.NG_import_calOutput_statdiff %>%
    #   group_by(year) %>%
    #   summarise(value = sum(calOutputValue)) %>%
    #   ungroup() -> L2391.NG_import_calOutput_statdiff_global
    #
    # L2391.NG_export_calOutput_statdiff %>%
    #   group_by(year) %>%
    #   summarise(value = sum(calOutputValue)) %>%
    #   ungroup() -> L2391.NG_export_calOutput_statdiff_global

    # ----------------------------------------
    # Produce outputs
    L2391.NG_export_calOutput_LNG %>%
      add_title("Technology calibration for LNG export") %>%
      add_units("EJ") %>%
      add_comments("Historical regional exports of natural gas via LNG") %>%
      add_precursors("common/GCAM_region_names",
                     "L1011.ff_GrossTrade_EJ_R_Y_LNG",
                     "L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
                     "L239.Production_tra") ->
      L2391.NG_export_calOutput_LNG

    L2391.NG_export_calOutput_pipeline %>%
      add_title("Technology calibration for gas pipeline export") %>%
      add_units("EJ") %>%
      add_comments("Regional exports of natural gas via pipeline") %>%
      add_precursors("common/GCAM_region_names",
                     "energy/GCAM_region_pipeline_bloc_export",
                     "L1011.ff_GrossTrade_EJ_R_Y_LNG",
                     "L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
                     "L1011.ff_BilatTrade_EJ_R_Y_NG_pipe",
                     "L239.Production_tra") ->
      L2391.NG_export_calOutput_pipeline

    L2391.NG_import_calOutput_LNG %>%
      add_title("Technology calibration for LNG import") %>%
      add_units("EJ") %>%
      add_comments("Regional imports of natural gas via LNG") %>%
      add_precursors("common/GCAM_region_names",
                     "L1011.ff_GrossTrade_EJ_R_Y_LNG",
                     "L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
                     "L239.Production_reg_imp") ->
      L2391.NG_import_calOutput_LNG

    L2391.NG_import_calOutput_pipeline %>%
      add_title("Technology calibration for gas pipeline import") %>%
      add_units("EJ") %>%
      add_comments("Regional imports of natural gas via pipeline") %>%
      add_precursors("common/GCAM_region_names",
                     "energy/GCAM_region_pipeline_bloc_import",
                     "L1011.ff_GrossTrade_EJ_R_Y_LNG",
                     "L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
                     "L1011.ff_BilatTrade_EJ_R_Y_NG_pipe",
                     "L239.Production_reg_imp") ->
      L2391.NG_import_calOutput_pipeline

    L2391.NG_export_calOutput_statdiff %>%
      add_title("Calibration values for supply to statistical differences sector") %>%
      add_units("EJ") %>%
      add_comments("Regions here have greater exports/year implied by IEA data compared to COMPTRADE") %>%
      add_precursors("common/GCAM_region_names",
                     "energy/GCAM_region_pipeline_bloc_import",
                     "L1011.ff_GrossTrade_EJ_R_Y_LNG",
                     "L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
                     "L1011.ff_BilatTrade_EJ_R_Y_NG_pipe",
                     "L239.Production_reg_imp") ->
      L2391.NG_export_calOutput_statdiff

    L2391.NG_import_calOutput_statdiff %>%
      add_title("Calibration values for consumption of statistical differences in historical LNG and regional pipeline sectors") %>%
      add_units("EJ") %>%
      add_comments("Regions here have fewer exports implied by IEA data compared to COMPTRADE") %>%
      add_precursors("common/GCAM_region_names",
                     "energy/GCAM_region_pipeline_bloc_import",
                     "L1011.ff_GrossTrade_EJ_R_Y_LNG",
                     "L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
                     "L1011.ff_BilatTrade_EJ_R_Y_NG_pipe",
                     "L239.Production_reg_imp") ->
      L2391.NG_import_calOutput_statdiff

    return_data(L2391.NG_export_calOutput_LNG,
                L2391.NG_export_calOutput_pipeline,
                L2391.NG_import_calOutput_LNG,
                L2391.NG_import_calOutput_pipeline,
                L2391.NG_import_calOutput_statdiff,
                L2391.NG_export_calOutput_statdiff)
  } else {
    stop("Unknown command")
  }
}
|
/input/gcamdata/R/zenergy_L2391.gas_trade_flows.R
|
permissive
|
JGCRI/gcam-core
|
R
| false
| false
| 26,493
|
r
|
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_energy_L2391.gas_trade_flows
#'
#' Model input for natural gas trade by LNG and regional pipeline networks.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs, a vector of output names, or (if
#' \code{command} is "MAKE") all the generated outputs: \code{L2391.NG_export_calOutput_LNG},
#' \code{L2391.NG_export_calOutput_pipeline}, \code{L2391.NG_import_calOutput_LNG},
#' \code{L2391.NG_import_calOutput_pipeline}, \code{L2391.NG_import_calOutput_statdiff},
#' \code{L2391.NG_export_calOutput_statdiff}.
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter if_else left_join mutate rename select
#' @importFrom tibble tibble
#' @author MTB August 2021
module_energy_L2391.gas_trade_flows <- function(command, ...) {
if(command == driver.DECLARE_INPUTS) {
return(c(FILE = "common/GCAM_region_names",
FILE = "energy/GCAM_region_pipeline_bloc_export",
FILE = "energy/GCAM_region_pipeline_bloc_import",
"L1011.ff_GrossTrade_EJ_R_Y_LNG",
"L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
"L1011.ff_BilatTrade_EJ_R_Y_NG_pipe",
"L239.Production_tra",
"L239.Production_reg_imp"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L2391.NG_export_calOutput_LNG",
"L2391.NG_export_calOutput_pipeline",
"L2391.NG_import_calOutput_LNG",
"L2391.NG_import_calOutput_pipeline",
"L2391.NG_import_calOutput_statdiff",
"L2391.NG_export_calOutput_statdiff"))
} else if(command == driver.MAKE) {
all_data <- list(...)[[1]]
year <- region <- supplysector <- subsector <- GCAM_Commodity <- GrossExp_EJ <-
calOutputValue <- subs.share.weight <- market.name <- minicam.energy.input <-
GrossImp_EJ <- Prod_EJ <- fuel <- technology <- primary.consumption <- PrimaryFuelCO2Coef.name <- PrimaryFuelCO2Coef <-
production <- consumption <- GCAM_region_ID <- NULL # silence package check notes
# ----------------------------------------
# Load required inputs
GCAM_region_names <- get_data(all_data, "common/GCAM_region_names", strip_attributes = TRUE)
GCAM_region_pipeline_bloc_export <- get_data(all_data, "energy/GCAM_region_pipeline_bloc_export", strip_attributes = TRUE)
GCAM_region_pipeline_bloc_import <- get_data(all_data, "energy/GCAM_region_pipeline_bloc_import", strip_attributes = TRUE)
L1011.ff_GrossTrade_EJ_R_Y_LNG <- get_data(all_data, "L1011.ff_GrossTrade_EJ_R_Y_LNG", strip_attributes = TRUE)
L1011.ff_GrossTrade_EJ_R_Y_NG_pipe <- get_data(all_data, "L1011.ff_GrossTrade_EJ_R_Y_NG_pipe", strip_attributes = TRUE)
L1011.ff_BilatTrade_EJ_R_Y_NG_pipe <- get_data(all_data, "L1011.ff_BilatTrade_EJ_R_Y_NG_pipe", strip_attributes = TRUE)
L239.Production_tra <- get_data(all_data, "L239.Production_tra", strip_attributes = TRUE) %>%
filter(grepl("gas", supplysector))
L239.Production_reg_imp <- get_data(all_data, "L239.Production_reg_imp", strip_attributes = TRUE) %>%
filter(grepl("gas", supplysector))
# ----------------------------------------
# Process data
# There are a lot of pieces to balance here. We have to maintain balance of:
# - regional natural gas imports and exports (across carriers)
# - global LNG imports and exports
# - pipeline exports and imports, globally and for inter-regional pipeline networks
# Expanded natural gas trade structure has greatest detail in terms of gas pipeline networks,
# so first calculate import and export shares by pipeline network.
# Second, disaggreate / balance imports data (between pipeline and LNG, and between pipeline networks)
# because it's the more complex piece (regions can import from multiple pipeline networks).
# Third, use share out exports (between pipeline and LNG) in a manner consistent with import data.
# Finally, re-balance regional exports with a statistical differences sector (explained below)
# to ensure that everything is balanced out.
# STEP 1: Pipeline shares
# Pipeline import shares by pipeline market
# Regions are permitted to import from multiple pipeline markets
L1011.ff_BilatTrade_EJ_R_Y_NG_pipe %>%
group_by(region = destination.region, year, GCAM_Commodity, GCAM_Commodity_traded, pipeline.market) %>%
summarise(value = sum(value)) %>%
ungroup() %>%
group_by(region, year, GCAM_Commodity, GCAM_Commodity_traded) %>%
mutate(share = value / sum(value)) %>%
ungroup() %>%
# COMPTRADE data (L1011) only extends back to 2007
# create complete set of historical years and carry 2007 shares backwards
complete(nesting(region, GCAM_Commodity, GCAM_Commodity_traded, pipeline.market), year = MODEL_BASE_YEARS) %>%
group_by(region, GCAM_Commodity, GCAM_Commodity_traded, pipeline.market) %>%
mutate(share = approx_fun(year, share, rule = 2)) %>%
ungroup() %>%
select(-value) -> L2391.NG_pipeline_import_shares
# STEP 2: Import shares
# Start with share between pipeline and LNG
# Combine pipeline & LNG imports, calculate shares
L1011.ff_GrossTrade_EJ_R_Y_LNG %>%
left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
bind_rows(L1011.ff_GrossTrade_EJ_R_Y_NG_pipe) %>%
group_by(region, year, GCAM_Commodity, GCAM_Commodity_traded) %>%
summarise(GrossImp_EJ = sum(GrossImp_EJ)) %>%
ungroup() %>%
group_by(region, year, GCAM_Commodity) %>%
mutate(share = GrossImp_EJ / sum(GrossImp_EJ)) %>%
ungroup() %>%
# COMPTRADE data (L1011) only extends back to 2007
# create complete set of historical years and carry 2007 shares backwards
complete(nesting(region, GCAM_Commodity, GCAM_Commodity_traded), year = MODEL_BASE_YEARS) %>%
group_by(region, GCAM_Commodity, GCAM_Commodity_traded) %>%
mutate(share = approx_fun(year, share, rule = 2)) %>%
ungroup() %>%
select(-GrossImp_EJ) %>%
filter(year %in% MODEL_BASE_YEARS) -> L2391.NG_import_shares
# Partition calibrated imports by region between pipeline & LNG
# Join calibrated gross NG trade and shares, calculate calibrated value by trade vehicle
L2391.NG_import_shares%>%
left_join_error_no_match(L239.Production_reg_imp %>%
select(region, year, calOutputValue) ,
by = c("region", "year")) %>%
mutate(calOutputValue = share * calOutputValue) -> L2391.NG_import_calOutput
# Regional imports by LNG
L2391.NG_import_calOutput %>%
filter(GCAM_Commodity_traded == "LNG") %>%
select(-share) -> L2391.NG_import_calOutput_LNG
# Split out regional imports by pipeline network
# Map in pipeline market shares to get regional pipeline exports by market
# (Again, the default assumption is that regions are permitted to import from multiple pipeline markets)
L2391.NG_import_calOutput %>%
filter(GCAM_Commodity_traded == "gas pipeline") %>%
select(-share) %>%
# join in pipeline market info;
# join will duplicate rows because regions are permitted to import from multiple pipeline markets
# LJENM will throw error, so left_join is used
left_join(GCAM_region_pipeline_bloc_import %>%
select(region, pipeline.market),
by = c("region")) %>%
# regions with no pipeline exports won't have data points in L2391.NG_pipeline_import_shares
# this creates NAs when these tables are joined; LJENM throws an error, so left_join is used
left_join(L2391.NG_pipeline_import_shares,
by = c("region", "GCAM_Commodity", "GCAM_Commodity_traded",
"pipeline.market", "year")) %>%
replace_na(list(share = 0)) %>%
mutate(calOutputValue = share * calOutputValue) %>%
select(-share) -> L2391.NG_import_calOutput_pipeline
# Summarise global LNG imports
L2391.NG_import_calOutput_LNG %>%
group_by(year, GCAM_Commodity, GCAM_Commodity_traded) %>%
summarise(global_LNG_exp = sum(calOutputValue)) %>%
ungroup() -> L2391.NG_import_calOutput_LNG_global
# Summarise gas pipeline imports globally and by regional pipeline network
L2391.NG_import_calOutput_pipeline %>%
group_by(year, GCAM_Commodity, GCAM_Commodity_traded, pipeline.market) %>%
summarise(regional_pipe_exp = sum(calOutputValue)) %>%
ungroup() -> L2391.NG_import_calOutput_pipeline_network
# STEP 3: Export shares
# Start with share between pipeline and LNG
# Combine pipeline & LNG exports, calculate shares
L1011.ff_GrossTrade_EJ_R_Y_LNG %>%
left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
bind_rows(L1011.ff_GrossTrade_EJ_R_Y_NG_pipe) %>%
group_by(region, year, GCAM_Commodity, GCAM_Commodity_traded) %>%
summarise(GrossExp_EJ = sum(GrossExp_EJ)) %>%
ungroup() %>%
group_by(region, year, GCAM_Commodity) %>%
mutate(share = GrossExp_EJ / sum(GrossExp_EJ)) %>%
ungroup() %>%
# COMPTRADE data (L1011) only extends back to 2007
# create complete set of historical years and carry 2007 shares backwards
complete(nesting(region, GCAM_Commodity, GCAM_Commodity_traded), year = MODEL_BASE_YEARS) %>%
group_by(region, GCAM_Commodity, GCAM_Commodity_traded) %>%
mutate(share = approx_fun(year, share, rule = 2)) %>%
ungroup() %>%
select(-GrossExp_EJ) %>%
filter(year %in% MODEL_BASE_YEARS) -> L2391.NG_export_shares
# Format traded data - exporting region is actually embedded in technology name
L239.Production_tra %>%
select(-region) %>%
mutate(region = gsub(" traded natural gas", "", technology)) %>%
select(region, year, calOutputValue) -> L2391.Production_tra
# Partition calibrated exports by region between pipeline & LNG
# Join calibrated gross NG trade and shares, calculate calibrated value by trade vehicle
L2391.NG_export_shares %>%
left_join_error_no_match(L2391.Production_tra, by = c("region", "year")) %>%
mutate(calOutputValue = share * calOutputValue) -> L2391.NG_export_unadjusted
# Join in import data and scale export data so import and export totals match at relevant scales
# (global for LNG, inter-regional network for pipeline)
# LNG
L2391.NG_export_unadjusted %>%
filter(GCAM_Commodity_traded == "LNG") %>%
left_join_error_no_match(L2391.NG_import_calOutput_LNG_global,
by = c("GCAM_Commodity", "GCAM_Commodity_traded", "year")) %>%
# summarize global LNG exports by year
group_by(year, GCAM_Commodity, GCAM_Commodity_traded) %>%
mutate(global_LNG_imp = sum(calOutputValue)) %>%
ungroup() %>%
# scale calOutputValue to ensure that global LNG exports = global LNG imports
mutate(calOutputValue = calOutputValue * (global_LNG_exp / global_LNG_imp)) %>%
# test that scaling worked
group_by(year, GCAM_Commodity, GCAM_Commodity_traded) %>%
mutate(global_LNG_imp = sum(calOutputValue)) %>%
ungroup() %>%
select(region, GCAM_Commodity, GCAM_Commodity_traded, year, calOutputValue) ->
L2391.NG_export_calOutput_LNG
# Pipeline
L2391.NG_export_unadjusted %>%
filter(GCAM_Commodity_traded == "gas pipeline") %>%
# join in pipeline market info
left_join_error_no_match(GCAM_region_pipeline_bloc_export, by = c("region" = "origin.region")) %>%
left_join_error_no_match(L2391.NG_import_calOutput_pipeline_network,
by = c("GCAM_Commodity", "GCAM_Commodity_traded", "year", "pipeline.market")) %>%
# summarize pipeline exports by year and network
group_by(year, GCAM_Commodity, GCAM_Commodity_traded, pipeline.market) %>%
mutate(regional_pipe_imp = sum(calOutputValue)) %>%
ungroup() %>%
# scale calOutputValue to ensure that exports = imports for each network
mutate(calOutputValue = calOutputValue * (regional_pipe_exp / regional_pipe_imp)) %>%
select(region, GCAM_Commodity, GCAM_Commodity_traded, year, calOutputValue, pipeline.market) ->
L2391.NG_export_calOutput_pipeline
# STEP 4: Check trade balances (across scales) and make final adjustments
# Combine flows with a single market (LNG imports and exports, pipeline exports)
L2391.NG_export_calOutput_pipeline %>%
select(region, year, pipeline.market, calExport_pipe = calOutputValue) %>%
left_join_error_no_match(L2391.NG_export_calOutput_LNG %>%
select(region, year, calExport_LNG = calOutputValue),
by = c("region", "year")) %>%
left_join_error_no_match(L2391.NG_import_calOutput_LNG %>%
select(region, year, calImport_LNG = calOutputValue),
by = c("region", "year")) -> L2391.LNG.exp.imp_pipe.exp
# Add in pipeline imports (which can come from multiple pipeline networks) as well as
# region total imports and exports which we need to match
L2391.NG_import_calOutput_pipeline %>%
select(region, year, pipeline.market, calImport_pipe = calOutputValue) %>%
# join will produce NAs because we don't want to duplicate LNG and pipeline export values
# by multiple import pipelines by region. LJENM throws error; left_join is used and
# NAs dealt with below
left_join(L2391.LNG.exp.imp_pipe.exp,
by = c("region", "year", "pipeline.market")) %>%
replace_na(list(calExport_pipe = 0,
calExport_LNG = 0,
calImport_LNG = 0)) %>%
left_join_error_no_match(L2391.Production_tra %>%
select(region, year, reg_NG_exports = calOutputValue),
by = c("region", "year")) %>%
left_join_error_no_match(L239.Production_reg_imp %>%
select(region, year, reg_NG_imports = calOutputValue),
by = c("region", "year")) -> L2391.LNG_pipe
# Check that all flows are balanced. At this point this is true for all flows except for regional exports.
# We'll calculate all balances here anyway for debugging purposes. We're checking:
# reg_NG_imports: sum of LNG and pipeline imports (should match calibrated value from L239 table)
# reg_NG_exports: sum of LNG and pipeline exports (should match calibrated value from L239 table)
# pipe_ntwrk: balance of imports and exports for each pipeline network (imports and exports should balance)
# global_LNG = global balance of LNG imports and exports (imports and exports should balance)
# global_pipe: global balance of pipeline inports and exports (imports and exports should balance)
L2391.LNG_pipe %>%
# total NG imports and exports
group_by(region, year) %>%
mutate(reg_NG_imports_check = sum(calImport_pipe) + sum(calImport_LNG),
reg_NG_imports_diff = round(reg_NG_imports - reg_NG_imports_check, energy.DIGITS_CALOUTPUT),
reg_NG_exports_check = sum(calExport_pipe) + sum(calExport_LNG),
reg_NG_exports_diff = round(reg_NG_exports - reg_NG_exports_check, energy.DIGITS_CALOUTPUT)) %>%
ungroup() %>%
# regional gas pipelines
group_by(pipeline.market, year) %>%
mutate(pipe_ntwrk_imp = sum(calImport_pipe),
pipe_ntwrk_exp = sum(calExport_pipe),
pipe_ntwrk_diff = round(pipe_ntwrk_imp - pipe_ntwrk_exp, energy.DIGITS_CALOUTPUT)) %>%
ungroup() %>%
# global totals by vehicle
group_by(year) %>%
mutate(global_LNG_imp = sum(calImport_LNG),
global_LNG_exp = sum(calExport_LNG),
global_LNG_diff = round(global_LNG_imp - global_LNG_exp, energy.DIGITS_CALOUTPUT),
global_pipe_imp = sum(calImport_pipe),
global_pipe_exp = sum(calExport_pipe),
global_pipe_diff = round(global_pipe_imp - global_pipe_exp, energy.DIGITS_CALOUTPUT)) %>%
ungroup() -> L2391.gas_flow_balances
# At this point everything is balanced except for regional exports.
# This is because GCAM's processing of the IEA data largely ignores stock changes
# (which are balanced globally but not at the regional / pipeline network level).
# so region X could export quantity Q to region Y, but some portion of Q increases stocks
# in region Y, rather than being consumed. So region X production and exports are correct,
# region Y imports and consumption are correct, but they don't balance because of the stock changes.
# To correct this, we'll create a "gas trade statistical differences" market where regions which
# don't export enough (positive reg_NG_exports_diff) supply and regions which export too much
# (negative reg_NG_exports_diff) demand. The latter regions will have their LNG exports reduced
# by the amount of the difference, and the global LNG market will import that amount from the
# "gas trade statistical differences" market.
L2391.gas_flow_balances %>%
distinct(region, year, reg_NG_exports_diff) -> L2391.gas_export_diff
# Regions that export more than allocated to pipeline & LNG
L2391.gas_export_diff %>%
filter(reg_NG_exports_diff > 0) %>%
complete(nesting(region), year = MODEL_BASE_YEARS) %>%
replace_na(list(reg_NG_exports_diff = 0)) %>%
rename(calOutputValue = reg_NG_exports_diff) %>%
mutate(sector = "gas trade statistical differences",
subsector = "statistical differences",
technology = paste0(region, " statistical differences"),
minicam.energy.input = "natural gas",
market.name = region,
region = gcam.USA_REGION) -> L2391.NG_export_calOutput_statdiff
# Regions that have excess LNG + pipeline exports
L2391.gas_export_diff %>%
filter(reg_NG_exports_diff < 0) %>%
complete(nesting(region), year = MODEL_BASE_YEARS) %>%
replace_na(list(reg_NG_exports_diff = 0)) %>%
rename(value = reg_NG_exports_diff) %>%
mutate(value = abs(value)) -> L2391.NG_import_calOutput_statdiff
# Divide export difference for each region between LNG & pipelines according to historical shares,
# and then adjust the corresponding calOutputValues accordingly. Thiss will keep any region's
# exports from going negative and keep LNG / pipeline shares relatively constant.
L2391.NG_import_calOutput_statdiff %>%
left_join_error_no_match(GCAM_region_pipeline_bloc_export, by = c("region" = "origin.region")) %>%
# L2391.NG_export_shares contains shares for LNG and pipeline
# join will duplicate rows by each carrier
# LJENM will error, left_join() is used
left_join(L2391.NG_export_shares, by = c("region", "year")) %>%
mutate(value = value * share) %>%
select(-share) -> L2391.NG_import_calOutput_adj
# Adjust regional export values by gas pipeline network
L2391.NG_export_calOutput_pipeline %>%
# not all regions are included in L2391.NG_import_calOutput_adj, so join produces NAs.
# LJENM throws error, so left_join is used and NAs are dealt with below
left_join(L2391.NG_import_calOutput_adj,
by = c("region", "GCAM_Commodity", "GCAM_Commodity_traded", "year", "pipeline.market")) %>%
replace_na(list(value = 0)) %>%
mutate(calOutputValue = calOutputValue - value) %>%
select(-value) -> L2391.NG_export_calOutput_pipeline
# Adjust regional LNG export values
L2391.NG_export_calOutput_LNG %>%
# not all regions are included in L2391.NG_import_calOutput_adj, so join produces NAs.
# LJENM throws error, so left_join is used and NAs are dealt with below
left_join(L2391.NG_import_calOutput_adj %>%
select(-pipeline.market),
by = c("region", "GCAM_Commodity", "GCAM_Commodity_traded", "year")) %>%
replace_na(list(value = 0)) %>%
mutate(calOutputValue = calOutputValue - value) %>%
select(-value) -> L2391.NG_export_calOutput_LNG
# Summarize regional pipeline / global LNG consumption from statistical differences sector
L2391.NG_import_calOutput_adj %>%
filter(GCAM_Commodity_traded == "gas pipeline") %>%
group_by(year, pipeline.market, GCAM_Commodity, GCAM_Commodity_traded) %>%
summarise(value = sum(value)) %>%
ungroup() %>%
mutate(technology = "statistical differences",
minicam.energy.input = "gas trade statistical differences",
market.name = gcam.USA_REGION) -> L2391.NG_import_calOutput_statdiff_pipe
L2391.NG_import_calOutput_adj %>%
filter(GCAM_Commodity_traded == "LNG") %>%
group_by(year, GCAM_Commodity, GCAM_Commodity_traded) %>%
summarise(value = sum(value)) %>%
ungroup() %>%
mutate(technology = "statistical differences",
minicam.energy.input = "gas trade statistical differences",
market.name = gcam.USA_REGION) -> L2391.NG_import_calOutput_statdiff_LNG
L2391.NG_import_calOutput_statdiff_pipe %>%
bind_rows(L2391.NG_import_calOutput_statdiff_LNG) %>%
rename(calOutputValue = value) -> L2391.NG_import_calOutput_statdiff
# # Checking global totals by year
# L2391.NG_export_calOutput_LNG %>%
# group_by(year) %>%
# summarise(value = sum(calOutputValue)) %>%
# ungroup() -> L2391.NG_export_calOutput_LNG_global
#
# L2391.NG_export_calOutput_pipeline %>%
# group_by(year) %>%
# summarise(value = sum(calOutputValue)) %>%
# ungroup() -> L2391.NG_export_calOutput_pipeline_global
#
# L2391.NG_import_calOutput_LNG %>%
# group_by(year) %>%
# summarise(value = sum(calOutputValue)) %>%
# ungroup() -> L2391.NG_import_calOutput_LNG_global
#
# L2391.NG_import_calOutput_pipeline %>%
# group_by(year) %>%
# summarise(value = sum(calOutputValue)) %>%
# ungroup() -> L2391.NG_import_calOutput_pipeline_global
#
# L2391.NG_import_calOutput_statdiff %>%
# group_by(year) %>%
# summarise(value = sum(calOutputValue)) %>%
# ungroup() -> L2391.NG_import_calOutput_statdiff_global
#
# L2391.NG_export_calOutput_statdiff %>%
# group_by(year) %>%
# summarise(value = sum(calOutputValue)) %>%
# ungroup() -> L2391.NG_export_calOutput_statdiff_global
# ----------------------------------------
# Produce outputs
L2391.NG_export_calOutput_LNG %>%
add_title("Technology calibration for LNG export") %>%
add_units("EJ") %>%
add_comments("Historical regional exports of natural gas via LNG") %>%
add_precursors("common/GCAM_region_names",
"L1011.ff_GrossTrade_EJ_R_Y_LNG",
"L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
"L239.Production_tra") ->
L2391.NG_export_calOutput_LNG
L2391.NG_export_calOutput_pipeline %>%
add_title("Technology calibration for gas pipeline export") %>%
add_units("EJ") %>%
add_comments("Regional exports of natural gas via pipeline") %>%
add_precursors("common/GCAM_region_names",
"energy/GCAM_region_pipeline_bloc_export",
"L1011.ff_GrossTrade_EJ_R_Y_LNG",
"L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
"L1011.ff_BilatTrade_EJ_R_Y_NG_pipe",
"L239.Production_tra") ->
L2391.NG_export_calOutput_pipeline
L2391.NG_import_calOutput_LNG %>%
add_title("Technology calibration for LNG import") %>%
add_units("EJ") %>%
add_comments("Regional imports of natural gas via LNG") %>%
add_precursors("common/GCAM_region_names",
"L1011.ff_GrossTrade_EJ_R_Y_LNG",
"L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
"L239.Production_reg_imp") ->
L2391.NG_import_calOutput_LNG
L2391.NG_import_calOutput_pipeline %>%
add_title("Technology calibration for gas pipeline import") %>%
add_units("EJ") %>%
add_comments("Regional imports of natural gas via pipeline") %>%
add_precursors("common/GCAM_region_names",
"energy/GCAM_region_pipeline_bloc_import",
"L1011.ff_GrossTrade_EJ_R_Y_LNG",
"L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
"L1011.ff_BilatTrade_EJ_R_Y_NG_pipe",
"L239.Production_reg_imp") ->
L2391.NG_import_calOutput_pipeline
L2391.NG_export_calOutput_statdiff %>%
add_title("Calibration values for supply to statistical differences sector") %>%
add_units("EJ") %>%
add_comments("Regions here have greater exports/year implied by IEA data compared to COMPTRADE") %>%
add_precursors("common/GCAM_region_names",
"energy/GCAM_region_pipeline_bloc_import",
"L1011.ff_GrossTrade_EJ_R_Y_LNG",
"L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
"L1011.ff_BilatTrade_EJ_R_Y_NG_pipe",
"L239.Production_reg_imp") ->
L2391.NG_export_calOutput_statdiff
L2391.NG_import_calOutput_statdiff %>%
add_title("Calibration values for consumption of statistical differences in historical LNG and regional pipeline sectors") %>%
add_units("EJ") %>%
add_comments("Regions here have fewer exports implied by IEA data compared to COMPTRADE") %>%
add_precursors("common/GCAM_region_names",
"energy/GCAM_region_pipeline_bloc_import",
"L1011.ff_GrossTrade_EJ_R_Y_LNG",
"L1011.ff_GrossTrade_EJ_R_Y_NG_pipe",
"L1011.ff_BilatTrade_EJ_R_Y_NG_pipe",
"L239.Production_reg_imp") ->
L2391.NG_import_calOutput_statdiff
return_data(L2391.NG_export_calOutput_LNG,
L2391.NG_export_calOutput_pipeline,
L2391.NG_import_calOutput_LNG,
L2391.NG_import_calOutput_pipeline,
L2391.NG_import_calOutput_statdiff,
L2391.NG_export_calOutput_statdiff)
} else {
stop("Unknown command")
}
}
|
#' @title Convert cluster assignment to settings format suitable for target gene prediction.
#'
#' @description \code{convert_cluster_to_settings} Convert cluster assignment to settings format suitable for target gene prediction.
#'
#' @usage
#' convert_cluster_to_settings(i, cluster_vector, setting_name, setting_from, background = NULL)
#'
#' @param i The cluster number of the cluster of interest to which genes should belong
#' @param cluster_vector Named vector containing the cluster number to which every gene belongs
#' @param setting_name Base name of the setting
#' @param setting_from Active ligands for the specific setting
#' @param background NULL or a character vector of genes belonging to the background. When NULL: the background will be formed by genes belonging to other clusters that the cluster of interest. Default NULL. If not NULL and genes present in the cluster of interest are in this vector of background gene names, these genes will be removed from the background.
#'
#' @return A list with following elements: $name (indicating the cluster id), $from, $response. $response is a gene-named logical vector indicating whether the gene is part of the respective cluster.
#'
#' @examples
#' \dontrun{
#' genes_clusters = c("TGFB1" = 1,"TGFB2" = 1,"TGFB3" = 2)
#' cluster_settings = lapply(seq(length(unique(genes_clusters))), convert_cluster_to_settings, cluster_vector = genes_clusters, setting_name = "example", setting_from = "BMP2")
#' }
#'
#' @export
#'
convert_cluster_to_settings = function(i, cluster_vector, setting_name, setting_from, background = NULL){
  # Input checks: use scalar `&&`/`||` so conditions short-circuit and never
  # produce a multi-element logical (an error inside `if` since R 4.3,
  # e.g. when `i` is accidentally a vector).
  if (!is.numeric(i) || length(i) != 1 || i <= 0)
    stop("i should be a number higher than 0")
  if (!is.numeric(cluster_vector) || is.null(names(cluster_vector)))
    stop("cluster_vector should be a named numeric vector")
  if (!is.character(setting_name))
    stop("setting_name should be a character vector")
  if (!is.character(setting_from))
    stop("setting_from should be a character vector")
  if (!is.character(background) && !is.null(background))
    stop("background should be a character vector or NULL")

  # Genes assigned to the cluster of interest (plain base R; no dplyr needed).
  genes_cluster_oi = names(cluster_vector)[cluster_vector == i]
  if (is.null(background)){
    # Default background: all genes of the other clusters. The response vector
    # marks, per gene, membership of the cluster of interest.
    response = names(cluster_vector) %in% genes_cluster_oi
    names(response) = names(cluster_vector)
  } else {
    # User-supplied background: drop cluster genes from it so no gene is
    # simultaneously TRUE (cluster) and FALSE (background).
    background = background[!background %in% genes_cluster_oi]
    background_logical = rep(FALSE, times = length(background))
    names(background_logical) = background
    cluster_logical = rep(TRUE, times = length(genes_cluster_oi))
    names(cluster_logical) = genes_cluster_oi
    response = c(background_logical, cluster_logical)
  }
  return(list(name = paste0(setting_name, "_cluster_", i), from = setting_from, response = response))
}
#' @title Predict activities of ligands in regulating expression of a gene set of interest
#'
#' @description \code{predict_ligand_activities} Predict activities of ligands in regulating expression of a gene set of interest. Ligand activities are defined as how well they predict the observed transcriptional response (i.e. gene set) according to the NicheNet model.
#'
#' @usage
#' predict_ligand_activities(geneset, background_expressed_genes,ligand_target_matrix, potential_ligands, single = TRUE,...)
#'
#' @param geneset Character vector of the gene symbols of genes of which the expression is potentially affected by ligands from the interacting cell.
#' @param background_expressed_genes Character vector of gene symbols of the background, non-affected, genes (can contain the symbols of the affected genes as well).
#' @param ligand_target_matrix The NicheNet ligand-target matrix denoting regulatory potential scores between ligands and targets (ligands in columns).
#' @param potential_ligands Character vector giving the gene symbols of the potentially active ligands you want to define ligand activities for.
#' @param single TRUE if you want to calculate ligand activity scores by considering every ligand individually (recommended). FALSE if you want to calculate ligand activity scores as variable importances of a multi-ligand classification model.
#' @param ... Additional parameters for get_multi_ligand_importances if single = FALSE.
#'
#' @return A tibble giving several ligand activity scores. Following columns in the tibble: $test_ligand, $auroc, $aupr, $aupr_corrected and $pearson.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' ligand_activities = predict_ligand_activities(geneset = geneset, background_expressed_genes = background_expressed_genes, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
#' }
#'
#' @export
#'
predict_ligand_activities = function(geneset, background_expressed_genes, ligand_target_matrix, potential_ligands, single = TRUE, ...){
  # Build one evaluation "setting" contrasting the gene set against the background.
  setting = list(geneset) %>%
    lapply(convert_gene_list_settings_evaluation, name = "gene set", ligands_oi = potential_ligands, background = background_expressed_genes)
  if (isTRUE(single)){
    # Score each candidate ligand on its own (recommended mode).
    settings_ligand_prediction = setting %>%
      convert_settings_ligand_prediction(all_ligands = potential_ligands, validation = FALSE, single = TRUE)
    ligand_importances = settings_ligand_prediction %>% lapply(get_single_ligand_importances, ligand_target_matrix = ligand_target_matrix, known = FALSE) %>% bind_rows()
  } else {
    # Score ligands jointly as variable importances of one multi-ligand model;
    # `...` is forwarded to get_multi_ligand_importances.
    settings_ligand_prediction = setting %>%
      convert_settings_ligand_prediction(all_ligands = potential_ligands, validation = FALSE, single = FALSE)
    ligand_importances = settings_ligand_prediction %>% lapply(get_multi_ligand_importances, ligand_target_matrix = ligand_target_matrix, known = FALSE, ...) %>% bind_rows()
  }
  # Keep only the activity metrics documented in @return.
  return(ligand_importances %>% select(test_ligand, auroc, aupr, aupr_corrected, pearson))
}
#' @title Infer weighted active ligand-target links between a possible ligand and target genes of interest
#'
#' @description \code{get_weighted_ligand_target_links} Infer active ligand target links between possible ligands and genes belonging to a gene set of interest: consider the intersect between the top n targets of a ligand and the gene set.
#'
#' @usage
#' get_weighted_ligand_target_links(ligand, geneset,ligand_target_matrix,n = 250)
#'
#' @param geneset Character vector of the gene symbols of genes of which the expression is potentially affected by ligands from the interacting cell.
#' @param ligand Character vector giving the gene symbols of the potentially active ligand for which you want to find target genes.
#' @param n The top n of targets per ligand that will be considered. Default: 250.
#' @inheritParams predict_ligand_activities
#'
#' @return A tibble with columns ligand, target and weight (i.e. regulatory potential score).
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligand = "TNF"
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' active_ligand_target_links_df = get_weighted_ligand_target_links(ligand = potential_ligand, geneset = geneset, ligand_target_matrix = ligand_target_matrix, n = 250)
#' }
#'
#' @export
#'
get_weighted_ligand_target_links = function(ligand, geneset, ligand_target_matrix, n = 250){
  # Regulatory-potential score of the ligand's n-th best target: the threshold
  # a gene must reach to count as a "top-n" target of this ligand.
  top_n_score = min(head(sort(ligand_target_matrix[, ligand], decreasing = TRUE), n))
  # Active targets: top-n targets of the ligand that are also in the gene set.
  targets = intersect(names(which(ligand_target_matrix[, ligand] >= top_n_score)), geneset)
  if (length(targets) == 0){
    # No overlap: return an NA placeholder row so downstream bind_rows() still
    # records the ligand.
    ligand_target_weighted_df = tibble(ligand = ligand, target = NA, weight = NA)
  } else {
    # Direct construction handles one or many targets alike (the original
    # separate length-1 / inner_join branches were equivalent to this);
    # unname() keeps the weight column free of a names attribute.
    ligand_target_weighted_df = tibble(ligand = ligand, target = targets, weight = unname(ligand_target_matrix[targets, ligand]))
  }
  return(ligand_target_weighted_df)
}
#' @title Prepare heatmap visualization of the ligand-target links starting from a ligand-target tibble.
#'
#' @description \code{prepare_ligand_target_visualization} Prepare heatmap visualization of the ligand-target links starting from a ligand-target tibble. Get regulatory potential scores between all pairs of ligands and targets documented in this tibble. For better visualization, we propose to define a quantile cutoff on the ligand-target scores.
#'
#' @usage
#' prepare_ligand_target_visualization(ligand_target_df, ligand_target_matrix, cutoff = 0.25)
#'
#' @param cutoff Quantile cutoff on the ligand-target scores of the input weighted ligand-target network. Scores under this cutoff will be set to 0.
#' @param ligand_target_df Tibble with columns 'ligand', 'target' and 'weight' to indicate ligand-target regulatory potential scores of interest.
#' @inheritParams predict_ligand_activities
#'
#' @return A matrix giving the ligand-target regulatory potential scores between ligands of interest and their targets genes part of the gene set of interest.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' active_ligand_target_links_df = potential_ligands %>% lapply(get_weighted_ligand_target_links, geneset = geneset, ligand_target_matrix = ligand_target_matrix, n = 250) %>% bind_rows()
#' active_ligand_target_links = prepare_ligand_target_visualization(ligand_target_df = active_ligand_target_links_df, ligand_target_matrix = ligand_target_matrix, cutoff = 0.25)
#' }
#'
#' @export
#'
prepare_ligand_target_visualization = function(ligand_target_df, ligand_target_matrix, cutoff = 0.25){
  # Score threshold: the requested quantile of the input ligand-target weights.
  cutoff_include_all_ligands = quantile(ligand_target_df$weight, cutoff)
  # Zero out every regulatory potential score below the threshold.
  ligand_target_matrix_oi = ligand_target_matrix
  ligand_target_matrix_oi[ligand_target_matrix_oi < cutoff_include_all_ligands] = 0
  # Restrict the matrix to the targets/ligands present in the input tibble.
  # dim() is (re-)assigned because a single-row/column subset drops to a vector.
  all_targets = unique(ligand_target_df$target)
  all_ligands = unique(ligand_target_df$ligand)
  ligand_target_vis = ligand_target_matrix_oi[all_targets, all_ligands]
  dim(ligand_target_vis) = c(length(all_targets), length(all_ligands))
  rownames(ligand_target_vis) = all_targets
  colnames(ligand_target_vis) = all_ligands
  # Keep only targets/ligands with at least one link above the threshold.
  keep_targets = all_targets[rowSums(ligand_target_vis) > 0]
  keep_ligands = all_ligands[colSums(ligand_target_vis) > 0]
  ligand_target_vis_filtered = ligand_target_vis[keep_targets, keep_ligands]
  if (!is.matrix(ligand_target_vis_filtered)) {
    # Single-row/column subset collapsed to a vector: restore matrix shape.
    dim(ligand_target_vis_filtered) = c(length(keep_targets), length(keep_ligands))
  }
  rownames(ligand_target_vis_filtered) = keep_targets
  colnames(ligand_target_vis_filtered) = keep_ligands
  if (nrow(ligand_target_vis_filtered) > 1 && ncol(ligand_target_vis_filtered) > 1) {
    # Order rows and columns by hierarchical clustering on correlation distance.
    dist_targets = dist(1 - cor(t(ligand_target_vis_filtered)))  # target-target correlations
    hclust_targets = hclust(dist_targets, method = "ward.D2")
    order_targets = hclust_targets$labels[hclust_targets$order]
    dist_ligands = dist(1 - cor(ligand_target_vis_filtered))     # ligand-ligand correlations
    hclust_ligands = hclust(dist_ligands, method = "ward.D2")
    order_ligands = hclust_ligands$labels[hclust_ligands$order]
  } else {
    # Too few rows or columns to cluster: keep the existing order.
    order_targets = rownames(ligand_target_vis_filtered)
    order_ligands = colnames(ligand_target_vis_filtered)
  }
  # Reorder; again force matrix shape/names in case subsetting dropped them.
  vis_ligand_target_network = ligand_target_vis_filtered[order_targets, order_ligands]
  dim(vis_ligand_target_network) = c(length(order_targets), length(order_ligands))
  rownames(vis_ligand_target_network) = order_targets
  colnames(vis_ligand_target_network) = order_ligands
  return(vis_ligand_target_network)
}
#' @title Assess probability that a target gene belongs to the geneset based on a multi-ligand random forest model
#'
#' @description \code{assess_rf_class_probabilities} Assess probability that a target gene belongs to the geneset based on a multi-ligand random forest model (with cross-validation). Target genes and background genes will be split in different groups in a stratified way.
#'
#' @usage
#' assess_rf_class_probabilities(round,folds,geneset,background_expressed_genes,ligands_oi,ligand_target_matrix)
#'
#' @param ligands_oi Character vector giving the gene symbols of the ligands you want to build the multi-ligand with.
#' @param round Integer describing which fold of the cross-validation scheme it is.
#' @param folds Integer describing how many folds should be used.
#' @inheritParams predict_ligand_activities
#'
#' @return A tibble with columns: $gene, $response, $prediction. Response indicates whether the gene belongs to the geneset of interest, prediction gives the probability this gene belongs to the geneset according to the random forest model.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' fold1_rf_prob = assess_rf_class_probabilities(round = 1,folds = 2,geneset = geneset,background_expressed_genes = background_expressed_genes ,ligands_oi = potential_ligands,ligand_target_matrix = ligand_target_matrix)
#' }
#'
#' @export
#'
assess_rf_class_probabilities = function(round,folds,geneset,background_expressed_genes,ligands_oi, ligand_target_matrix){
# Seed with the round index so each cross-validation round gives a
# reproducible (but round-specific) shuffle of the gene set.
set.seed(round)
geneset_shuffled = sample(geneset, size = length(geneset))
# split() recycles 1:folds over the shuffled genes, yielding `folds`
# roughly equal-sized groups (emits a warning if lengths don't divide evenly).
geneset_grouped = split(geneset_shuffled,1:folds)
# "Strict" background: background genes that are not themselves in the gene set.
strict_background_expressed_genes = background_expressed_genes[!background_expressed_genes %in% geneset]
# Re-seed so the background shuffle is reproducible independently of the
# geneset shuffle above.
set.seed(round)
strict_background_expressed_genes_shuffled = sample(strict_background_expressed_genes, size = length(strict_background_expressed_genes))
strict_background_expressed_genes_grouped = split(strict_background_expressed_genes_shuffled,1:folds)
# One prediction pass per fold via rf_target_prediction (defined elsewhere in
# this package), then stack the per-fold tibbles row-wise.
geneset_predictions_all = seq(length(geneset_grouped)) %>% lapply(rf_target_prediction,geneset_grouped,strict_background_expressed_genes_grouped,ligands_oi,ligand_target_matrix) %>% bind_rows()
# The response column carries "." artifacts (presumably prefixes introduced by
# split()'s group names upstream -- TODO confirm against rf_target_prediction);
# strip them so the values coerce cleanly back to logical.
# NOTE(review): the function returns this value invisibly, since the last
# expression is an assignment; callers using lapply() still receive it.
geneset_predictions_all = geneset_predictions_all %>% mutate(response = gsub("\\.","",response) %>% as.logical())
}
#' @title Assess how well classification predictions accord to the expected response
#'
#' @description \code{classification_evaluation_continuous_pred_wrapper} Assess how well classification predictions accord to the expected response.
#'
#' @usage
#' classification_evaluation_continuous_pred_wrapper(response_prediction_tibble)
#'
#' @param response_prediction_tibble Tibble with columns "response" and "prediction" (e.g. output of function `assess_rf_class_probabilities`)
#'
#' @return A tibble showing several classification evaluation metrics.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' fold1_rf_prob = assess_rf_class_probabilities(round = 1,folds = 2,geneset = geneset,background_expressed_genes = background_expressed_genes ,ligands_oi = potential_ligands,ligand_target_matrix = ligand_target_matrix)
#' classification_evaluation_continuous_pred_wrapper(fold1_rf_prob)
#' }
#'
#' @export
#'
classification_evaluation_continuous_pred_wrapper = function(response_prediction_tibble) {
  # Thin wrapper: unpack the prediction/response columns and delegate to
  # classification_evaluation_continuous_pred (iregulon mode disabled).
  prediction_performances = classification_evaluation_continuous_pred(response_prediction_tibble$prediction, response_prediction_tibble$response, iregulon = FALSE)
  return(prediction_performances)
}
#' @title Find which genes were among the top-predicted targets genes in a specific cross-validation round and see whether these genes belong to the gene set of interest as well.
#'
#' @description \code{get_top_predicted_genes} Retrieves the genes that ranked among the top-predicted target genes for one cross-validation round and indicates for each of them whether it also belongs to the gene set of interest.
#'
#' @usage
#' get_top_predicted_genes(round,gene_prediction_list, quantile_cutoff = 0.95)
#'
#' @param gene_prediction_list List with per round of cross-validation: a tibble with columns "gene", "prediction" and "response" (e.g. output of function `assess_rf_class_probabilities`)
#' @param round Integer describing which fold of the cross-validation scheme it is.
#' @param quantile_cutoff Quantile of which genes should be considered as top-predicted targets. Default: 0.95, thus considering the top 5 percent predicted genes as predicted targets.
#'
#' @return A tibble indicating for every gene whether it belongs to the geneset and whether it belongs to the top-predicted genes in a specific cross-validation round.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' gene_predictions_list = seq(2) %>% lapply(assess_rf_class_probabilities,2, geneset = geneset,background_expressed_genes = background_expressed_genes,ligands_oi = potential_ligands,ligand_target_matrix = ligand_target_matrix)
#' seq(length(gene_predictions_list)) %>% lapply(get_top_predicted_genes,gene_predictions_list)
#' }
#'
#' @export
#'
get_top_predicted_genes = function(round,gene_prediction_list, quantile_cutoff = 0.95){
  predictions = gene_prediction_list[[round]]
  # Genes whose prediction score reaches the requested upper quantile.
  cutoff_value = quantile(predictions$prediction, quantile_cutoff)
  top_predicted = predictions %>%
    arrange(desc(prediction)) %>%
    filter(prediction >= cutoff_value) %>%
    transmute(gene = gene,
              true_target = response,
              predicted_top_target = TRUE)
  # Suffix the indicator column with the round so per-round tables can be joined later.
  colnames(top_predicted) = c("gene", "true_target", paste0("predicted_top_target_round", round))
  return(top_predicted)
}
#' @title Determine the fraction of genes belonging to the geneset or background and to the top-predicted genes.
#'
#' @description \code{calculate_fraction_top_predicted} Defines the fraction of genes belonging to the geneset or background and to the top-predicted genes.
#'
#' @usage
#' calculate_fraction_top_predicted(affected_gene_predictions, quantile_cutoff = 0.95)
#'
#' @param affected_gene_predictions Tibble with columns "gene", "prediction" and "response" (e.g. output of function `assess_rf_class_probabilities`)
#' @param quantile_cutoff Quantile of which genes should be considered as top-predicted targets. Default: 0.95, thus considering the top 5 percent predicted genes as predicted targets.
#'
#' @return A tibble indicating the number of genes belonging to the gene set of interest or background (true_target column), the number and fraction of genes of these groups that were part of the top predicted targets in a specific cross-validation round. A group with zero top-predicted genes is reported with a fraction of 0 (not dropped).
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' gene_predictions_list = seq(2) %>% lapply(assess_rf_class_probabilities,2, geneset = geneset,background_expressed_genes = background_expressed_genes,ligands_oi = potential_ligands,ligand_target_matrix = ligand_target_matrix)
#' target_prediction_performances_discrete_cv = gene_predictions_list %>% lapply(calculate_fraction_top_predicted) %>% bind_rows() %>% ungroup() %>% mutate(round=rep(1:length(gene_predictions_list), each = 2))
#' }
#'
#' @export
#'
calculate_fraction_top_predicted = function(affected_gene_predictions, quantile_cutoff = 0.95){
  # Genes in the upper (1 - quantile_cutoff) fraction of prediction scores, counted per class.
  predicted_positive = affected_gene_predictions %>% arrange(-prediction) %>% filter(prediction >= quantile(prediction,quantile_cutoff)) %>% group_by(response) %>% count() %>% rename(positive_prediction = n) %>% rename(true_target = response)
  # Total number of genes per class (geneset TRUE / background FALSE).
  all = affected_gene_predictions %>% arrange(-prediction) %>% rename(true_target = response) %>% group_by(true_target) %>% count()
  # Use left_join + replace_na (instead of inner_join) so a class with zero
  # top-predicted genes keeps its row with fraction 0 — consistent with
  # calculate_fraction_top_predicted_fisher() and required by downstream code
  # that expects two rows (TRUE and FALSE) per cross-validation round.
  left_join(all, predicted_positive, by = "true_target") %>%
    mutate(positive_prediction = replace_na(positive_prediction, 0)) %>%
    mutate(fraction_positive_predicted = positive_prediction/n)
}
#' @title Perform a Fisher's exact test to determine whether genes belonging to the gene set of interest are more likely to be part of the top-predicted targets.
#'
#' @description \code{calculate_fraction_top_predicted_fisher} Runs a one-sided Fisher's exact test checking whether gene-set genes are enriched among the top-predicted target genes.
#'
#' @usage
#' calculate_fraction_top_predicted_fisher(affected_gene_predictions, quantile_cutoff = 0.95, p_value_output = TRUE)
#'
#' @param p_value_output Should total summary or p-value be returned as output? Default: TRUE.
#' @inheritParams calculate_fraction_top_predicted
#'
#' @return Summary of the Fisher's exact test or just the p-value
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' gene_predictions_list = seq(2) %>% lapply(assess_rf_class_probabilities,2, geneset = geneset,background_expressed_genes = background_expressed_genes,ligands_oi = potential_ligands,ligand_target_matrix = ligand_target_matrix)
#' target_prediction_performances_fisher_pval = gene_predictions_list %>% lapply(calculate_fraction_top_predicted_fisher) %>% unlist() %>% mean()
#' }
#'
#' @export
#'
calculate_fraction_top_predicted_fisher = function(affected_gene_predictions, quantile_cutoff = 0.95, p_value_output = TRUE){
  # Count top-predicted genes per class (response TRUE = geneset, FALSE = background).
  cutoff_value = quantile(affected_gene_predictions$prediction, quantile_cutoff)
  top_counts = affected_gene_predictions %>%
    filter(prediction >= cutoff_value) %>%
    group_by(response) %>%
    count() %>%
    rename(positive_prediction = n)
  # Total gene count per class; classes absent from the top set get a 0.
  total_counts = affected_gene_predictions %>%
    group_by(response) %>%
    count()
  counts = left_join(total_counts, top_counts, by = "response") %>%
    mutate(positive_prediction = replace_na(positive_prediction, 0))
  geneset_counts = counts %>% filter(response == TRUE)
  background_counts = counts %>% filter(response == FALSE)
  # 2x2 contingency table: geneset/background vs top-predicted/not.
  tp = geneset_counts$positive_prediction
  fp = background_counts$positive_prediction
  fn = geneset_counts$n - geneset_counts$positive_prediction
  tn = background_counts$n - background_counts$positive_prediction
  contingency_table = matrix(c(tp, fp, fn, tn), nrow = 2,
                             dimnames = list(c("geneset", "background"),
                                             c("top-predicted", "no-top-predicted")))
  # One-sided test: are geneset genes over-represented among top predictions?
  fisher_result = fisher.test(contingency_table, alternative = "greater")
  if (p_value_output == TRUE) {
    return(fisher_result$p.value)
  } else {
    return(fisher_result)
  }
}
#' @title Cut off outer quantiles and rescale to a [0, 1] range
#'
#' @description \code{scale_quantile} Cut off outer quantiles and rescale to a [0, 1] range
#'
#' @usage
#' scale_quantile(x, outlier_cutoff = .05)
#'
#' @param x A numeric vector, matrix or data frame.
#' @param outlier_cutoff The quantile cutoff for outliers (default 0.05).
#'
#' @return The centered, scaled matrix or vector. The numeric centering and scalings used are returned as attributes.
#'
#' @examples
#' \dontrun{
#' ## Generate a matrix from a normal distribution
#' ## with a large standard deviation, centered at c(5, 5)
#' x <- matrix(rnorm(200*2, sd = 10, mean = 5), ncol = 2)
#'
#' ## Scale the dataset between [0,1]
#' x_scaled <- scale_quantile(x)
#'
#' ## Show ranges of each column
#' apply(x_scaled, 2, range)
#' }
#' @export
scale_quantile <- function(x, outlier_cutoff = .05) {
  # Bundled re-implementation of dynutils::scale_quantile so the vignette does
  # not need dynutils as a dependency. Credits to the dynverse authors
  # (https://github.com/dynverse).
  if (is.null(dim(x))) {
    # Vector input: scale it as a one-column matrix, then unwrap the result
    # while keeping the names and the scaling attributes.
    scaled_matrix <- scale_quantile(matrix(x, ncol = 1), outlier_cutoff = outlier_cutoff)
    result <- scaled_matrix[, 1]
    names(result) <- names(x)
    attr(result, "addend") <- attr(scaled_matrix, "addend")
    attr(result, "multiplier") <- attr(scaled_matrix, "multiplier")
    result
  } else {
    # Per-column lower/upper quantiles delimiting the non-outlier range.
    quants <- apply(x, 2, stats::quantile, c(outlier_cutoff, 1 - outlier_cutoff), na.rm = TRUE)
    addend <- -quants[1, ]
    divisor <- apply(quants, 2, diff)
    divisor[divisor == 0] <- 1  # constant columns: avoid division by zero
    apply_quantile_scale(x, addend, 1 / divisor)
  }
}
#' @title Prepare single-cell expression data to perform ligand activity analysis
#'
#' @description \code{convert_single_cell_expression_to_settings} Turns one cell's expression profile into a prediction "setting" for ligand activity analysis.
#'
#' @usage
#' convert_single_cell_expression_to_settings(cell_id, expression_matrix, setting_name, setting_from, regression = FALSE)
#'
#' @param cell_id Identity of the cell of interest
#' @param setting_name Name of the dataset
#' @param expression_matrix Gene expression matrix of single-cells
#' @param setting_from Character vector giving the gene symbols of the potentially active ligands you want to define ligand activities for.
#' @param regression Perform regression-based ligand activity analysis (TRUE) or classification-based ligand activity analysis (FALSE) by considering the genes expressed higher than the 0.975 quantiles as genes of interest. Default: FALSE.
#'
#' @return A list with slots $name, $from and $response respectively containing the setting name, potentially active ligands and the response to predict (whether genes belong to gene set of interest; i.e. most strongly expressed genes in a cell)
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' cell_ids = c("cell1","cell2")
#' expression_scaled = matrix(rnorm(length(genes)*2, sd = 0.5, mean = 0.5), nrow = 2)
#' rownames(expression_scaled) = cell_ids
#' colnames(expression_scaled) = genes
#' settings = convert_single_cell_expression_to_settings(cell_id = cell_ids[1], expression_matrix = expression_scaled, setting_name = "test", setting_from = potential_ligands)
#' }
#'
#' @export
#'
convert_single_cell_expression_to_settings = function(cell_id, expression_matrix, setting_name, setting_from, regression = FALSE){
  # input check
  requireNamespace("dplyr")
  cell_expression = expression_matrix[cell_id, ]
  if (regression == TRUE) {
    # Regression mode: predict the expression values themselves.
    response = cell_expression
  } else {
    # Classification mode: genes above this cell's 0.975 expression quantile
    # form the gene set of interest (logical response vector).
    response = cell_expression >= quantile(cell_expression, 0.975)
  }
  list(name = paste0(setting_name, "_", cell_id), from = setting_from, response = response)
}
#' @title Single-cell ligand activity prediction
#'
#' @description \code{predict_single_cell_ligand_activities} For every individual cell of interest, predict activities of ligands in regulating expression of genes that are stronger expressed in that cell compared to other cells (0.975 quantile). Ligand activities are defined as how well they predict the observed transcriptional response (i.e. gene set) according to the NicheNet model.
#'
#' @usage
#' predict_single_cell_ligand_activities(cell_ids, expression_scaled,ligand_target_matrix, potential_ligands, single = TRUE,...)
#'
#' @param cell_ids Identities of cells for which the ligand activities should be calculated.
#' @param expression_scaled Scaled expression matrix of single-cells (scaled such that high values indicate that a gene is stronger expressed in that cell compared to others)
#' @param ligand_target_matrix The NicheNet ligand-target matrix denoting regulatory potential scores between ligands and targets (ligands in columns).
#' @param potential_ligands Character vector giving the gene symbols of the potentially active ligands you want to define ligand activities for.
#' @param single TRUE if you want to calculate ligand activity scores by considering every ligand individually (recommended). FALSE if you want to calculate ligand activity scores as variable importances of a multi-ligand classification model.
#' @param ... Additional parameters for get_multi_ligand_importances if single = FALSE.
#'
#' @return A tibble giving several ligand activity scores for single cells. Following columns in the tibble: $setting, $test_ligand, $auroc, $aupr and $pearson.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' cell_ids = c("cell1","cell2")
#' expression_scaled = matrix(rnorm(length(genes)*2, sd = 0.5, mean = 0.5), nrow = 2)
#' rownames(expression_scaled) = cell_ids
#' colnames(expression_scaled) = genes
#' ligand_activities = predict_single_cell_ligand_activities(cell_ids = cell_ids, expression_scaled = expression_scaled, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
#' }
#'
#' @export
#'
predict_single_cell_ligand_activities = function(cell_ids, expression_scaled,ligand_target_matrix, potential_ligands, single = TRUE,...){
  # One prediction "setting" per cell: the response is that cell's most
  # strongly expressed genes (see convert_single_cell_expression_to_settings).
  settings_per_cell = lapply(cell_ids, convert_single_cell_expression_to_settings, expression_scaled, "", potential_ligands)
  if (single == TRUE) {
    # Score every candidate ligand on its own.
    prediction_settings = convert_settings_ligand_prediction(settings_per_cell, all_ligands = potential_ligands, validation = FALSE, single = TRUE)
    ligand_importances = prediction_settings %>%
      lapply(get_single_ligand_importances, ligand_target_matrix = ligand_target_matrix, known = FALSE) %>%
      bind_rows() %>%
      mutate(setting = gsub("^_", "", setting))  # strip leading "_" from the empty setting_name prefix
  } else {
    # Score ligands jointly, as variable importances of a multi-ligand model.
    prediction_settings = convert_settings_ligand_prediction(settings_per_cell, all_ligands = potential_ligands, validation = FALSE, single = FALSE)
    ligand_importances = prediction_settings %>%
      lapply(get_multi_ligand_importances, ligand_target_matrix = ligand_target_matrix, known = FALSE, ...) %>%
      bind_rows() %>%
      mutate(setting = gsub("^_", "", setting))
  }
  return(ligand_importances %>% select(setting, test_ligand, auroc, aupr, pearson))
}
#' @title Normalize single-cell ligand activities
#'
#' @description \code{normalize_single_cell_ligand_activities} Normalize single-cell ligand activities to make ligand activities over different cells comparable.
#' @usage
#' normalize_single_cell_ligand_activities(ligand_activities)
#'
#' @param ligand_activities Output from the function `predict_single_cell_ligand_activities`.
#'
#' @return A tibble in wide format with one row per cell: a $cell column plus one column per ligand, containing the normalized ligand activity values (per-cell modified z-score of the aupr).
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' cell_ids = c("cell1","cell2")
#' expression_scaled = matrix(rnorm(length(genes)*2, sd = 0.5, mean = 0.5), nrow = 2)
#' rownames(expression_scaled) = cell_ids
#' colnames(expression_scaled) = genes
#' ligand_activities = predict_single_cell_ligand_activities(cell_ids = cell_ids, expression_scaled = expression_scaled, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
#' normalized_ligand_activities = normalize_single_cell_ligand_activities(ligand_activities)
#' }
#'
#' @export
#'
normalize_single_cell_ligand_activities = function(ligand_activities){
  # Normalize the aupr per cell (modified z-score) so ligand activities become
  # comparable across cells.
  single_ligand_activities_aupr_norm = ligand_activities %>%
    group_by(setting) %>%
    mutate(aupr = nichenetr::scaling_modified_zscore(aupr)) %>%
    ungroup() %>%
    rename(cell = setting, ligand = test_ligand) %>%
    distinct(cell,ligand,aupr)
  # Wide format: rows = ligands, columns = cells; absent cell-ligand
  # combinations are filled with the overall minimum normalized score.
  single_ligand_activities_aupr_norm_df = single_ligand_activities_aupr_norm %>%
    spread(cell, aupr,fill = min(.$aupr))
  # Transpose so that cells become rows and ligands become columns.
  single_ligand_activities_aupr_norm_matrix = single_ligand_activities_aupr_norm_df %>%
    select(-ligand) %>%
    t() %>%
    magrittr::set_colnames(single_ligand_activities_aupr_norm_df$ligand)
  single_ligand_activities_aupr_norm_df = single_ligand_activities_aupr_norm_matrix %>%
    data.frame() %>%
    rownames_to_column("cell") %>%
    as_tibble()
  # Explicit return: previously the function ended in an assignment, which
  # made the result invisible when called at top level.
  return(single_ligand_activities_aupr_norm_df)
}
#' @title Perform a correlation and regression analysis between cells' ligand activities and property scores of interest
#'
#' @description \code{single_ligand_activity_score_regression} Performs a correlation and regression analysis between cells' ligand activities and property scores of interest.
#' @usage
#' single_ligand_activity_score_regression(ligand_activities, scores_tbl)
#'
#' @param ligand_activities Output from the function `normalize_single_cell_ligand_activities`.
#' @param scores_tbl a tibble containing scores for every cell (columns: $cell and $score). The score should correspond to the property of interest
#'
#' @return A tibble giving for every ligand, the correlation/regression coefficients giving information about the relation between its activity and the property of interest.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' cell_ids = c("cell1","cell2")
#' expression_scaled = matrix(rnorm(length(genes)*2, sd = 0.5, mean = 0.5), nrow = 2)
#' rownames(expression_scaled) = cell_ids
#' colnames(expression_scaled) = genes
#' ligand_activities = predict_single_cell_ligand_activities(cell_ids = cell_ids, expression_scaled = expression_scaled, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
#' normalized_ligand_activities = normalize_single_cell_ligand_activities(ligand_activities)
#' cell_scores_tbl = tibble(cell = cell_ids, score = c(1,4))
#' regression_analysis_output = single_ligand_activity_score_regression(normalized_ligand_activities,cell_scores_tbl)
#' }
#'
#' @export
#'
single_ligand_activity_score_regression = function(ligand_activities, scores_tbl){
  # Pair every cell's property score with its normalized ligand activities.
  combined = inner_join(scores_tbl, ligand_activities)
  # Every remaining column is one ligand's activity vector across cells.
  ligand_columns = combined %>% select(-cell, -score)
  output = lapply(ligand_columns, function(activity_prediction, combined) {
    geneset_score = combined$score
    # Relate this ligand's activities to the property score of interest.
    regression_evaluation(activity_prediction, geneset_score)
  }, combined)
  output_df = output %>% bind_rows() %>% mutate(ligand = names(output))
  return(output_df)
}
#' @title Assess how well cells' ligand activities predict a binary property of interest of cells.
#'
#' @description \code{single_ligand_activity_score_classification} Evaluates classification performances: it assesses how well cells' ligand activities can predict a binary property of interest.
#' @usage
#' single_ligand_activity_score_classification(ligand_activities, scores_tbl)
#'
#' @param ligand_activities Output from the function `normalize_single_cell_ligand_activities`.
#' @param scores_tbl a tibble indicating for every cell whether the property of interests holds TRUE or FALSE (columns: $cell: character vector with cell ids and $score: logical vector according to property of interest).
#'
#' @return A tibble giving for every ligand, the classification performance metrics giving information about the relation between its activity and the property of interest.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' cell_ids = c("cell1","cell2")
#' expression_scaled = matrix(rnorm(length(genes)*2, sd = 0.5, mean = 0.5), nrow = 2)
#' rownames(expression_scaled) = cell_ids
#' colnames(expression_scaled) = genes
#' ligand_activities = predict_single_cell_ligand_activities(cell_ids = cell_ids, expression_scaled = expression_scaled, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
#' normalized_ligand_activities = normalize_single_cell_ligand_activities(ligand_activities)
#' cell_scores_tbl = tibble(cell = cell_ids, score = c(TRUE,FALSE))
#' classification_analysis_output = single_ligand_activity_score_classification(normalized_ligand_activities,cell_scores_tbl)
#' }
#'
#' @export
#'
single_ligand_activity_score_classification = function(ligand_activities, scores_tbl){
  # Pair every cell's logical property score with its normalized ligand activities.
  combined = inner_join(scores_tbl, ligand_activities)
  output = lapply(combined %>% select(-cell, -score), function(activity_prediction, combined) {
    geneset_score = combined$score
    # Evaluate how well this ligand's activities discriminate the property.
    # Use FALSE (not the reassignable alias F) for the iregulon flag.
    classification_evaluation_continuous_pred(activity_prediction, geneset_score, iregulon = FALSE)
  }, combined)
  ligands = names(output)
  output_df = output %>% bind_rows() %>% mutate(ligand = ligands)
  return(output_df)
}
# NOTE(review): this is a byte-identical duplicate of the roxygen-documented
# `single_ligand_activity_score_regression` defined earlier in this file. When
# the file is sourced, this later definition silently overrides the earlier
# one (same behavior, but the duplication is confusing) — consider removing
# one of the two copies.
single_ligand_activity_score_regression = function(ligand_activities, scores_tbl){
  # Pair every cell's property score (column `score`) with its ligand activities.
  combined = inner_join(scores_tbl,ligand_activities)
  output = lapply(combined %>% select(-cell, -score), function(activity_prediction, combined){
    geneset_score = combined$score
    # Relate this ligand's activity vector to the property score of interest.
    metrics = regression_evaluation(activity_prediction,geneset_score)
  }, combined)
  ligands = names(output)
  output_df = output %>% bind_rows() %>% mutate(ligand = ligands)
  return(output_df)
}
#' @title Perform NicheNet analysis on Seurat object: explain DE between conditions
#'
#' @description \code{nichenet_seuratobj_aggregate} Perform NicheNet analysis on Seurat object: explain differential expression (DE) in a receiver celltype between two different conditions by ligands expressed by sender cells
#' @usage
#' nichenet_seuratobj_aggregate(receiver, seurat_obj, condition_colname, condition_oi, condition_reference, sender = "all",ligand_target_matrix,lr_network,weighted_networks,expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE, top_n_ligands = 30,top_n_targets = 200, cutoff_visualization = 0.33,verbose = TRUE, assay_oi = NULL)
#'
#' @param receiver Name of cluster identity/identities of cells that are presumably affected by intercellular communication with other cells
#' @param seurat_obj Single-cell expression dataset as Seurat object https://satijalab.org/seurat/.
#' @param condition_colname Name of the column in the meta data dataframe that indicates which condition/sample cells were coming from.
#' @param condition_oi Condition of interest in which receiver cells were presumably affected by other cells. Should be a name present in the `condition_colname` column of the metadata.
#' @param condition_reference The second condition (e.g. reference or steady-state condition). Should be a name present in the `condition_colname` column of the metadata.
#' @param sender Determine the potential sender cells. Name of cluster identity/identities of cells that presumably affect expression in the receiver cell type. In case you want to look at all possible sender cell types in the data, you can give this argument the value "all". "all" indicates thus that all cell types in the dataset will be considered as possible sender cells. As final option, you could give this argument the value "undefined". "undefined" won't look at ligands expressed by sender cells, but at all ligands for which a corresponding receptor is expressed. This could be useful if the presumably active sender cell is not profiled. Default: "all".
#' @param expression_pct To determine ligands and receptors expressed by sender and receiver cells, we consider genes expressed if they are expressed in at least a specific fraction of cells of a cluster. This number indicates this fraction. Default: 0.10
#' @param lfc_cutoff Cutoff on log fold change in the wilcoxon differential expression test. Default: 0.25.
#' @param geneset Indicate whether to consider all DE genes between condition 1 and 2 ("DE"), or only genes upregulated in condition 1 ("up"), or only genes downregulated in condition 1 ("down").
#' @param filter_top_ligands Indicate whether output tables for ligand-target and ligand-receptor networks should be done for a filtered set of top ligands (TRUE) or for all ligands (FALSE). Default: TRUE.
#' @param top_n_ligands Indicate how many ligands should be extracted as top-ligands after ligand activity analysis. Only for these ligands, target genes and receptors will be returned. Default: 30.
#' @param top_n_targets To predict active, affected targets of the prioritized ligands, consider only DE genes if they also belong to the a priori top n ("top_n_targets") targets of a ligand. Default = 200.
#' @param cutoff_visualization Because almost no ligand-target scores have a regulatory potential score of 0, we clarify the heatmap visualization by giving the links with the lowest scores a score of 0. The cutoff_visualization parameter indicates this fraction of links that are given a score of zero. Default = 0.33.
#' @param ligand_target_matrix The NicheNet ligand-target matrix of the organism of interest denoting regulatory potential scores between ligands and targets (ligands in columns).
#' @param lr_network The ligand-receptor network (columns that should be present: $from, $to) of the organism of interest.
#' @param weighted_networks The NicheNet weighted networks of the organism of interest denoting interactions and their weights/confidences in the ligand-signaling and gene regulatory network.
#' @param verbose Print out the current analysis stage. Default: TRUE.
#' @inheritParams get_expressed_genes
#'
#' @return A list with the following elements:
#' $ligand_activities: data frame with output ligand activity analysis;
#' $top_ligands: top_n ligands based on ligand activity;
#' $top_targets: active, affected target genes of these ligands;
#' $top_receptors: receptors of these ligands;
#' $ligand_target_matrix: matrix indicating regulatory potential scores between active ligands and their predicted targets;
#' $ligand_target_heatmap: heatmap of ligand-target regulatory potential;
#' $ligand_target_df: data frame showing regulatory potential scores of predicted active ligand-target network;
#' $ligand_activity_target_heatmap: heatmap showing both ligand activity scores and target genes of these top ligands;
#' $ligand_expression_dotplot: expression dotplot of the top ligands;
#' $ligand_differential_expression_heatmap = differential expression heatmap of the top ligands;
#' $ligand_receptor_matrix: matrix of ligand-receptor interactions;
#' $ligand_receptor_heatmap: heatmap showing ligand-receptor interactions;
#' $ligand_receptor_df: data frame of ligand-receptor interactions;
#' $geneset_oi: a vector containing the set of genes used as input for the ligand activity analysis;
#' $background_expressed_genes: the background of genes to which the geneset will be compared in the ligand activity analysis.
#'
#' @import Seurat
#' @import dplyr
#' @importFrom magrittr set_rownames set_colnames
#'
#' @examples
#' \dontrun{
#' seuratObj = readRDS(url("https://zenodo.org/record/3531889/files/seuratObj_test.rds"))
#' lr_network = readRDS(url("https://zenodo.org/record/7074291/files/lr_network_mouse_21122021.rds"))
#' ligand_target_matrix = readRDS(url("https://zenodo.org/record/7074291/files/ligand_target_matrix_nsga2r_final_mouse.rds"))
#' weighted_networks = readRDS(url("https://zenodo.org/record/7074291/files/weighted_networks_nsga2r_final_mouse.rds"))
#' nichenet_seuratobj_aggregate(receiver = "CD8 T", seurat_obj = seuratObj, condition_colname = "aggregate", condition_oi = "LCMV", condition_reference = "SS", sender = "Mono", ligand_target_matrix = ligand_target_matrix, lr_network = lr_network, weighted_networks = weighted_networks)
#' }
#'
#' @export
#'
nichenet_seuratobj_aggregate = function(receiver, seurat_obj, condition_colname, condition_oi, condition_reference, sender = "all",ligand_target_matrix,lr_network,weighted_networks,
expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE ,top_n_ligands = 30,
top_n_targets = 200, cutoff_visualization = 0.33,
verbose = TRUE, assay_oi = NULL)
{
requireNamespace("Seurat")
requireNamespace("dplyr")
# input check
if(! "RNA" %in% names(seurat_obj@assays)){
if ("Spatial" %in% names(seurat_obj@assays)){
warning("You are going to apply NicheNet on a spatial seurat object. Be sure it's ok to use NicheNet the way you are planning to do it. So this means: you should have changes in gene expression in receiver cells caused by cell-cell interactions. Note that in the case of spatial transcriptomics, you are not dealing with single cells but with 'spots' containing multiple cells of the same of different cell types.")
if (class(seurat_obj@assays$Spatial@data) != "matrix" & class(seurat_obj@assays$Spatial@data) != "dgCMatrix") {
warning("Spatial Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$Spatial@data' for default or 'seurat_obj@assays$SCT@data' for when the single-cell transform pipeline was applied")
}
if (sum(dim(seurat_obj@assays$Spatial@data)) == 0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$Spatial@data'")
}
}} else {
if (class(seurat_obj@assays$RNA@data) != "matrix" &
class(seurat_obj@assays$RNA@data) != "dgCMatrix") {
warning("Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data or seurat_obj@assays$SCT@data for when the single-cell transform pipeline was applied")
}
if ("integrated" %in% names(seurat_obj@assays)) {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$integrated@data)) ==
0)
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data")
}
else if ("SCT" %in% names(seurat_obj@assays)) {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$SCT@data)) ==
0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$SCT@data' for data corrected via SCT")
}
}
else {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data'")
}
}
}
if(!condition_colname %in% colnames(seurat_obj@meta.data))
stop("Your column indicating the conditions/samples of interest should be in the metadata dataframe")
if(sum(condition_oi %in% c(seurat_obj[[condition_colname]] %>% unlist() %>% as.character() %>% unique())) != length(condition_oi))
stop("condition_oi should be in the condition-indicating column")
if(sum(condition_reference %in% c(seurat_obj[[condition_colname]] %>% unlist() %>% as.character() %>% unique())) != length(condition_reference))
stop("condition_reference should be in the condition-indicating column")
if(sum(receiver %in% unique(Idents(seurat_obj))) != length(receiver))
stop("The defined receiver cell type should be an identity class of your seurat object")
if(length(sender) == 1){
if(sender != "all" & sender != "undefined"){
if(sum(sender %in% unique(Idents(seurat_obj))) != length(sender)){
stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
}
}
} else {
if(sum(sender %in% unique(Idents(seurat_obj))) != length(sender)){
stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
}
}
if(geneset != "DE" & geneset != "up" & geneset != "down")
stop("geneset should be 'DE', 'up' or 'down'")
if("integrated" %in% names(seurat_obj@assays)){
warning("Seurat object is result from the Seurat integration workflow. Make sure that the way of defining expressed and differentially expressed genes in this wrapper is appropriate for your integrated data.")
}
# Read in and process NicheNet networks, define ligands and receptors
if (verbose == TRUE){print("Read in and process NicheNet's networks")}
weighted_networks_lr = weighted_networks$lr_sig %>% inner_join(lr_network %>% distinct(from,to), by = c("from","to"))
ligands = lr_network %>% pull(from) %>% unique()
receptors = lr_network %>% pull(to) %>% unique()
if (verbose == TRUE){print("Define expressed ligands and receptors in receiver and sender cells")}
# step1 nichenet analysis: get expressed genes in sender and receiver cells
## receiver
list_expressed_genes_receiver = receiver %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_receiver) = receiver %>% unique()
expressed_genes_receiver = list_expressed_genes_receiver %>% unlist() %>% unique()
## sender
if (length(sender) == 1){
if (sender == "all"){
sender_celltypes = Idents(seurat_obj) %>% levels()
list_expressed_genes_sender = sender_celltypes %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
} else if (sender == "undefined") {
if("integrated" %in% names(seurat_obj@assays)){
expressed_genes_sender = union(seurat_obj@assays$integrated@data %>% rownames(),rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
} else {
expressed_genes_sender = union(seurat_obj@assays$RNA@data %>% rownames(),rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
}
} else if (sender != "all" & sender != "undefined") {
sender_celltypes = sender
list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes %>% unique()
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
}
} else {
sender_celltypes = sender
list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes %>% unique()
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
}
# step2 nichenet analysis: define background and gene list of interest: here differential expression between two conditions of cell type of interest
if (verbose == TRUE){print("Perform DE analysis in receiver cell")}
seurat_obj_receiver= subset(seurat_obj, idents = receiver)
seurat_obj_receiver = SetIdent(seurat_obj_receiver, value = seurat_obj_receiver[[condition_colname]])
DE_table_receiver = FindMarkers(object = seurat_obj_receiver, ident.1 = condition_oi, ident.2 = condition_reference, min.pct = expression_pct) %>% rownames_to_column("gene")
SeuratV4 = c("avg_log2FC") %in% colnames(DE_table_receiver)
if(SeuratV4 == TRUE){
if (geneset == "DE"){
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & abs(avg_log2FC) >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "up") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_log2FC >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "down") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_log2FC <= lfc_cutoff) %>% pull(gene)
}
} else {
if (geneset == "DE"){
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & abs(avg_logFC) >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "up") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_logFC >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "down") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_logFC <= lfc_cutoff) %>% pull(gene)
}
}
geneset_oi = geneset_oi %>% .[. %in% rownames(ligand_target_matrix)]
if (length(geneset_oi) == 0){
stop("No genes were differentially expressed")
}
background_expressed_genes = expressed_genes_receiver %>% .[. %in% rownames(ligand_target_matrix)]
# step3 nichenet analysis: define potential ligands
expressed_ligands = intersect(ligands,expressed_genes_sender)
expressed_receptors = intersect(receptors,expressed_genes_receiver)
if (length(expressed_ligands) == 0){
stop("No ligands expressed in sender cell")
}
if (length(expressed_receptors) == 0){
stop("No receptors expressed in receiver cell")
}
potential_ligands = lr_network %>% filter(from %in% expressed_ligands & to %in% expressed_receptors) %>% pull(from) %>% unique()
if (length(potential_ligands) == 0){
stop("No potentially active ligands")
}
if (verbose == TRUE){print("Perform NicheNet ligand activity analysis")}
# step4 perform NicheNet's ligand activity analysis
ligand_activities = predict_ligand_activities(geneset = geneset_oi, background_expressed_genes = background_expressed_genes, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
ligand_activities = ligand_activities %>%
arrange(-aupr_corrected) %>%
mutate(rank = rank(desc(aupr_corrected)))
if(filter_top_ligands == TRUE){
best_upstream_ligands = ligand_activities %>% top_n(top_n_ligands, aupr_corrected) %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
} else {
best_upstream_ligands = ligand_activities %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
}
if (verbose == TRUE){print("Infer active target genes of the prioritized ligands")}
# step5 infer target genes of the top-ranked ligands
active_ligand_target_links_df = best_upstream_ligands %>% lapply(get_weighted_ligand_target_links,geneset = geneset_oi, ligand_target_matrix = ligand_target_matrix, n = top_n_targets) %>% bind_rows() %>% drop_na()
if(nrow(active_ligand_target_links_df) > 0){
active_ligand_target_links = prepare_ligand_target_visualization(ligand_target_df = active_ligand_target_links_df, ligand_target_matrix = ligand_target_matrix, cutoff = cutoff_visualization)
order_ligands = intersect(best_upstream_ligands, colnames(active_ligand_target_links)) %>% rev() %>% make.names()
order_targets = active_ligand_target_links_df$target %>% unique() %>% intersect(rownames(active_ligand_target_links)) %>% make.names()
rownames(active_ligand_target_links) = rownames(active_ligand_target_links) %>% make.names()
colnames(active_ligand_target_links) = colnames(active_ligand_target_links) %>% make.names()
order_targets = order_targets %>% intersect(rownames(active_ligand_target_links))
order_ligands = order_ligands %>% intersect(colnames(active_ligand_target_links))
vis_ligand_target = active_ligand_target_links[order_targets,order_ligands,drop=FALSE] %>% t()
p_ligand_target_network = vis_ligand_target %>% make_heatmap_ggplot("Prioritized ligands","Predicted target genes", color = "purple",legend_position = "top", x_axis_position = "top",legend_title = "Regulatory potential") + theme(axis.text.x = element_text(face = "italic")) #+ scale_fill_gradient2(low = "whitesmoke", high = "purple", breaks = c(0,0.006,0.012))
} else {
vis_ligand_target = NULL
p_ligand_target_network = NULL
print("no highly likely active targets found for top ligands")
}
# combined heatmap: overlay ligand activities
ligand_aupr_matrix = ligand_activities %>% select(aupr_corrected) %>% as.matrix() %>% magrittr::set_rownames(ligand_activities$test_ligand)
rownames(ligand_aupr_matrix) = rownames(ligand_aupr_matrix) %>% make.names()
colnames(ligand_aupr_matrix) = colnames(ligand_aupr_matrix) %>% make.names()
vis_ligand_aupr = ligand_aupr_matrix[order_ligands, ] %>% as.matrix(ncol = 1) %>% magrittr::set_colnames("AUPR")
p_ligand_aupr = vis_ligand_aupr %>% make_heatmap_ggplot("Prioritized ligands","Ligand activity", color = "darkorange",legend_position = "top", x_axis_position = "top", legend_title = "AUPR\n(target gene prediction ability)") + theme(legend.text = element_text(size = 9))
p_ligand_aupr
figures_without_legend = cowplot::plot_grid(
p_ligand_aupr + theme(legend.position = "none", axis.ticks = element_blank()) + theme(axis.title.x = element_text()),
p_ligand_target_network + theme(legend.position = "none", axis.ticks = element_blank()) + ylab(""),
align = "hv",
nrow = 1,
rel_widths = c(ncol(vis_ligand_aupr)+10, ncol(vis_ligand_target)))
legends = cowplot::plot_grid(
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_aupr)),
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_target_network)),
nrow = 1,
align = "h")
combined_plot = cowplot::plot_grid(figures_without_legend,
legends,
rel_heights = c(10,2), nrow = 2, align = "hv")
# ligand-receptor plot
# get the ligand-receptor network of the top-ranked ligands
if (verbose == TRUE){print("Infer receptors of the prioritized ligands")}
lr_network_top = lr_network %>% filter(from %in% best_upstream_ligands & to %in% expressed_receptors) %>% distinct(from,to)
best_upstream_receptors = lr_network_top %>% pull(to) %>% unique()
lr_network_top_df_large = weighted_networks_lr %>% filter(from %in% best_upstream_ligands & to %in% best_upstream_receptors)
lr_network_top_df = lr_network_top_df_large %>% spread("from","weight",fill = 0)
lr_network_top_matrix = lr_network_top_df %>% select(-to) %>% as.matrix() %>% magrittr::set_rownames(lr_network_top_df$to)
if (nrow(lr_network_top_matrix) > 1){
dist_receptors = dist(lr_network_top_matrix, method = "binary")
hclust_receptors = hclust(dist_receptors, method = "ward.D2")
order_receptors = hclust_receptors$labels[hclust_receptors$order]
} else {
order_receptors = rownames(lr_network_top_matrix)
}
if (ncol(lr_network_top_matrix) > 1) {
dist_ligands = dist(lr_network_top_matrix %>% t(), method = "binary")
hclust_ligands = hclust(dist_ligands, method = "ward.D2")
order_ligands_receptor = hclust_ligands$labels[hclust_ligands$order]
} else {
order_ligands_receptor = colnames(lr_network_top_matrix)
}
order_receptors = order_receptors %>% intersect(rownames(lr_network_top_matrix))
order_ligands_receptor = order_ligands_receptor %>% intersect(colnames(lr_network_top_matrix))
vis_ligand_receptor_network = lr_network_top_matrix[order_receptors, order_ligands_receptor]
dim(vis_ligand_receptor_network) = c(length(order_receptors), length(order_ligands_receptor))
rownames(vis_ligand_receptor_network) = order_receptors %>% make.names()
colnames(vis_ligand_receptor_network) = order_ligands_receptor %>% make.names()
p_ligand_receptor_network = vis_ligand_receptor_network %>% t() %>% make_heatmap_ggplot("Ligands","Receptors", color = "mediumvioletred", x_axis_position = "top",legend_title = "Prior interaction potential")
# DE analysis for each sender cell type -- of course only possible when having sender cell types
if (length(sender) > 1){
are_there_senders = TRUE
}
if(length(sender) == 1){
if(sender != "undefined"){
are_there_senders = TRUE
} else {
are_there_senders = FALSE
}
}
if (are_there_senders == TRUE){
if (verbose == TRUE){print("Perform DE analysis in sender cells")}
seurat_obj = subset(seurat_obj, features= potential_ligands)
DE_table_all = Idents(seurat_obj) %>% levels() %>% intersect(sender_celltypes) %>% lapply(get_lfc_celltype, seurat_obj = seurat_obj, condition_colname = condition_colname, condition_oi = condition_oi, condition_reference = condition_reference, expression_pct = expression_pct, celltype_col = NULL) %>% reduce(full_join, by = "gene") # use this if cell type labels are the identities of your Seurat object -- if not: indicate the celltype_col properly
DE_table_all[is.na(DE_table_all)] = 0
# Combine ligand activities with DE information
ligand_activities_de = ligand_activities %>% select(test_ligand, pearson) %>% rename(ligand = test_ligand) %>% left_join(DE_table_all %>% rename(ligand = gene), by = "ligand")
ligand_activities_de[is.na(ligand_activities_de)] = 0
# make LFC heatmap
lfc_matrix = ligand_activities_de %>% select(-ligand, -pearson) %>% as.matrix() %>% magrittr::set_rownames(ligand_activities_de$ligand)
rownames(lfc_matrix) = rownames(lfc_matrix) %>% make.names()
order_ligands = order_ligands[order_ligands %in% rownames(lfc_matrix)]
vis_ligand_lfc = lfc_matrix[order_ligands,]
vis_ligand_lfc = vis_ligand_lfc %>% as.matrix(ncol = length(Idents(seurat_obj) %>% levels() %>% intersect(sender_celltypes)))
colnames(vis_ligand_lfc) = vis_ligand_lfc %>% colnames() %>% make.names()
p_ligand_lfc = vis_ligand_lfc %>% make_threecolor_heatmap_ggplot("Prioritized ligands","LFC in Sender", low_color = "midnightblue",mid_color = "white", mid = median(vis_ligand_lfc), high_color = "red",legend_position = "top", x_axis_position = "top", legend_title = "LFC") + theme(axis.text.y = element_text(face = "italic"))
# ligand expression Seurat dotplot
real_makenames_conversion = lr_network$from %>% unique() %>% magrittr::set_names(lr_network$from %>% unique() %>% make.names())
order_ligands_adapted = real_makenames_conversion[order_ligands]
names(order_ligands_adapted) = NULL
seurat_obj_subset = seurat_obj %>% subset(idents = sender_celltypes)
seurat_obj_subset = SetIdent(seurat_obj_subset, value = seurat_obj_subset[[condition_colname]]) %>% subset(idents = condition_oi) ## only shows cells of the condition of interest
rotated_dotplot = DotPlot(seurat_obj %>% subset(cells = Cells(seurat_obj_subset)), features = order_ligands_adapted, cols = "RdYlBu") + coord_flip() + theme(legend.text = element_text(size = 10), legend.title = element_text(size = 12)) # flip of coordinates necessary because we want to show ligands in the rows when combining all plots
rm(seurat_obj_subset)
# combined plot
figures_without_legend = cowplot::plot_grid(
p_ligand_aupr + theme(legend.position = "none", axis.ticks = element_blank()) + theme(axis.title.x = element_text()),
rotated_dotplot + theme(legend.position = "none", axis.ticks = element_blank(), axis.title.x = element_text(size = 12), axis.text.y = element_text(face = "italic", size = 9), axis.text.x = element_text(size = 9, angle = 90,hjust = 0)) + ylab("Expression in Sender") + xlab("") + scale_y_discrete(position = "right"),
p_ligand_lfc + theme(legend.position = "none", axis.ticks = element_blank()) + theme(axis.title.x = element_text()) + ylab(""),
p_ligand_target_network + theme(legend.position = "none", axis.ticks = element_blank()) + ylab(""),
align = "hv",
nrow = 1,
rel_widths = c(ncol(vis_ligand_aupr)+6, ncol(vis_ligand_lfc) + 7, ncol(vis_ligand_lfc) + 8, ncol(vis_ligand_target)))
legends = cowplot::plot_grid(
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_aupr)),
ggpubr::as_ggplot(ggpubr::get_legend(rotated_dotplot)),
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_lfc)),
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_target_network)),
nrow = 1,
align = "h", rel_widths = c(1.5, 1, 1, 1))
combined_plot = cowplot::plot_grid(figures_without_legend, legends, rel_heights = c(10,5), nrow = 2, align = "hv")
combined_plot
} else {
rotated_dotplot = NULL
p_ligand_lfc = NULL
}
return(list(
ligand_activities = ligand_activities,
top_ligands = best_upstream_ligands,
top_targets = active_ligand_target_links_df$target %>% unique(),
top_receptors = lr_network_top_df_large$to %>% unique(),
ligand_target_matrix = vis_ligand_target,
ligand_target_heatmap = p_ligand_target_network,
ligand_target_df = active_ligand_target_links_df,
ligand_expression_dotplot = rotated_dotplot,
ligand_differential_expression_heatmap = p_ligand_lfc,
ligand_activity_target_heatmap = combined_plot,
ligand_receptor_matrix = vis_ligand_receptor_network,
ligand_receptor_heatmap = p_ligand_receptor_network,
ligand_receptor_df = lr_network_top_df_large %>% rename(ligand = from, receptor = to),
geneset_oi = geneset_oi,
background_expressed_genes = background_expressed_genes
))
}
#' @title Determine expressed genes of a cell type from a Seurat object single-cell RNA seq dataset or Seurat spatial transcriptomics dataset
#'
#' @description \code{get_expressed_genes} Return the genes that are expressed in a given cell cluster based on the fraction of cells in that cluster that express the gene.
#' @usage
#' get_expressed_genes(ident, seurat_obj, pct = 0.10, assay_oi = NULL)
#'
#' @param ident Name of cluster identity/identities of cells
#' @param seurat_obj Single-cell expression dataset as Seurat object https://satijalab.org/seurat/.
#' @param pct We consider genes expressed if they are expressed in at least a specific fraction of cells of a cluster. This number indicates this fraction. Default: 0.10. Choice of this parameter is important and depends largely on the used sequencing platform. We recommend to require a lower fraction (like the default 0.10) for 10X data than for e.g. Smart-seq2 data.
#' @param assay_oi If wanted: specify yourself which assay to look for. Default this value is NULL and as a consequence the 'most advanced' assay will be used to define expressed genes.
#'
#' @return A character vector with the gene symbols of the expressed genes
#'
#' @import Seurat
#' @import dplyr
#'
#' @examples
#' \dontrun{
#' get_expressed_genes(ident = "CD8 T", seurat_obj = seuratObj, pct = 0.10)
#' }
#'
#' @export
#'
get_expressed_genes = function(ident, seurat_obj, pct = 0.1, assay_oi = NULL){
  requireNamespace("Seurat")
  requireNamespace("dplyr")
  # ---- Input checks ----------------------------------------------------------
  # Use inherits() instead of `class(x) != "matrix"`: since R 4.0,
  # class(<matrix>) returns c("matrix", "array"), so the `!=` comparison yields
  # a length-2 logical that errors inside if() on R >= 4.2
  # ("the condition has length > 1"). inherits() with a vector of classes is
  # TRUE if x inherits from any of them.
  if (!"RNA" %in% names(seurat_obj@assays)) {
    if ("Spatial" %in% names(seurat_obj@assays)) {
      if (!inherits(seurat_obj@assays$Spatial@data, c("matrix", "dgCMatrix"))) {
        warning("Spatial Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$Spatial@data' for default or 'seurat_obj@assays$SCT@data' for when the single-cell transform pipeline was applied")
      }
      if (sum(dim(seurat_obj@assays$Spatial@data)) == 0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$Spatial@data'")
      }
    }
  }
  else {
    if (!inherits(seurat_obj@assays$RNA@data, c("matrix", "dgCMatrix"))) {
      warning("Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data or seurat_obj@assays$SCT@data for when the single-cell transform pipeline was applied")
    }
    # Normalized data must be present in at least one of the relevant assays.
    if ("integrated" %in% names(seurat_obj@assays)) {
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$integrated@data)) == 0)
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data")
    }
    else if ("SCT" %in% names(seurat_obj@assays)) {
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$SCT@data)) == 0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$SCT@data' for data corrected via SCT")
      }
    }
    else {
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data'")
      }
    }
  }
  if (sum(ident %in% unique(Idents(seurat_obj))) != length(ident)) {
    stop("One or more provided cell clusters is not part of the 'Idents' of your Seurat object")
  }
  if(!is.null(assay_oi)){
    if(! assay_oi %in% Seurat::Assays(seurat_obj)){
      stop("assay_oi should be an assay of your Seurat object")
    }
  }
  # ---- Get cell identities of cluster(s) of interest -------------------------
  cells_oi = Idents(seurat_obj) %>% .[Idents(seurat_obj) %in%
                                        ident] %>% names()
  # ---- Pick expression matrix: from assay_oi if given, otherwise from the ----
  # ---- 'most advanced' assay available (integrated > SCT > Spatial > RNA) ----
  if(!is.null(assay_oi)){
    cells_oi_in_matrix = intersect(colnames(seurat_obj[[assay_oi]]@data), cells_oi)
    exprs_mat = seurat_obj[[assay_oi]]@data %>% .[, cells_oi_in_matrix]
  } else {
    if ("integrated" %in% names(seurat_obj@assays)) {
      warning("Seurat object is result from the Seurat integration workflow. The expressed genes are now defined based on the integrated slot. You can change this via the assay_oi parameter of the get_expressed_genes() functions. Recommended assays: RNA or SCT")
      cells_oi_in_matrix = intersect(colnames(seurat_obj@assays$integrated@data),
                                     cells_oi)
      if (length(cells_oi_in_matrix) != length(cells_oi))
        stop("Not all cells of interest are in your expression matrix (seurat_obj@assays$integrated@data). Please check that the expression matrix contains cells in columns and genes in rows.")
      exprs_mat = seurat_obj@assays$integrated@data %>% .[,
                                                          cells_oi_in_matrix]
    }
    else if ("SCT" %in% names(seurat_obj@assays) & !"Spatial" %in%
             names(seurat_obj@assays)) {
      warning("Seurat object is result from the Seurat single-cell transform workflow. The expressed genes are defined based on the SCT slot. You can change this via the assay_oi parameter of the get_expressed_genes() functions. Recommended assays: RNA or SCT")
      cells_oi_in_matrix = intersect(colnames(seurat_obj@assays$SCT@data),
                                     cells_oi)
      if (length(cells_oi_in_matrix) != length(cells_oi))
        stop("Not all cells of interest are in your expression matrix (seurat_obj@assays$SCT@data). Please check that the expression matrix contains cells in columns and genes in rows.")
      exprs_mat = seurat_obj@assays$SCT@data %>% .[, cells_oi_in_matrix]
    }
    else if ("Spatial" %in% names(seurat_obj@assays) &
             !"SCT" %in% names(seurat_obj@assays)) {
      warning("Seurat object is result from the Seurat spatial object. The expressed genes are defined based on the Spatial slot. If the spatial data is spot-based (mixture of cells) and not single-cell resolution, we recommend against directly using nichenetr on spot-based data (because you want to look at cell-cell interactions, and not at spot-spot interactions! ;-) )")
      cells_oi_in_matrix = intersect(colnames(seurat_obj@assays$Spatial@data),
                                     cells_oi)
      if (length(cells_oi_in_matrix) != length(cells_oi))
        stop("Not all cells of interest are in your expression matrix (seurat_obj@assays$Spatial@data). Please check that the expression matrix contains cells in columns and genes in rows.")
      exprs_mat = seurat_obj@assays$Spatial@data %>% .[, cells_oi_in_matrix]
    }
    else if ("Spatial" %in% names(seurat_obj@assays) &
             "SCT" %in% names(seurat_obj@assays)) {
      warning("Seurat object is result from the Seurat spatial object, followed by the SCT workflow. If the spatial data is spot-based (mixture of cells) and not single-cell resolution, we recommend against directly using nichenetr on spot-based data (because you want to look at cell-cell interactions, and not at spot-spot interactions! The expressed genes are defined based on the SCT slot, but this can be changed via the assay_oi parameter.")
      cells_oi_in_matrix = intersect(colnames(seurat_obj@assays$SCT@data),
                                     cells_oi)
      # NOTE: this branch reads the SCT slot, so the error message refers to
      # seurat_obj@assays$SCT@data (previously it incorrectly pointed to the
      # Spatial slot).
      if (length(cells_oi_in_matrix) != length(cells_oi))
        stop("Not all cells of interest are in your expression matrix (seurat_obj@assays$SCT@data). Please check that the expression matrix contains cells in columns and genes in rows.")
      exprs_mat = seurat_obj@assays$SCT@data %>% .[, cells_oi_in_matrix]
    }
    else {
      if (sum(cells_oi %in% colnames(seurat_obj@assays$RNA@data)) ==
          0)
        stop("None of the cells are in colnames of 'seurat_obj@assays$RNA@data'. The expression matrix should contain cells in columns and genes in rows.")
      cells_oi_in_matrix = intersect(colnames(seurat_obj@assays$RNA@data),
                                     cells_oi)
      if (length(cells_oi_in_matrix) != length(cells_oi))
        stop("Not all cells of interest are in your expression matrix (seurat_obj@assays$RNA@data). Please check that the expression matrix contains cells in columns and genes in rows.")
      exprs_mat = seurat_obj@assays$RNA@data %>% .[, cells_oi_in_matrix]
    }
  }
  # ---- Keep genes detected (count > 0) in at least `pct` of the cells --------
  n_cells_oi_in_matrix = length(cells_oi_in_matrix)
  if (n_cells_oi_in_matrix < 5000) {
    genes = exprs_mat %>% apply(1, function(x) {
      sum(x > 0)/n_cells_oi_in_matrix
    }) %>% .[. >= pct] %>% names()
  }
  else {
    # For large clusters, process genes in chunks of 100 rows to limit the
    # memory cost of apply() densifying (sub)matrices of a sparse matrix.
    splits = split(1:nrow(exprs_mat), ceiling(seq_along(1:nrow(exprs_mat))/100))
    genes = splits %>% lapply(function(genes_indices, exprs,
                                       pct, n_cells_oi_in_matrix) {
      begin_i = genes_indices[1]
      end_i = genes_indices[length(genes_indices)]
      exprs = exprs[begin_i:end_i, , drop = FALSE]
      genes = exprs %>% apply(1, function(x) {
        sum(x > 0)/n_cells_oi_in_matrix
      }) %>% .[. >= pct] %>% names()
    }, exprs_mat, pct, n_cells_oi_in_matrix) %>% unlist() %>%
      unname()
  }
  return(genes)
}
#' @title Perform NicheNet analysis on Seurat object: explain DE between two cell clusters
#'
#' @description \code{nichenet_seuratobj_cluster_de} Perform NicheNet analysis on Seurat object: explain differential expression (DE) between two 'receiver' cell clusters by ligands expressed by neighboring cells.
#' @usage
#' nichenet_seuratobj_cluster_de(seurat_obj, receiver_affected, receiver_reference, sender = "all",ligand_target_matrix,lr_network,weighted_networks,expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE, top_n_ligands = 30,top_n_targets = 200, cutoff_visualization = 0.33,verbose = TRUE, assay_oi = NULL)
#'
#' @param seurat_obj Single-cell expression dataset as Seurat object https://satijalab.org/seurat/.
#' @param receiver_reference Name of cluster identity/identities of "steady-state" cells, before they are affected by intercellular communication with other cells
#' @param receiver_affected Name of cluster identity/identities of "affected" cells that were presumably affected by intercellular communication with other cells
#' @param sender Determine the potential sender cells. Name of cluster identity/identities of cells that presumably affect expression in the receiver cell type. In case you want to look at all possible sender cell types in the data, you can give this argument the value "all". "all" indicates thus that all cell types in the dataset will be considered as possible sender cells. As final option, you could give this argument the value "undefined"."undefined" won't look at ligands expressed by sender cells, but at all ligands for which a corresponding receptor is expressed. This could be useful if the presumably active sender cell is not profiled. Default: "all".
#' @param expression_pct To determine ligands and receptors expressed by sender and receiver cells, we consider genes expressed if they are expressed in at least a specific fraction of cells of a cluster. This number indicates this fraction. Default: 0.10
#' @param lfc_cutoff Cutoff on log fold change in the wilcoxon differential expression test. Default: 0.25.
#' @param geneset Indicate whether to consider all DE genes between condition 1 and 2 ("DE"), or only genes upregulated in condition 1 ("up"), or only genes downregulated in condition 1 ("down").
#' @param filter_top_ligands Indicate whether output tables for ligand-target and ligand-receptor networks should be done for a filtered set of top ligands (TRUE) or for all ligands (FALSE). Default: TRUE.
#' @param top_n_ligands Indicate how many ligands should be extracted as top-ligands after ligand activity analysis. Only for these ligands, target genes and receptors will be returned. Default: 30.
#' @param top_n_targets To predict active, affected targets of the prioritized ligands, consider only DE genes if they also belong to the a priori top n ("top_n_targets") targets of a ligand. Default = 200.
#' @param cutoff_visualization Because almost no ligand-target scores have a regulatory potential score of 0, we clarify the heatmap visualization by giving the links with the lowest scores a score of 0. The cutoff_visualization parameter indicates this fraction of links that are given a score of zero. Default = 0.33.
#' @param ligand_target_matrix The NicheNet ligand-target matrix denoting regulatory potential scores between ligands and targets (ligands in columns).
#' @param lr_network The ligand-receptor network (columns that should be present: $from, $to).
#' @param weighted_networks The NicheNet weighted networks denoting interactions and their weights/confidences in the ligand-signaling and gene regulatory network.
#' @param verbose Print out the current analysis stage. Default: TRUE.
#' @inheritParams get_expressed_genes
#'
#' @return A list with the following elements:
#' $ligand_activities: data frame with output ligand activity analysis;
#' $top_ligands: top_n ligands based on ligand activity;
#' $top_targets: active, affected target genes of these ligands;
#' $top_receptors: receptors of these ligands;
#' $ligand_target_matrix: matrix indicating regulatory potential scores between active ligands and their predicted targets;
#' $ligand_target_heatmap: heatmap of ligand-target regulatory potential;
#' $ligand_target_df: data frame showing regulatory potential scores of predicted active ligand-target network;
#' $ligand_activity_target_heatmap: heatmap showing both ligand activity scores and target genes of these top ligands;
#' $ligand_expression_dotplot: expression dotplot of the top ligands;
#' $ligand_receptor_matrix: matrix of ligand-receptor interactions;
#' $ligand_receptor_heatmap: heatmap showing ligand-receptor interactions;
#' $ligand_receptor_df: data frame of ligand-receptor interactions;
#' $geneset_oi: a vector containing the set of genes used as input for the ligand activity analysis;
#' $background_expressed_genes: the background of genes to which the geneset will be compared in the ligand activity analysis.
#'
#' @import Seurat
#' @import dplyr
#' @importFrom magrittr set_rownames set_colnames
#'
#' @examples
#' \dontrun{
#' seuratObj = readRDS(url("https://zenodo.org/record/3531889/files/seuratObj_test.rds"))
#' lr_network = readRDS(url("https://zenodo.org/record/7074291/files/lr_network_mouse_21122021.rds"))
#' ligand_target_matrix = readRDS(url("https://zenodo.org/record/7074291/files/ligand_target_matrix_nsga2r_final_mouse.rds"))
#' weighted_networks = readRDS(url("https://zenodo.org/record/7074291/files/weighted_networks_nsga2r_final_mouse.rds"))
#' # works, but does not make sense
#' nichenet_seuratobj_cluster_de(seurat_obj = seuratObj, receiver_affected = "CD8 T", receiver_reference = "Mono", sender = "Mono", ligand_target_matrix = ligand_target_matrix, lr_network = lr_network, weighted_networks = weighted_networks)
#' # type of analysis for which this would make sense
#' nichenet_seuratobj_cluster_de(seurat_obj = seuratObj, receiver_affected = "p-EMT-pos-cancer", receiver_reference = "p-EMT-neg-cancer", sender = "Fibroblast", ligand_target_matrix = ligand_target_matrix, lr_network = lr_network, weighted_networks = weighted_networks)
#' }
#'
#' @export
#'
nichenet_seuratobj_cluster_de = function(seurat_obj, receiver_affected, receiver_reference, sender = "all",ligand_target_matrix,lr_network,weighted_networks,
                                         expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE, top_n_ligands = 30,
                                         top_n_targets = 200, cutoff_visualization = 0.33,
                                         verbose = TRUE, assay_oi = NULL)
{
  requireNamespace("Seurat")
  requireNamespace("dplyr")
  # Input check: the Seurat object must carry normalized expression data in a supported assay
  # (RNA / Spatial, optionally integrated or SCT-corrected).
  if(! "RNA" %in% names(seurat_obj@assays)){
    if ("Spatial" %in% names(seurat_obj@assays)){
      warning("You are going to apply NicheNet on a spatial seurat object. Be sure it's ok to use NicheNet the way you are planning to do it. So this means: you should have changes in gene expression in receiver cells caused by cell-cell interactions. Note that in the case of spatial transcriptomics, you are not dealing with single cells but with 'spots' containing multiple cells of the same of different cell types.")
      if (class(seurat_obj@assays$Spatial@data) != "matrix" & class(seurat_obj@assays$Spatial@data) != "dgCMatrix") {
        warning("Spatial Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$Spatial@data' for default or 'seurat_obj@assays$SCT@data' for when the single-cell transform pipeline was applied")
      }
      if (sum(dim(seurat_obj@assays$Spatial@data)) == 0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$Spatial@data'")
      }
  }} else {
    if (class(seurat_obj@assays$RNA@data) != "matrix" &
        class(seurat_obj@assays$RNA@data) != "dgCMatrix") {
      warning("Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data or seurat_obj@assays$SCT@data for when the single-cell transform pipeline was applied")
    }
    if ("integrated" %in% names(seurat_obj@assays)) {
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$integrated@data)) ==
          0)
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data")
    }
    else if ("SCT" %in% names(seurat_obj@assays)) {
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$SCT@data)) ==
          0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$SCT@data' for data corrected via SCT")
      }
    }
    else {
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data'")
      }
    }
  }
  # Receiver and sender identities must be existing identity classes of the Seurat object.
  if(sum(receiver_affected %in% unique(Idents(seurat_obj))) != length(receiver_affected))
    stop("The defined receiver_affected cell type should be an identity class of your seurat object")
  if(sum(receiver_reference %in% unique(Idents(seurat_obj))) != length(receiver_reference))
    stop("The defined receiver_reference cell type should be an identity class of your seurat object")
  if(length(sender) == 1){
    if(sender != "all" & sender != "undefined"){
      if(sum(sender %in% unique(Idents(seurat_obj))) != length(sender)){
        stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
      }
    }
  } else {
    if(sum(sender %in% unique(Idents(seurat_obj))) != length(sender)){
      stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
    }
  }
  if(geneset != "DE" & geneset != "up" & geneset != "down")
    stop("geneset should be 'DE', 'up' or 'down'")
  if("integrated" %in% names(seurat_obj@assays)){
    warning("Seurat object is result from the Seurat integration workflow. Make sure that the way of defining expressed and differentially expressed genes in this wrapper is appropriate for your integrated data.")
  }
  # Read in and process NicheNet networks, define ligands and receptors
  if (verbose == TRUE){print("Read in and process NicheNet's networks")}
  # keep only ligand-receptor weights for pairs that are in the curated lr_network
  weighted_networks_lr = weighted_networks$lr_sig %>% inner_join(lr_network %>% distinct(from,to), by = c("from","to"))
  ligands = lr_network %>% pull(from) %>% unique()
  receptors = lr_network %>% pull(to) %>% unique()
  if (verbose == TRUE){print("Define expressed ligands and receptors in receiver and sender cells")}
  # step1 nichenet analysis: get expressed genes in sender and receiver cells
  ## receiver
  # expressed genes: only in steady state population (for determining receptors)
  list_expressed_genes_receiver_ss = c(receiver_reference) %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
  names(list_expressed_genes_receiver_ss) = c(receiver_reference) %>% unique()
  expressed_genes_receiver_ss = list_expressed_genes_receiver_ss %>% unlist() %>% unique()
  # expressed genes: both in steady state and affected population (for determining background of expressed genes)
  list_expressed_genes_receiver = c(receiver_reference,receiver_affected) %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
  names(list_expressed_genes_receiver) = c(receiver_reference,receiver_affected) %>% unique()
  expressed_genes_receiver = list_expressed_genes_receiver %>% unlist() %>% unique()
  ## sender
  if (length(sender) == 1){
    if (sender == "all"){
      # consider every cell type in the object as a possible sender
      sender_celltypes = Idents(seurat_obj) %>% levels()
      list_expressed_genes_sender = sender_celltypes %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
      names(list_expressed_genes_sender) = sender_celltypes
      expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
    } else if (sender == "undefined") {
      # sender not profiled: consider all genes in the data and in the NicheNet model as potentially expressed
      if("integrated" %in% names(seurat_obj@assays)){
        expressed_genes_sender = union(seurat_obj@assays$integrated@data %>% rownames(),rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
      } else {
        expressed_genes_sender = union(seurat_obj@assays$RNA@data %>% rownames(),rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
      }
    } else if (sender != "all" & sender != "undefined") {
      sender_celltypes = sender
      list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
      names(list_expressed_genes_sender) = sender_celltypes %>% unique()
      expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
    }
  } else {
    sender_celltypes = sender
    list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
    names(list_expressed_genes_sender) = sender_celltypes %>% unique()
    expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
  }
  # step2 nichenet analysis: define background and gene list of interest: here differential expression between two conditions of cell type of interest
  if (verbose == TRUE){print("Perform DE analysis between two receiver cell clusters")}
  DE_table_receiver = FindMarkers(object = seurat_obj, ident.1 = receiver_affected, ident.2 = receiver_reference, min.pct = expression_pct) %>% rownames_to_column("gene")
  # Seurat >= 4 renamed the fold-change column from avg_logFC to avg_log2FC
  SeuratV4 = c("avg_log2FC") %in% colnames(DE_table_receiver)
  if(SeuratV4 == TRUE){
    if (geneset == "DE"){
      geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & abs(avg_log2FC) >= lfc_cutoff) %>% pull(gene)
    } else if (geneset == "up") {
      geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_log2FC >= lfc_cutoff) %>% pull(gene)
    } else if (geneset == "down") {
      # bugfix: downregulated genes must have a fold change below -lfc_cutoff;
      # the previous `avg_log2FC <= lfc_cutoff` also kept weakly upregulated genes
      geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_log2FC <= -lfc_cutoff) %>% pull(gene)
    }
  } else {
    if (geneset == "DE"){
      geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & abs(avg_logFC) >= lfc_cutoff) %>% pull(gene)
    } else if (geneset == "up") {
      geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_logFC >= lfc_cutoff) %>% pull(gene)
    } else if (geneset == "down") {
      # bugfix: see the avg_log2FC branch above -- downregulation means <= -lfc_cutoff
      geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_logFC <= -lfc_cutoff) %>% pull(gene)
    }
  }
  # restrict the gene set and background to genes covered by the NicheNet model
  geneset_oi = geneset_oi %>% .[. %in% rownames(ligand_target_matrix)]
  if (length(geneset_oi) == 0){
    stop("No genes were differentially expressed")
  }
  background_expressed_genes = expressed_genes_receiver %>% .[. %in% rownames(ligand_target_matrix)]
  # step3 nichenet analysis: define potential ligands
  expressed_ligands = intersect(ligands,expressed_genes_sender)
  expressed_receptors = intersect(receptors,expressed_genes_receiver)
  if (length(expressed_ligands) == 0){
    stop("No ligands expressed in sender cell")
  }
  if (length(expressed_receptors) == 0){
    stop("No receptors expressed in receiver cell")
  }
  # a ligand is a potential ligand if expressed by a sender and at least one of its receptors is expressed by the receiver
  potential_ligands = lr_network %>% filter(from %in% expressed_ligands & to %in% expressed_receptors) %>% pull(from) %>% unique()
  if (length(potential_ligands) == 0){
    stop("No potentially active ligands")
  }
  if (verbose == TRUE){print("Perform NicheNet ligand activity analysis")}
  # step4 perform NicheNet's ligand activity analysis: rank ligands by corrected AUPR
  ligand_activities = predict_ligand_activities(geneset = geneset_oi, background_expressed_genes = background_expressed_genes, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
  ligand_activities = ligand_activities %>%
    arrange(-aupr_corrected) %>%
    mutate(rank = rank(desc(aupr_corrected)))
  if(filter_top_ligands == TRUE){
    best_upstream_ligands = ligand_activities %>% top_n(top_n_ligands, aupr_corrected) %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
  } else {
    best_upstream_ligands = ligand_activities %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
  }
  if (verbose == TRUE){print("Infer active target genes of the prioritized ligands")}
  # step5 infer target genes of the top-ranked ligands
  active_ligand_target_links_df = best_upstream_ligands %>% lapply(get_weighted_ligand_target_links,geneset = geneset_oi, ligand_target_matrix = ligand_target_matrix, n = top_n_targets) %>% bind_rows() %>% drop_na()
  if(nrow(active_ligand_target_links_df) > 0){
    active_ligand_target_links = prepare_ligand_target_visualization(ligand_target_df = active_ligand_target_links_df, ligand_target_matrix = ligand_target_matrix, cutoff = cutoff_visualization)
    order_ligands = intersect(best_upstream_ligands, colnames(active_ligand_target_links)) %>% rev() %>% make.names()
    order_targets = active_ligand_target_links_df$target %>% unique() %>% intersect(rownames(active_ligand_target_links)) %>% make.names()
    rownames(active_ligand_target_links) = rownames(active_ligand_target_links) %>% make.names()
    colnames(active_ligand_target_links) = colnames(active_ligand_target_links) %>% make.names()
    order_targets = order_targets %>% intersect(rownames(active_ligand_target_links))
    order_ligands = order_ligands %>% intersect(colnames(active_ligand_target_links))
    vis_ligand_target = active_ligand_target_links[order_targets,order_ligands] %>% t()
    p_ligand_target_network = vis_ligand_target %>% make_heatmap_ggplot("Prioritized ligands","Predicted target genes", color = "purple",legend_position = "top", x_axis_position = "top",legend_title = "Regulatory potential")  + theme(axis.text.x = element_text(face = "italic")) #+ scale_fill_gradient2(low = "whitesmoke",  high = "purple", breaks = c(0,0.006,0.012))
  } else {
    vis_ligand_target = NULL
    p_ligand_target_network = NULL
    # bugfix: order_ligands was previously left undefined on this path, making the
    # ligand activity heatmap below error out; fall back to the activity-based ranking
    order_ligands = best_upstream_ligands %>% rev() %>% make.names()
    print("no highly likely active targets found for top ligands")
  }
  # combined heatmap: overlay ligand activities
  ligand_aupr_matrix = ligand_activities %>% select(aupr_corrected) %>% as.matrix() %>% magrittr::set_rownames(ligand_activities$test_ligand)
  rownames(ligand_aupr_matrix) = rownames(ligand_aupr_matrix) %>% make.names()
  colnames(ligand_aupr_matrix) = colnames(ligand_aupr_matrix) %>% make.names()
  # drop = FALSE keeps a one-column matrix (with rownames) even for a single ligand;
  # the previous `as.matrix(ncol = 1)` silently ignored ncol and lost rownames in that case
  vis_ligand_aupr = ligand_aupr_matrix[order_ligands, , drop = FALSE] %>% magrittr::set_colnames("AUPR")
  p_ligand_aupr = vis_ligand_aupr %>% make_heatmap_ggplot("Prioritized ligands","Ligand activity", color = "darkorange",legend_position = "top", x_axis_position = "top", legend_title = "AUPR\n(target gene prediction ability)") + theme(legend.text = element_text(size = 9))
  if (!is.null(p_ligand_target_network)){
    figures_without_legend = cowplot::plot_grid(
      p_ligand_aupr + theme(legend.position = "none", axis.ticks = element_blank()) + theme(axis.title.x = element_text()),
      p_ligand_target_network + theme(legend.position = "none", axis.ticks = element_blank()) + ylab(""),
      align = "hv",
      nrow = 1,
      rel_widths = c(ncol(vis_ligand_aupr)+10, ncol(vis_ligand_target)))
    legends = cowplot::plot_grid(
      ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_aupr)),
      ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_target_network)),
      nrow = 1,
      align = "h")
    combined_plot = cowplot::plot_grid(figures_without_legend,
                                       legends,
                                       rel_heights = c(10,2), nrow = 2, align = "hv")
  } else {
    # bugfix: without a ligand-target heatmap the combination above would crash;
    # fall back to the activity heatmap alone
    combined_plot = p_ligand_aupr
  }
  # ligand-receptor plot
  # get the ligand-receptor network of the top-ranked ligands
  if (verbose == TRUE){print("Infer receptors of the prioritized ligands")}
  lr_network_top = lr_network %>% filter(from %in% best_upstream_ligands & to %in% expressed_receptors) %>% distinct(from,to)
  best_upstream_receptors = lr_network_top %>% pull(to) %>% unique()
  lr_network_top_df_large = weighted_networks_lr %>% filter(from %in% best_upstream_ligands & to %in% best_upstream_receptors)
  lr_network_top_df = lr_network_top_df_large %>% spread("from","weight",fill = 0)
  lr_network_top_matrix = lr_network_top_df %>% select(-to) %>% as.matrix() %>% magrittr::set_rownames(lr_network_top_df$to)
  # hierarchical clustering of receptors/ligands for heatmap ordering; clustering
  # needs at least two rows/columns, otherwise keep the single name as is
  if (nrow(lr_network_top_matrix) > 1){
    dist_receptors = dist(lr_network_top_matrix, method = "binary")
    hclust_receptors = hclust(dist_receptors, method = "ward.D2")
    order_receptors = hclust_receptors$labels[hclust_receptors$order]
  } else {
    order_receptors = rownames(lr_network_top_matrix)
  }
  if (ncol(lr_network_top_matrix) > 1) {
    dist_ligands = dist(lr_network_top_matrix %>% t(), method = "binary")
    hclust_ligands = hclust(dist_ligands, method = "ward.D2")
    order_ligands_receptor = hclust_ligands$labels[hclust_ligands$order]
  } else {
    order_ligands_receptor = colnames(lr_network_top_matrix)
  }
  order_receptors = order_receptors %>% intersect(rownames(lr_network_top_matrix))
  order_ligands_receptor = order_ligands_receptor %>% intersect(colnames(lr_network_top_matrix))
  vis_ligand_receptor_network = lr_network_top_matrix[order_receptors, order_ligands_receptor]
  dim(vis_ligand_receptor_network) = c(length(order_receptors), length(order_ligands_receptor))
  rownames(vis_ligand_receptor_network) = order_receptors %>% make.names()
  colnames(vis_ligand_receptor_network) = order_ligands_receptor %>% make.names()
  p_ligand_receptor_network = vis_ligand_receptor_network %>% t() %>% make_heatmap_ggplot("Ligands","Receptors", color = "mediumvioletred", x_axis_position = "top",legend_title = "Prior interaction potential")
  # ligand expression Seurat dotplot: only possible when actual sender cell types were defined
  if (length(sender) > 1){
    are_there_senders = TRUE
  }
  if(length(sender) == 1){
    if(sender != "undefined"){
      are_there_senders = TRUE
    } else {
      are_there_senders = FALSE
    }
  }
  if (are_there_senders == TRUE){
    # map the make.names()-sanitized ligand names back to their original symbols for the dotplot
    real_makenames_conversion = lr_network$from %>% unique() %>% magrittr::set_names(lr_network$from %>% unique() %>% make.names())
    order_ligands_adapted = real_makenames_conversion[order_ligands]
    names(order_ligands_adapted) = NULL
    rotated_dotplot = DotPlot(seurat_obj %>% subset(idents = sender_celltypes), features = order_ligands_adapted, cols = "RdYlBu") + coord_flip() + theme(legend.text = element_text(size = 10), legend.title = element_text(size = 12)) # flip of coordinates necessary because we want to show ligands in the rows when combining all plots
  } else {
    rotated_dotplot = NULL
  }
  return(list(
    ligand_activities = ligand_activities,
    top_ligands = best_upstream_ligands,
    top_targets = active_ligand_target_links_df$target %>% unique(),
    top_receptors = lr_network_top_df_large$to %>% unique(),
    ligand_target_matrix = vis_ligand_target,
    ligand_target_heatmap = p_ligand_target_network,
    ligand_target_df = active_ligand_target_links_df,
    ligand_expression_dotplot = rotated_dotplot,
    ligand_activity_target_heatmap = combined_plot,
    ligand_receptor_matrix = vis_ligand_receptor_network,
    ligand_receptor_heatmap = p_ligand_receptor_network,
    ligand_receptor_df = lr_network_top_df_large %>% rename(ligand = from, receptor = to),
    geneset_oi = geneset_oi,
    background_expressed_genes = background_expressed_genes
  ))
}
#' @title Perform NicheNet analysis on Seurat object: explain DE between two cell clusters from separate conditions
#'
#' @description \code{nichenet_seuratobj_aggregate_cluster_de} Perform NicheNet analysis on Seurat object: explain differential expression (DE) between two 'receiver' cell clusters coming from different conditions, by ligands expressed by neighboring cells.
#' @usage
#' nichenet_seuratobj_aggregate_cluster_de(seurat_obj, receiver_affected, receiver_reference, condition_colname, condition_oi, condition_reference, sender = "all",ligand_target_matrix,lr_network,weighted_networks,expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE, top_n_ligands = 30,top_n_targets = 200, cutoff_visualization = 0.33,verbose = TRUE, assay_oi = NULL)
#'
#' @param seurat_obj Single-cell expression dataset as Seurat object https://satijalab.org/seurat/.
#' @param receiver_reference Name of cluster identity/identities of "steady-state" cells, before they are affected by intercellular communication with other cells
#' @param receiver_affected Name of cluster identity/identities of "affected" cells that were presumably affected by intercellular communication with other cells
#' @param condition_colname Name of the column in the meta data dataframe that indicates which condition/sample cells were coming from.
#' @param condition_oi Condition of interest in which receiver cells were presumably affected by other cells. Should be a name present in the `condition_colname` column of the metadata.
#' @param condition_reference The second condition (e.g. reference or steady-state condition). Should be a name present in the `condition_colname` column of the metadata.
#' @param sender Determine the potential sender cells. Name of cluster identity/identities of cells that presumably affect expression in the receiver cell type. In case you want to look at all possible sender cell types in the data, you can give this argument the value "all". "all" indicates thus that all cell types in the dataset will be considered as possible sender cells. As final option, you could give this argument the value "undefined"."undefined" won't look at ligands expressed by sender cells, but at all ligands for which a corresponding receptor is expressed. This could be useful if the presumably active sender cell is not profiled. Default: "all".
#' @param expression_pct To determine ligands and receptors expressed by sender and receiver cells, we consider genes expressed if they are expressed in at least a specific fraction of cells of a cluster. This number indicates this fraction. Default: 0.10
#' @param lfc_cutoff Cutoff on log fold change in the wilcoxon differential expression test. Default: 0.25.
#' @param geneset Indicate whether to consider all DE genes between condition 1 and 2 ("DE"), or only genes upregulated in condition 1 ("up"), or only genes downregulated in condition 1 ("down").
#' @param filter_top_ligands Indicate whether output tables for ligand-target and ligand-receptor networks should be done for a filtered set of top ligands (TRUE) or for all ligands (FALSE). Default: TRUE.
#' @param top_n_ligands Indicate how many ligands should be extracted as top-ligands after ligand activity analysis. Only for these ligands, target genes and receptors will be returned. Default: 30.
#' @param top_n_targets To predict active, affected targets of the prioritized ligands, consider only DE genes if they also belong to the a priori top n ("top_n_targets") targets of a ligand. Default = 200.
#' @param cutoff_visualization Because almost no ligand-target scores have a regulatory potential score of 0, we clarify the heatmap visualization by giving the links with the lowest scores a score of 0. The cutoff_visualization parameter indicates this fraction of links that are given a score of zero. Default = 0.33.
#' @param ligand_target_matrix The NicheNet ligand-target matrix of the organism of interest denoting regulatory potential scores between ligands and targets (ligands in columns).
#' @param lr_network The ligand-receptor network (columns that should be present: $from, $to) of the organism of interest.
#' @param weighted_networks The NicheNet weighted networks of the organism of interest denoting interactions and their weights/confidences in the ligand-signaling and gene regulatory network.
#' @param verbose Print out the current analysis stage. Default: TRUE.
#' @inheritParams get_expressed_genes
#'
#' @return A list with the following elements:
#' $ligand_activities: data frame with output ligand activity analysis;
#' $top_ligands: top_n ligands based on ligand activity;
#' $top_targets: active, affected target genes of these ligands;
#' $top_receptors: receptors of these ligands;
#' $ligand_target_matrix: matrix indicating regulatory potential scores between active ligands and their predicted targets;
#' $ligand_target_heatmap: heatmap of ligand-target regulatory potential;
#' $ligand_target_df: data frame showing regulatory potential scores of predicted active ligand-target network;
#' $ligand_activity_target_heatmap: heatmap showing both ligand activity scores and target genes of these top ligands;
#' $ligand_expression_dotplot: expression dotplot of the top ligands;
#' $ligand_receptor_matrix: matrix of ligand-receptor interactions;
#' $ligand_receptor_heatmap: heatmap showing ligand-receptor interactions;
#' $ligand_receptor_df: data frame of ligand-receptor interactions;
#' $geneset_oi: a vector containing the set of genes used as input for the ligand activity analysis;
#' $background_expressed_genes: the background of genes to which the geneset will be compared in the ligand activity analysis.
#'
#' @import Seurat
#' @import dplyr
#' @importFrom magrittr set_rownames set_colnames
#'
#' @examples
#' \dontrun{
#' seuratObj = readRDS(url("https://zenodo.org/record/3531889/files/seuratObj_test.rds"))
#' lr_network = readRDS(url("https://zenodo.org/record/7074291/files/lr_network_mouse_21122021.rds"))
#' ligand_target_matrix = readRDS(url("https://zenodo.org/record/7074291/files/ligand_target_matrix_nsga2r_final_mouse.rds"))
#' weighted_networks = readRDS(url("https://zenodo.org/record/7074291/files/weighted_networks_nsga2r_final_mouse.rds"))
#' nichenet_seuratobj_aggregate_cluster_de(seurat_obj = seuratObj, receiver_affected = "CD8 T", receiver_reference = "CD8 T", condition_colname = "aggregate", condition_oi = "LCMV", condition_reference = "SS", sender = "Mono", ligand_target_matrix = ligand_target_matrix, lr_network = lr_network, weighted_networks = weighted_networks)
#' }
#'
#' @export
#'
nichenet_seuratobj_aggregate_cluster_de = function(seurat_obj, receiver_affected, receiver_reference,
condition_colname, condition_oi, condition_reference, sender = "all",
ligand_target_matrix,lr_network,weighted_networks,
expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE, top_n_ligands = 30,
top_n_targets = 200, cutoff_visualization = 0.33,
verbose = TRUE, assay_oi = NULL)
{
requireNamespace("Seurat")
requireNamespace("dplyr")
# input check
if(! "RNA" %in% names(seurat_obj@assays)){
if ("Spatial" %in% names(seurat_obj@assays)){
warning("You are going to apply NicheNet on a spatial seurat object. Be sure it's ok to use NicheNet the way you are planning to do it. So this means: you should have changes in gene expression in receiver cells caused by cell-cell interactions. Note that in the case of spatial transcriptomics, you are not dealing with single cells but with 'spots' containing multiple cells of the same of different cell types.")
if (class(seurat_obj@assays$Spatial@data) != "matrix" & class(seurat_obj@assays$Spatial@data) != "dgCMatrix") {
warning("Spatial Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$Spatial@data' for default or 'seurat_obj@assays$SCT@data' for when the single-cell transform pipeline was applied")
}
if (sum(dim(seurat_obj@assays$Spatial@data)) == 0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$Spatial@data'")
}
}} else {
if (class(seurat_obj@assays$RNA@data) != "matrix" &
class(seurat_obj@assays$RNA@data) != "dgCMatrix") {
warning("Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data or seurat_obj@assays$SCT@data for when the single-cell transform pipeline was applied")
}
if ("integrated" %in% names(seurat_obj@assays)) {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$integrated@data)) ==
0)
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data")
}
else if ("SCT" %in% names(seurat_obj@assays)) {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$SCT@data)) ==
0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$SCT@data' for data corrected via SCT")
}
}
else {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data'")
}
}
}
if(sum(receiver_affected %in% unique(Idents(seurat_obj))) != length(receiver_affected))
stop("The defined receiver_affected cell type should be an identity class of your seurat object")
if(sum(receiver_reference %in% unique(Idents(seurat_obj))) != length(receiver_reference))
stop("The defined receiver_reference cell type should be an identity class of your seurat object")
if(!condition_colname %in% colnames(seurat_obj@meta.data))
stop("Your column indicating the conditions/samples of interest should be in the metadata dataframe")
if(sum(condition_oi %in% c(seurat_obj[[condition_colname]] %>% unlist() %>% as.character() %>% unique())) != length(condition_oi))
stop("condition_oi should be in the condition-indicating column")
if(sum(condition_reference %in% c(seurat_obj[[condition_colname]] %>% unlist() %>% as.character() %>% unique())) != length(condition_reference))
stop("condition_reference should be in the condition-indicating column")
if(length(sender) == 1){
if(sender != "all" & sender != "undefined"){
if(sum(sender %in% unique(Idents(seurat_obj))) != length(sender)){
stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
}
}
} else {
if(sum(sender %in% unique(Idents(seurat_obj))) != length(sender)){
stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
}
}
if(geneset != "DE" & geneset != "up" & geneset != "down")
stop("geneset should be 'DE', 'up' or 'down'")
if("integrated" %in% names(seurat_obj@assays)){
warning("Seurat object is result from the Seurat integration workflow. Make sure that the way of defining expressed and differentially expressed genes in this wrapper is appropriate for your integrated data.")
}
# Read in and process NicheNet networks, define ligands and receptors
if (verbose == TRUE){print("Read in and process NicheNet's networks")}
weighted_networks_lr = weighted_networks$lr_sig %>% inner_join(lr_network %>% distinct(from,to), by = c("from","to"))
ligands = lr_network %>% pull(from) %>% unique()
receptors = lr_network %>% pull(to) %>% unique()
if (verbose == TRUE){print("Define expressed ligands and receptors in receiver and sender cells")}
# step1 nichenet analysis: get expressed genes in sender and receiver cells
## receiver
# expressed genes: only in steady state population (for determining receptors)
list_expressed_genes_receiver_ss = c(receiver_reference) %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_receiver_ss) = c(receiver_reference) %>% unique()
expressed_genes_receiver_ss = list_expressed_genes_receiver_ss %>% unlist() %>% unique()
# expressed genes: both in steady state and affected population (for determining background of expressed genes)
list_expressed_genes_receiver = c(receiver_reference,receiver_affected) %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_receiver) = c(receiver_reference,receiver_affected) %>% unique()
expressed_genes_receiver = list_expressed_genes_receiver %>% unlist() %>% unique()
## sender
if (length(sender) == 1){
if (sender == "all"){
sender_celltypes = Idents(seurat_obj) %>% levels()
list_expressed_genes_sender = sender_celltypes %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
} else if (sender == "undefined") {
if("integrated" %in% names(seurat_obj@assays)){
expressed_genes_sender = union(seurat_obj@assays$integrated@data %>% rownames(),rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
} else {
expressed_genes_sender = union(seurat_obj@assays$RNA@data %>% rownames(),rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
}
} else if (sender != "all" & sender != "undefined") {
sender_celltypes = sender
list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes %>% unique()
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
}
} else {
sender_celltypes = sender
list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes %>% unique()
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
}
# step2 nichenet analysis: define background and gene list of interest: here differential expression between two conditions of cell type of interest
if (verbose == TRUE){print("Perform DE analysis between two receiver cell clusters")}
seurat_obj_receiver_affected= subset(seurat_obj, idents = receiver_affected)
seurat_obj_receiver_affected = SetIdent(seurat_obj_receiver_affected, value = seurat_obj_receiver_affected[[condition_colname]])
seurat_obj_receiver_affected= subset(seurat_obj_receiver_affected, idents = condition_oi)
seurat_obj_receiver_reference= subset(seurat_obj, idents = receiver_reference)
seurat_obj_receiver_reference = SetIdent(seurat_obj_receiver_reference, value = seurat_obj_receiver_reference[[condition_colname]])
seurat_obj_receiver_reference= subset(seurat_obj_receiver_reference, idents = condition_reference)
seurat_obj_receiver = merge(seurat_obj_receiver_affected, seurat_obj_receiver_reference)
DE_table_receiver = FindMarkers(object = seurat_obj_receiver, ident.1 = condition_oi, ident.2 = condition_reference, min.pct = expression_pct) %>% rownames_to_column("gene")
SeuratV4 = c("avg_log2FC") %in% colnames(DE_table_receiver)
if(SeuratV4 == TRUE){
if (geneset == "DE"){
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & abs(avg_log2FC) >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "up") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_log2FC >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "down") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_log2FC <= lfc_cutoff) %>% pull(gene)
}
} else {
if (geneset == "DE"){
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & abs(avg_logFC) >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "up") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_logFC >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "down") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_logFC <= lfc_cutoff) %>% pull(gene)
}
}
geneset_oi = geneset_oi %>% .[. %in% rownames(ligand_target_matrix)]
if (length(geneset_oi) == 0){
stop("No genes were differentially expressed")
}
background_expressed_genes = expressed_genes_receiver %>% .[. %in% rownames(ligand_target_matrix)]
# step3 nichenet analysis: define potential ligands
expressed_ligands = intersect(ligands,expressed_genes_sender)
expressed_receptors = intersect(receptors,expressed_genes_receiver)
if (length(expressed_ligands) == 0){
stop("No ligands expressed in sender cell")
}
if (length(expressed_receptors) == 0){
stop("No receptors expressed in receiver cell")
}
potential_ligands = lr_network %>% filter(from %in% expressed_ligands & to %in% expressed_receptors) %>% pull(from) %>% unique()
if (length(potential_ligands) == 0){
stop("No potentially active ligands")
}
if (verbose == TRUE){print("Perform NicheNet ligand activity analysis")}
# step4 perform NicheNet's ligand activity analysis
ligand_activities = predict_ligand_activities(geneset = geneset_oi, background_expressed_genes = background_expressed_genes, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
ligand_activities = ligand_activities %>%
arrange(-aupr_corrected) %>%
mutate(rank = rank(desc(aupr_corrected)))
if(filter_top_ligands == TRUE){
best_upstream_ligands = ligand_activities %>% top_n(top_n_ligands, aupr_corrected) %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
} else {
best_upstream_ligands = ligand_activities %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
}
if (verbose == TRUE){print("Infer active target genes of the prioritized ligands")}
# step5 infer target genes of the top-ranked ligands
active_ligand_target_links_df = best_upstream_ligands %>% lapply(get_weighted_ligand_target_links,geneset = geneset_oi, ligand_target_matrix = ligand_target_matrix, n = top_n_targets) %>% bind_rows() %>% drop_na()
if(nrow(active_ligand_target_links_df) > 0){
active_ligand_target_links = prepare_ligand_target_visualization(ligand_target_df = active_ligand_target_links_df, ligand_target_matrix = ligand_target_matrix, cutoff = cutoff_visualization)
order_ligands = intersect(best_upstream_ligands, colnames(active_ligand_target_links)) %>% rev() %>% make.names()
order_targets = active_ligand_target_links_df$target %>% unique() %>% intersect(rownames(active_ligand_target_links)) %>% make.names()
rownames(active_ligand_target_links) = rownames(active_ligand_target_links) %>% make.names()
colnames(active_ligand_target_links) = colnames(active_ligand_target_links) %>% make.names()
order_targets = order_targets %>% intersect(rownames(active_ligand_target_links))
order_ligands = order_ligands %>% intersect(colnames(active_ligand_target_links))
vis_ligand_target = active_ligand_target_links[order_targets,order_ligands] %>% t()
p_ligand_target_network = vis_ligand_target %>% make_heatmap_ggplot("Prioritized ligands","Predicted target genes", color = "purple",legend_position = "top", x_axis_position = "top",legend_title = "Regulatory potential") + theme(axis.text.x = element_text(face = "italic")) #+ scale_fill_gradient2(low = "whitesmoke", high = "purple", breaks = c(0,0.006,0.012))
} else {
vis_ligand_target = NULL
p_ligand_target_network = NULL
print("no highly likely active targets found for top ligands")
}
# combined heatmap: overlay ligand activities
ligand_aupr_matrix = ligand_activities %>% select(aupr_corrected) %>% as.matrix() %>% magrittr::set_rownames(ligand_activities$test_ligand)
rownames(ligand_aupr_matrix) = rownames(ligand_aupr_matrix) %>% make.names()
colnames(ligand_aupr_matrix) = colnames(ligand_aupr_matrix) %>% make.names()
vis_ligand_aupr = ligand_aupr_matrix[order_ligands, ] %>% as.matrix(ncol = 1) %>% magrittr::set_colnames("AUPR")
p_ligand_aupr = vis_ligand_aupr %>% make_heatmap_ggplot("Prioritized ligands","Ligand activity", color = "darkorange",legend_position = "top", x_axis_position = "top", legend_title = "AUPR\n(target gene prediction ability)") + theme(legend.text = element_text(size = 9))
p_ligand_aupr
figures_without_legend = cowplot::plot_grid(
p_ligand_aupr + theme(legend.position = "none", axis.ticks = element_blank()) + theme(axis.title.x = element_text()),
p_ligand_target_network + theme(legend.position = "none", axis.ticks = element_blank()) + ylab(""),
align = "hv",
nrow = 1,
rel_widths = c(ncol(vis_ligand_aupr)+10, ncol(vis_ligand_target)))
legends = cowplot::plot_grid(
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_aupr)),
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_target_network)),
nrow = 1,
align = "h")
combined_plot = cowplot::plot_grid(figures_without_legend,
legends,
rel_heights = c(10,2), nrow = 2, align = "hv")
# ligand-receptor plot
# get the ligand-receptor network of the top-ranked ligands
if (verbose == TRUE){print("Infer receptors of the prioritized ligands")}
lr_network_top = lr_network %>% filter(from %in% best_upstream_ligands & to %in% expressed_receptors) %>% distinct(from,to)
best_upstream_receptors = lr_network_top %>% pull(to) %>% unique()
lr_network_top_df_large = weighted_networks_lr %>% filter(from %in% best_upstream_ligands & to %in% best_upstream_receptors)
lr_network_top_df = lr_network_top_df_large %>% spread("from","weight",fill = 0)
lr_network_top_matrix = lr_network_top_df %>% select(-to) %>% as.matrix() %>% magrittr::set_rownames(lr_network_top_df$to)
if (nrow(lr_network_top_matrix) > 1){
dist_receptors = dist(lr_network_top_matrix, method = "binary")
hclust_receptors = hclust(dist_receptors, method = "ward.D2")
order_receptors = hclust_receptors$labels[hclust_receptors$order]
} else {
order_receptors = rownames(lr_network_top_matrix)
}
if (ncol(lr_network_top_matrix) > 1) {
dist_ligands = dist(lr_network_top_matrix %>% t(), method = "binary")
hclust_ligands = hclust(dist_ligands, method = "ward.D2")
order_ligands_receptor = hclust_ligands$labels[hclust_ligands$order]
} else {
order_ligands_receptor = colnames(lr_network_top_matrix)
}
order_receptors = order_receptors %>% intersect(rownames(lr_network_top_matrix))
order_ligands_receptor = order_ligands_receptor %>% intersect(colnames(lr_network_top_matrix))
vis_ligand_receptor_network = lr_network_top_matrix[order_receptors, order_ligands_receptor]
dim(vis_ligand_receptor_network) = c(length(order_receptors), length(order_ligands_receptor))
rownames(vis_ligand_receptor_network) = order_receptors %>% make.names()
colnames(vis_ligand_receptor_network) = order_ligands_receptor %>% make.names()
p_ligand_receptor_network = vis_ligand_receptor_network %>% t() %>% make_heatmap_ggplot("Ligands","Receptors", color = "mediumvioletred", x_axis_position = "top",legend_title = "Prior interaction potential")
# ligand expression Seurat dotplot
if (length(sender) > 1){
are_there_senders = TRUE
}
if(length(sender) == 1){
if(sender != "undefined"){
are_there_senders = TRUE
} else {
are_there_senders = FALSE
}
}
if (are_there_senders == TRUE){
real_makenames_conversion = lr_network$from %>% unique() %>% magrittr::set_names(lr_network$from %>% unique() %>% make.names())
order_ligands_adapted = real_makenames_conversion[order_ligands]
names(order_ligands_adapted) = NULL
rotated_dotplot = DotPlot(seurat_obj %>% subset(idents = sender_celltypes), features = order_ligands_adapted, cols = "RdYlBu") + coord_flip() + theme(legend.text = element_text(size = 10), legend.title = element_text(size = 12)) # flip of coordinates necessary because we want to show ligands in the rows when combining all plots
} else {
rotated_dotplot = NULL
}
return(list(
ligand_activities = ligand_activities,
top_ligands = best_upstream_ligands,
top_targets = active_ligand_target_links_df$target %>% unique(),
top_receptors = lr_network_top_df_large$to %>% unique(),
ligand_target_matrix = vis_ligand_target,
ligand_target_heatmap = p_ligand_target_network,
ligand_target_df = active_ligand_target_links_df,
ligand_expression_dotplot = rotated_dotplot,
ligand_activity_target_heatmap = combined_plot,
ligand_receptor_matrix = vis_ligand_receptor_network,
ligand_receptor_heatmap = p_ligand_receptor_network,
ligand_receptor_df = lr_network_top_df_large %>% rename(ligand = from, receptor = to),
geneset_oi = geneset_oi,
background_expressed_genes = background_expressed_genes
))
}
#' @title Get log fold change values of genes in cell type of interest
#'
#' @description \code{get_lfc_celltype} Get log fold change of genes between two conditions in cell type of interest when using a Seurat single-cell object.
#'
#' @usage
#' get_lfc_celltype(celltype_oi, seurat_obj, condition_colname, condition_oi, condition_reference, celltype_col = "celltype", expression_pct = 0.10)
#'
#' @param seurat_obj Single-cell expression dataset as Seurat object https://satijalab.org/seurat/.
#' @param celltype_oi Name of celltype of interest. Should be present in the celltype metadata dataframe.
#' @param condition_colname Name of the column in the meta data dataframe that indicates which condition/sample cells were coming from.
#' @param condition_oi Condition of interest. Should be a name present in the "condition_colname" column of the metadata.
#' @param condition_reference The second condition (e.g. reference or steady-state condition). Should be a name present in the "condition_colname" column of the metadata.
#' @param celltype_col Metadata colum name where the cell type identifier is stored. Default: "celltype". If this is NULL, the Idents() of the seurat object will be considered as your cell type identifier.
#' @param expression_pct To consider only genes if they are expressed in at least a specific fraction of cells of a cluster. This number indicates this fraction. Default: 0.10
#'
#' @return A tbl with the log fold change values of genes. Positive lfc values: higher in condition_oi compared to condition_reference.
#'
#' @import Seurat
#' @import dplyr
#'
#' @examples
#' \dontrun{
#' requireNamespace("dplyr")
#' seuratObj = readRDS(url("https://zenodo.org/record/3531889/files/seuratObj_test.rds"))
#' get_lfc_celltype(seurat_obj = seuratObj, celltype_oi = "CD8 T", condition_colname = "aggregate", condition_oi = "LCMV", condition_reference = "SS", celltype_col = "celltype", expression_pct = 0.10)
#' }
#' @export
#'
get_lfc_celltype = function(celltype_oi, seurat_obj, condition_colname, condition_oi, condition_reference, celltype_col = "celltype", expression_pct = 0.10){
  requireNamespace("Seurat")
  requireNamespace("dplyr")
  # Restrict the Seurat object to the cell type of interest: either via the
  # indicated metadata column, or via the object's current Idents() when
  # celltype_col is NULL.
  if(!is.null(celltype_col)){
    seurat_obj_celltype = SetIdent(seurat_obj, value = seurat_obj[[celltype_col]])
    seuratObj_sender = subset(seurat_obj_celltype, idents = celltype_oi)
  } else {
    seuratObj_sender = subset(seurat_obj, idents = celltype_oi)
  }
  # Differential expression between the two conditions within this cell type.
  seuratObj_sender = SetIdent(seuratObj_sender, value = seuratObj_sender[[condition_colname]])
  DE_table_sender = FindMarkers(object = seuratObj_sender, ident.1 = condition_oi, ident.2 = condition_reference, min.pct = expression_pct, logfc.threshold = 0.05) %>% rownames_to_column("gene")
  # Seurat >= 4 renamed the log fold change column from avg_logFC to
  # avg_log2FC; detect which one is present so both versions are supported.
  SeuratV4 = c("avg_log2FC") %in% colnames(DE_table_sender)
  if(SeuratV4 == TRUE){
    DE_table_sender = DE_table_sender %>% as_tibble() %>% select(-p_val) %>% select(gene, avg_log2FC)
  } else {
    DE_table_sender = DE_table_sender %>% as_tibble() %>% select(-p_val) %>% select(gene, avg_logFC)
  }
  # Name the lfc column after the cell type, so tables produced for several
  # cell types can be joined by gene.
  colnames(DE_table_sender) = c("gene",celltype_oi)
  return(DE_table_sender)
}
|
/R/application_prediction.R
|
no_license
|
saeyslab/nichenetr
|
R
| false
| false
| 130,153
|
r
|
#' @title Convert cluster assignment to settings format suitable for target gene prediction.
#'
#' @description \code{convert_cluster_to_settings} Convert cluster assignment to settings format suitable for target gene prediction.
#'
#' @usage
#' convert_cluster_to_settings(i, cluster_vector, setting_name, setting_from, background = NULL)
#'
#' @param i The cluster number of the cluster of interest to which genes should belong
#' @param cluster_vector Named vector containing the cluster number to which every gene belongs
#' @param setting_name Base name of the setting
#' @param setting_from Active ligands for the specific setting
#' @param background NULL or a character vector of genes belonging to the background. When NULL: the background will be formed by genes belonging to other clusters than the cluster of interest. Default NULL. If not NULL and genes present in the cluster of interest are in this vector of background gene names, these genes will be removed from the background.
#'
#' @return A list with following elements: $name (indicating the cluster id), $from, $response. $response is a gene-named logical vector indicating whether the gene is part of the respective cluster.
#'
#' @examples
#' \dontrun{
#' genes_clusters = c("TGFB1" = 1,"TGFB2" = 1,"TGFB3" = 2)
#' cluster_settings = lapply(seq(length(unique(genes_clusters))), convert_cluster_to_settings, cluster_vector = genes_clusters, setting_name = "example", setting_from = "BMP2")
#' }
#'
#' @export
#'
convert_cluster_to_settings = function(i, cluster_vector, setting_name, setting_from, background = NULL){
  # Input validation. Scalar conditions use || / && (not the vectorized | / &)
  # so malformed input (e.g. a vector passed as i) produces the intended error
  # message instead of an "condition has length > 1" failure in R >= 4.2.
  if(!is.numeric(i) || length(i) != 1 || i <= 0)
    stop("i should be a number higher than 0")
  if(!is.numeric(cluster_vector) || is.null(names(cluster_vector)))
    stop("cluster_vector should be a named numeric vector")
  if(!is.character(setting_name))
    stop("setting_name should be a character vector")
  if(!is.character(setting_from))
    stop("setting_from should be a character vector")
  if(!is.character(background) && !is.null(background))
    stop("background should be a character vector or NULL")
  # Genes assigned to the cluster of interest.
  genes_cluster_oi = names(cluster_vector)[cluster_vector == i]
  if (is.null(background)){
    # Default background: all clustered genes; response marks cluster members.
    response = names(cluster_vector) %in% genes_cluster_oi
    names(response) = names(cluster_vector)
  } else {
    # User-supplied background: cluster genes are removed from it first, then
    # FALSE (background) and TRUE (cluster) labels are concatenated.
    background = background[!background %in% genes_cluster_oi]
    background_logical = rep(FALSE, times = length(background))
    names(background_logical) = background
    cluster_logical = rep(TRUE, times = length(genes_cluster_oi))
    names(cluster_logical) = genes_cluster_oi
    response = c(background_logical, cluster_logical)
  }
  return(list(name = paste0(setting_name, "_cluster_", i), from = setting_from, response = response))
}
#' @title Predict activities of ligands in regulating expression of a gene set of interest
#'
#' @description \code{predict_ligand_activities} Predict activities of ligands in regulating expression of a gene set of interest. Ligand activities are defined as how well they predict the observed transcriptional response (i.e. gene set) according to the NicheNet model.
#'
#' @usage
#' predict_ligand_activities(geneset, background_expressed_genes,ligand_target_matrix, potential_ligands, single = TRUE,...)
#'
#' @param geneset Character vector of the gene symbols of genes of which the expression is potentially affected by ligands from the interacting cell.
#' @param background_expressed_genes Character vector of gene symbols of the background, non-affected, genes (can contain the symbols of the affected genes as well).
#' @param ligand_target_matrix The NicheNet ligand-target matrix denoting regulatory potential scores between ligands and targets (ligands in columns).
#' @param potential_ligands Character vector giving the gene symbols of the potentially active ligands you want to define ligand activities for.
#' @param single TRUE if you want to calculate ligand activity scores by considering every ligand individually (recommended). FALSE if you want to calculate ligand activity scores as variable importances of a multi-ligand classification model.
#' @param ... Additional parameters for get_multi_ligand_importances if single = FALSE.
#'
#' @return A tibble giving several ligand activity scores. Following columns in the tibble: $test_ligand, $auroc, $aupr, $aupr_corrected and $pearson.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' ligand_activities = predict_ligand_activities(geneset = geneset, background_expressed_genes = background_expressed_genes, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
#' }
#'
#' @export
#'
predict_ligand_activities = function(geneset,background_expressed_genes,ligand_target_matrix, potential_ligands, single = TRUE,...){
  # Wrap the gene set + background into the "setting" list format expected by
  # the downstream ligand-importance functions.
  setting = list(geneset) %>%
    lapply(convert_gene_list_settings_evaluation, name = "gene set", ligands_oi = potential_ligands, background = background_expressed_genes)
  if (single == TRUE){
    # Score each candidate ligand individually (recommended mode).
    settings_ligand_prediction = setting %>%
      convert_settings_ligand_prediction(all_ligands = potential_ligands, validation = FALSE, single = TRUE)
    ligand_importances = settings_ligand_prediction %>% lapply(get_single_ligand_importances,ligand_target_matrix = ligand_target_matrix, known = FALSE) %>% bind_rows()
  } else {
    # Score ligands as variable importances of a single multi-ligand model;
    # extra arguments in ... are forwarded to get_multi_ligand_importances.
    settings_ligand_prediction = setting %>%
      convert_settings_ligand_prediction(all_ligands = potential_ligands, validation = FALSE, single = FALSE)
    ligand_importances = settings_ligand_prediction %>% lapply(get_multi_ligand_importances,ligand_target_matrix = ligand_target_matrix, known = FALSE, ...) %>% bind_rows()
  }
  return(ligand_importances %>% select(test_ligand,auroc,aupr,aupr_corrected, pearson))
}
#' @title Infer weighted active ligand-target links between a possible ligand and target genes of interest
#'
#' @description \code{get_weighted_ligand_target_links} Infer active ligand target links between possible ligands and genes belonging to a gene set of interest: consider the intersect between the top n targets of a ligand and the gene set.
#'
#' @usage
#' get_weighted_ligand_target_links(ligand, geneset,ligand_target_matrix,n = 250)
#'
#' @param geneset Character vector of the gene symbols of genes of which the expression is potentially affected by ligands from the interacting cell.
#' @param ligand Character vector giving the gene symbols of the potentially active ligand for which you want to find target genes.
#' @param n The top n of targets per ligand that will be considered. Default: 250.
#' @inheritParams predict_ligand_activities
#'
#' @return A tibble with columns ligand, target and weight (i.e. regulatory potential score).
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligand = "TNF"
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' active_ligand_target_links_df = get_weighted_ligand_target_links(ligand = potential_ligand, geneset = geneset, ligand_target_matrix = ligand_target_matrix, n = 250)
#' }
#'
#' @export
#'
get_weighted_ligand_target_links = function(ligand, geneset, ligand_target_matrix, n = 250){
  # Regulatory potential score of the ligand's n-th strongest target: every
  # target scoring at least this value belongs to the ligand's top-n set.
  # (TRUE spelled out instead of T: T is reassignable and unsafe.)
  top_n_score = ligand_target_matrix[,ligand] %>% sort(decreasing = TRUE) %>% head(n) %>% min()
  # Active targets = top-n targets of this ligand that are also in the gene set.
  targets = intersect(ligand_target_matrix[,ligand] %>% .[. >= top_n_score] %>% names(), geneset)
  if (length(targets) == 0){
    # No overlap: return a single NA row so callers can still bind_rows() the
    # per-ligand results and drop_na() afterwards.
    ligand_target_weighted_df = tibble(ligand = ligand, target = NA, weight = NA)
  } else {
    # Handles one or more targets uniformly: character subsetting of the matrix
    # column keeps the weights aligned with `targets`, so no join is needed.
    ligand_target_weighted_df = tibble(ligand = ligand, target = targets, weight = ligand_target_matrix[targets, ligand])
  }
  return(ligand_target_weighted_df)
}
#' @title Prepare heatmap visualization of the ligand-target links starting from a ligand-target tibble.
#'
#' @description \code{prepare_ligand_target_visualization} Prepare heatmap visualization of the ligand-target links starting from a ligand-target tibble. Get regulatory potential scores between all pairs of ligands and targets documented in this tibble. For better visualization, we propose to define a quantile cutoff on the ligand-target scores.
#'
#' @usage
#' prepare_ligand_target_visualization(ligand_target_df, ligand_target_matrix, cutoff = 0.25)
#'
#' @param cutoff Quantile cutoff on the ligand-target scores of the input weighted ligand-target network. Scores under this cutoff will be set to 0.
#' @param ligand_target_df Tibble with columns 'ligand', 'target' and 'weight' to indicate ligand-target regulatory potential scores of interest.
#' @inheritParams predict_ligand_activities
#'
#' @return A matrix giving the ligand-target regulatory potential scores between ligands of interest and their targets genes part of the gene set of interest.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' active_ligand_target_links_df = potential_ligands %>% lapply(get_weighted_ligand_target_links, geneset = geneset, ligand_target_matrix = ligand_target_matrix, n = 250) %>% bind_rows()
#' active_ligand_target_links = prepare_ligand_target_visualization(ligand_target_df = active_ligand_target_links_df, ligand_target_matrix = ligand_target_matrix, cutoff = 0.25)
#' }
#'
#' @export
#'
prepare_ligand_target_visualization = function(ligand_target_df, ligand_target_matrix, cutoff = 0.25){
  # Quantile-based cutoff on the input ligand-target scores; scores in the
  # full matrix below this value are zeroed for a cleaner heatmap.
  cutoff_include_all_ligands = quantile(ligand_target_df$weight, cutoff)
  ligand_target_matrix_oi = ligand_target_matrix
  ligand_target_matrix_oi[ligand_target_matrix_oi < cutoff_include_all_ligands] = 0
  all_targets = unique(ligand_target_df$target)
  all_ligands = unique(ligand_target_df$ligand)
  # Subset to the ligands/targets of interest. dim<- guards against R dropping
  # a single row/column to a plain vector, and names are restored afterwards.
  ligand_target_vis = ligand_target_matrix_oi[all_targets, all_ligands]
  dim(ligand_target_vis) = c(length(all_targets), length(all_ligands))
  rownames(ligand_target_vis) = all_targets
  colnames(ligand_target_vis) = all_ligands
  # Keep only targets/ligands that retain at least one link above the cutoff.
  keep_targets = all_targets[apply(ligand_target_vis, 1, sum) > 0]
  keep_ligands = all_ligands[apply(ligand_target_vis, 2, sum) > 0]
  ligand_target_vis_filtered = ligand_target_vis[keep_targets, keep_ligands]
  if (!is.matrix(ligand_target_vis_filtered)) {
    # Single row/column dropped to a vector: restore matrix shape.
    dim(ligand_target_vis_filtered) = c(length(keep_targets), length(keep_ligands))
  }
  rownames(ligand_target_vis_filtered) = keep_targets
  colnames(ligand_target_vis_filtered) = keep_ligands
  if (nrow(ligand_target_vis_filtered) > 1 && ncol(ligand_target_vis_filtered) > 1){
    # Order rows and columns by hierarchical clustering of correlation
    # distances (rows of cor(t(X)) are targets; rows of cor(X) are ligands).
    cor_targets = cor(t(ligand_target_vis_filtered))
    cor_ligands = cor(ligand_target_vis_filtered)
    if (anyNA(cor_targets) || anyNA(cor_ligands)) {
      # Zero-variance rows/columns make cor() return NA, which would make
      # hclust() error out; fall back to the incoming order instead.
      order_targets = rownames(ligand_target_vis_filtered)
      order_ligands = colnames(ligand_target_vis_filtered)
    } else {
      hclust_obj = hclust(dist(1 - cor_targets), method = "ward.D2")
      order_targets = hclust_obj$labels[hclust_obj$order]
      hclust_obj = hclust(dist(1 - cor_ligands), method = "ward.D2")
      order_ligands = hclust_obj$labels[hclust_obj$order]
    }
  } else {
    # Too few rows or columns to cluster: keep the incoming order.
    order_targets = rownames(ligand_target_vis_filtered)
    order_ligands = colnames(ligand_target_vis_filtered)
  }
  vis_ligand_target_network = ligand_target_vis_filtered[order_targets, order_ligands]
  dim(vis_ligand_target_network) = c(length(order_targets), length(order_ligands))
  rownames(vis_ligand_target_network) = order_targets
  colnames(vis_ligand_target_network) = order_ligands
  return(vis_ligand_target_network)
}
#' @title Assess probability that a target gene belongs to the geneset based on a multi-ligand random forest model
#'
#' @description \code{assess_rf_class_probabilities} Assess probability that a target gene belongs to the geneset based on a multi-ligand random forest model (with cross-validation). Target genes and background genes will be split in different groups in a stratified way.
#'
#' @usage
#' assess_rf_class_probabilities(round,folds,geneset,background_expressed_genes,ligands_oi,ligand_target_matrix)
#'
#' @param ligands_oi Character vector giving the gene symbols of the ligands you want to build the multi-ligand with.
#' @param round Integer describing which fold of the cross-validation scheme it is.
#' @param folds Integer describing how many folds should be used.
#' @inheritParams predict_ligand_activities
#'
#' @return A tibble with columns: $gene, $response, $prediction. Response indicates whether the gene belongs to the geneset of interest, prediction gives the probability this gene belongs to the geneset according to the random forest model.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' fold1_rf_prob = assess_rf_class_probabilities(round = 1,folds = 2,geneset = geneset,background_expressed_genes = background_expressed_genes ,ligands_oi = potential_ligands,ligand_target_matrix = ligand_target_matrix)
#' }
#'
#' @export
#'
assess_rf_class_probabilities = function(round,folds,geneset,background_expressed_genes,ligands_oi, ligand_target_matrix){
  # Seed with the round number so each cross-validation round yields a
  # different, but reproducible, stratified split.
  set.seed(round)
  geneset_shuffled = sample(geneset, size = length(geneset))
  # split() over 1:folds distributes the shuffled genes round-robin across the
  # folds (emits a recycling warning when length is not a multiple of folds).
  geneset_grouped = split(geneset_shuffled,1:folds)
  # Strict background: background genes that are not part of the gene set,
  # stratified into folds the same way (seed reset for reproducibility).
  strict_background_expressed_genes = background_expressed_genes[!background_expressed_genes %in% geneset]
  set.seed(round)
  strict_background_expressed_genes_shuffled = sample(strict_background_expressed_genes, size = length(strict_background_expressed_genes))
  strict_background_expressed_genes_grouped = split(strict_background_expressed_genes_shuffled,1:folds)
  # Train/predict once per held-out fold and pool all held-out predictions.
  geneset_predictions_all = seq(length(geneset_grouped)) %>% lapply(rf_target_prediction,geneset_grouped,strict_background_expressed_genes_grouped,ligands_oi,ligand_target_matrix) %>% bind_rows()
  # Strip dots from the response column before coercing back to logical --
  # presumably suffixes introduced by name mangling when the folds are bound
  # together (TODO confirm against rf_target_prediction's output).
  # NOTE(review): the assignment as last expression returns the tibble
  # invisibly; an explicit return() would make the contract clearer.
  geneset_predictions_all = geneset_predictions_all %>% mutate(response = gsub("\\.","",response) %>% as.logical())
}
#' @title Assess how well classification predictions accord to the expected response
#'
#' @description \code{classification_evaluation_continuous_pred_wrapper} Assess how well classification predictions accord to the expected response.
#'
#' @usage
#' classification_evaluation_continuous_pred_wrapper(response_prediction_tibble)
#'
#' @param response_prediction_tibble Tibble with columns "response" and "prediction" (e.g. output of function `assess_rf_class_probabilities`)
#'
#' @return A tibble showing several classification evaluation metrics.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' fold1_rf_prob = assess_rf_class_probabilities(round = 1,folds = 2,geneset = geneset,background_expressed_genes = background_expressed_genes ,ligands_oi = potential_ligands,ligand_target_matrix = ligand_target_matrix)
#' classification_evaluation_continuous_pred_wrapper(fold1_rf_prob)
#' }
#'
#' @export
#'
classification_evaluation_continuous_pred_wrapper = function(response_prediction_tibble) {
  # Thin wrapper: unpack the prediction/response columns and delegate to the
  # generic evaluator (iregulon = FALSE: standard continuous-prediction mode).
  prediction_performances = classification_evaluation_continuous_pred(response_prediction_tibble$prediction, response_prediction_tibble$response, iregulon = FALSE)
  return(prediction_performances)
}
#' @title Find which genes were among the top-predicted targets genes in a specific cross-validation round and see whether these genes belong to the gene set of interest as well.
#'
#' @description \code{get_top_predicted_genes} Find which genes were among the top-predicted targets genes in a specific cross-validation round and see whether these genes belong to the gene set of interest as well.
#'
#' @usage
#' get_top_predicted_genes(round,gene_prediction_list, quantile_cutoff = 0.95)
#'
#' @param gene_prediction_list List with per round of cross-validation: a tibble with columns "gene", "prediction" and "response" (e.g. output of function `assess_rf_class_probabilities`)
#' @param round Integer describing which fold of the cross-validation scheme it is.
#' @param quantile_cutoff Quantile of which genes should be considered as top-predicted targets. Default: 0.95, thus considering the top 5 percent predicted genes as predicted targets.
#'
#' @return A tibble indicating for every gene whether it belongs to the geneset and whether it belongs to the top-predicted genes in a specific cross-validation round.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' gene_predictions_list = seq(2) %>% lapply(assess_rf_class_probabilities,2, geneset = geneset,background_expressed_genes = background_expressed_genes,ligands_oi = potential_ligands,ligand_target_matrix = ligand_target_matrix)
#' seq(length(gene_predictions_list)) %>% lapply(get_top_predicted_genes,gene_predictions_list)
#' }
#'
#' @export
#'
get_top_predicted_genes = function(round,gene_prediction_list, quantile_cutoff = 0.95){
  # Predictions made during the requested cross-validation round.
  round_predictions = gene_prediction_list[[round]]
  # Probability threshold that defines the "top-predicted" set.
  prediction_threshold = quantile(round_predictions$prediction, quantile_cutoff)
  # Keep only genes reaching the threshold, together with their true label.
  top_predicted = round_predictions %>%
    arrange(-prediction) %>%
    mutate(predicted_top_target = prediction >= prediction_threshold) %>%
    filter(predicted_top_target) %>%
    rename(true_target = response) %>%
    select(gene, true_target, predicted_top_target)
  # Tag the indicator column with the round number, so the per-round tibbles
  # can later be joined by gene across rounds.
  colnames(top_predicted) = c("gene", "true_target", paste0("predicted_top_target_round", round))
  return(top_predicted)
}
#' @title Determine the fraction of genes belonging to the geneset or background and to the top-predicted genes.
#'
#' @description \code{calculate_fraction_top_predicted} Defines the fraction of genes belonging to the geneset or background and to the top-predicted genes.
#'
#' @usage
#' calculate_fraction_top_predicted(affected_gene_predictions, quantile_cutoff = 0.95)
#'
#' @param affected_gene_predictions Tibble with columns "gene", "prediction" and "response" (e.g. output of function `assess_rf_class_probabilities`)
#' @param quantile_cutoff Quantile of which genes should be considered as top-predicted targets. Default: 0.95, thus considering the top 5 percent predicted genes as predicted targets.
#'
#' @return A tibble indicating the number of genes belonging to the gene set of interest or background (true_target column), the number and fraction of genes of these groups that were part of the top predicted targets in a specific cross-validation round.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' gene_predictions_list = seq(2) %>% lapply(assess_rf_class_probabilities,2, geneset = geneset,background_expressed_genes = background_expressed_genes,ligands_oi = potential_ligands,ligand_target_matrix = ligand_target_matrix)
#' target_prediction_performances_discrete_cv = gene_predictions_list %>% lapply(calculate_fraction_top_predicted) %>% bind_rows() %>% ungroup() %>% mutate(round=rep(1:length(gene_predictions_list), each = 2))
#' }
#'
#' @export
#'
calculate_fraction_top_predicted = function(affected_gene_predictions, quantile_cutoff = 0.95){
  # Per class (true_target TRUE = gene set, FALSE = background), count how many
  # genes reach the top (1 - quantile_cutoff) fraction of predicted probabilities.
  predicted_positive = affected_gene_predictions %>% arrange(-prediction) %>% filter(prediction >= quantile(prediction,quantile_cutoff)) %>% group_by(response) %>% count() %>% rename(positive_prediction = n) %>% rename(true_target = response)
  # Total number of genes per class.
  all = affected_gene_predictions %>% arrange(-prediction) %>% rename(true_target = response) %>% group_by(true_target) %>% count()
  # left_join + replace_na keeps classes with zero top-predicted genes
  # (fraction 0) instead of silently dropping their row, consistent with
  # calculate_fraction_top_predicted_fisher().
  left_join(all, predicted_positive, by = "true_target") %>%
    mutate(positive_prediction = replace_na(positive_prediction, 0)) %>%
    mutate(fraction_positive_predicted = positive_prediction/n)
}
#' @title Perform a Fisher's exact test to determine whether genes belonging to the gene set of interest are more likely to be part of the top-predicted targets.
#'
#' @description \code{calculate_fraction_top_predicted_fisher} Performs a Fisher's exact test to determine whether genes belonging to the gene set of interest are more likely to be part of the top-predicted targets.
#'
#' @usage
#' calculate_fraction_top_predicted_fisher(affected_gene_predictions, quantile_cutoff = 0.95, p_value_output = TRUE)
#'
#' @param p_value_output Should total summary or p-value be returned as output? Default: TRUE.
#' @inheritParams calculate_fraction_top_predicted
#'
#' @return Summary of the Fisher's exact test or just the p-value
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' geneset = c("SOCS2","SOCS3", "IRF1")
#' background_expressed_genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' gene_predictions_list = seq(2) %>% lapply(assess_rf_class_probabilities,2, geneset = geneset,background_expressed_genes = background_expressed_genes,ligands_oi = potential_ligands,ligand_target_matrix = ligand_target_matrix)
#' target_prediction_performances_fisher_pval = gene_predictions_list %>% lapply(calculate_fraction_top_predicted_fisher) %>% unlist() %>% mean()
#' }
#'
#' @export
#'
calculate_fraction_top_predicted_fisher = function(affected_gene_predictions, quantile_cutoff = 0.95, p_value_output = TRUE){
  # Per class (response TRUE = gene set, FALSE = background), count the genes
  # in the top (1 - quantile_cutoff) fraction of predicted probabilities.
  predicted_positive = affected_gene_predictions %>% arrange(-prediction) %>% filter(prediction >= quantile(prediction,quantile_cutoff)) %>% group_by(response) %>% count() %>% rename(positive_prediction = n)
  # Total number of genes per class.
  all = affected_gene_predictions %>% arrange(-prediction) %>% group_by(response) %>% count()
  # left_join keeps classes that have zero top-predicted genes; replace_na
  # turns their missing count into 0 so the contingency table stays complete.
  results_df = left_join(all, predicted_positive, by="response") %>% mutate(positive_prediction = replace_na(positive_prediction, 0))
  # Cells of the 2x2 contingency table (geneset/background x top/not-top).
  # NOTE(review): if one class is entirely absent from the input, the filters
  # below yield length-0 vectors and matrix() will error -- upstream callers
  # always supply both classes, but worth confirming.
  tp = results_df %>% filter(response == TRUE) %>% .$positive_prediction
  fp = results_df %>% filter(response == FALSE) %>% .$positive_prediction
  fn = (results_df %>% filter(response == TRUE) %>% .$n) - (results_df %>% filter(response == TRUE) %>% .$positive_prediction)
  tn = (results_df %>% filter(response == FALSE) %>% .$n) - (results_df %>% filter(response == FALSE) %>% .$positive_prediction)
  contingency_table = matrix(c(tp,fp,fn,tn), nrow = 2,dimnames = list(c("geneset", "background"), c("top-predicted", "no-top-predicted")))
  # One-sided test: are gene-set genes enriched among the top predictions?
  summary = fisher.test(contingency_table, alternative = "greater")
  if(p_value_output == TRUE){
    return(summary$p.value)
  } else {
    return(summary)
  }
}
#' @title Cut off outer quantiles and rescale to a [0, 1] range
#'
#' @description \code{scale_quantile} Cut off outer quantiles and rescale to a [0, 1] range
#'
#' @usage
#' scale_quantile(x, outlier_cutoff = .05)
#'
#' @param x A numeric vector, matrix or data frame.
#' @param outlier_cutoff The quantile cutoff for outliers (default 0.05).
#'
#' @return The centered, scaled matrix or vector. The numeric centering and scalings used are returned as attributes.
#'
#' @examples
#' \dontrun{
#' ## Generate a matrix from a normal distribution
#' ## with a large standard deviation, centered at c(5, 5)
#' x <- matrix(rnorm(200*2, sd = 10, mean = 5), ncol = 2)
#'
#' ## Scale the dataset between [0,1]
#' x_scaled <- scale_quantile(x)
#'
#' ## Show ranges of each column
#' apply(x_scaled, 2, range)
#' }
#' @export
scale_quantile <- function(x, outlier_cutoff = .05) {
  # same function as scale_quantile from dynutils (copied here for use in vignette to avoid having dynutils as dependency)
  # credits to the amazing (w/z)outer and r(obrecht)cannood(t) from dynverse (https://github.com/dynverse)!
  if (is.null(dim(x))) {
    # vector input: reuse the matrix code path on a one-column matrix, then unwrap
    scaled <- scale_quantile(matrix(x, ncol = 1), outlier_cutoff = outlier_cutoff)
    result <- scaled[, 1]
    names(result) <- names(x)
    # carry over the scaling attributes so callers can invert the transform
    attr(result, "addend") <- attr(scaled, "addend")
    attr(result, "multiplier") <- attr(scaled, "multiplier")
    result
  } else {
    # per-column lower and upper quantiles act as clipping bounds
    bounds <- apply(x, 2, stats::quantile, c(outlier_cutoff, 1 - outlier_cutoff), na.rm = TRUE)
    shift <- -bounds[1, ]
    spread <- apply(bounds, 2, diff)
    # constant columns would give a zero divisor; map them to 1 to avoid division by zero
    spread[spread == 0] <- 1
    apply_quantile_scale(x, shift, 1 / spread)
  }
}
#' @title Prepare single-cell expression data to perform ligand activity analysis
#'
#' @description \code{convert_single_cell_expression_to_settings} Prepare single-cell expression data to perform ligand activity analysis
#'
#' @usage
#' convert_single_cell_expression_to_settings(cell_id, expression_matrix, setting_name, setting_from, regression = FALSE)
#'
#' @param cell_id Identity of the cell of interest
#' @param setting_name Name of the dataset
#' @param expression_matrix Gene expression matrix of single-cells
#' @param setting_from Character vector giving the gene symbols of the potentially active ligands you want to define ligand activities for.
#' @param regression Perform regression-based ligand activity analysis (TRUE) or classification-based ligand activity analysis (FALSE) by considering the genes expressed higher than the 0.975 quantiles as genes of interest. Default: FALSE.
#'
#' @return A list with slots $name, $from and $response respectively containing the setting name, potentially active ligands and the response to predict (whether genes belong to gene set of interest; i.e. most strongly expressed genes in a cell)
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' cell_ids = c("cell1","cell2")
#' expression_scaled = matrix(rnorm(length(genes)*2, sd = 0.5, mean = 0.5), nrow = 2)
#' rownames(expression_scaled) = cell_ids
#' colnames(expression_scaled) = genes
#' settings = convert_single_cell_expression_to_settings(cell_id = cell_ids[1], expression_matrix = expression_scaled, setting_name = "test", setting_from = potential_ligands)
#' }
#'
#' @export
#'
convert_single_cell_expression_to_settings = function(cell_id, expression_matrix, setting_name, setting_from, regression = FALSE){
  # input check
  requireNamespace("dplyr")
  # expression profile of the requested cell (one row of the matrix)
  cell_expression = expression_matrix[cell_id, ]
  if (regression == TRUE) {
    # regression mode: the response is the expression value itself
    response = cell_expression
  } else {
    # classification mode: genes above the 0.975 expression quantile form the gene set of interest
    response = cell_expression >= quantile(cell_expression, 0.975)
  }
  list(name = paste0(setting_name, "_", cell_id), from = setting_from, response = response)
}
#' @title Single-cell ligand activity prediction
#'
#' @description \code{predict_single_cell_ligand_activities} For every individual cell of interest, predict activities of ligands in regulating expression of genes that are stronger expressed in that cell compared to other cells (0.975 quantile). Ligand activities are defined as how well they predict the observed transcriptional response (i.e. gene set) according to the NicheNet model.
#'
#' @usage
#' predict_single_cell_ligand_activities(cell_ids, expression_scaled,ligand_target_matrix, potential_ligands, single = TRUE,...)
#'
#' @param cell_ids Identities of cells for which the ligand activities should be calculated.
#' @param expression_scaled Scaled expression matrix of single-cells (scaled such that high values indicate that a gene is stronger expressed in that cell compared to others)
#' @param ligand_target_matrix The NicheNet ligand-target matrix denoting regulatory potential scores between ligands and targets (ligands in columns).
#' @param potential_ligands Character vector giving the gene symbols of the potentially active ligands you want to define ligand activities for.
#' @param single TRUE if you want to calculate ligand activity scores by considering every ligand individually (recommended). FALSE if you want to calculate ligand activity scores as variable importances of a multi-ligand classification model.
#' @param ... Additional parameters for get_multi_ligand_importances if single = FALSE.
#'
#' @return A tibble giving several ligand activity scores for single cells. Following columns in the tibble: $setting, $test_ligand, $auroc, $aupr and $pearson.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' cell_ids = c("cell1","cell2")
#' expression_scaled = matrix(rnorm(length(genes)*2, sd = 0.5, mean = 0.5), nrow = 2)
#' rownames(expression_scaled) = cell_ids
#' colnames(expression_scaled) = genes
#' ligand_activities = predict_single_cell_ligand_activities(cell_ids = cell_ids, expression_scaled = expression_scaled, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
#' }
#'
#' @export
#'
predict_single_cell_ligand_activities = function(cell_ids, expression_scaled,ligand_target_matrix, potential_ligands, single = TRUE,...){
  # Build one prediction "setting" per cell: the gene set is that cell's most strongly
  # expressed genes (0.975 quantile, see convert_single_cell_expression_to_settings)
  per_cell_settings = cell_ids %>% lapply(convert_single_cell_expression_to_settings, expression_scaled, "", potential_ligands)
  if (single == TRUE) {
    # score each candidate ligand individually (recommended)
    prediction_settings = per_cell_settings %>%
      convert_settings_ligand_prediction(all_ligands = potential_ligands, validation = FALSE, single = TRUE)
    ligand_importances = prediction_settings %>%
      lapply(get_single_ligand_importances, ligand_target_matrix = ligand_target_matrix, known = FALSE) %>%
      bind_rows() %>%
      mutate(setting = gsub("^_", "", setting))  # strip the leading "_" left by the empty setting name
  } else {
    # score ligands jointly as variable importances of a multi-ligand model
    prediction_settings = per_cell_settings %>%
      convert_settings_ligand_prediction(all_ligands = potential_ligands, validation = FALSE, single = FALSE)
    ligand_importances = prediction_settings %>%
      lapply(get_multi_ligand_importances, ligand_target_matrix = ligand_target_matrix, known = FALSE, ...) %>%
      bind_rows() %>%
      mutate(setting = gsub("^_", "", setting))
  }
  return(ligand_importances %>% select(setting, test_ligand, auroc, aupr, pearson))
}
#' @title Normalize single-cell ligand activities
#'
#' @description \code{normalize_single_cell_ligand_activities} Normalize single-cell ligand activities to make ligand activities over different cells comparable.
#' @usage
#' normalize_single_cell_ligand_activities(ligand_activities)
#'
#' @param ligand_activities Output from the function `predict_single_cell_ligand_activities`.
#'
#' @return A tibble giving the normalized ligand activity scores (modified z-scores of the aupr) for single cells: one row per cell (column $cell) and one column per ligand containing its normalized activity value in that cell.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' cell_ids = c("cell1","cell2")
#' expression_scaled = matrix(rnorm(length(genes)*2, sd = 0.5, mean = 0.5), nrow = 2)
#' rownames(expression_scaled) = cell_ids
#' colnames(expression_scaled) = genes
#' ligand_activities = predict_single_cell_ligand_activities(cell_ids = cell_ids, expression_scaled = expression_scaled, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
#' normalized_ligand_activities = normalize_single_cell_ligand_activities(ligand_activities)
#' }
#'
#' @export
#'
normalize_single_cell_ligand_activities = function(ligand_activities){
  #' Normalize per-cell ligand activities (modified z-score of the aupr within each cell)
  #' so activity scores are comparable across cells, and return them as a wide tibble:
  #' one row per cell ($cell), one column per ligand.
  # normalize within each cell: modified z-score of the aupr across all tested ligands
  single_ligand_activities_aupr_norm = ligand_activities %>%
    group_by(setting) %>%
    mutate(aupr = nichenetr::scaling_modified_zscore(aupr)) %>%
    ungroup() %>%
    rename(cell = setting, ligand = test_ligand) %>%
    distinct(cell, ligand, aupr)
  # reshape to wide format (ligands x cells); missing ligand-cell pairs get the
  # global minimum normalized score
  single_ligand_activities_aupr_norm_df = single_ligand_activities_aupr_norm %>%
    spread(cell, aupr, fill = min(.$aupr))
  # transpose so cells become rows and ligands become columns
  single_ligand_activities_aupr_norm_matrix = single_ligand_activities_aupr_norm_df %>%
    select(-ligand) %>%
    t() %>%
    magrittr::set_colnames(single_ligand_activities_aupr_norm_df$ligand)
  single_ligand_activities_aupr_norm_df = single_ligand_activities_aupr_norm_matrix %>%
    data.frame() %>%
    rownames_to_column("cell") %>%
    as_tibble()
  # explicit return: the original ended on an assignment, which returns the value
  # invisibly (nothing is printed when the call is used at top level)
  return(single_ligand_activities_aupr_norm_df)
}
#' @title Perform a correlation and regression analysis between cells' ligand activities and property scores of interest
#'
#' @description \code{single_ligand_activity_score_regression} Performs a correlation and regression analysis between cells' ligand activities and property scores of interest.
#' @usage
#' single_ligand_activity_score_regression(ligand_activities, scores_tbl)
#'
#' @param ligand_activities Output from the function `normalize_single_cell_ligand_activities`.
#' @param scores_tbl a tibble containing scores for every cell (columns: $cell and $score). The score should correspond to the property of interest
#'
#' @return A tibble giving for every ligand, the correlation/regression coefficients giving information about the relation between its activity and the property of interest.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' cell_ids = c("cell1","cell2")
#' expression_scaled = matrix(rnorm(length(genes)*2, sd = 0.5, mean = 0.5), nrow = 2)
#' rownames(expression_scaled) = cell_ids
#' colnames(expression_scaled) = genes
#' ligand_activities = predict_single_cell_ligand_activities(cell_ids = cell_ids, expression_scaled = expression_scaled, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
#' normalized_ligand_activities = normalize_single_cell_ligand_activities(ligand_activities)
#' cell_scores_tbl = tibble(cell = cell_ids, score = c(1,4))
#' regression_analysis_output = single_ligand_activity_score_regression(normalized_ligand_activities,cell_scores_tbl)
#' }
#'
#' @export
#'
single_ligand_activity_score_regression = function(ligand_activities, scores_tbl){
  # join each cell's property score onto its normalized ligand activities
  merged = inner_join(scores_tbl, ligand_activities)
  property_score = merged$score
  # regress the property score on every ligand's activity column
  metrics_per_ligand = merged %>%
    select(-cell, -score) %>%
    lapply(function(activity_prediction) regression_evaluation(activity_prediction, property_score))
  # bind the per-ligand metric rows together and label them with the ligand name
  metrics_per_ligand %>%
    bind_rows() %>%
    mutate(ligand = names(metrics_per_ligand))
}
#' @title Assess how well cells' ligand activities predict a binary property of interest of cells.
#'
#' @description \code{single_ligand_activity_score_classification} Evaluates classification performances: it assesses how well cells' ligand activities can predict a binary property of interest.
#' @usage
#' single_ligand_activity_score_classification(ligand_activities, scores_tbl)
#'
#' @param ligand_activities Output from the function `normalize_single_cell_ligand_activities`.
#' @param scores_tbl a tibble indicating for every cell whether the property of interests holds TRUE or FALSE (columns: $cell: character vector with cell ids and $score: logical vector according to property of interest).
#'
#' @return A tibble giving for every ligand, the classification performance metrics giving information about the relation between its activity and the property of interest.
#'
#' @examples
#' \dontrun{
#' weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
#' ligands = list("TNF","BMP2","IL4")
#' ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
#' potential_ligands = c("TNF","BMP2","IL4")
#' genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
#' cell_ids = c("cell1","cell2")
#' expression_scaled = matrix(rnorm(length(genes)*2, sd = 0.5, mean = 0.5), nrow = 2)
#' rownames(expression_scaled) = cell_ids
#' colnames(expression_scaled) = genes
#' ligand_activities = predict_single_cell_ligand_activities(cell_ids = cell_ids, expression_scaled = expression_scaled, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
#' normalized_ligand_activities = normalize_single_cell_ligand_activities(ligand_activities)
#' cell_scores_tbl = tibble(cell = cell_ids, score = c(TRUE,FALSE))
#' classification_analysis_output = single_ligand_activity_score_classification(normalized_ligand_activities,cell_scores_tbl)
#' }
#'
#' @export
#'
single_ligand_activity_score_classification = function(ligand_activities, scores_tbl){
  # join each cell's binary property label onto its normalized ligand activities
  merged = inner_join(scores_tbl, ligand_activities)
  property_label = merged$score
  # assess how well every ligand's activity column separates TRUE from FALSE cells
  metrics_per_ligand = merged %>%
    select(-cell, -score) %>%
    lapply(function(activity_prediction) classification_evaluation_continuous_pred(activity_prediction, property_label, iregulon = FALSE))
  # bind the per-ligand metric rows together and label them with the ligand name
  metrics_per_ligand %>%
    bind_rows() %>%
    mutate(ligand = names(metrics_per_ligand))
}
# NOTE(review): duplicate definition — byte-identical to the roxygen-documented
# single_ligand_activity_score_regression defined earlier in this file. Sourcing the
# file redefines the function with the same body, so behavior is unchanged, but this
# dead copy should be removed once confirmed nothing depends on the redefinition.
single_ligand_activity_score_regression = function(ligand_activities, scores_tbl){
  # join cell property scores with per-cell ligand activities
  combined = inner_join(scores_tbl,ligand_activities)
  # fit a regression of the property score against each ligand's activity column
  output = lapply(combined %>% select(-cell, -score), function(activity_prediction, combined){
    geneset_score = combined$score
    metrics = regression_evaluation(activity_prediction,geneset_score)
  }, combined)
  # label each per-ligand metrics row with its ligand name
  ligands = names(output)
  output_df = output %>% bind_rows() %>% mutate(ligand = ligands)
  return(output_df)
}
#' @title Perform NicheNet analysis on Seurat object: explain DE between conditions
#'
#' @description \code{nichenet_seuratobj_aggregate} Perform NicheNet analysis on Seurat object: explain differential expression (DE) in a receiver celltype between two different conditions by ligands expressed by sender cells
#' @usage
#' nichenet_seuratobj_aggregate(receiver, seurat_obj, condition_colname, condition_oi, condition_reference, sender = "all",ligand_target_matrix,lr_network,weighted_networks,expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE, top_n_ligands = 30,top_n_targets = 200, cutoff_visualization = 0.33,verbose = TRUE, assay_oi = NULL)
#'
#' @param receiver Name of cluster identity/identities of cells that are presumably affected by intercellular communication with other cells
#' @param seurat_obj Single-cell expression dataset as Seurat object https://satijalab.org/seurat/.
#' @param condition_colname Name of the column in the meta data dataframe that indicates which condition/sample cells were coming from.
#' @param condition_oi Condition of interest in which receiver cells were presumably affected by other cells. Should be a name present in the `condition_colname` column of the metadata.
#' @param condition_reference The second condition (e.g. reference or steady-state condition). Should be a name present in the `condition_colname` column of the metadata.
#' @param sender Determine the potential sender cells. Name of cluster identity/identities of cells that presumably affect expression in the receiver cell type. In case you want to look at all possible sender cell types in the data, you can give this argument the value "all". "all" indicates thus that all cell types in the dataset will be considered as possible sender cells. As final option, you could give this argument the value "undefined"."undefined" won't look at ligands expressed by sender cells, but at all ligands for which a corresponding receptor is expressed. This could be useful if the presumably active sender cell is not profiled. Default: "all".
#' @param expression_pct To determine ligands and receptors expressed by sender and receiver cells, we consider genes expressed if they are expressed in at least a specific fraction of cells of a cluster. This number indicates this fraction. Default: 0.10
#' @param lfc_cutoff Cutoff on log fold change in the wilcoxon differential expression test. Default: 0.25.
#' @param geneset Indicate whether to consider all DE genes between condition 1 and 2 ("DE"), or only genes upregulated in condition 1 ("up"), or only genes downregulated in condition 1 ("down").
#' @param filter_top_ligands Indicate whether output tables for ligand-target and ligand-receptor networks should be done for a filtered set of top ligands (TRUE) or for all ligands (FALSE). Default: TRUE.
#' @param top_n_ligands Indicate how many ligands should be extracted as top-ligands after ligand activity analysis. Only for these ligands, target genes and receptors will be returned. Default: 30.
#' @param top_n_targets To predict active, affected targets of the prioritized ligands, consider only DE genes if they also belong to the a priori top n ("top_n_targets") targets of a ligand. Default = 200.
#' @param cutoff_visualization Because almost no ligand-target scores have a regulatory potential score of 0, we clarify the heatmap visualization by giving the links with the lowest scores a score of 0. The cutoff_visualization parameter indicates this fraction of links that are given a score of zero. Default = 0.33.
#' @param ligand_target_matrix The NicheNet ligand-target matrix of the organism of interest denoting regulatory potential scores between ligands and targets (ligands in columns).
#' @param lr_network The ligand-receptor network (columns that should be present: $from, $to) of the organism of interest.
#' @param weighted_networks The NicheNet weighted networks of the organism of interest denoting interactions and their weights/confidences in the ligand-signaling and gene regulatory network.
#' @param verbose Print out the current analysis stage. Default: TRUE.
#' @inheritParams get_expressed_genes
#'
#' @return A list with the following elements:
#' $ligand_activities: data frame with output ligand activity analysis;
#' $top_ligands: top_n ligands based on ligand activity;
#' $top_targets: active, affected target genes of these ligands;
#' $top_receptors: receptors of these ligands;
#' $ligand_target_matrix: matrix indicating regulatory potential scores between active ligands and their predicted targets;
#' $ligand_target_heatmap: heatmap of ligand-target regulatory potential;
#' $ligand_target_df: data frame showing regulatory potential scores of predicted active ligand-target network;
#' $ligand_activity_target_heatmap: heatmap showing both ligand activity scores and target genes of these top ligands;
#' $ligand_expression_dotplot: expression dotplot of the top ligands;
#' $ligand_differential_expression_heatmap = differential expression heatmap of the top ligands;
#' $ligand_receptor_matrix: matrix of ligand-receptor interactions;
#' $ligand_receptor_heatmap: heatmap showing ligand-receptor interactions;
#' $ligand_receptor_df: data frame of ligand-receptor interactions;
#' $geneset_oi: a vector containing the set of genes used as input for the ligand activity analysis;
#' $background_expressed_genes: the background of genes to which the geneset will be compared in the ligand activity analysis.
#'
#' @import Seurat
#' @import dplyr
#' @importFrom magrittr set_rownames set_colnames
#'
#' @examples
#' \dontrun{
#' seuratObj = readRDS(url("https://zenodo.org/record/3531889/files/seuratObj_test.rds"))
#' lr_network = readRDS(url("https://zenodo.org/record/7074291/files/lr_network_mouse_21122021.rds"))
#' ligand_target_matrix = readRDS(url("https://zenodo.org/record/7074291/files/ligand_target_matrix_nsga2r_final_mouse.rds"))
#' weighted_networks = readRDS(url("https://zenodo.org/record/7074291/files/weighted_networks_nsga2r_final_mouse.rds"))
#' nichenet_seuratobj_aggregate(receiver = "CD8 T", seurat_obj = seuratObj, condition_colname = "aggregate", condition_oi = "LCMV", condition_reference = "SS", sender = "Mono", ligand_target_matrix = ligand_target_matrix, lr_network = lr_network, weighted_networks = weighted_networks)
#' }
#'
#' @export
#'
nichenet_seuratobj_aggregate = function(receiver, seurat_obj, condition_colname, condition_oi, condition_reference, sender = "all",ligand_target_matrix,lr_network,weighted_networks,
expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE ,top_n_ligands = 30,
top_n_targets = 200, cutoff_visualization = 0.33,
verbose = TRUE, assay_oi = NULL)
{
requireNamespace("Seurat")
requireNamespace("dplyr")
# input check
if(! "RNA" %in% names(seurat_obj@assays)){
if ("Spatial" %in% names(seurat_obj@assays)){
warning("You are going to apply NicheNet on a spatial seurat object. Be sure it's ok to use NicheNet the way you are planning to do it. So this means: you should have changes in gene expression in receiver cells caused by cell-cell interactions. Note that in the case of spatial transcriptomics, you are not dealing with single cells but with 'spots' containing multiple cells of the same of different cell types.")
if (class(seurat_obj@assays$Spatial@data) != "matrix" & class(seurat_obj@assays$Spatial@data) != "dgCMatrix") {
warning("Spatial Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$Spatial@data' for default or 'seurat_obj@assays$SCT@data' for when the single-cell transform pipeline was applied")
}
if (sum(dim(seurat_obj@assays$Spatial@data)) == 0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$Spatial@data'")
}
}} else {
if (class(seurat_obj@assays$RNA@data) != "matrix" &
class(seurat_obj@assays$RNA@data) != "dgCMatrix") {
warning("Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data or seurat_obj@assays$SCT@data for when the single-cell transform pipeline was applied")
}
if ("integrated" %in% names(seurat_obj@assays)) {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$integrated@data)) ==
0)
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data")
}
else if ("SCT" %in% names(seurat_obj@assays)) {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$SCT@data)) ==
0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$SCT@data' for data corrected via SCT")
}
}
else {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data'")
}
}
}
if(!condition_colname %in% colnames(seurat_obj@meta.data))
stop("Your column indicating the conditions/samples of interest should be in the metadata dataframe")
if(sum(condition_oi %in% c(seurat_obj[[condition_colname]] %>% unlist() %>% as.character() %>% unique())) != length(condition_oi))
stop("condition_oi should be in the condition-indicating column")
if(sum(condition_reference %in% c(seurat_obj[[condition_colname]] %>% unlist() %>% as.character() %>% unique())) != length(condition_reference))
stop("condition_reference should be in the condition-indicating column")
if(sum(receiver %in% unique(Idents(seurat_obj))) != length(receiver))
stop("The defined receiver cell type should be an identity class of your seurat object")
if(length(sender) == 1){
if(sender != "all" & sender != "undefined"){
if(sum(sender %in% unique(Idents(seurat_obj))) != length(sender)){
stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
}
}
} else {
if(sum(sender %in% unique(Idents(seurat_obj))) != length(sender)){
stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
}
}
if(geneset != "DE" & geneset != "up" & geneset != "down")
stop("geneset should be 'DE', 'up' or 'down'")
if("integrated" %in% names(seurat_obj@assays)){
warning("Seurat object is result from the Seurat integration workflow. Make sure that the way of defining expressed and differentially expressed genes in this wrapper is appropriate for your integrated data.")
}
# Read in and process NicheNet networks, define ligands and receptors
if (verbose == TRUE){print("Read in and process NicheNet's networks")}
weighted_networks_lr = weighted_networks$lr_sig %>% inner_join(lr_network %>% distinct(from,to), by = c("from","to"))
ligands = lr_network %>% pull(from) %>% unique()
receptors = lr_network %>% pull(to) %>% unique()
if (verbose == TRUE){print("Define expressed ligands and receptors in receiver and sender cells")}
# step1 nichenet analysis: get expressed genes in sender and receiver cells
## receiver
list_expressed_genes_receiver = receiver %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_receiver) = receiver %>% unique()
expressed_genes_receiver = list_expressed_genes_receiver %>% unlist() %>% unique()
## sender
if (length(sender) == 1){
if (sender == "all"){
sender_celltypes = Idents(seurat_obj) %>% levels()
list_expressed_genes_sender = sender_celltypes %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
} else if (sender == "undefined") {
if("integrated" %in% names(seurat_obj@assays)){
expressed_genes_sender = union(seurat_obj@assays$integrated@data %>% rownames(),rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
} else {
expressed_genes_sender = union(seurat_obj@assays$RNA@data %>% rownames(),rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
}
} else if (sender != "all" & sender != "undefined") {
sender_celltypes = sender
list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes %>% unique()
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
}
} else {
sender_celltypes = sender
list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes %>% unique()
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
}
# step2 nichenet analysis: define background and gene list of interest: here differential expression between two conditions of cell type of interest
if (verbose == TRUE){print("Perform DE analysis in receiver cell")}
seurat_obj_receiver= subset(seurat_obj, idents = receiver)
seurat_obj_receiver = SetIdent(seurat_obj_receiver, value = seurat_obj_receiver[[condition_colname]])
DE_table_receiver = FindMarkers(object = seurat_obj_receiver, ident.1 = condition_oi, ident.2 = condition_reference, min.pct = expression_pct) %>% rownames_to_column("gene")
SeuratV4 = c("avg_log2FC") %in% colnames(DE_table_receiver)
if(SeuratV4 == TRUE){
if (geneset == "DE"){
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & abs(avg_log2FC) >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "up") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_log2FC >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "down") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_log2FC <= lfc_cutoff) %>% pull(gene)
}
} else {
if (geneset == "DE"){
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & abs(avg_logFC) >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "up") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_logFC >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "down") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_logFC <= lfc_cutoff) %>% pull(gene)
}
}
geneset_oi = geneset_oi %>% .[. %in% rownames(ligand_target_matrix)]
if (length(geneset_oi) == 0){
stop("No genes were differentially expressed")
}
background_expressed_genes = expressed_genes_receiver %>% .[. %in% rownames(ligand_target_matrix)]
# step3 nichenet analysis: define potential ligands
expressed_ligands = intersect(ligands,expressed_genes_sender)
expressed_receptors = intersect(receptors,expressed_genes_receiver)
if (length(expressed_ligands) == 0){
stop("No ligands expressed in sender cell")
}
if (length(expressed_receptors) == 0){
stop("No receptors expressed in receiver cell")
}
potential_ligands = lr_network %>% filter(from %in% expressed_ligands & to %in% expressed_receptors) %>% pull(from) %>% unique()
if (length(potential_ligands) == 0){
stop("No potentially active ligands")
}
if (verbose == TRUE){print("Perform NicheNet ligand activity analysis")}
# step4 perform NicheNet's ligand activity analysis
ligand_activities = predict_ligand_activities(geneset = geneset_oi, background_expressed_genes = background_expressed_genes, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
ligand_activities = ligand_activities %>%
arrange(-aupr_corrected) %>%
mutate(rank = rank(desc(aupr_corrected)))
if(filter_top_ligands == TRUE){
best_upstream_ligands = ligand_activities %>% top_n(top_n_ligands, aupr_corrected) %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
} else {
best_upstream_ligands = ligand_activities %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
}
if (verbose == TRUE){print("Infer active target genes of the prioritized ligands")}
# step5 infer target genes of the top-ranked ligands
active_ligand_target_links_df = best_upstream_ligands %>% lapply(get_weighted_ligand_target_links,geneset = geneset_oi, ligand_target_matrix = ligand_target_matrix, n = top_n_targets) %>% bind_rows() %>% drop_na()
if(nrow(active_ligand_target_links_df) > 0){
active_ligand_target_links = prepare_ligand_target_visualization(ligand_target_df = active_ligand_target_links_df, ligand_target_matrix = ligand_target_matrix, cutoff = cutoff_visualization)
order_ligands = intersect(best_upstream_ligands, colnames(active_ligand_target_links)) %>% rev() %>% make.names()
order_targets = active_ligand_target_links_df$target %>% unique() %>% intersect(rownames(active_ligand_target_links)) %>% make.names()
rownames(active_ligand_target_links) = rownames(active_ligand_target_links) %>% make.names()
colnames(active_ligand_target_links) = colnames(active_ligand_target_links) %>% make.names()
order_targets = order_targets %>% intersect(rownames(active_ligand_target_links))
order_ligands = order_ligands %>% intersect(colnames(active_ligand_target_links))
vis_ligand_target = active_ligand_target_links[order_targets,order_ligands,drop=FALSE] %>% t()
p_ligand_target_network = vis_ligand_target %>% make_heatmap_ggplot("Prioritized ligands","Predicted target genes", color = "purple",legend_position = "top", x_axis_position = "top",legend_title = "Regulatory potential") + theme(axis.text.x = element_text(face = "italic")) #+ scale_fill_gradient2(low = "whitesmoke", high = "purple", breaks = c(0,0.006,0.012))
} else {
vis_ligand_target = NULL
p_ligand_target_network = NULL
print("no highly likely active targets found for top ligands")
}
# combined heatmap: overlay ligand activities
ligand_aupr_matrix = ligand_activities %>% select(aupr_corrected) %>% as.matrix() %>% magrittr::set_rownames(ligand_activities$test_ligand)
rownames(ligand_aupr_matrix) = rownames(ligand_aupr_matrix) %>% make.names()
colnames(ligand_aupr_matrix) = colnames(ligand_aupr_matrix) %>% make.names()
vis_ligand_aupr = ligand_aupr_matrix[order_ligands, ] %>% as.matrix(ncol = 1) %>% magrittr::set_colnames("AUPR")
p_ligand_aupr = vis_ligand_aupr %>% make_heatmap_ggplot("Prioritized ligands","Ligand activity", color = "darkorange",legend_position = "top", x_axis_position = "top", legend_title = "AUPR\n(target gene prediction ability)") + theme(legend.text = element_text(size = 9))
p_ligand_aupr
figures_without_legend = cowplot::plot_grid(
p_ligand_aupr + theme(legend.position = "none", axis.ticks = element_blank()) + theme(axis.title.x = element_text()),
p_ligand_target_network + theme(legend.position = "none", axis.ticks = element_blank()) + ylab(""),
align = "hv",
nrow = 1,
rel_widths = c(ncol(vis_ligand_aupr)+10, ncol(vis_ligand_target)))
legends = cowplot::plot_grid(
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_aupr)),
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_target_network)),
nrow = 1,
align = "h")
combined_plot = cowplot::plot_grid(figures_without_legend,
legends,
rel_heights = c(10,2), nrow = 2, align = "hv")
# ligand-receptor plot
# get the ligand-receptor network of the top-ranked ligands
if (verbose == TRUE){print("Infer receptors of the prioritized ligands")}
lr_network_top = lr_network %>% filter(from %in% best_upstream_ligands & to %in% expressed_receptors) %>% distinct(from,to)
best_upstream_receptors = lr_network_top %>% pull(to) %>% unique()
lr_network_top_df_large = weighted_networks_lr %>% filter(from %in% best_upstream_ligands & to %in% best_upstream_receptors)
lr_network_top_df = lr_network_top_df_large %>% spread("from","weight",fill = 0)
lr_network_top_matrix = lr_network_top_df %>% select(-to) %>% as.matrix() %>% magrittr::set_rownames(lr_network_top_df$to)
if (nrow(lr_network_top_matrix) > 1){
dist_receptors = dist(lr_network_top_matrix, method = "binary")
hclust_receptors = hclust(dist_receptors, method = "ward.D2")
order_receptors = hclust_receptors$labels[hclust_receptors$order]
} else {
order_receptors = rownames(lr_network_top_matrix)
}
if (ncol(lr_network_top_matrix) > 1) {
dist_ligands = dist(lr_network_top_matrix %>% t(), method = "binary")
hclust_ligands = hclust(dist_ligands, method = "ward.D2")
order_ligands_receptor = hclust_ligands$labels[hclust_ligands$order]
} else {
order_ligands_receptor = colnames(lr_network_top_matrix)
}
order_receptors = order_receptors %>% intersect(rownames(lr_network_top_matrix))
order_ligands_receptor = order_ligands_receptor %>% intersect(colnames(lr_network_top_matrix))
vis_ligand_receptor_network = lr_network_top_matrix[order_receptors, order_ligands_receptor]
dim(vis_ligand_receptor_network) = c(length(order_receptors), length(order_ligands_receptor))
rownames(vis_ligand_receptor_network) = order_receptors %>% make.names()
colnames(vis_ligand_receptor_network) = order_ligands_receptor %>% make.names()
p_ligand_receptor_network = vis_ligand_receptor_network %>% t() %>% make_heatmap_ggplot("Ligands","Receptors", color = "mediumvioletred", x_axis_position = "top",legend_title = "Prior interaction potential")
# DE analysis for each sender cell type -- of course only possible when having sender cell types
if (length(sender) > 1){
are_there_senders = TRUE
}
if(length(sender) == 1){
if(sender != "undefined"){
are_there_senders = TRUE
} else {
are_there_senders = FALSE
}
}
if (are_there_senders == TRUE){
if (verbose == TRUE){print("Perform DE analysis in sender cells")}
seurat_obj = subset(seurat_obj, features= potential_ligands)
DE_table_all = Idents(seurat_obj) %>% levels() %>% intersect(sender_celltypes) %>% lapply(get_lfc_celltype, seurat_obj = seurat_obj, condition_colname = condition_colname, condition_oi = condition_oi, condition_reference = condition_reference, expression_pct = expression_pct, celltype_col = NULL) %>% reduce(full_join, by = "gene") # use this if cell type labels are the identities of your Seurat object -- if not: indicate the celltype_col properly
DE_table_all[is.na(DE_table_all)] = 0
# Combine ligand activities with DE information
ligand_activities_de = ligand_activities %>% select(test_ligand, pearson) %>% rename(ligand = test_ligand) %>% left_join(DE_table_all %>% rename(ligand = gene), by = "ligand")
ligand_activities_de[is.na(ligand_activities_de)] = 0
# make LFC heatmap
lfc_matrix = ligand_activities_de %>% select(-ligand, -pearson) %>% as.matrix() %>% magrittr::set_rownames(ligand_activities_de$ligand)
rownames(lfc_matrix) = rownames(lfc_matrix) %>% make.names()
order_ligands = order_ligands[order_ligands %in% rownames(lfc_matrix)]
vis_ligand_lfc = lfc_matrix[order_ligands,]
vis_ligand_lfc = vis_ligand_lfc %>% as.matrix(ncol = length(Idents(seurat_obj) %>% levels() %>% intersect(sender_celltypes)))
colnames(vis_ligand_lfc) = vis_ligand_lfc %>% colnames() %>% make.names()
p_ligand_lfc = vis_ligand_lfc %>% make_threecolor_heatmap_ggplot("Prioritized ligands","LFC in Sender", low_color = "midnightblue",mid_color = "white", mid = median(vis_ligand_lfc), high_color = "red",legend_position = "top", x_axis_position = "top", legend_title = "LFC") + theme(axis.text.y = element_text(face = "italic"))
# ligand expression Seurat dotplot
real_makenames_conversion = lr_network$from %>% unique() %>% magrittr::set_names(lr_network$from %>% unique() %>% make.names())
order_ligands_adapted = real_makenames_conversion[order_ligands]
names(order_ligands_adapted) = NULL
seurat_obj_subset = seurat_obj %>% subset(idents = sender_celltypes)
seurat_obj_subset = SetIdent(seurat_obj_subset, value = seurat_obj_subset[[condition_colname]]) %>% subset(idents = condition_oi) ## only shows cells of the condition of interest
rotated_dotplot = DotPlot(seurat_obj %>% subset(cells = Cells(seurat_obj_subset)), features = order_ligands_adapted, cols = "RdYlBu") + coord_flip() + theme(legend.text = element_text(size = 10), legend.title = element_text(size = 12)) # flip of coordinates necessary because we want to show ligands in the rows when combining all plots
rm(seurat_obj_subset)
# combined plot
figures_without_legend = cowplot::plot_grid(
p_ligand_aupr + theme(legend.position = "none", axis.ticks = element_blank()) + theme(axis.title.x = element_text()),
rotated_dotplot + theme(legend.position = "none", axis.ticks = element_blank(), axis.title.x = element_text(size = 12), axis.text.y = element_text(face = "italic", size = 9), axis.text.x = element_text(size = 9, angle = 90,hjust = 0)) + ylab("Expression in Sender") + xlab("") + scale_y_discrete(position = "right"),
p_ligand_lfc + theme(legend.position = "none", axis.ticks = element_blank()) + theme(axis.title.x = element_text()) + ylab(""),
p_ligand_target_network + theme(legend.position = "none", axis.ticks = element_blank()) + ylab(""),
align = "hv",
nrow = 1,
rel_widths = c(ncol(vis_ligand_aupr)+6, ncol(vis_ligand_lfc) + 7, ncol(vis_ligand_lfc) + 8, ncol(vis_ligand_target)))
legends = cowplot::plot_grid(
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_aupr)),
ggpubr::as_ggplot(ggpubr::get_legend(rotated_dotplot)),
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_lfc)),
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_target_network)),
nrow = 1,
align = "h", rel_widths = c(1.5, 1, 1, 1))
combined_plot = cowplot::plot_grid(figures_without_legend, legends, rel_heights = c(10,5), nrow = 2, align = "hv")
combined_plot
} else {
rotated_dotplot = NULL
p_ligand_lfc = NULL
}
return(list(
ligand_activities = ligand_activities,
top_ligands = best_upstream_ligands,
top_targets = active_ligand_target_links_df$target %>% unique(),
top_receptors = lr_network_top_df_large$to %>% unique(),
ligand_target_matrix = vis_ligand_target,
ligand_target_heatmap = p_ligand_target_network,
ligand_target_df = active_ligand_target_links_df,
ligand_expression_dotplot = rotated_dotplot,
ligand_differential_expression_heatmap = p_ligand_lfc,
ligand_activity_target_heatmap = combined_plot,
ligand_receptor_matrix = vis_ligand_receptor_network,
ligand_receptor_heatmap = p_ligand_receptor_network,
ligand_receptor_df = lr_network_top_df_large %>% rename(ligand = from, receptor = to),
geneset_oi = geneset_oi,
background_expressed_genes = background_expressed_genes
))
}
#' @title Determine expressed genes of a cell type from a Seurat object single-cell RNA seq dataset or Seurat spatial transcriptomics dataset
#'
#' @description \code{get_expressed_genes} Return the genes that are expressed in a given cell cluster based on the fraction of cells in that cluster that should express the gene.
#' @usage
#' get_expressed_genes(ident, seurat_obj, pct = 0.10, assay_oi = NULL)
#'
#' @param ident Name of cluster identity/identities of cells
#' @param seurat_obj Single-cell expression dataset as Seurat object https://satijalab.org/seurat/.
#' @param pct We consider genes expressed if they are expressed in at least a specific fraction of cells of a cluster. This number indicates this fraction. Default: 0.10. Choice of this parameter is important and depends largely on the used sequencing platform. We recommend to require a lower fraction (like the default 0.10) for 10X data than for e.g. Smart-seq2 data.
#' @param assay_oi If wanted: specify yourself which assay to look for. Default this value is NULL and as a consequence the 'most advanced' assay will be used to define expressed genes.
#'
#' @return A character vector with the gene symbols of the expressed genes
#'
#' @import Seurat
#' @import dplyr
#'
#' @examples
#' \dontrun{
#' get_expressed_genes(ident = "CD8 T", seurat_obj = seuratObj, pct = 0.10)
#' }
#'
#' @export
#'
get_expressed_genes = function(ident, seurat_obj, pct = 0.1, assay_oi = NULL){
  # Return the genes detected (count > 0) in at least `pct` fraction of the
  # cells belonging to the requested cluster identity/identities.
  requireNamespace("Seurat")
  requireNamespace("dplyr")
  # ---- input checks ----
  # Validate that the object carries normalized expression data in the slot we
  # will read from. Note: `inherits()` is used instead of `class(x) != "matrix"`
  # because base matrices have class c("matrix", "array") since R 4.0, which
  # would make a `!=` comparison a length-2 condition (an error in R >= 4.2).
  if (!"RNA" %in% names(seurat_obj@assays)) {
    if ("Spatial" %in% names(seurat_obj@assays)) {
      if (!inherits(seurat_obj@assays$Spatial@data, c("matrix", "dgCMatrix"))) {
        warning("Spatial Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$Spatial@data' for default or 'seurat_obj@assays$SCT@data' for when the single-cell transform pipeline was applied")
      }
      if (sum(dim(seurat_obj@assays$Spatial@data)) == 0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$Spatial@data'")
      }
    }
  }
  else {
    if (!inherits(seurat_obj@assays$RNA@data, c("matrix", "dgCMatrix"))) {
      warning("Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data or seurat_obj@assays$SCT@data for when the single-cell transform pipeline was applied")
    }
    if ("integrated" %in% names(seurat_obj@assays)) {
      # At least one of the RNA or integrated slots must hold data.
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0 && sum(dim(seurat_obj@assays$integrated@data)) == 0)
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data")
    }
    else if ("SCT" %in% names(seurat_obj@assays)) {
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0 && sum(dim(seurat_obj@assays$SCT@data)) == 0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$SCT@data' for data corrected via SCT")
      }
    }
    else {
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data'")
      }
    }
  }
  # All requested identities must exist among the object's active identities.
  if (sum(ident %in% unique(Idents(seurat_obj))) != length(ident)) {
    stop("One or more provided cell clusters is not part of the 'Idents' of your Seurat object")
  }
  if(!is.null(assay_oi)){
    if(! assay_oi %in% Seurat::Assays(seurat_obj)){
      stop("assay_oi should be an assay of your Seurat object")
    }
  }
  # ---- select cells of the cluster(s) of interest ----
  cells_oi = Idents(seurat_obj) %>% .[Idents(seurat_obj) %in%
    ident] %>% names()
  # ---- pick the expression matrix ----
  # Use the user-requested assay, or fall back to the "most advanced" assay
  # present (integrated > SCT > Spatial > RNA) if assay_oi is not specified.
  if(!is.null(assay_oi)){
    cells_oi_in_matrix = intersect(colnames(seurat_obj[[assay_oi]]@data), cells_oi)
    exprs_mat = seurat_obj[[assay_oi]]@data %>% .[, cells_oi_in_matrix]
  } else {
    if ("integrated" %in% names(seurat_obj@assays)) {
      warning("Seurat object is result from the Seurat integration workflow. The expressed genes are now defined based on the integrated slot. You can change this via the assay_oi parameter of the get_expressed_genes() functions. Recommended assays: RNA or SCT")
      cells_oi_in_matrix = intersect(colnames(seurat_obj@assays$integrated@data),
        cells_oi)
      if (length(cells_oi_in_matrix) != length(cells_oi))
        stop("Not all cells of interest are in your expression matrix (seurat_obj@assays$integrated@data). Please check that the expression matrix contains cells in columns and genes in rows.")
      exprs_mat = seurat_obj@assays$integrated@data %>% .[,
        cells_oi_in_matrix]
    }
    else if ("SCT" %in% names(seurat_obj@assays) && !"Spatial" %in%
      names(seurat_obj@assays)) {
      warning("Seurat object is result from the Seurat single-cell transform workflow. The expressed genes are defined based on the SCT slot. You can change this via the assay_oi parameter of the get_expressed_genes() functions. Recommended assays: RNA or SCT")
      cells_oi_in_matrix = intersect(colnames(seurat_obj@assays$SCT@data),
        cells_oi)
      if (length(cells_oi_in_matrix) != length(cells_oi))
        stop("Not all cells of interest are in your expression matrix (seurat_obj@assays$SCT@data). Please check that the expression matrix contains cells in columns and genes in rows.")
      exprs_mat = seurat_obj@assays$SCT@data %>% .[, cells_oi_in_matrix]
    }
    else if ("Spatial" %in% names(seurat_obj@assays) &&
      !"SCT" %in% names(seurat_obj@assays)) {
      warning("Seurat object is result from the Seurat spatial object. The expressed genes are defined based on the Spatial slot. If the spatial data is spot-based (mixture of cells) and not single-cell resolution, we recommend against directly using nichenetr on spot-based data (because you want to look at cell-cell interactions, and not at spot-spot interactions! ;-) )")
      cells_oi_in_matrix = intersect(colnames(seurat_obj@assays$Spatial@data),
        cells_oi)
      if (length(cells_oi_in_matrix) != length(cells_oi))
        stop("Not all cells of interest are in your expression matrix (seurat_obj@assays$Spatial@data). Please check that the expression matrix contains cells in columns and genes in rows.")
      exprs_mat = seurat_obj@assays$Spatial@data %>% .[, cells_oi_in_matrix]
    }
    else if ("Spatial" %in% names(seurat_obj@assays) &&
      "SCT" %in% names(seurat_obj@assays)) {
      warning("Seurat object is result from the Seurat spatial object, followed by the SCT workflow. If the spatial data is spot-based (mixture of cells) and not single-cell resolution, we recommend against directly using nichenetr on spot-based data (because you want to look at cell-cell interactions, and not at spot-spot interactions! The expressed genes are defined based on the SCT slot, but this can be changed via the assay_oi parameter.")
      cells_oi_in_matrix = intersect(colnames(seurat_obj@assays$SCT@data),
        cells_oi)
      if (length(cells_oi_in_matrix) != length(cells_oi))
        stop("Not all cells of interest are in your expression matrix (seurat_obj@assays$SCT@data). Please check that the expression matrix contains cells in columns and genes in rows.")
      exprs_mat = seurat_obj@assays$SCT@data %>% .[, cells_oi_in_matrix]
    }
    else {
      if (sum(cells_oi %in% colnames(seurat_obj@assays$RNA@data)) ==
        0)
        stop("None of the cells are in colnames of 'seurat_obj@assays$RNA@data'. The expression matrix should contain cells in columns and genes in rows.")
      cells_oi_in_matrix = intersect(colnames(seurat_obj@assays$RNA@data),
        cells_oi)
      if (length(cells_oi_in_matrix) != length(cells_oi))
        stop("Not all cells of interest are in your expression matrix (seurat_obj@assays$RNA@data). Please check that the expression matrix contains cells in columns and genes in rows.")
      exprs_mat = seurat_obj@assays$RNA@data %>% .[, cells_oi_in_matrix]
    }
  }
  # ---- determine expressed genes from the selected cells ----
  # A gene is "expressed" if its detection fraction across the selected cells
  # is at least `pct`.
  n_cells_oi_in_matrix = length(cells_oi_in_matrix)
  if (n_cells_oi_in_matrix < 5000) {
    genes = exprs_mat %>% apply(1, function(x) {
      sum(x > 0)/n_cells_oi_in_matrix
    }) %>% .[. >= pct] %>% names()
  }
  else {
    # For very large clusters, process genes in chunks of 100 rows so that
    # apply()'s dense conversion of the (sparse) matrix stays memory-friendly.
    splits = split(seq_len(nrow(exprs_mat)), ceiling(seq_len(nrow(exprs_mat))/100))
    genes = splits %>% lapply(function(genes_indices, exprs,
      pct, n_cells_oi_in_matrix) {
      begin_i = genes_indices[1]
      end_i = genes_indices[length(genes_indices)]
      exprs = exprs[begin_i:end_i, , drop = FALSE]
      genes = exprs %>% apply(1, function(x) {
        sum(x > 0)/n_cells_oi_in_matrix
      }) %>% .[. >= pct] %>% names()
    }, exprs_mat, pct, n_cells_oi_in_matrix) %>% unlist() %>%
      unname()
  }
  return(genes)
}
#' @title Perform NicheNet analysis on Seurat object: explain DE between two cell clusters
#'
#' @description \code{nichenet_seuratobj_cluster_de} Perform NicheNet analysis on Seurat object: explain differential expression (DE) between two 'receiver' cell clusters by ligands expressed by neighboring cells.
#' @usage
#' nichenet_seuratobj_cluster_de(seurat_obj, receiver_affected, receiver_reference, sender = "all",ligand_target_matrix,lr_network,weighted_networks,expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE, top_n_ligands = 30,top_n_targets = 200, cutoff_visualization = 0.33,verbose = TRUE, assay_oi = NULL)
#'
#' @param seurat_obj Single-cell expression dataset as Seurat object https://satijalab.org/seurat/.
#' @param receiver_reference Name of cluster identity/identities of "steady-state" cells, before they are affected by intercellular communication with other cells
#' @param receiver_affected Name of cluster identity/identities of "affected" cells that were presumably affected by intercellular communication with other cells
#' @param sender Determine the potential sender cells. Name of cluster identity/identities of cells that presumably affect expression in the receiver cell type. In case you want to look at all possible sender cell types in the data, you can give this argument the value "all". "all" indicates thus that all cell types in the dataset will be considered as possible sender cells. As final option, you could give this argument the value "undefined". "undefined" won't look at ligands expressed by sender cells, but at all ligands for which a corresponding receptor is expressed. This could be useful if the presumably active sender cell is not profiled. Default: "all".
#' @param expression_pct To determine ligands and receptors expressed by sender and receiver cells, we consider genes expressed if they are expressed in at least a specific fraction of cells of a cluster. This number indicates this fraction. Default: 0.10
#' @param lfc_cutoff Cutoff on log fold change in the wilcoxon differential expression test. Default: 0.25.
#' @param geneset Indicate whether to consider all DE genes between condition 1 and 2 ("DE"), or only genes upregulated in condition 1 ("up"), or only genes downregulated in condition 1 ("down").
#' @param filter_top_ligands Indicate whether output tables for ligand-target and ligand-receptor networks should be done for a filtered set of top ligands (TRUE) or for all ligands (FALSE). Default: TRUE.
#' @param top_n_ligands Indicate how many ligands should be extracted as top-ligands after ligand activity analysis. Only for these ligands, target genes and receptors will be returned. Default: 30.
#' @param top_n_targets To predict active, affected targets of the prioritized ligands, consider only DE genes if they also belong to the a priori top n ("top_n_targets") targets of a ligand. Default = 200.
#' @param cutoff_visualization Because almost no ligand-target scores have a regulatory potential score of 0, we clarify the heatmap visualization by giving the links with the lowest scores a score of 0. The cutoff_visualization paramter indicates this fraction of links that are given a score of zero. Default = 0.33.
#' @param ligand_target_matrix The NicheNet ligand-target matrix denoting regulatory potential scores between ligands and targets (ligands in columns).
#' @param lr_network The ligand-receptor network (columns that should be present: $from, $to).
#' @param weighted_networks The NicheNet weighted networks denoting interactions and their weights/confidences in the ligand-signaling and gene regulatory network.
#' @param verbose Print out the current analysis stage. Default: TRUE.
#' @inheritParams get_expressed_genes
#'
#' @return A list with the following elements:
#' $ligand_activities: data frame with output ligand activity analysis;
#' $top_ligands: top_n ligands based on ligand activity;
#' $top_targets: active, affected target genes of these ligands;
#' $top_receptors: receptors of these ligands;
#' $ligand_target_matrix: matrix indicating regulatory potential scores between active ligands and their predicted targets;
#' $ligand_target_heatmap: heatmap of ligand-target regulatory potential;
#' $ligand_target_df: data frame showing regulatory potential scores of predicted active ligand-target network;
#' $ligand_activity_target_heatmap: heatmap showing both ligand activity scores and target genes of these top ligands;
#' $ligand_expression_dotplot: expression dotplot of the top ligands;
#' $ligand_receptor_matrix: matrix of ligand-receptor interactions;
#' $ligand_receptor_heatmap: heatmap showing ligand-receptor interactions;
#' $ligand_receptor_df: data frame of ligand-receptor interactions;
#' $geneset_oi: a vector containing the set of genes used as input for the ligand activity analysis;
#' $background_expressed_genes: the background of genes to which the geneset will be compared in the ligand activity analysis.
#'
#' @import Seurat
#' @import dplyr
#' @importFrom magrittr set_rownames set_colnames
#'
#' @examples
#' \dontrun{
#' seuratObj = readRDS(url("https://zenodo.org/record/3531889/files/seuratObj_test.rds"))
#' lr_network = readRDS(url("https://zenodo.org/record/7074291/files/lr_network_mouse_21122021.rds"))
#' ligand_target_matrix = readRDS(url("https://zenodo.org/record/7074291/files/ligand_target_matrix_nsga2r_final_mouse.rds"))
#' weighted_networks = readRDS(url("https://zenodo.org/record/7074291/files/weighted_networks_nsga2r_final_mouse.rds"))
#' # works, but does not make sense
#' nichenet_seuratobj_cluster_de(seurat_obj = seuratObj, receiver_affected = "CD8 T", receiver_reference = "Mono", sender = "Mono", ligand_target_matrix = ligand_target_matrix, lr_network = lr_network, weighted_networks = weighted_networks)
#' # type of analysis for which this would make sense
#' nichenet_seuratobj_cluster_de(seurat_obj = seuratObj, receiver_affected = "p-EMT-pos-cancer", receiver_reference = "p-EMT-neg-cancer", sender = "Fibroblast", ligand_target_matrix = ligand_target_matrix, lr_network = lr_network, weighted_networks = weighted_networks)
#' }
#'
#' @export
#'
nichenet_seuratobj_cluster_de = function(seurat_obj, receiver_affected, receiver_reference, sender = "all",ligand_target_matrix,lr_network,weighted_networks,
expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE, top_n_ligands = 30,
top_n_targets = 200, cutoff_visualization = 0.33,
verbose = TRUE, assay_oi = NULL)
{
requireNamespace("Seurat")
requireNamespace("dplyr")
# input check
# input check
if(! "RNA" %in% names(seurat_obj@assays)){
if ("Spatial" %in% names(seurat_obj@assays)){
warning("You are going to apply NicheNet on a spatial seurat object. Be sure it's ok to use NicheNet the way you are planning to do it. So this means: you should have changes in gene expression in receiver cells caused by cell-cell interactions. Note that in the case of spatial transcriptomics, you are not dealing with single cells but with 'spots' containing multiple cells of the same of different cell types.")
if (class(seurat_obj@assays$Spatial@data) != "matrix" & class(seurat_obj@assays$Spatial@data) != "dgCMatrix") {
warning("Spatial Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$Spatial@data' for default or 'seurat_obj@assays$SCT@data' for when the single-cell transform pipeline was applied")
}
if (sum(dim(seurat_obj@assays$Spatial@data)) == 0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$Spatial@data'")
}
}} else {
if (class(seurat_obj@assays$RNA@data) != "matrix" &
class(seurat_obj@assays$RNA@data) != "dgCMatrix") {
warning("Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data or seurat_obj@assays$SCT@data for when the single-cell transform pipeline was applied")
}
if ("integrated" %in% names(seurat_obj@assays)) {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$integrated@data)) ==
0)
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data")
}
else if ("SCT" %in% names(seurat_obj@assays)) {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$SCT@data)) ==
0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$SCT@data' for data corrected via SCT")
}
}
else {
if (sum(dim(seurat_obj@assays$RNA@data)) == 0) {
stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data'")
}
}
}
if(sum(receiver_affected %in% unique(Idents(seurat_obj))) != length(receiver_affected))
stop("The defined receiver_affected cell type should be an identity class of your seurat object")
if(sum(receiver_reference %in% unique(Idents(seurat_obj))) != length(receiver_reference))
stop("The defined receiver_reference cell type should be an identity class of your seurat object")
if(length(sender) == 1){
if(sender != "all" & sender != "undefined"){
if(sum(sender %in% unique(Idents(seurat_obj))) != length(sender)){
stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
}
}
} else {
if(sum(sender %in% unique(Idents(seurat_obj))) != length(sender)){
stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
}
}
if(geneset != "DE" & geneset != "up" & geneset != "down")
stop("geneset should be 'DE', 'up' or 'down'")
if("integrated" %in% names(seurat_obj@assays)){
warning("Seurat object is result from the Seurat integration workflow. Make sure that the way of defining expressed and differentially expressed genes in this wrapper is appropriate for your integrated data.")
}
# Read in and process NicheNet networks, define ligands and receptors
if (verbose == TRUE){print("Read in and process NicheNet's networks")}
weighted_networks_lr = weighted_networks$lr_sig %>% inner_join(lr_network %>% distinct(from,to), by = c("from","to"))
ligands = lr_network %>% pull(from) %>% unique()
receptors = lr_network %>% pull(to) %>% unique()
if (verbose == TRUE){print("Define expressed ligands and receptors in receiver and sender cells")}
# step1 nichenet analysis: get expressed genes in sender and receiver cells
## receiver
# expressed genes: only in steady state population (for determining receptors)
list_expressed_genes_receiver_ss = c(receiver_reference) %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_receiver_ss) = c(receiver_reference) %>% unique()
expressed_genes_receiver_ss = list_expressed_genes_receiver_ss %>% unlist() %>% unique()
# expressed genes: both in steady state and affected population (for determining background of expressed genes)
list_expressed_genes_receiver = c(receiver_reference,receiver_affected) %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_receiver) = c(receiver_reference,receiver_affected) %>% unique()
expressed_genes_receiver = list_expressed_genes_receiver %>% unlist() %>% unique()
## sender
if (length(sender) == 1){
if (sender == "all"){
sender_celltypes = Idents(seurat_obj) %>% levels()
list_expressed_genes_sender = sender_celltypes %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
} else if (sender == "undefined") {
if("integrated" %in% names(seurat_obj@assays)){
expressed_genes_sender = union(seurat_obj@assays$integrated@data %>% rownames(),rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
} else {
expressed_genes_sender = union(seurat_obj@assays$RNA@data %>% rownames(),rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
}
} else if (sender != "all" & sender != "undefined") {
sender_celltypes = sender
list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes %>% unique()
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
}
} else {
sender_celltypes = sender
list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
names(list_expressed_genes_sender) = sender_celltypes %>% unique()
expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
}
# step2 nichenet analysis: define background and gene list of interest: here differential expression between two conditions of cell type of interest
if (verbose == TRUE){print("Perform DE analysis between two receiver cell clusters")}
DE_table_receiver = FindMarkers(object = seurat_obj, ident.1 = receiver_affected, ident.2 = receiver_reference, min.pct = expression_pct) %>% rownames_to_column("gene")
SeuratV4 = c("avg_log2FC") %in% colnames(DE_table_receiver)
if(SeuratV4 == TRUE){
if (geneset == "DE"){
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & abs(avg_log2FC) >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "up") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_log2FC >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "down") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_log2FC <= lfc_cutoff) %>% pull(gene)
}
} else {
if (geneset == "DE"){
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & abs(avg_logFC) >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "up") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_logFC >= lfc_cutoff) %>% pull(gene)
} else if (geneset == "down") {
geneset_oi = DE_table_receiver %>% filter(p_val_adj <= 0.05 & avg_logFC <= lfc_cutoff) %>% pull(gene)
}
}
geneset_oi = geneset_oi %>% .[. %in% rownames(ligand_target_matrix)]
if (length(geneset_oi) == 0){
stop("No genes were differentially expressed")
}
background_expressed_genes = expressed_genes_receiver %>% .[. %in% rownames(ligand_target_matrix)]
# step3 nichenet analysis: define potential ligands
expressed_ligands = intersect(ligands,expressed_genes_sender)
expressed_receptors = intersect(receptors,expressed_genes_receiver)
if (length(expressed_ligands) == 0){
stop("No ligands expressed in sender cell")
}
if (length(expressed_receptors) == 0){
stop("No receptors expressed in receiver cell")
}
potential_ligands = lr_network %>% filter(from %in% expressed_ligands & to %in% expressed_receptors) %>% pull(from) %>% unique()
if (length(potential_ligands) == 0){
stop("No potentially active ligands")
}
if (verbose == TRUE){print("Perform NicheNet ligand activity analysis")}
# step4 perform NicheNet's ligand activity analysis
ligand_activities = predict_ligand_activities(geneset = geneset_oi, background_expressed_genes = background_expressed_genes, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
ligand_activities = ligand_activities %>%
arrange(-aupr_corrected) %>%
mutate(rank = rank(desc(aupr_corrected)))
if(filter_top_ligands == TRUE){
best_upstream_ligands = ligand_activities %>% top_n(top_n_ligands, aupr_corrected) %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
} else {
best_upstream_ligands = ligand_activities %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
}
if (verbose == TRUE){print("Infer active target genes of the prioritized ligands")}
# step5 infer target genes of the top-ranked ligands
active_ligand_target_links_df = best_upstream_ligands %>% lapply(get_weighted_ligand_target_links,geneset = geneset_oi, ligand_target_matrix = ligand_target_matrix, n = top_n_targets) %>% bind_rows() %>% drop_na()
if(nrow(active_ligand_target_links_df) > 0){
active_ligand_target_links = prepare_ligand_target_visualization(ligand_target_df = active_ligand_target_links_df, ligand_target_matrix = ligand_target_matrix, cutoff = cutoff_visualization)
order_ligands = intersect(best_upstream_ligands, colnames(active_ligand_target_links)) %>% rev() %>% make.names()
order_targets = active_ligand_target_links_df$target %>% unique() %>% intersect(rownames(active_ligand_target_links)) %>% make.names()
rownames(active_ligand_target_links) = rownames(active_ligand_target_links) %>% make.names()
colnames(active_ligand_target_links) = colnames(active_ligand_target_links) %>% make.names()
order_targets = order_targets %>% intersect(rownames(active_ligand_target_links))
order_ligands = order_ligands %>% intersect(colnames(active_ligand_target_links))
vis_ligand_target = active_ligand_target_links[order_targets,order_ligands] %>% t()
p_ligand_target_network = vis_ligand_target %>% make_heatmap_ggplot("Prioritized ligands","Predicted target genes", color = "purple",legend_position = "top", x_axis_position = "top",legend_title = "Regulatory potential") + theme(axis.text.x = element_text(face = "italic")) #+ scale_fill_gradient2(low = "whitesmoke", high = "purple", breaks = c(0,0.006,0.012))
} else {
vis_ligand_target = NULL
p_ligand_target_network = NULL
print("no highly likely active targets found for top ligands")
}
# combined heatmap: overlay ligand activities
ligand_aupr_matrix = ligand_activities %>% select(aupr_corrected) %>% as.matrix() %>% magrittr::set_rownames(ligand_activities$test_ligand)
rownames(ligand_aupr_matrix) = rownames(ligand_aupr_matrix) %>% make.names()
colnames(ligand_aupr_matrix) = colnames(ligand_aupr_matrix) %>% make.names()
vis_ligand_aupr = ligand_aupr_matrix[order_ligands, ] %>% as.matrix(ncol = 1) %>% magrittr::set_colnames("AUPR")
p_ligand_aupr = vis_ligand_aupr %>% make_heatmap_ggplot("Prioritized ligands","Ligand activity", color = "darkorange",legend_position = "top", x_axis_position = "top", legend_title = "AUPR\n(target gene prediction ability)") + theme(legend.text = element_text(size = 9))
p_ligand_aupr
figures_without_legend = cowplot::plot_grid(
p_ligand_aupr + theme(legend.position = "none", axis.ticks = element_blank()) + theme(axis.title.x = element_text()),
p_ligand_target_network + theme(legend.position = "none", axis.ticks = element_blank()) + ylab(""),
align = "hv",
nrow = 1,
rel_widths = c(ncol(vis_ligand_aupr)+10, ncol(vis_ligand_target)))
legends = cowplot::plot_grid(
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_aupr)),
ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_target_network)),
nrow = 1,
align = "h")
combined_plot = cowplot::plot_grid(figures_without_legend,
legends,
rel_heights = c(10,2), nrow = 2, align = "hv")
# ligand-receptor plot
# get the ligand-receptor network of the top-ranked ligands
if (verbose == TRUE){print("Infer receptors of the prioritized ligands")}
lr_network_top = lr_network %>% filter(from %in% best_upstream_ligands & to %in% expressed_receptors) %>% distinct(from,to)
best_upstream_receptors = lr_network_top %>% pull(to) %>% unique()
lr_network_top_df_large = weighted_networks_lr %>% filter(from %in% best_upstream_ligands & to %in% best_upstream_receptors)
lr_network_top_df = lr_network_top_df_large %>% spread("from","weight",fill = 0)
lr_network_top_matrix = lr_network_top_df %>% select(-to) %>% as.matrix() %>% magrittr::set_rownames(lr_network_top_df$to)
if (nrow(lr_network_top_matrix) > 1){
dist_receptors = dist(lr_network_top_matrix, method = "binary")
hclust_receptors = hclust(dist_receptors, method = "ward.D2")
order_receptors = hclust_receptors$labels[hclust_receptors$order]
} else {
order_receptors = rownames(lr_network_top_matrix)
}
if (ncol(lr_network_top_matrix) > 1) {
dist_ligands = dist(lr_network_top_matrix %>% t(), method = "binary")
hclust_ligands = hclust(dist_ligands, method = "ward.D2")
order_ligands_receptor = hclust_ligands$labels[hclust_ligands$order]
} else {
order_ligands_receptor = colnames(lr_network_top_matrix)
}
order_receptors = order_receptors %>% intersect(rownames(lr_network_top_matrix))
order_ligands_receptor = order_ligands_receptor %>% intersect(colnames(lr_network_top_matrix))
vis_ligand_receptor_network = lr_network_top_matrix[order_receptors, order_ligands_receptor]
dim(vis_ligand_receptor_network) = c(length(order_receptors), length(order_ligands_receptor))
rownames(vis_ligand_receptor_network) = order_receptors %>% make.names()
colnames(vis_ligand_receptor_network) = order_ligands_receptor %>% make.names()
p_ligand_receptor_network = vis_ligand_receptor_network %>% t() %>% make_heatmap_ggplot("Ligands","Receptors", color = "mediumvioletred", x_axis_position = "top",legend_title = "Prior interaction potential")
# ligand expression Seurat dotplot
if (length(sender) > 1){
are_there_senders = TRUE
}
if(length(sender) == 1){
if(sender != "undefined"){
are_there_senders = TRUE
} else {
are_there_senders = FALSE
}
}
if (are_there_senders == TRUE){
real_makenames_conversion = lr_network$from %>% unique() %>% magrittr::set_names(lr_network$from %>% unique() %>% make.names())
order_ligands_adapted = real_makenames_conversion[order_ligands]
names(order_ligands_adapted) = NULL
rotated_dotplot = DotPlot(seurat_obj %>% subset(idents = sender_celltypes), features = order_ligands_adapted, cols = "RdYlBu") + coord_flip() + theme(legend.text = element_text(size = 10), legend.title = element_text(size = 12)) # flip of coordinates necessary because we want to show ligands in the rows when combining all plots
} else {
rotated_dotplot = NULL
}
return(list(
ligand_activities = ligand_activities,
top_ligands = best_upstream_ligands,
top_targets = active_ligand_target_links_df$target %>% unique(),
top_receptors = lr_network_top_df_large$to %>% unique(),
ligand_target_matrix = vis_ligand_target,
ligand_target_heatmap = p_ligand_target_network,
ligand_target_df = active_ligand_target_links_df,
ligand_expression_dotplot = rotated_dotplot,
ligand_activity_target_heatmap = combined_plot,
ligand_receptor_matrix = vis_ligand_receptor_network,
ligand_receptor_heatmap = p_ligand_receptor_network,
ligand_receptor_df = lr_network_top_df_large %>% rename(ligand = from, receptor = to),
geneset_oi = geneset_oi,
background_expressed_genes = background_expressed_genes
))
}
#' @title Perform NicheNet analysis on Seurat object: explain DE between two cell clusters from separate conditions
#'
#' @description \code{nichenet_seuratobj_aggregate_cluster_de} Perform NicheNet analysis on Seurat object: explain differential expression (DE) between two 'receiver' cell clusters coming from different conditions, by ligands expressed by neighboring cells.
#' @usage
#' nichenet_seuratobj_aggregate_cluster_de(seurat_obj, receiver_affected, receiver_reference, condition_colname, condition_oi, condition_reference, sender = "all",ligand_target_matrix,lr_network,weighted_networks,expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE, top_n_ligands = 30,top_n_targets = 200, cutoff_visualization = 0.33,verbose = TRUE, assay_oi = NULL)
#'
#' @param seurat_obj Single-cell expression dataset as Seurat object https://satijalab.org/seurat/.
#' @param receiver_reference Name of cluster identity/identities of "steady-state" cells, before they are affected by intercellular communication with other cells
#' @param receiver_affected Name of cluster identity/identities of "affected" cells that were presumably affected by intercellular communication with other cells
#' @param condition_colname Name of the column in the meta data dataframe that indicates which condition/sample cells were coming from.
#' @param condition_oi Condition of interest in which receiver cells were presumably affected by other cells. Should be a name present in the `condition_colname` column of the metadata.
#' @param condition_reference The second condition (e.g. reference or steady-state condition). Should be a name present in the `condition_colname` column of the metadata.
#' @param sender Determine the potential sender cells. Name of cluster identity/identities of cells that presumably affect expression in the receiver cell type. In case you want to look at all possible sender cell types in the data, you can give this argument the value "all". "all" indicates thus that all cell types in the dataset will be considered as possible sender cells. As final option, you could give this argument the value "undefined"."undefined" won't look at ligands expressed by sender cells, but at all ligands for which a corresponding receptor is expressed. This could be useful if the presumably active sender cell is not profiled. Default: "all".
#' @param expression_pct To determine ligands and receptors expressed by sender and receiver cells, we consider genes expressed if they are expressed in at least a specific fraction of cells of a cluster. This number indicates this fraction. Default: 0.10
#' @param lfc_cutoff Cutoff on log fold change in the wilcoxon differential expression test. Default: 0.25.
#' @param geneset Indicate whether to consider all DE genes between condition 1 and 2 ("DE"), or only genes upregulated in condition 1 ("up"), or only genes downregulated in condition 1 ("down").
#' @param filter_top_ligands Indicate whether output tables for ligand-target and ligand-receptor networks should be done for a filtered set of top ligands (TRUE) or for all ligands (FALSE). Default: TRUE.
#' @param top_n_ligands Indicate how many ligands should be extracted as top-ligands after ligand activity analysis. Only for these ligands, target genes and receptors will be returned. Default: 30.
#' @param top_n_targets To predict active, affected targets of the prioritized ligands, consider only DE genes if they also belong to the a priori top n ("top_n_targets") targets of a ligand. Default = 200.
#' @param cutoff_visualization Because almost no ligand-target scores have a regulatory potential score of 0, we clarify the heatmap visualization by giving the links with the lowest scores a score of 0. The cutoff_visualization parameter indicates this fraction of links that are given a score of zero. Default = 0.33.
#' @param ligand_target_matrix The NicheNet ligand-target matrix of the organism of interest denoting regulatory potential scores between ligands and targets (ligands in columns).
#' @param lr_network The ligand-receptor network (columns that should be present: $from, $to) of the organism of interest.
#' @param weighted_networks The NicheNet weighted networks of the organism of interest denoting interactions and their weights/confidences in the ligand-signaling and gene regulatory network.
#' @param verbose Print out the current analysis stage. Default: TRUE.
#' @inheritParams get_expressed_genes
#'
#' @return A list with the following elements:
#' $ligand_activities: data frame with output ligand activity analysis;
#' $top_ligands: top_n ligands based on ligand activity;
#' $top_targets: active, affected target genes of these ligands;
#' $top_receptors: receptors of these ligands;
#' $ligand_target_matrix: matrix indicating regulatory potential scores between active ligands and their predicted targets;
#' $ligand_target_heatmap: heatmap of ligand-target regulatory potential;
#' $ligand_target_df: data frame showing regulatory potential scores of predicted active ligand-target network;
#' $ligand_activity_target_heatmap: heatmap showing both ligand activity scores and target genes of these top ligands;
#' $ligand_expression_dotplot: expression dotplot of the top ligands;
#' $ligand_receptor_matrix: matrix of ligand-receptor interactions;
#' $ligand_receptor_heatmap: heatmap showing ligand-receptor interactions;
#' $ligand_receptor_df: data frame of ligand-receptor interactions;
#' $geneset_oi: a vector containing the set of genes used as input for the ligand activity analysis;
#' $background_expressed_genes: the background of genes to which the geneset will be compared in the ligand activity analysis.
#'
#' @import Seurat
#' @import dplyr
#' @importFrom magrittr set_rownames set_colnames
#'
#' @examples
#' \dontrun{
#' seuratObj = readRDS(url("https://zenodo.org/record/3531889/files/seuratObj_test.rds"))
#' lr_network = readRDS(url("https://zenodo.org/record/7074291/files/lr_network_mouse_21122021.rds"))
#' ligand_target_matrix = readRDS(url("https://zenodo.org/record/7074291/files/ligand_target_matrix_nsga2r_final_mouse.rds"))
#' weighted_networks = readRDS(url("https://zenodo.org/record/7074291/files/weighted_networks_nsga2r_final_mouse.rds"))
#' nichenet_seuratobj_aggregate_cluster_de(seurat_obj = seuratObj, receiver_affected = "CD8 T", receiver_reference = "CD8 T", condition_colname = "aggregate", condition_oi = "LCMV", condition_reference = "SS", sender = "Mono", ligand_target_matrix = ligand_target_matrix, lr_network = lr_network, weighted_networks = weighted_networks)
#' }
#'
#' @export
#'
nichenet_seuratobj_aggregate_cluster_de = function(seurat_obj, receiver_affected, receiver_reference,
                                                   condition_colname, condition_oi, condition_reference, sender = "all",
                                                   ligand_target_matrix, lr_network, weighted_networks,
                                                   expression_pct = 0.10, lfc_cutoff = 0.25, geneset = "DE", filter_top_ligands = TRUE, top_n_ligands = 30,
                                                   top_n_targets = 200, cutoff_visualization = 0.33,
                                                   verbose = TRUE, assay_oi = NULL)
{
  requireNamespace("Seurat")
  requireNamespace("dplyr")

  # ---- input checks ---------------------------------------------------------
  # Verify that the Seurat object carries normalized expression data in the
  # expected assay slot. Using inherits() instead of `class(x) != "matrix"`:
  # class() on a dense matrix returns c("matrix", "array") in R >= 4.0, which
  # made the original length-2 comparison error inside if() on R >= 4.2.
  if (!"RNA" %in% names(seurat_obj@assays)) {
    if ("Spatial" %in% names(seurat_obj@assays)) {
      warning("You are going to apply NicheNet on a spatial seurat object. Be sure it's ok to use NicheNet the way you are planning to do it. So this means: you should have changes in gene expression in receiver cells caused by cell-cell interactions. Note that in the case of spatial transcriptomics, you are not dealing with single cells but with 'spots' containing multiple cells of the same of different cell types.")
      if (!inherits(seurat_obj@assays$Spatial@data, c("matrix", "dgCMatrix"))) {
        warning("Spatial Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$Spatial@data' for default or 'seurat_obj@assays$SCT@data' for when the single-cell transform pipeline was applied")
      }
      if (sum(dim(seurat_obj@assays$Spatial@data)) == 0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$Spatial@data'")
      }
    }
  } else {
    if (!inherits(seurat_obj@assays$RNA@data, c("matrix", "dgCMatrix"))) {
      warning("Seurat object should contain a matrix of normalized expression data. Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data or seurat_obj@assays$SCT@data for when the single-cell transform pipeline was applied")
    }
    if ("integrated" %in% names(seurat_obj@assays)) {
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$integrated@data)) == 0)
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$integrated@data' for integrated data")
    } else if ("SCT" %in% names(seurat_obj@assays)) {
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0 & sum(dim(seurat_obj@assays$SCT@data)) == 0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data' for default or 'seurat_obj@assays$SCT@data' for data corrected via SCT")
      }
    } else {
      if (sum(dim(seurat_obj@assays$RNA@data)) == 0) {
        stop("Seurat object should contain normalized expression data (numeric matrix). Check 'seurat_obj@assays$RNA@data'")
      }
    }
  }
  # Receiver identities, condition column and condition levels must exist.
  if (sum(receiver_affected %in% unique(Idents(seurat_obj))) != length(receiver_affected))
    stop("The defined receiver_affected cell type should be an identity class of your seurat object")
  if (sum(receiver_reference %in% unique(Idents(seurat_obj))) != length(receiver_reference))
    stop("The defined receiver_reference cell type should be an identity class of your seurat object")
  if (!condition_colname %in% colnames(seurat_obj@meta.data))
    stop("Your column indicating the conditions/samples of interest should be in the metadata dataframe")
  if (sum(condition_oi %in% c(seurat_obj[[condition_colname]] %>% unlist() %>% as.character() %>% unique())) != length(condition_oi))
    stop("condition_oi should be in the condition-indicating column")
  if (sum(condition_reference %in% c(seurat_obj[[condition_colname]] %>% unlist() %>% as.character() %>% unique())) != length(condition_reference))
    stop("condition_reference should be in the condition-indicating column")
  # `sender` is either the keyword "all"/"undefined" or a set of identity classes.
  if (length(sender) == 1) {
    if (sender != "all" & sender != "undefined") {
      if (sum(sender %in% unique(Idents(seurat_obj))) != length(sender)) {
        stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
      }
    }
  } else {
    if (sum(sender %in% unique(Idents(seurat_obj))) != length(sender)) {
      stop("The sender argument should be 'all' or 'undefined' or an identity class of your seurat object")
    }
  }
  if (geneset != "DE" & geneset != "up" & geneset != "down")
    stop("geneset should be 'DE', 'up' or 'down'")
  if ("integrated" %in% names(seurat_obj@assays)) {
    warning("Seurat object is result from the Seurat integration workflow. Make sure that the way of defining expressed and differentially expressed genes in this wrapper is appropriate for your integrated data.")
  }

  # ---- read in and process NicheNet networks --------------------------------
  if (verbose == TRUE) {print("Read in and process NicheNet's networks")}
  # Keep only weighted ligand-signaling edges that are bona fide ligand-receptor pairs.
  weighted_networks_lr = weighted_networks$lr_sig %>% inner_join(lr_network %>% distinct(from, to), by = c("from", "to"))
  ligands = lr_network %>% pull(from) %>% unique()
  receptors = lr_network %>% pull(to) %>% unique()

  # ---- step 1: expressed genes in receiver and sender cells -----------------
  if (verbose == TRUE) {print("Define expressed ligands and receptors in receiver and sender cells")}
  # Receiver, steady-state population only (basis for receptor expression).
  list_expressed_genes_receiver_ss = c(receiver_reference) %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
  names(list_expressed_genes_receiver_ss) = c(receiver_reference) %>% unique()
  expressed_genes_receiver_ss = list_expressed_genes_receiver_ss %>% unlist() %>% unique()
  # Receiver, steady-state + affected populations (background gene universe).
  list_expressed_genes_receiver = c(receiver_reference, receiver_affected) %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
  names(list_expressed_genes_receiver) = c(receiver_reference, receiver_affected) %>% unique()
  expressed_genes_receiver = list_expressed_genes_receiver %>% unlist() %>% unique()
  # Sender: "all" = every identity class; "undefined" = no sender profiled, so
  # consider every gene in the data / ligand-target model; otherwise the given set.
  if (length(sender) == 1) {
    if (sender == "all") {
      sender_celltypes = Idents(seurat_obj) %>% levels()
      list_expressed_genes_sender = sender_celltypes %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
      names(list_expressed_genes_sender) = sender_celltypes
      expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
    } else if (sender == "undefined") {
      if ("integrated" %in% names(seurat_obj@assays)) {
        expressed_genes_sender = union(seurat_obj@assays$integrated@data %>% rownames(), rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
      } else {
        expressed_genes_sender = union(seurat_obj@assays$RNA@data %>% rownames(), rownames(ligand_target_matrix)) %>% union(colnames(ligand_target_matrix))
      }
    } else {
      sender_celltypes = sender
      list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
      names(list_expressed_genes_sender) = sender_celltypes %>% unique()
      expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
    }
  } else {
    sender_celltypes = sender
    list_expressed_genes_sender = sender_celltypes %>% unique() %>% lapply(get_expressed_genes, seurat_obj, expression_pct, assay_oi)
    names(list_expressed_genes_sender) = sender_celltypes %>% unique()
    expressed_genes_sender = list_expressed_genes_sender %>% unlist() %>% unique()
  }

  # ---- step 2: geneset of interest (DE between the two receiver clusters) ---
  if (verbose == TRUE) {print("Perform DE analysis between two receiver cell clusters")}
  # Affected cells: receiver_affected identity restricted to condition_oi.
  seurat_obj_receiver_affected = subset(seurat_obj, idents = receiver_affected)
  seurat_obj_receiver_affected = SetIdent(seurat_obj_receiver_affected, value = seurat_obj_receiver_affected[[condition_colname]])
  seurat_obj_receiver_affected = subset(seurat_obj_receiver_affected, idents = condition_oi)
  # Reference cells: receiver_reference identity restricted to condition_reference.
  seurat_obj_receiver_reference = subset(seurat_obj, idents = receiver_reference)
  seurat_obj_receiver_reference = SetIdent(seurat_obj_receiver_reference, value = seurat_obj_receiver_reference[[condition_colname]])
  seurat_obj_receiver_reference = subset(seurat_obj_receiver_reference, idents = condition_reference)
  seurat_obj_receiver = merge(seurat_obj_receiver_affected, seurat_obj_receiver_reference)
  DE_table_receiver = FindMarkers(object = seurat_obj_receiver, ident.1 = condition_oi, ident.2 = condition_reference, min.pct = expression_pct) %>% rownames_to_column("gene")
  # Seurat >= 4 renamed the fold-change column from avg_logFC to avg_log2FC.
  lfc_col = if ("avg_log2FC" %in% colnames(DE_table_receiver)) "avg_log2FC" else "avg_logFC"
  # NOTE(review): the "down" cutoff keeps genes with lfc <= +lfc_cutoff (matching the
  # original behavior); one would usually expect <= -lfc_cutoff here -- confirm intent.
  geneset_oi = switch(geneset,
    "DE"   = DE_table_receiver %>% filter(p_val_adj <= 0.05 & abs(.data[[lfc_col]]) >= lfc_cutoff) %>% pull(gene),
    "up"   = DE_table_receiver %>% filter(p_val_adj <= 0.05 & .data[[lfc_col]] >= lfc_cutoff) %>% pull(gene),
    "down" = DE_table_receiver %>% filter(p_val_adj <= 0.05 & .data[[lfc_col]] <= lfc_cutoff) %>% pull(gene))
  # Restrict the geneset and background to genes covered by the ligand-target model.
  geneset_oi = geneset_oi[geneset_oi %in% rownames(ligand_target_matrix)]
  if (length(geneset_oi) == 0) {
    stop("No genes were differentially expressed")
  }
  background_expressed_genes = expressed_genes_receiver[expressed_genes_receiver %in% rownames(ligand_target_matrix)]

  # ---- step 3: potential ligands --------------------------------------------
  expressed_ligands = intersect(ligands, expressed_genes_sender)
  expressed_receptors = intersect(receptors, expressed_genes_receiver)
  if (length(expressed_ligands) == 0) {
    stop("No ligands expressed in sender cell")
  }
  if (length(expressed_receptors) == 0) {
    stop("No receptors expressed in receiver cell")
  }
  potential_ligands = lr_network %>% filter(from %in% expressed_ligands & to %in% expressed_receptors) %>% pull(from) %>% unique()
  if (length(potential_ligands) == 0) {
    stop("No potentially active ligands")
  }

  # ---- step 4: ligand activity analysis -------------------------------------
  if (verbose == TRUE) {print("Perform NicheNet ligand activity analysis")}
  ligand_activities = predict_ligand_activities(geneset = geneset_oi, background_expressed_genes = background_expressed_genes, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
  ligand_activities = ligand_activities %>%
    arrange(-aupr_corrected) %>%
    mutate(rank = rank(desc(aupr_corrected)))
  if (filter_top_ligands == TRUE) {
    best_upstream_ligands = ligand_activities %>% top_n(top_n_ligands, aupr_corrected) %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
  } else {
    best_upstream_ligands = ligand_activities %>% arrange(-aupr_corrected) %>% pull(test_ligand) %>% unique()
  }

  # ---- step 5: target genes of the top-ranked ligands -----------------------
  if (verbose == TRUE) {print("Infer active target genes of the prioritized ligands")}
  active_ligand_target_links_df = best_upstream_ligands %>% lapply(get_weighted_ligand_target_links, geneset = geneset_oi, ligand_target_matrix = ligand_target_matrix, n = top_n_targets) %>% bind_rows() %>% drop_na()
  if (nrow(active_ligand_target_links_df) > 0) {
    active_ligand_target_links = prepare_ligand_target_visualization(ligand_target_df = active_ligand_target_links_df, ligand_target_matrix = ligand_target_matrix, cutoff = cutoff_visualization)
    order_ligands = intersect(best_upstream_ligands, colnames(active_ligand_target_links)) %>% rev() %>% make.names()
    order_targets = active_ligand_target_links_df$target %>% unique() %>% intersect(rownames(active_ligand_target_links)) %>% make.names()
    rownames(active_ligand_target_links) = rownames(active_ligand_target_links) %>% make.names()
    colnames(active_ligand_target_links) = colnames(active_ligand_target_links) %>% make.names()
    order_targets = order_targets %>% intersect(rownames(active_ligand_target_links))
    order_ligands = order_ligands %>% intersect(colnames(active_ligand_target_links))
    vis_ligand_target = active_ligand_target_links[order_targets, order_ligands] %>% t()
    p_ligand_target_network = vis_ligand_target %>% make_heatmap_ggplot("Prioritized ligands", "Predicted target genes", color = "purple", legend_position = "top", x_axis_position = "top", legend_title = "Regulatory potential") + theme(axis.text.x = element_text(face = "italic"))
  } else {
    vis_ligand_target = NULL
    p_ligand_target_network = NULL
    # BUGFIX: order_ligands was previously left undefined on this branch, making
    # the AUPR heatmap subsetting and the sender dotplot below error out. Fall
    # back to the prioritized ligands (reversed, syntactically valid names).
    order_ligands = best_upstream_ligands %>% rev() %>% make.names()
    print("no highly likely active targets found for top ligands")
  }

  # ---- ligand activity heatmap, combined with the target heatmap ------------
  ligand_aupr_matrix = ligand_activities %>% select(aupr_corrected) %>% as.matrix() %>% magrittr::set_rownames(ligand_activities$test_ligand)
  rownames(ligand_aupr_matrix) = rownames(ligand_aupr_matrix) %>% make.names()
  colnames(ligand_aupr_matrix) = colnames(ligand_aupr_matrix) %>% make.names()
  vis_ligand_aupr = ligand_aupr_matrix[order_ligands, ] %>% as.matrix(ncol = 1) %>% magrittr::set_colnames("AUPR")
  p_ligand_aupr = vis_ligand_aupr %>% make_heatmap_ggplot("Prioritized ligands", "Ligand activity", color = "darkorange", legend_position = "top", x_axis_position = "top", legend_title = "AUPR\n(target gene prediction ability)") + theme(legend.text = element_text(size = 9))
  if (!is.null(p_ligand_target_network)) {
    figures_without_legend = cowplot::plot_grid(
      p_ligand_aupr + theme(legend.position = "none", axis.ticks = element_blank()) + theme(axis.title.x = element_text()),
      p_ligand_target_network + theme(legend.position = "none", axis.ticks = element_blank()) + ylab(""),
      align = "hv",
      nrow = 1,
      rel_widths = c(ncol(vis_ligand_aupr) + 10, ncol(vis_ligand_target)))
    legends = cowplot::plot_grid(
      ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_aupr)),
      ggpubr::as_ggplot(ggpubr::get_legend(p_ligand_target_network)),
      nrow = 1,
      align = "h")
    combined_plot = cowplot::plot_grid(figures_without_legend,
                                       legends,
                                       rel_heights = c(10, 2), nrow = 2, align = "hv")
  } else {
    # BUGFIX: without active targets there is no target heatmap to combine;
    # return the activity heatmap alone instead of failing on a NULL plot.
    combined_plot = p_ligand_aupr
  }

  # ---- ligand-receptor network of the top-ranked ligands --------------------
  if (verbose == TRUE) {print("Infer receptors of the prioritized ligands")}
  lr_network_top = lr_network %>% filter(from %in% best_upstream_ligands & to %in% expressed_receptors) %>% distinct(from, to)
  best_upstream_receptors = lr_network_top %>% pull(to) %>% unique()
  lr_network_top_df_large = weighted_networks_lr %>% filter(from %in% best_upstream_ligands & to %in% best_upstream_receptors)
  lr_network_top_df = lr_network_top_df_large %>% spread("from", "weight", fill = 0)
  lr_network_top_matrix = lr_network_top_df %>% select(-to) %>% as.matrix() %>% magrittr::set_rownames(lr_network_top_df$to)
  # Hierarchical clustering of receptors/ligands for a readable heatmap;
  # clustering needs at least two rows/columns, otherwise keep the single name.
  if (nrow(lr_network_top_matrix) > 1) {
    dist_receptors = dist(lr_network_top_matrix, method = "binary")
    hclust_receptors = hclust(dist_receptors, method = "ward.D2")
    order_receptors = hclust_receptors$labels[hclust_receptors$order]
  } else {
    order_receptors = rownames(lr_network_top_matrix)
  }
  if (ncol(lr_network_top_matrix) > 1) {
    dist_ligands = dist(lr_network_top_matrix %>% t(), method = "binary")
    hclust_ligands = hclust(dist_ligands, method = "ward.D2")
    order_ligands_receptor = hclust_ligands$labels[hclust_ligands$order]
  } else {
    order_ligands_receptor = colnames(lr_network_top_matrix)
  }
  order_receptors = order_receptors %>% intersect(rownames(lr_network_top_matrix))
  order_ligands_receptor = order_ligands_receptor %>% intersect(colnames(lr_network_top_matrix))
  vis_ligand_receptor_network = lr_network_top_matrix[order_receptors, order_ligands_receptor]
  # Re-assert matrix dimensions: single-row/column subsetting drops to a vector.
  dim(vis_ligand_receptor_network) = c(length(order_receptors), length(order_ligands_receptor))
  rownames(vis_ligand_receptor_network) = order_receptors %>% make.names()
  colnames(vis_ligand_receptor_network) = order_ligands_receptor %>% make.names()
  p_ligand_receptor_network = vis_ligand_receptor_network %>% t() %>% make_heatmap_ggplot("Ligands", "Receptors", color = "mediumvioletred", x_axis_position = "top", legend_title = "Prior interaction potential")

  # ---- ligand expression dotplot (only when real sender cell types exist) ---
  are_there_senders = !(length(sender) == 1 && sender == "undefined")
  if (are_there_senders == TRUE) {
    # Map make.names()-mangled ligand names back to real gene symbols for DotPlot.
    real_makenames_conversion = lr_network$from %>% unique() %>% magrittr::set_names(lr_network$from %>% unique() %>% make.names())
    order_ligands_adapted = real_makenames_conversion[order_ligands]
    names(order_ligands_adapted) = NULL
    # Coordinates flipped so ligands appear in rows, matching the other heatmaps.
    rotated_dotplot = DotPlot(seurat_obj %>% subset(idents = sender_celltypes), features = order_ligands_adapted, cols = "RdYlBu") + coord_flip() + theme(legend.text = element_text(size = 10), legend.title = element_text(size = 12))
  } else {
    rotated_dotplot = NULL
  }

  return(list(
    ligand_activities = ligand_activities,
    top_ligands = best_upstream_ligands,
    top_targets = active_ligand_target_links_df$target %>% unique(),
    top_receptors = lr_network_top_df_large$to %>% unique(),
    ligand_target_matrix = vis_ligand_target,
    ligand_target_heatmap = p_ligand_target_network,
    ligand_target_df = active_ligand_target_links_df,
    ligand_expression_dotplot = rotated_dotplot,
    ligand_activity_target_heatmap = combined_plot,
    ligand_receptor_matrix = vis_ligand_receptor_network,
    ligand_receptor_heatmap = p_ligand_receptor_network,
    ligand_receptor_df = lr_network_top_df_large %>% rename(ligand = from, receptor = to),
    geneset_oi = geneset_oi,
    background_expressed_genes = background_expressed_genes
  ))
}
#' @title Get log fold change values of genes in cell type of interest
#'
#' @description \code{get_lfc_celltype} Get log fold change of genes between two conditions in cell type of interest when using a Seurat single-cell object.
#'
#' @usage
#' get_lfc_celltype(celltype_oi, seurat_obj, condition_colname, condition_oi, condition_reference, celltype_col = "celltype", expression_pct = 0.10)
#'
#' @param seurat_obj Single-cell expression dataset as Seurat object https://satijalab.org/seurat/.
#' @param celltype_oi Name of celltype of interest. Should be present in the celltype metadata dataframe.
#' @param condition_colname Name of the column in the meta data dataframe that indicates which condition/sample cells were coming from.
#' @param condition_oi Condition of interest. Should be a name present in the "condition_colname" column of the metadata.
#' @param condition_reference The second condition (e.g. reference or steady-state condition). Should be a name present in the "condition_colname" column of the metadata.
#' @param celltype_col Metadata column name where the cell type identifier is stored. Default: "celltype". If this is NULL, the Idents() of the seurat object will be considered as your cell type identifier.
#' @param expression_pct To consider only genes if they are expressed in at least a specific fraction of cells of a cluster. This number indicates this fraction. Default: 0.10
#'
#' @return A tbl with the log fold change values of genes. Positive lfc values: higher in condition_oi compared to condition_reference.
#'
#' @import Seurat
#' @import dplyr
#'
#' @examples
#' \dontrun{
#' requireNamespace("dplyr")
#' seuratObj = readRDS(url("https://zenodo.org/record/3531889/files/seuratObj_test.rds"))
#' get_lfc_celltype(seurat_obj = seuratObj, celltype_oi = "CD8 T", condition_colname = "aggregate", condition_oi = "LCMV", condition_reference = "SS", celltype_col = "celltype", expression_pct = 0.10)
#' }
#' @export
#'
get_lfc_celltype = function(celltype_oi, seurat_obj, condition_colname, condition_oi, condition_reference, celltype_col = "celltype", expression_pct = 0.10){
  requireNamespace("Seurat")
  requireNamespace("dplyr")
  # Restrict the object to the cell type of interest. When a metadata column is
  # given, use it as the identity; otherwise rely on the object's current Idents().
  if (is.null(celltype_col)) {
    seurat_obj_oi = subset(seurat_obj, idents = celltype_oi)
  } else {
    seurat_obj_oi = SetIdent(seurat_obj, value = seurat_obj[[celltype_col]])
    seurat_obj_oi = subset(seurat_obj_oi, idents = celltype_oi)
  }
  # Switch identities to the condition column, then test condition_oi vs
  # condition_reference within this cell type (positive lfc = higher in condition_oi).
  seurat_obj_oi = SetIdent(seurat_obj_oi, value = seurat_obj_oi[[condition_colname]])
  de_table = FindMarkers(object = seurat_obj_oi, ident.1 = condition_oi, ident.2 = condition_reference, min.pct = expression_pct, logfc.threshold = 0.05) %>% rownames_to_column("gene")
  # Seurat >= 4 reports avg_log2FC; older versions report avg_logFC.
  if ("avg_log2FC" %in% colnames(de_table)) {
    de_table = de_table %>% as_tibble() %>% select(gene, avg_log2FC)
  } else {
    de_table = de_table %>% as_tibble() %>% select(gene, avg_logFC)
  }
  # Name the fold-change column after the cell type so per-celltype outputs can be joined.
  colnames(de_table) = c("gene", celltype_oi)
  return(de_table)
}
|
# Load the MASS package (provides fitdistr)
library(MASS);
# z holds the extreme-value data drawn from a geometric law
# NOTE(review): dataZ041018 must already exist in the workspace -- it is not loaded here
z<-dataZ041018[1:10000,1];
# Fit a Weibull and a log-normal distribution by maximum likelihood (fitdistr)
paraw <- fitdistr(z,densfun="weibull");
logLik(paraw) # log-likelihood of the Weibull fit
paral <- fitdistr(z, densfun="log-normal");
logLik(paral) # log-likelihood of the log-normal fit
# Visualise the results on one plot: histogram + fitted densities
# Histogram (freq = F gives a density scale, required to overlay densities)
hist(z,
     freq = F,
     breaks = 100,
     col = "green",
     xlab = "Extremum",
     ylab = "Densité",
     main = "Loi d'extremum, cas géométrique p=0.1"
);# remember freq=FALSE!!
# Overlay the two fitted densities evaluated on 0..max(z)
lines(dweibull(0:max(z),shape=paraw$estimate[1],scale=paraw$estimate[2]),type='l',col='blue',lwd=2);
lines(dlnorm(0:max(z),meanlog = paral$estimate[1],sdlog=paral$estimate[2]),type='l',col='red',lwd=2);
legend(80, 0.04, legend=c("Loi de Weibull estimée","Loi Log-normale estimée"),col=c("blue", "red"), lty=1, cex=0.8);
|
/src/geomExtrem.R
|
no_license
|
perfectstrong/OS13-Devoir2
|
R
| false
| false
| 983
|
r
|
# Load the MASS package (provides fitdistr)
library(MASS);
# z holds the extreme-value data drawn from a geometric law
# NOTE(review): dataZ041018 must already exist in the workspace -- it is not loaded here
z<-dataZ041018[1:10000,1];
# Fit a Weibull and a log-normal distribution by maximum likelihood (fitdistr)
paraw <- fitdistr(z,densfun="weibull");
logLik(paraw) # log-likelihood of the Weibull fit
paral <- fitdistr(z, densfun="log-normal");
logLik(paral) # log-likelihood of the log-normal fit
# Visualise the results on one plot: histogram + fitted densities
# Histogram (freq = F gives a density scale, required to overlay densities)
hist(z,
     freq = F,
     breaks = 100,
     col = "green",
     xlab = "Extremum",
     ylab = "Densité",
     main = "Loi d'extremum, cas géométrique p=0.1"
);# remember freq=FALSE!!
# Overlay the two fitted densities evaluated on 0..max(z)
lines(dweibull(0:max(z),shape=paraw$estimate[1],scale=paraw$estimate[2]),type='l',col='blue',lwd=2);
lines(dlnorm(0:max(z),meanlog = paral$estimate[1],sdlog=paral$estimate[2]),type='l',col='red',lwd=2);
legend(80, 0.04, legend=c("Loi de Weibull estimée","Loi Log-normale estimée"),col=c("blue", "red"), lty=1, cex=0.8);
|
#########################################
# MECHANICS OF CORRESPONDENCE ANALYSIS  #
#########################################
# Vadim Khaitov, Marina Varfolomeeva
# Problems of PCA
library(readxl)
birds <- read_excel(path = "data/macnally.xlsx")
# convert the column names to lower case
colnames(birds) <- tolower(colnames(birds))
# Run a principal component analysis and visualise its results for the data in macnally.xlsx
# Code for the principal component analysis
library(vegan)
bird_pca <- rda(birds[,-c(1:2)], scale = T)
screeplot(bird_pca, bstick = T)
# Code reporting how informative each PC is
summary(bird_pca)
plot(bird_pca, display = "sites")
# Code for the biplot
biplot(bird_pca, scaling = "sites")
biplot(bird_pca, scaling = "species")
biplot(bird_pca, scaling = "species", display = "species")
# Mechanics of correspondence analysis
library(vegan)
data(mite)
data(mite.env)
data(mite.xy)
head(mite[ , 1:6], 2)
str(mite)
str(mite.xy)
str(mite.env)
mite_pca <- rda(mite, scaling = TRUE)
screeplot(mite_pca, bstick = T)
biplot(mite_pca, scaling = "sites", type = "t")
mite$LCIL
plot(mite_pca, display = "sites", type = "t")
mite_mds <- metaMDS(mite)
plot(mite_mds, display = "site")
mite_ca <- cca(mite)
# biplot(mite_ca)
screeplot(mite_ca, bstick = T)
plot(mite_ca)
#### Contingency tables #####
peas <- matrix(c(99, 42, 29, 13), byrow = T, ncol = 2)
Ft <- sum(peas)
f_i <- apply(peas, MARGIN = 1, FUN = sum)
f_j <- apply(peas, 2, FUN = sum)
p_i <- f_i / Ft #vector of probabilities for seed shape
p_j <- f_j / Ft #vector of probabilities for seed colour
q <- p_i %*% t(p_j)
E <- q * Ft
O <- peas
sum((O-E)^2/E)
chisq.test(x = O, p = q, correct = F)
### CA by hand ##############
Ft <- sum(mite)
f_ij <- mite #frequency of each species in each sample, i.e. the raw data!
p_ij <- mite/Ft #probability of meeting a given species in a given sample
Ft <- sum(mite) #total number of animals found
f_i <- apply(mite, MARGIN = 1, FUN = sum) #total number of individuals in each sample
p_i <- f_i/Ft #vector of probabilities of meeting any individual in a given sample
f_j <- apply(mite, MARGIN = 2, FUN = sum) #total number of individuals of each species
p_j <- f_j/Ft #vector of probabilities of meeting an individual of a given species
q <- p_i %*% t(p_j) #probability of meeting an individual in a given sample.
E <- (p_i %*% t(p_j) * Ft)
O <- mite
Chi2 <- sum((O-E)^2/E)
Inertia <- Chi2/Ft
mite_cca <- cca(mite)
summary(mite_cca)
Q1 <- (p_ij - p_i %*% t(p_j))/sqrt(p_i %*% t(p_j))
#The same matrix computed from the raw frequencies
Q <- (f_ij*Ft - f_i %*% t(f_j))/(Ft*sqrt(f_i %*% t(f_j)))
sum(Q^2)
Q <- as.matrix(Q)
sum(eigenvals(mite_cca))
bstick(mite_cca)
chisq.test(mite/sum(mite))
str(mite_cca)
mite_cca$rowsum #row sums divided by the grand total of the table: the row weights.
apply(mite, 1, FUN = sum)/sum(mite)
mite_cca$colsum #column sums divided by the grand total of the table: the column weights.
apply(mite, 2, FUN = sum)/sum(mite)
# Compute the Q matrix
p_ij <- mite/sum(mite)
p_i <- apply(mite, 1, FUN = sum)/sum(mite) #marginal row sums
p_j <- apply(mite, 2, FUN = sum)/sum(mite) #marginal column sums
Q <- (p_ij - p_i %*% t(p_j))/sqrt(p_i %*% t(p_j))
f_ij <- mite #frequency of each species in each sample, i.e. the raw data!
p_ij <- mite/Ft #probability of meeting a given species in a given sample
q <- p_i %*% t(p_j) #probability of meeting an individual in a given sample.
Ft <- sum(mite) #total number of animals found
f_i <- apply(mite, MARGIN = 1, FUN = sum) #total number of individuals in each sample
p_i <- f_i/Ft #vector of probabilities of meeting any individual in a given sample
f_j <- apply(mite, MARGIN = 2, FUN = sum) #total number of individuals of each species
p_j <- f_j/Ft #vector of probabilities of meeting an individual of a given species
Q <- (p_ij - p_i %*% t(p_j))/sqrt(p_i %*% t(p_j))
summary(mite_ca)
sum(Q^2)
#Or the same thing in absolute counts
f_ij <- mite
f <- sum(mite)
f_i <- apply(mite, 1, FUN = sum)
length(f_i)
f_j <- apply(mite, 2, FUN = sum)
length(f_j)
# Expected frequencies under the null model, i.e. assuming all stations and all species are independent
E <- f_i %*% t(f_j) / f
Q <- (f_ij*f - f_i %*% t(f_j))/(f*sqrt(f_i %*% t(f_j)))
class(Q)
Q <- as.matrix(Q)
Inertia <- sum(Q^2)
# The same thing in terms of observed (O) and expected (E) frequencies
O <- mite
sum(((O - E)/sqrt(E) / sum(O))^2 * f) #the same inertia
### Singular value decomposition of the contingency matrix ######
U <- svd(Q)$u
D <- svd(Q)$d
V <- svd(Q)$v
dim(U)
dim(V)
round(D, 2)
str(D)
Qsvd <- U %*% diag(D) %*% t(V) #matrix "reconstructed" from the factor matrices
round(sum(Q - Qsvd)) #difference between the original and the "reconstructed" matrices
# Link between the SVD and the eigenvalues
D <- diag(D)
dim(D)
round(t(Q) %*% Q - V %*% t(D) %*% D %*% t(V))
A <- t(Q) %*% Q
eig_values <- eigen(A)$values #eigenvalues of matrix A
eig_vectors <- eigen(A)$vectors #matrix of eigenvectors of matrix A
plot(eig_values, diag(D))
round(eig_values, 4)
eigen(t(Q) %*% Q)$values #eigenvalues of the covariance matrix Q'Q
svd((t(Q) %*% Q))$d #these are the same eigenvalues.
diag(D)^2 #squares of the singular values
plot(eigen(t(Q) %*% Q)$values, diag(D))
sum(eig_values)
Information <- data.frame(
  CA = 1:length(eig_values),
  Eigenval =round(eig_values, 5),
  Prop_Explained = round(eig_values/sum(eig_values), 5),
  Cumul_Prop=round(cumsum(eig_values/sum(eig_values)),5)
)
CA_samples <- diag(p_i^(-1/2))%*% U[,1:2]
library(ggplot2)
Pl_CA_st <-
  ggplot(as.data.frame(CA_samples), aes(x=V1, y=V2) ) +
  geom_text(label = rownames(mite)) +
  geom_hline(yintercept=0, linetype = 2) +
  geom_vline(xintercept = 0, linetype = 2) +
  theme_bw() +
  labs(x= "CA1", y = "CA2")
Pl_CA_st
CA_species <- diag(p_j^(-1/2))%*% V[,1:2]
Pl_CA_sp <-
  ggplot(as.data.frame( CA_species), aes(x = V1, y = V2) ) +
  geom_hline(yintercept=0, linetype = 2) +
  geom_vline(xintercept = 0, linetype = 2) +
  theme_bw() +
  labs(x= "CA1", y = "CA2") +
  geom_text(label = names(mite))
Pl_CA_sp
# Exercise: run a correspondence analysis of the Australian bird data using the "manual" approach
|
/09_CA_calculation.R
|
no_license
|
varmara/multivar
|
R
| false
| false
| 8,167
|
r
|
#######################################
# МЕХАНИКА КОРРЕСПОНДЕНТНОГО АНАЛИЗА #
#######################################
#Вадим Хайтов, Марина Варфоломеева
# Проблемы PCA
library(readxl)
birds <- read_excel(path = "data/macnally.xlsx")
# имена переводим в нижний регистр
colnames(birds) <- tolower(colnames(birds))
# Проведите анализ главных компонент и визуализируйте его результаты для данных приведенных в датасете macnally.xlsx
#Код для анализа главных компонент
library(vegan)
bird_pca <- rda(birds[,-c(1:2)], scale = T)
screeplot(bird_pca, bstick = T)
# Код для вывода информации об информативности PC
summary(bird_pca)
plot(bird_pca, display = "sites")
# Код для построения биплота
biplot(bird_pca, scaling = "sites")
biplot(bird_pca, scaling = "species")
biplot(bird_pca, scaling = "species", display = "species")
# Механика Корреспондентного анализа
library(vegan)
data(mite)
data(mite.env)
data(mite.xy)
head(mite[ , 1:6], 2)
str(mite)
str(mite.xy)
str(mite.env)
mite_pca <- rda(mite, scaling = TRUE)
screeplot(mite_pca, bstick = T)
biplot(mite_pca, scaling = "sites", type = "t")
mite$LCIL
plot(mite_pca, display = "sites", type = "t")
mite_mds <- metaMDS(mite)
plot(mite_mds, display = "site")
mite_ca <- cca(mite)
# biplot(mite_ca)
screeplot(mite_ca, bstick = T)
plot(mite_ca)
#### Матрицы сопряженности #####
peas <- matrix(c(99, 42, 29, 13), byrow = T, ncol = 2)
Ft <- sum(peas)
f_i <- apply(peas, MARGIN = 1, FUN = sum)
f_j <- apply(peas, 2, FUN = sum)
p_i <- f_i / Ft #Вектор вероятностей для формы
p_j <- f_j / Ft #Вектор вероятностей для цвета
q <- p_i %*% t(p_j)
E <- q * Ft
O <- peas
sum((O-E)^2/E)
chisq.test(x = O, p = q, correct = F)
### CA Вручную ##############
Ft <- sum(mite)
f_ij <- mite #Частота встречи данного вида в данной пробе, то есть это первичные даные!
p_ij <- mite/Ft #вероятность встречи данного вида в данной пробе
Ft <- sum(mite) #Общее количество найденных животных
f_i <- apply(mite, MARGIN = 1, FUN = sum) #Общее количество особей в каждой пробе
p_i <- f_i/Ft #Вектор вероятностей встретить какую-либо особь в данной пробе
f_j <- apply(mite, MARGIN = 2, FUN = sum) #Общее количество особей в каждом виде
p_j <- f_j/Ft #Вектор вероятностей встретить особь данного вида
q <- p_i %*% t(p_j) #вероятность встретить особь в данной пробе.
E <- (p_i %*% t(p_j) * Ft)
O <- mite
Chi2 <- sum((O-E)^2/E)
Inertia <- Chi2/Ft
mite_cca <- cca(mite)
summary(mite_cca)
Q1 <- (p_ij - p_i %*% t(p_j))/sqrt(p_i %*% t(p_j))
#Та же матрица, вычисленная через частоты
Q <- (f_ij*Ft - f_i %*% t(f_j))/(Ft*sqrt(f_i %*% t(f_j)))
sum(Q^2)
Q <- as.matrix(Q)
sum(eigenvals(mite_cca))
bstick(mite_cca)
chisq.test(mite/sum(mite))
str(mite_cca)
mite_cca$rowsum #Это сумма по строкам, деленная на общее количестов чисел во сей таблице. Это вес строк.
apply(mite, 1, FUN = sum)/sum(mite)
mite_cca$colsum #Это сумма по столбцам, деленная на общее количестов чисел во сей таблице. Это вес колонок.
apply(mite, 2, FUN = sum)/sum(mite)
# Вычисляем матрицу Q
p_ij <- mite/sum(mite)
p_i <- apply(mite, 1, FUN = sum)/sum(mite) #Маргинальная сумма по строкам
p_j <- apply(mite, 2, FUN = sum)/sum(mite) #Маргинальная сумма по столбцам
Q <- (p_ij - p_i %*% t(p_j))/sqrt(p_i %*% t(p_j))
f_ij <- mite #Частота встречи данного вида в данной пробе, то есть это первичные даные!
p_ij <- mite/Ft #вероятность встречи данного вида в данной пробе
q <- p_i %*% t(p_j) #вероятность встретить особь в данной пробе.
Ft <- sum(mite) #Общее количество найденных животных
f_i <- apply(mite, MARGIN = 1, FUN = sum) #Общее количество особей в каждой пробе
p_i <- f_i/Ft #Вектор вероятностей встретить какую-либо особь в данной пробе
f_j <- apply(mite, MARGIN = 2, FUN = sum) #Общее количество особей в каждом виде
p_j <- f_j/Ft #Вектор вероятностей встретить особь данного вида
Q <- (p_ij - p_i %*% t(p_j))/sqrt(p_i %*% t(p_j))
summary(mite_ca)
sum(Q^2)
#Или все то же самое в абсолютных значениях
f_ij <- mite
f <- sum(mite)
f_i <- apply(mite, 1, FUN = sum)
length(f_i)
f_j <- apply(mite, 2, FUN = sum)
length(f_j)
# Ожидаемые частоты для нулевой модели, то есть при условии, что все станции и все виды независимы
E <- f_i %*% t(f_j) / f
Q <- (f_ij*f - f_i %*% t(f_j))/(f*sqrt(f_i %*% t(f_j)))
class(Q)
Q <- as.matrix(Q)
Inertia <- sum(Q^2)
# Все то же самое в терминах наблюдаемые (O) и ожидаемые (E) частоты
O <- mite
sum(((O - E)/sqrt(E) / sum(O))^2 * f) #та же самая инерция
### Сингулярное разложеине матрицы сопряженности ######
U <- svd(Q)$u
D <- svd(Q)$d
V <- svd(Q)$v
dim(U)
dim(V)
round(D, 2)
str(D)
Qsvd <- U %*% diag(D) %*% t(V) #матрица "восстановленная" из "вспомогательных" матриц
round(sum(Q - Qsvd)) #разность между исходной и "восстановленной" матрицами
# Связь SVD и собственных значений
D <- diag(D)
dim(D)
round(t(Q) %*% Q - V %*% t(D) %*% D %*% t(V))
A <- t(Q) %*% Q
eig_values <- eigen(A)$values #Собственные числа матрицы A
eig_vectors <- eigen(A)$vectors #Матрица собственных векторов для матрицы A
plot(eig_values, diag(D))
round(eig_values, 4)
eigen(t(Q) %*% Q)$values #Собственные значения для матрицы ковариации Q'Q
svd((t(Q) %*% Q))$d #Это те же собственные значения.
diag(D)^2 #Квадраты сингулярных чисел
plot(eigen(t(Q) %*% Q)$values, diag(D))
sum(eig_values)
Information <- data.frame(
CA = 1:length(eig_values),
Eigenval =round(eig_values, 5),
Prop_Explained = round(eig_values/sum(eig_values), 5),
Cumul_Prop=round(cumsum(eig_values/sum(eig_values)),5)
)
CA_samples <- diag(p_i^(-1/2))%*% U[,1:2]
library(ggplot2)
Pl_CA_st <-
ggplot(as.data.frame(CA_samples), aes(x=V1, y=V2) ) +
geom_text(label = rownames(mite)) +
geom_hline(yintercept=0, linetype = 2) +
geom_vline(xintercept = 0, linetype = 2) +
theme_bw() +
labs(x= "CA1", y = "CA2")
Pl_CA_st
CA_species <- diag(p_j^(-1/2))%*% V[,1:2]
Pl_CA_sp <-
ggplot(as.data.frame( CA_species), aes(x = V1, y = V2) ) +
geom_hline(yintercept=0, linetype = 2) +
geom_vline(xintercept = 0, linetype = 2) +
theme_bw() +
labs(x= "CA1", y = "CA2") +
geom_text(label = names(mite))
Pl_CA_sp
# Задание: Проведите корреспондентный анализ данных по птицам Австралии, используя "ручной" метод обработки
|
\name{rating.scale.name<-}
\docType{methods}
\alias{rating.scale.name<-}
\alias{set.RISK.NAME<-}
\alias{set.RISK.NAME<-,crp.CSFP,character-method}
\alias{rating.scale.name<--methods}
\alias{rating.scale.name<-,crp.CSFP,character-method}
\title{Set the name for the file containing the rating scale}
\description{
The method changes the model's value of \code{rating.scale.name}.}
\keyword{methods}
|
/man/rating.scale.name_--methods.Rd
|
no_license
|
cran/crp.CSFP
|
R
| false
| false
| 407
|
rd
|
\name{rating.scale.name<-}
\docType{methods}
\alias{rating.scale.name<-}
\alias{set.RISK.NAME<-}
\alias{set.RISK.NAME<-,crp.CSFP,character-method}
\alias{rating.scale.name<--methods}
\alias{rating.scale.name<-,crp.CSFP,character-method}
\title{Set the name for the file containing the rating scale}
\description{
The method changes the model's value of \code{rating.scale.name}.}
\keyword{methods}
|
##' @title Calculate the MS1 and MS2 level QC metrics
##' @description Calculate the MS1 and MS2 level QC metrics. For every raw
##' file listed in the experiment design, the MS1 and MS2 spectrum headers
##' are written to "<file>-ms1qc.txt" and "<file>-ms2qc.txt" under
##' \code{outdir}.
##' @param spectraList An experiment design input file
##' @param outdir Output directory
##' @param cpu The number of cpu used. 0 means autodetect; the value is
##' capped at 4.
##' @return A data frame: the experiment design merged with the paths of the
##' per-file QC tables (columns "MS1QC" and "MS2QC")
##' @author Bo Wen \email{wenbo@@genomics.cn}
calcMSQCMetrics=function(spectraList=NULL,cpu=2,outdir="./"){
  exp <- read.delim(spectraList)
  # cpu == 0 means "use all available cores"
  if(cpu==0){
    cpu <- detectCores()
  }
  # Cap the worker count at 4
  if(cpu>=4){
    cpu=4
  }
  cl <- makeCluster(getOption("cl.cores", cpu))
  clusterEvalQ(cl,library("MSnbase"))
  clusterExport(cl, c("outdir"),envir=environment())
  # Process the raw files in parallel; each worker writes the two QC tables
  # and returns their paths.
  res<-parSapply(cl,as.character(exp[,.INPUT.COLS["FILE"]]),function(x){
    mz <- readMSData(x,msLevel=1)
    outfile <- paste(outdir,"/",basename(x),"-ms1qc.txt",collapse="",sep="")
    write.table(x=header(mz),file=outfile,quote=FALSE,sep="\t",
                col.names=TRUE, row.names=FALSE)
    mz2 <- readMSData(x,msLevel=2)
    outfile2 <- paste(outdir,"/",basename(x),"-ms2qc.txt",collapse="",sep="")
    write.table(x=header(mz2),file=outfile2,quote=FALSE,sep="\t",
                col.names=TRUE, row.names=FALSE)
    out <- c(outfile,outfile2)
    ## return a matrix. Can not use data.frame here.
    out
  })
  stopCluster(cl)
  # parSapply returns a 2 x n-file matrix; transpose to one row per file.
  res<-as.data.frame(t(res))
  names(res) <- c("MS1QC","MS2QC")
  # NOTE(review): the merge below assumes .INPUT.COLS["FILE"] == "file" --
  # verify against the package-level constant.
  res[,"file"]=rownames(res)
  res <- merge(res,exp,by=.INPUT.COLS["FILE"])
  res
}
plotMS1TIC=function(x, fig="test.png"){
  # Plot the total ion current (TIC) of MS1 spectra against retention time
  # (minutes), one panel per fraction/sample, saved to `fig`.
  x$MS1QC <- as.character(x$MS1QC)
  x$MS2QC <- as.character(x$MS2QC)
  tic_dat <- plyr::ddply(
    x, .(sample, bioRep, techRep, fraction), summarise,
    y = read.delim(MS1QC)$tic,
    x = read.delim(MS1QC)$retention.time / 60
  )
  ggplot.RT(tic_dat, fig = fig, xlab = "Retention time", ylab = "TIC")
}
plotMS1PeaksCount=function(x, fig="test.png"){
  # Plot the per-spectrum MS1 peaks count against retention time (minutes),
  # one panel per fraction/sample, saved to `fig`.
  x$MS1QC <- as.character(x$MS1QC)
  x$MS2QC <- as.character(x$MS2QC)
  pk_dat <- plyr::ddply(
    x, .(sample, bioRep, techRep, fraction), summarise,
    y = read.delim(MS1QC)$peaks.count,
    x = read.delim(MS1QC)$retention.time / 60
  )
  ggplot.RT(pk_dat, fig = fig, xlab = "Retention time", ylab = "Peaks count")
}
plotMS1IonCount=function(x,fig="test.png"){
  # Plot the per-spectrum MS1 ion count against retention time (minutes),
  # one panel per fraction/sample, saved to `fig`.
  x$MS1QC <- as.character(x$MS1QC)
  x$MS2QC <- as.character(x$MS2QC)
  ion_dat <- plyr::ddply(
    x, .(sample, bioRep, techRep, fraction), summarise,
    y = read.delim(MS1QC)$ionCount,
    x = read.delim(MS1QC)$retention.time / 60
  )
  ggplot.RT(ion_dat, fig = fig, xlab = "Retention time", ylab = "Ion count")
}
plotMS2PeakFreq=function(x, fig="test.png"){
  # Density plot of MS2 spectrum retention times (minutes), one panel per
  # fraction/sample, coloured by technical replicate and line-typed by
  # biological replicate; saved as a PNG to `fig`.
  x$MS1QC <- as.character(x$MS1QC)
  x$MS2QC <- as.character(x$MS2QC)
  p <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
                   summarise,
                   x= read.delim(MS2QC)$retention.time/60)
  png(fig,width=1200,height=1000,res=200)
  gg.obj <- ggplot(data=p,aes(x=x,colour=as.factor(techRep),
                              linetype=as.factor(bioRep))) +
    geom_density(size=0.2,alpha=0.8)+
    #scale_x_continuous(labels = comma)+
    #scale_y_continuous(labels=comma)+
    theme(axis.text.x = element_text(size=6,angle=90,vjust=0.5))+
    theme(axis.text.y = element_text(size=6))+
    xlab(label="Retention time")+
    #ylab(label="rt")+
    facet_wrap( ~ fraction+sample, ncol = 6)+
    labs(colour="Technical replicate",linetype="Biological replicate")+
    # flipped so retention time runs along the vertical axis
    coord_flip()+
    theme(legend.text=element_text(size=6.5),legend.title=element_text(size=7))
  print(gg.obj)
  dev.off()
}
plotMS1boxplot=function(x, fig="test.png"){
  # Boxplot of MS1 spectrum retention times (minutes) per fraction, one
  # panel per sample/bioRep/techRep combination; saved as a PNG to `fig`.
  x$MS1QC <- as.character(x$MS1QC)
  x$MS2QC <- as.character(x$MS2QC)
  p <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
                   summarise,
                   x= read.delim(MS1QC)$retention.time/60)
  png(fig,width=1200,height=1000,res=200)
  gg.obj <- ggplot(data=p,aes(y=x,x=as.factor(fraction))) +
    geom_boxplot(size=0.2,alpha=0.8)+
    #scale_x_continuous(labels = comma)+
    #scale_y_continuous(labels=comma)+
    theme(axis.text.x = element_text(size=6,angle=90,vjust=0.5))+
    theme(axis.text.y = element_text(size=6))+
    ylab(label="Retention time")+
    xlab(label="Fraction")+
    facet_wrap( ~ sample+bioRep+techRep, ncol = 3)+
    #labs(colour="Technical replicate",linetype="Biological replicate")+
    #coord_flip()+
    theme(legend.text=element_text(size=6.5),legend.title=element_text(size=7))
  print(gg.obj)
  dev.off()
}
plotMS1Count=function(x, fig="test.png"){
  # Dot-and-line plot of the MS1 spectrum count per fraction, one line per
  # technical replicate, coloured by sample and shaped by biological
  # replicate; saved as a PNG to `fig`.
  #
  # x:   data frame with columns sample, bioRep, techRep, fraction and MS1QC
  #      (path of the per-file MS1 QC table written by calcMSQCMetrics)
  # fig: output PNG file name
  x$MS1QC <- as.character(x$MS1QC)
  x$MS2QC <- as.character(x$MS2QC)
  # The MS1 count of a run is simply the number of rows of its MS1 QC table.
  pdat <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
                      summarise,
                      ms1count= nrow(read.delim(MS1QC)))
  pdat$sample <- as.character(pdat$sample)
  pdat$bioRep <- as.character(pdat$bioRep)
  pdat$techRep <- as.character(pdat$techRep)
  plotClass <- "ms1count"
  # Cast to wide (one column per techRep, absent runs filled with 0) and melt
  # back, so missing fractions appear as explicit zero counts.
  # (Renamed from `x` to `wide` so the function parameter is not shadowed;
  # the previously computed-but-unused `rotate` flag was removed.)
  wide <- reshape2::dcast(pdat,
                          sample+bioRep+fraction~techRep,
                          fill=0,value.var=c(plotClass))
  m <- reshape2::melt(wide,id.vars=c("sample","bioRep","fraction"),
                      value.name=c(plotClass),
                      variable.name="techRep")
  png(fig,width=1000,height=800,res=200)
  p<-ggplot(m,aes_string(x="fraction",y=plotClass,
                         linetype="techRep",
                         colour="sample",shape="bioRep"))+
    geom_point()+
    geom_line()+
    ylab("MS1 Count")+
    xlab("Fraction")+
    expand_limits(y=0)
  print(p)
  dev.off()
}
plotMS1CountErrorBar=function(x, fig="test.png"){
  # Line plot of the mean MS1 spectrum count per fraction and sample, with
  # error bars showing the standard error across replicates; saved as a PNG
  # to `fig`. Returns the figure path.
  x$MS1QC <- as.character(x$MS1QC)
  x$MS2QC <- as.character(x$MS2QC)
  # MS1 count per run = number of rows of its MS1 QC table.
  pdat <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
                      summarise,
                      ms1count= nrow(read.delim(MS1QC)))
  pdat$sample <- as.character(pdat$sample)
  pdat$bioRep <- as.character(pdat$bioRep)
  pdat$techRep <- as.character(pdat$techRep)
  #pdat$sample <- as.character(pdat$sample)
  plotClass <- "ms1count"
  # Mean and standard error of the count over all replicates of each
  # sample/fraction cell.
  z<-ddply(pdat,.(sample,fraction),
           function(x){
             data.frame(val=mean(x[,plotClass]),
                        se=sd(x[,plotClass])/sqrt(length(x[,plotClass])))})
  #pngFile=paste(outdir,"/","fig_ms1_sample_error_bar_",plotClass,".png",
  #              sep="",collapse="")
  png(fig,width=1000,height=800,res=200)
  p<-ggplot(z,aes(x=fraction, y = val,fill=sample,colour=sample))+
    expand_limits(y = 0)+
    geom_line()+
    geom_point()+
    ylab("MS1 Count")+
    xlab("Fraction")+
    geom_errorbar(aes(ymin=val-se, ymax=val+se),width=0.3)+
    #scale_x_continuous(breaks = seq(1,max(res_fraction_level$fraction),1))+
    #theme(axis.text.x = element_text(angle=0))+
    #axis.title=element_text(face="bold",size=15),
    #plot.title=element_text(face="bold",size=20))+
    scale_fill_hue(c=90,l=50)
  #if(res$input_parameter$maxFraction > 6){
  #  p <- p + theme(axis.text.x = element_text(angle=90,vjust=0.5 ))
  #}
  print(p)
  dev.off()
  #pngFile <- paste(basename(res$input_parameter$report.dir),basename(pngFile),
  #                 sep="/")
  return(fig)
}
plotMS2boxplot=function(x, fig="test.png"){
  # Boxplot of MS2 spectrum retention times (minutes) per fraction, one
  # panel per sample/bioRep/techRep combination; saved as a PNG to `fig`.
  x$MS1QC <- as.character(x$MS1QC)
  x$MS2QC <- as.character(x$MS2QC)
  p <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
                   summarise,
                   x= read.delim(MS2QC)$retention.time/60)
  png(fig,width=1200,height=1000,res=200)
  gg.obj <- ggplot(data=p,aes(y=x,x=as.factor(fraction))) +
    geom_boxplot(size=0.2,alpha=0.8)+
    #scale_x_continuous(labels = comma)+
    #scale_y_continuous(labels=comma)+
    theme(axis.text.x = element_text(size=6,angle=90,vjust=0.5))+
    theme(axis.text.y = element_text(size=6))+
    ylab(label="Retention time")+
    xlab(label="Fraction")+
    facet_wrap( ~ sample+bioRep+techRep, ncol = 3)+
    #labs(colour="Technical replicate",linetype="Biological replicate")+
    #coord_flip()+
    theme(legend.text=element_text(size=6.5),legend.title=element_text(size=7))
  print(gg.obj)
  dev.off()
}
ggplot.RT=function(data=NULL,fig=NULL,xlab=NULL,ylab=NULL){
  # Shared line-plot helper for the retention-time QC figures.
  # `data` must have columns x, y, techRep, bioRep, fraction and sample; one
  # panel is drawn per fraction/sample and the plot is saved as a PNG to
  # `fig`.
  png(fig,width=1200,height=1000,res=200)
  gg.obj <- ggplot(data=data,aes(x=x,y=y,colour=as.factor(techRep),
                                 linetype=as.factor(bioRep))) +
    geom_line(size=0.2,alpha=0.8)+
    #scale_x_continuous(labels = comma)+
    #scale_y_continuous(labels=comma)+
    theme(axis.text.x = element_text(size=6,angle=90,vjust=0.5))+
    theme(axis.text.y = element_text(size=6))+
    xlab(label=xlab)+
    ylab(label=ylab)+
    #facet_wrap( ~ fraction, ncol = 6)+
    facet_wrap( ~ fraction+sample, ncol = 6)+
    labs(colour="Technical replicate",linetype="Biological replicate")+
    # flipped so retention time runs along the vertical axis
    coord_flip()+
    theme(legend.text=element_text(size=6.5),legend.title=element_text(size=7))
  print(gg.obj)
  dev.off()
}
plotMS12=function(res=NULL, outdir="./"){
  # Generate the full set of MS1/MS2 QC figures under `outdir` and return a
  # named list with the path of every PNG produced.
  fig_names <- c("ms1tic", "ms1peakscount", "ms1ioncount", "ms2peaksdensity",
                 "ms2boxplot", "ms1countdot", "ms1counterrorbar", "ms1boxplot")
  outfig <- lapply(fig_names, function(nm) paste(outdir, "/", nm, ".png", sep = ""))
  names(outfig) <- fig_names
  plotMS1TIC(res,fig=outfig$ms1tic)
  plotMS1PeaksCount(res,fig=outfig$ms1peakscount)
  plotMS1IonCount(res,fig=outfig$ms1ioncount)
  plotMS2PeakFreq(res,fig=outfig$ms2peaksdensity)
  plotMS2boxplot(res,fig=outfig$ms2boxplot)
  plotMS1boxplot(res,fig=outfig$ms1boxplot)
  plotMS1Count(res,fig=outfig$ms1countdot)
  plotMS1CountErrorBar(res,fig=outfig$ms1counterrorbar)
  return(outfig)
}
|
/R/ms12QC.R
|
no_license
|
wenbostar/proteoQC
|
R
| false
| false
| 10,049
|
r
|
##' @title Calculate the MS1 and MS2 level QC metrics
##' @description Calculate the MS1 level QC metrics
##' @param spectraList An experiment design input file
##' @param outdir Output directory
##' @param cpu The number of cpu used
##' @return A data frame
##' @author Bo Wen \email{wenbo@@genomics.cn}
calcMSQCMetrics=function(spectraList=NULL,cpu=2,outdir="./"){
exp <- read.delim(spectraList)
if(cpu==0){
cpu <- detectCores()
}
if(cpu>=4){
cpu=4
}
cl <- makeCluster(getOption("cl.cores", cpu))
clusterEvalQ(cl,library("MSnbase"))
clusterExport(cl, c("outdir"),envir=environment())
res<-parSapply(cl,as.character(exp[,.INPUT.COLS["FILE"]]),function(x){
mz <- readMSData(x,msLevel=1)
outfile <- paste(outdir,"/",basename(x),"-ms1qc.txt",collapse="",sep="")
write.table(x=header(mz),file=outfile,quote=FALSE,sep="\t",
col.names=TRUE, row.names=FALSE)
mz2 <- readMSData(x,msLevel=2)
outfile2 <- paste(outdir,"/",basename(x),"-ms2qc.txt",collapse="",sep="")
write.table(x=header(mz2),file=outfile2,quote=FALSE,sep="\t",
col.names=TRUE, row.names=FALSE)
out <- c(outfile,outfile2)
## return a matrix. Can not use data.frame here.
out
})
stopCluster(cl)
res<-as.data.frame(t(res))
names(res) <- c("MS1QC","MS2QC")
res[,"file"]=rownames(res)
res <- merge(res,exp,by=.INPUT.COLS["FILE"])
res
}
plotMS1TIC=function(x, fig="test.png"){
x$MS1QC <- as.character(x$MS1QC)
x$MS2QC <- as.character(x$MS2QC)
p <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
summarise,
y = read.delim(MS1QC)$tic,
x= read.delim(MS1QC)$retention.time/60)
ggplot.RT(p,fig=fig,xlab="Retention time",ylab="TIC")
}
plotMS1PeaksCount=function(x, fig="test.png"){
x$MS1QC <- as.character(x$MS1QC)
x$MS2QC <- as.character(x$MS2QC)
p <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
summarise,
y = read.delim(MS1QC)$peaks.count,
x= read.delim(MS1QC)$retention.time/60)
ggplot.RT(p,fig=fig,xlab="Retention time",ylab="Peaks count")
}
plotMS1IonCount=function(x,fig="test.png"){
x$MS1QC <- as.character(x$MS1QC)
x$MS2QC <- as.character(x$MS2QC)
p <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
summarise,
y = read.delim(MS1QC)$ionCount,
x= read.delim(MS1QC)$retention.time/60)
ggplot.RT(p,fig=fig,xlab="Retention time",ylab="Ion count")
}
plotMS2PeakFreq=function(x, fig="test.png"){
x$MS1QC <- as.character(x$MS1QC)
x$MS2QC <- as.character(x$MS2QC)
p <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
summarise,
x= read.delim(MS2QC)$retention.time/60)
png(fig,width=1200,height=1000,res=200)
gg.obj <- ggplot(data=p,aes(x=x,colour=as.factor(techRep),
linetype=as.factor(bioRep))) +
geom_density(size=0.2,alpha=0.8)+
#scale_x_continuous(labels = comma)+
#scale_y_continuous(labels=comma)+
theme(axis.text.x = element_text(size=6,angle=90,vjust=0.5))+
theme(axis.text.y = element_text(size=6))+
xlab(label="Retention time")+
#ylab(label="rt")+
facet_wrap( ~ fraction+sample, ncol = 6)+
labs(colour="Technical replicate",linetype="Biological replicate")+
coord_flip()+
theme(legend.text=element_text(size=6.5),legend.title=element_text(size=7))
print(gg.obj)
dev.off()
}
plotMS1boxplot=function(x, fig="test.png"){
x$MS1QC <- as.character(x$MS1QC)
x$MS2QC <- as.character(x$MS2QC)
p <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
summarise,
x= read.delim(MS1QC)$retention.time/60)
png(fig,width=1200,height=1000,res=200)
gg.obj <- ggplot(data=p,aes(y=x,x=as.factor(fraction))) +
geom_boxplot(size=0.2,alpha=0.8)+
#scale_x_continuous(labels = comma)+
#scale_y_continuous(labels=comma)+
theme(axis.text.x = element_text(size=6,angle=90,vjust=0.5))+
theme(axis.text.y = element_text(size=6))+
ylab(label="Retention time")+
xlab(label="Fraction")+
facet_wrap( ~ sample+bioRep+techRep, ncol = 3)+
#labs(colour="Technical replicate",linetype="Biological replicate")+
#coord_flip()+
theme(legend.text=element_text(size=6.5),legend.title=element_text(size=7))
print(gg.obj)
dev.off()
}
plotMS1Count=function(x, fig="test.png"){
x$MS1QC <- as.character(x$MS1QC)
x$MS2QC <- as.character(x$MS2QC)
pdat <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
summarise,
ms1count= nrow(read.delim(MS1QC)))
pdat$sample <- as.character(pdat$sample)
pdat$bioRep <- as.character(pdat$bioRep)
pdat$techRep <- as.character(pdat$techRep)
#pdat$sample <- as.character(pdat$sample)
plotClass <- "ms1count"
x<-reshape2::dcast(pdat,
sample+bioRep+fraction~techRep,
fill=0,value.var=c(plotClass))
m<-reshape2::melt(x,id.vars=c("sample","bioRep","fraction"),
value.name=c(plotClass),
variable.name="techRep")
#if(length(unique(m$techRep))>=6){
if(max(nchar(levels(as.factor(m$techRep))))>=6){
rotate=90
}else{
rotate=0
}
png(fig,width=1000,height=800,res=200)
p<-ggplot(m,aes_string(x="fraction",y=plotClass,
linetype="techRep",
colour="sample",shape="bioRep"))+
geom_point()+
geom_line()+
ylab("MS1 Count")+
xlab("Fraction")+
expand_limits(y=0)
print(p)
dev.off()
#pngFile <- paste(basename(res$input_parameter$report.dir),basename(pngFile),
# sep="/")
#return(fig)
}
plotMS1CountErrorBar=function(x, fig="test.png"){
x$MS1QC <- as.character(x$MS1QC)
x$MS2QC <- as.character(x$MS2QC)
pdat <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
summarise,
ms1count= nrow(read.delim(MS1QC)))
pdat$sample <- as.character(pdat$sample)
pdat$bioRep <- as.character(pdat$bioRep)
pdat$techRep <- as.character(pdat$techRep)
#pdat$sample <- as.character(pdat$sample)
plotClass <- "ms1count"
z<-ddply(pdat,.(sample,fraction),
function(x){
data.frame(val=mean(x[,plotClass]),
se=sd(x[,plotClass])/sqrt(length(x[,plotClass])))})
#pngFile=paste(outdir,"/","fig_ms1_sample_error_bar_",plotClass,".png",
# sep="",collapse="")
png(fig,width=1000,height=800,res=200)
p<-ggplot(z,aes(x=fraction, y = val,fill=sample,colour=sample))+
expand_limits(y = 0)+
geom_line()+
geom_point()+
ylab("MS1 Count")+
xlab("Fraction")+
geom_errorbar(aes(ymin=val-se, ymax=val+se),width=0.3)+
#scale_x_continuous(breaks = seq(1,max(res_fraction_level$fraction),1))+
#theme(axis.text.x = element_text(angle=0))+
#axis.title=element_text(face="bold",size=15),
#plot.title=element_text(face="bold",size=20))+
scale_fill_hue(c=90,l=50)
#if(res$input_parameter$maxFraction > 6){
# p <- p + theme(axis.text.x = element_text(angle=90,vjust=0.5 ))
#}
print(p)
dev.off()
#pngFile <- paste(basename(res$input_parameter$report.dir),basename(pngFile),
# sep="/")
return(fig)
}
plotMS2boxplot=function(x, fig="test.png"){
x$MS1QC <- as.character(x$MS1QC)
x$MS2QC <- as.character(x$MS2QC)
p <- plyr::ddply(x,.(sample,bioRep,techRep,fraction),
summarise,
x= read.delim(MS2QC)$retention.time/60)
png(fig,width=1200,height=1000,res=200)
gg.obj <- ggplot(data=p,aes(y=x,x=as.factor(fraction))) +
geom_boxplot(size=0.2,alpha=0.8)+
#scale_x_continuous(labels = comma)+
#scale_y_continuous(labels=comma)+
theme(axis.text.x = element_text(size=6,angle=90,vjust=0.5))+
theme(axis.text.y = element_text(size=6))+
ylab(label="Retention time")+
xlab(label="Fraction")+
facet_wrap( ~ sample+bioRep+techRep, ncol = 3)+
#labs(colour="Technical replicate",linetype="Biological replicate")+
#coord_flip()+
theme(legend.text=element_text(size=6.5),legend.title=element_text(size=7))
print(gg.obj)
dev.off()
}
# Line plot of retention-time data, faceted by fraction and sample, with
# colour mapped to technical replicate and line type to biological replicate.
#
# Args:
#   data: data frame with columns x, y, techRep, bioRep, fraction, sample.
#   fig:  output PNG file name.
#   xlab, ylab: axis captions (note: coord_flip() swaps them visually).
# Side effects: writes `fig`; no value is returned.
ggplot.RT=function(data=NULL,fig=NULL,xlab=NULL,ylab=NULL){
    png(fig,width=1200,height=1000,res=200)
    gg.obj <- ggplot(data=data,aes(x=x,y=y,colour=as.factor(techRep),
                                   linetype=as.factor(bioRep))) +
        geom_line(size=0.2,alpha=0.8)+
        #scale_x_continuous(labels = comma)+
        #scale_y_continuous(labels=comma)+
        theme(axis.text.x = element_text(size=6,angle=90,vjust=0.5))+
        theme(axis.text.y = element_text(size=6))+
        xlab(label=xlab)+
        ylab(label=ylab)+
        #facet_wrap( ~ fraction, ncol = 6)+
        facet_wrap( ~ fraction+sample, ncol = 6)+
        labs(colour="Technical replicate",linetype="Biological replicate")+
        coord_flip()+
        theme(legend.text=element_text(size=6.5),legend.title=element_text(size=7))
    print(gg.obj)
    dev.off()
}
# Generate the full set of MS1/MS2 QC figures for a result object and return
# the named list of PNG file paths that were written.
#
# Args:
#   res:    QC result object, passed through to the individual plot helpers.
#   outdir: directory in which the PNG files are written.
# Returns: named list of output file paths (same names/order as before).
plotMS12=function(res=NULL, outdir="./"){
    # Figure basenames in the order the result list must expose them.
    fig_names <- c("ms1tic", "ms1peakscount", "ms1ioncount",
                   "ms2peaksdensity", "ms2boxplot", "ms1countdot",
                   "ms1counterrorbar", "ms1boxplot")
    # Build "<outdir>/<name>.png" for every figure.
    outfig <- lapply(fig_names,
                     function(nm) paste(outdir, "/", nm, ".png", sep = ""))
    names(outfig) <- fig_names
    # Render each figure; call order mirrors the original implementation.
    plotMS1TIC(res, fig = outfig$ms1tic)
    plotMS1PeaksCount(res, fig = outfig$ms1peakscount)
    plotMS1IonCount(res, fig = outfig$ms1ioncount)
    plotMS2PeakFreq(res, fig = outfig$ms2peaksdensity)
    plotMS2boxplot(res, fig = outfig$ms2boxplot)
    plotMS1boxplot(res, fig = outfig$ms1boxplot)
    plotMS1Count(res, fig = outfig$ms1countdot)
    plotMS1CountErrorBar(res, fig = outfig$ms1counterrorbar)
    return(outfig)
}
|
#' Aggregate dataset by state
#'
#' @param dt data.table
#' @param year_min integer
#' @param year_max integer
#' @param evtypes character vector
#' @return data.table
#'
aggregate_by_state <- function(dt, year_min, year_max, evtypes) {
    # Helpers: zero-fill NAs (states without matching events) and 2-dp rounding.
    replace_na <- function(x) ifelse(is.na(x), 0, x)
    round_2 <- function(x) round(x, 2)
    # Every distinct state, used to re-introduce states dropped by the filter.
    states <- data.table(STATE=sort(unique(dt$STATE)))
    # NOTE(review): summarise_each()/mutate_each()/funs() are superseded in
    # current dplyr (use across()); kept as-is for compatibility with the
    # dplyr version this app was written against.
    aggregated <- dt %>% filter(YEAR >= year_min, YEAR <= year_max, EVTYPE %in% evtypes) %>%
        group_by(STATE) %>%
        summarise_each(funs(sum), COUNT:CROPDMG)
    # We want all states to be present even if nothing happened
    left_join(states, aggregated, by = "STATE") %>%
        mutate_each(funs(replace_na), FATALITIES:CROPDMG) %>%
        mutate_each(funs(round_2), PROPDMG, CROPDMG)
}
#' Aggregate dataset by year
#'
#' @param dt data.table
#' @param year_min integer
#' @param year_max integer
#' @param evtypes character vector
#' @return data.table
#'
aggregate_by_year <- function(dt, year_min, year_max, evtypes) {
    round_2 <- function(x) round(x, 2)
    # Filter
    dt %>% filter(YEAR >= year_min, YEAR <= year_max, EVTYPE %in% evtypes) %>%
        # Group and aggregate (summarise_each/funs are superseded dplyr APIs,
        # kept for compatibility)
        group_by(YEAR) %>% summarise_each(funs(sum), COUNT:CROPDMG) %>%
        # Round the two damage columns to 2 decimal places
        mutate_each(funs(round_2), PROPDMG, CROPDMG) %>%
        # Presentation-friendly column names for the UI/downloads
        rename(
            Year = YEAR, Count = COUNT,
            Fatalities = FATALITIES, Injuries = INJURIES,
            Property = PROPDMG, Crops = CROPDMG
        )
}
#' Add Affected column based on category
#'
#' @param dt data.table
#' @param category character
#' @return data.table
#'
compute_affected <- function(dt, category) {
    # Select which casualty measure feeds the Affected column.  The unnamed
    # final branch is switch()'s default, matching the original else branch
    # (anything other than 'both'/'fatalities' means injuries).
    dt %>% mutate(Affected = switch(category,
        both = INJURIES + FATALITIES,
        fatalities = FATALITIES,
        INJURIES
    ))
}
#' Add Damages column based on category
#'
#' @param dt data.table
#' @param category character
#' @return data.table
#'
compute_damages <- function(dt, category) {
    # Select which damage measure feeds the Damages column.  The unnamed
    # final branch is switch()'s default, matching the original else branch
    # (anything other than 'both'/'crops' means property damage).
    dt %>% mutate(Damages = switch(category,
        both = PROPDMG + CROPDMG,
        crops = CROPDMG,
        PROPDMG
    ))
}
#' Prepare map of economic or population Impact
#'
#' @param dt data.table
#' @param states_map data.frame returned from map_data("state")
#' @param year_min integer
#' @param year_max integer
#' @param fill character name of the variable
#' @param title character
#' @param low character hex
#' @param high character hex
#' @return ggplot
#'
plot_impact_by_state <- function (dt, states_map, year_min, year_max, fill, title, low = "#fff5eb", high = "#d94801") {
    # Single ggplot chain; the title template is filled with the year range.
    ggplot(dt, aes(map_id = STATE)) +
        geom_map(aes_string(fill = fill), map = states_map, colour = 'black') +
        expand_limits(x = states_map$long, y = states_map$lat) +
        coord_map() +
        theme_bw() +
        labs(x = "Long", y = "Lat", title = sprintf(title, year_min, year_max)) +
        scale_fill_gradient(low = low, high = high)
}
#' Prepare plots of Impact by year
#'
#' @param dt data.table
#' @param dom
#' @param yAxisLabel
#' @param desc
#' @return plot
#'
plot_impact_by_year <- function(dt, dom, yAxisLabel, desc = FALSE) {
    # Melt to long format so each impact column becomes one stacked series;
    # `desc` optionally reverses the stacking order of the series.
    impactPlot <- nPlot(
        value ~ Year, group = "variable",
        data = melt(dt, id="Year") %>% arrange(Year, if (desc) { desc(variable) } else { variable }),
        type = "stackedAreaChart", dom = dom, width = 650
    )
    # Wide left margin so long axis labels are not clipped.
    impactPlot$chart(margin = list(left = 100))
    impactPlot$yAxis(axisLabel = yAxisLabel, width = 80)
    impactPlot$xAxis(axisLabel = "Year", width = 70)
    impactPlot
}
#' Prepare plot of number of events by year
#'
#' @param dt data.table
#' @param dom
#' @param yAxisLabel
#' @return plot
plot_events_by_year <- function(dt, dom = "eventsByYear", yAxisLabel = "Count") {
    # NVD3 line chart of the yearly event counts.
    plt <- nPlot(
        Count ~ Year,
        data = dt,
        type = "lineChart", dom = dom, width = 650
    )
    # Wide left margin so axis labels are not clipped.
    plt$chart(margin = list(left = 100))
    plt$yAxis( axisLabel = yAxisLabel, width = 80)
    plt$xAxis( axisLabel = "Year", width = 70)
    plt
}
#' Prepare dataset for downloads
#'
#' @param dt data.table
#' @return data.table
prepare_downolads <- function(dt) {
    # NOTE(review): "downolads" is a typo for "downloads"; the name is kept
    # because callers reference it.
    # State values are assumed to be lowercase full state names; unmatched
    # values become NA -- TODO confirm the upstream data guarantees this.
    dt %>% rename(
        State = STATE, Count = COUNT,
        Injuries = INJURIES, Fatalities = FATALITIES,
        Property.damage = PROPDMG, Crops.damage = CROPDMG
    ) %>% mutate(State=state.abb[match(State, tolower(state.name))])
}
|
/Processing.R
|
no_license
|
ThotaSravani/Developing-Data-Products-Course-Project
|
R
| false
| false
| 4,618
|
r
|
#' Aggregate dataset by state
#'
#' @param dt data.table
#' @param year_min integer
#' @param year_max integer
#' @param evtypes character vector
#' @return data.table
#'
aggregate_by_state <- function(dt, year_min, year_max, evtypes) {
    # Helpers: zero-fill NAs (states without matching events) and 2-dp rounding.
    replace_na <- function(x) ifelse(is.na(x), 0, x)
    round_2 <- function(x) round(x, 2)
    # Every distinct state, used to re-introduce states dropped by the filter.
    states <- data.table(STATE=sort(unique(dt$STATE)))
    # NOTE(review): summarise_each()/mutate_each()/funs() are superseded in
    # current dplyr (use across()); kept as-is for compatibility with the
    # dplyr version this app was written against.
    aggregated <- dt %>% filter(YEAR >= year_min, YEAR <= year_max, EVTYPE %in% evtypes) %>%
        group_by(STATE) %>%
        summarise_each(funs(sum), COUNT:CROPDMG)
    # We want all states to be present even if nothing happened
    left_join(states, aggregated, by = "STATE") %>%
        mutate_each(funs(replace_na), FATALITIES:CROPDMG) %>%
        mutate_each(funs(round_2), PROPDMG, CROPDMG)
}
#' Aggregate dataset by year
#'
#' @param dt data.table
#' @param year_min integer
#' @param year_max integer
#' @param evtypes character vector
#' @return data.table
#'
aggregate_by_year <- function(dt, year_min, year_max, evtypes) {
    round_2 <- function(x) round(x, 2)
    # Filter
    dt %>% filter(YEAR >= year_min, YEAR <= year_max, EVTYPE %in% evtypes) %>%
        # Group and aggregate (summarise_each/funs are superseded dplyr APIs,
        # kept for compatibility)
        group_by(YEAR) %>% summarise_each(funs(sum), COUNT:CROPDMG) %>%
        # Round the two damage columns to 2 decimal places
        mutate_each(funs(round_2), PROPDMG, CROPDMG) %>%
        # Presentation-friendly column names for the UI/downloads
        rename(
            Year = YEAR, Count = COUNT,
            Fatalities = FATALITIES, Injuries = INJURIES,
            Property = PROPDMG, Crops = CROPDMG
        )
}
#' Add Affected column based on category
#'
#' @param dt data.table
#' @param category character
#' @return data.table
#'
compute_affected <- function(dt, category) {
    # Select which casualty measure feeds the Affected column.  The unnamed
    # final branch is switch()'s default, matching the original else branch
    # (anything other than 'both'/'fatalities' means injuries).
    dt %>% mutate(Affected = switch(category,
        both = INJURIES + FATALITIES,
        fatalities = FATALITIES,
        INJURIES
    ))
}
#' Add Damages column based on category
#'
#' @param dt data.table
#' @param category character
#' @return data.table
#'
compute_damages <- function(dt, category) {
    # Select which damage measure feeds the Damages column.  The unnamed
    # final branch is switch()'s default, matching the original else branch
    # (anything other than 'both'/'crops' means property damage).
    dt %>% mutate(Damages = switch(category,
        both = PROPDMG + CROPDMG,
        crops = CROPDMG,
        PROPDMG
    ))
}
#' Prepare map of economic or population Impact
#'
#' @param dt data.table
#' @param states_map data.frame returned from map_data("state")
#' @param year_min integer
#' @param year_max integer
#' @param fill character name of the variable
#' @param title character
#' @param low character hex
#' @param high character hex
#' @return ggplot
#'
plot_impact_by_state <- function (dt, states_map, year_min, year_max, fill, title, low = "#fff5eb", high = "#d94801") {
    # Single ggplot chain; the title template is filled with the year range.
    ggplot(dt, aes(map_id = STATE)) +
        geom_map(aes_string(fill = fill), map = states_map, colour = 'black') +
        expand_limits(x = states_map$long, y = states_map$lat) +
        coord_map() +
        theme_bw() +
        labs(x = "Long", y = "Lat", title = sprintf(title, year_min, year_max)) +
        scale_fill_gradient(low = low, high = high)
}
#' Prepare plots of Impact by year
#'
#' @param dt data.table
#' @param dom
#' @param yAxisLabel
#' @param desc
#' @return plot
#'
plot_impact_by_year <- function(dt, dom, yAxisLabel, desc = FALSE) {
    # Melt to long format so each impact column becomes one stacked series;
    # `desc` optionally reverses the stacking order of the series.
    impactPlot <- nPlot(
        value ~ Year, group = "variable",
        data = melt(dt, id="Year") %>% arrange(Year, if (desc) { desc(variable) } else { variable }),
        type = "stackedAreaChart", dom = dom, width = 650
    )
    # Wide left margin so long axis labels are not clipped.
    impactPlot$chart(margin = list(left = 100))
    impactPlot$yAxis(axisLabel = yAxisLabel, width = 80)
    impactPlot$xAxis(axisLabel = "Year", width = 70)
    impactPlot
}
#' Prepare plot of number of events by year
#'
#' @param dt data.table
#' @param dom
#' @param yAxisLabel
#' @return plot
plot_events_by_year <- function(dt, dom = "eventsByYear", yAxisLabel = "Count") {
    # NVD3 line chart of the yearly event counts.
    plt <- nPlot(
        Count ~ Year,
        data = dt,
        type = "lineChart", dom = dom, width = 650
    )
    # Wide left margin so axis labels are not clipped.
    plt$chart(margin = list(left = 100))
    plt$yAxis( axisLabel = yAxisLabel, width = 80)
    plt$xAxis( axisLabel = "Year", width = 70)
    plt
}
#' Prepare dataset for downloads
#'
#' @param dt data.table
#' @return data.table
prepare_downolads <- function(dt) {
    # NOTE(review): "downolads" is a typo for "downloads"; the name is kept
    # because callers reference it.
    # State values are assumed to be lowercase full state names; unmatched
    # values become NA -- TODO confirm the upstream data guarantees this.
    dt %>% rename(
        State = STATE, Count = COUNT,
        Injuries = INJURIES, Fatalities = FATALITIES,
        Property.damage = PROPDMG, Crops.damage = CROPDMG
    ) %>% mutate(State=state.abb[match(State, tolower(state.name))])
}
|
# Packages used by the dashboard.
library(shiny)
library(shinydashboard)
library(rlang)
library(ggplot2)
# Load the cleaned unemployment data and normalise the column types:
# `States` is a character column, everything else numeric.
rate <- read.csv("Unemployment_Rate_Clean.csv")
colnames(rate) <- c("No", "States", "Year2017", "Year2018", "Year2019")
rate$States <- as.character(rate$States)
for (num_col in c("No", "Year2017", "Year2018", "Year2019")) {
  rate[[num_col]] <- as.numeric(rate[[num_col]])
}
# Dashboard UI: three tabs -- the interactive plot ("dashboard"),
# job-hunting resources ("extra"), and rendered documentation.
ui <- dashboardPage(
  skin = "purple",
  dashboardHeader(title = "WhereToWork"),
  # Sidebar navigation between the three tabs.
  dashboardSidebar(
    sidebarMenu(
      menuItem("Dashboard", tabName = "dashboard", icon = icon("dashboard")),
      menuItem("Extra", tabName = "extra", icon = icon("th")),
      menuItem("Documentation", tabName = "Documentation", icon = icon("th"))
    )
  ),
  dashboardBody(
    tabItems(
      # First tab content: plot controls plus the rendered result plot.
      tabItem(tabName = "dashboard",
              fluidRow(
                box(
                  width = 3,
                  title = "Select a Plot Type:",
                  selectInput("plot.type","Plot Type:",
                              choices=c("Column", "Dot Plot"),
                              selected = "Column"),
                ),
                box(
                  width = 3,
                  title = "Select a Year:",
                  selectInput("year", "Year:",
                              choices=c("2017","2018","2019"),
                              selected = "2019"),
                ),
                box(
                  width = 3,
                  title = "Change the background color:",status = "primary", solidHeader = TRUE,
                  collapsible = TRUE,
                  radioButtons("radio", label ="Background Color:",
                               choices = c("Thistle","Light Steel Blue 2","Khaki"),
                               selected = "Light Steel Blue 2")),
                box(
                  width = 12,
                  title = "Results",
                  # Filled by output$plot1 in the server function.
                  plotOutput("plot1", height = 350, width =1000 ),
                ),
              )
      ),
      # Second tab content: static job-seeker resources (images and links).
      tabItem(tabName = "extra",
              fluidRow(
                column(8, align="center",offset = 2,
                       h3(strong("How to calculate the unemployment rate?")),
                       box(
                         width = 12,
                         img(src = "https://cdn.educba.com/academy/wp-content/uploads/2019/07/Unemploy-ment-Rate-Formula-1.jpg", height = 200, width = 400),
                       ),
                       # Collapsible box listing five job-search websites.
                       box(
                         width = 12,
                         title = "Job Sites", status = "primary", solidHeader = TRUE,
                         collapsible = TRUE,
                         h4("Find job opportunities in these websites"),
                         h3(strong("1. LinkedIn")),
                         tags$img(src="https://pioneerresumes.com/wp-content/uploads/2020/07/past-applicants.gif", height=250 , width=500),
                         h5("LinkedIn is used for professional networking that includes employers posting jobs and job seekers posting their Curriculum Vitae."),
                         tagList("Website:", a("https://my.linkedin.com/", href="https://my.linkedin.com/")),
                         h3(strong("2. Jobstreet")),
                         tags$img(src="https://www.the-network.com/wp-content/uploads/browshot/jobstreet-malaysia.png", height=250 , width=500),
                         h5("Started in 1997, JobStreet is currently the largest online employment company in Southesast Asia."),
                         tagList("Website:", a("https://www.jobstreet.com.my/", href="https://www.jobstreet.com.my/")),
                         h3(strong("3. Monster")),
                         tags$img(src="https://cdn01.vulcanpost.com/wp-uploads/2017/01/monstermalaysia.com_.png", height=250 , width=500),
                         h5("Looking for a job where you can work from the comfort of your own home? There are several job opportunities that offer
                          a work-from-home set-up!"),
                         tagList("Website:", a("https://www.monster.com.my/", href="https://www.monster.com.my/")),
                         h3(strong("4. MauKerja")),
                         tags$img(src="http://infojimat.com/wp-content/uploads/2021/02/4.png", height=250 , width=500),
                         h5("An exciting solution that benefit both business owners and job seekers is provided by MauKerja."),
                         tagList("Website:", a("https://www.maukerja.my/", href="https://www.maukerja.my/")),
                         h3(strong("5. Wobb.my")),
                         tags$img(src="https://assets.wobbjobs.com/images/faq/img2-a.jpg", height=250 , width=500),
                         h5("Working On Bean Bags (Wobb) lists employers according to their companies, and job hunters are able to have a sneak peek of the
                          interior of the office and also a closer look at the people and working culture before sending in their applications."),
                         tagList("Website:", a("https://my.wobbjobs.com/", href="https://my.wobbjobs.com/")),
                       ), #End box
                       box(
                         width = 12,
                         title = "Interview", status = "primary", solidHeader = TRUE,
                         collapsible = TRUE,
                         img(src = "https://www.thebalancecareers.com/thmb/RFUaH4oaj1t0d_AHaggGJJBiIj8=/1500x1000/filters:no_upscale():max_bytes(150000):strip_icc()/top-interview-tips-2058577_FINAL-5b7339fb46e0fb0050b4b20d.png",
                             height = 350, width = 500),
                       ),
                       box(
                         width = 12,
                         title = "Resume vs CV", status = "primary", solidHeader = TRUE,
                         collapsible = TRUE,
                         img(src = "https://resumegenius.com/wp-content/uploads/2020/06/CV-vs-Resume-Differences.png", height = 450, width = 600),
                       ),
                       box(
                         width = 12,
                         title = "Resume", status = "primary", solidHeader = TRUE,
                         collapsible = TRUE,
                         img(src = "https://trainerkart.com/wp-content/uploads/2018/06/DOs-and-Donts-of-Resume.png", height = 600, width = 400),
                       ),
                       box(
                         width = 12,
                         title = "Curriculum Vitae (CV)", status = "primary", solidHeader = TRUE,
                         collapsible = TRUE,
                         img(src = "https://www.careerexperts.co.uk/wp-content/uploads/2018/03/7-CV-improvememt-tips-info.jpg", height = 700, width = 400),
                       ),
                )
              )
      ),
      # Third tab: summary.Rmd is rendered when the UI object is built.
      tabItem(tabName = "Documentation",
              #summary.Rmd
              hr(),
              fluidRow(
                column(8, offset = 2,
                       includeMarkdown(rmarkdown::render("summary.Rmd"))
                )
              ),
              hr()
      )
    )
  ),
)
# Shiny server: renders the unemployment-rate chart for the selected year,
# plot type and background colour.
server <- function(input, output) {
  output$plot1 <- renderPlot({
    # Column index of `rate` holding the chosen year
    # (3 = Year2017, 4 = Year2018, 5 = Year2019).  A switch replaces the
    # three independent if-statements of the original; selectInput limits
    # input$year to exactly these values, so no default branch is needed.
    i <- switch(input$year,
                "2017" = 3,
                "2018" = 4,
                "2019" = 5)
    y <- rate[, i]
    if (input$plot.type == "Column") {
      # Labelled column chart, one bar per state.
      ggplot(data = rate) +
        geom_text(aes_string(x = "States", y, label = y), vjust = -0.5) +
        geom_col(mapping = aes_string(x = "States", y, fill = "States"),
                 show.legend = FALSE, col = input$radio) +
        labs(x = "States", y = "Unemployment Rate (%)") +
        ggtitle(paste("Column Plot: Unemployment Rate in Malaysia in", input$year)) +
        theme(axis.text.x = element_text(colour = "black", size = 6, face = "bold"),
              axis.text.y = element_text(colour = "black", size = 8, face = "bold"),
              axis.title.x = element_text(size = 15, face = "bold"),
              axis.title.y = element_text(size = 15, face = "bold"),
              plot.background = element_rect(fill = input$radio),
              plot.title = element_text(colour = "black", face = "bold", size = rel(2)))
    } else {
      # Horizontal dot plot with one labelled dot per state.
      ggplot(data = rate) +
        geom_dotplot(binwidth = 0.5,
                     mapping = aes_string(x = "States", y, fill = "States"),
                     show.legend = FALSE, col = input$radio,
                     binaxis = 'y', stackdir = 'center',
                     stackratio = 1, dotsize = 0.5) +
        coord_flip() +
        labs(x = "States", y = "Unemployment Rate (%)") +
        geom_text(aes_string(x = "States", y, label = y)) +
        ggtitle(paste("Dot Plot: Unemployment Rate in Malaysia in", input$year)) +
        theme(axis.text.x = element_text(colour = "black", size = 10, face = "bold"),
              axis.text.y = element_text(colour = "black", size = 8, face = "bold"),
              axis.title.x = element_text(size = 15, face = "bold"),
              axis.title.y = element_text(size = 15, face = "bold"),
              plot.background = element_rect(fill = input$radio),
              plot.title = element_text(colour = "black", face = "bold", size = rel(2)))
    }
  })
}
shinyApp(ui, server)
|
/Data Product/app.R
|
no_license
|
amniwahit/GroupProject
|
R
| false
| false
| 9,964
|
r
|
# Packages used by the dashboard.
library(shiny)
library(shinydashboard)
library(rlang)
library(ggplot2)
# Load the cleaned unemployment data and normalise the column types:
# `States` is a character column, everything else numeric.
rate <- read.csv("Unemployment_Rate_Clean.csv")
colnames(rate) <- c("No", "States", "Year2017", "Year2018", "Year2019")
rate$States <- as.character(rate$States)
for (num_col in c("No", "Year2017", "Year2018", "Year2019")) {
  rate[[num_col]] <- as.numeric(rate[[num_col]])
}
# Dashboard UI: three tabs -- the interactive plot ("dashboard"),
# job-hunting resources ("extra"), and rendered documentation.
ui <- dashboardPage(
  skin = "purple",
  dashboardHeader(title = "WhereToWork"),
  # Sidebar navigation between the three tabs.
  dashboardSidebar(
    sidebarMenu(
      menuItem("Dashboard", tabName = "dashboard", icon = icon("dashboard")),
      menuItem("Extra", tabName = "extra", icon = icon("th")),
      menuItem("Documentation", tabName = "Documentation", icon = icon("th"))
    )
  ),
  dashboardBody(
    tabItems(
      # First tab content: plot controls plus the rendered result plot.
      tabItem(tabName = "dashboard",
              fluidRow(
                box(
                  width = 3,
                  title = "Select a Plot Type:",
                  selectInput("plot.type","Plot Type:",
                              choices=c("Column", "Dot Plot"),
                              selected = "Column"),
                ),
                box(
                  width = 3,
                  title = "Select a Year:",
                  selectInput("year", "Year:",
                              choices=c("2017","2018","2019"),
                              selected = "2019"),
                ),
                box(
                  width = 3,
                  title = "Change the background color:",status = "primary", solidHeader = TRUE,
                  collapsible = TRUE,
                  radioButtons("radio", label ="Background Color:",
                               choices = c("Thistle","Light Steel Blue 2","Khaki"),
                               selected = "Light Steel Blue 2")),
                box(
                  width = 12,
                  title = "Results",
                  # Filled by output$plot1 in the server function.
                  plotOutput("plot1", height = 350, width =1000 ),
                ),
              )
      ),
      # Second tab content: static job-seeker resources (images and links).
      tabItem(tabName = "extra",
              fluidRow(
                column(8, align="center",offset = 2,
                       h3(strong("How to calculate the unemployment rate?")),
                       box(
                         width = 12,
                         img(src = "https://cdn.educba.com/academy/wp-content/uploads/2019/07/Unemploy-ment-Rate-Formula-1.jpg", height = 200, width = 400),
                       ),
                       # Collapsible box listing five job-search websites.
                       box(
                         width = 12,
                         title = "Job Sites", status = "primary", solidHeader = TRUE,
                         collapsible = TRUE,
                         h4("Find job opportunities in these websites"),
                         h3(strong("1. LinkedIn")),
                         tags$img(src="https://pioneerresumes.com/wp-content/uploads/2020/07/past-applicants.gif", height=250 , width=500),
                         h5("LinkedIn is used for professional networking that includes employers posting jobs and job seekers posting their Curriculum Vitae."),
                         tagList("Website:", a("https://my.linkedin.com/", href="https://my.linkedin.com/")),
                         h3(strong("2. Jobstreet")),
                         tags$img(src="https://www.the-network.com/wp-content/uploads/browshot/jobstreet-malaysia.png", height=250 , width=500),
                         h5("Started in 1997, JobStreet is currently the largest online employment company in Southesast Asia."),
                         tagList("Website:", a("https://www.jobstreet.com.my/", href="https://www.jobstreet.com.my/")),
                         h3(strong("3. Monster")),
                         tags$img(src="https://cdn01.vulcanpost.com/wp-uploads/2017/01/monstermalaysia.com_.png", height=250 , width=500),
                         h5("Looking for a job where you can work from the comfort of your own home? There are several job opportunities that offer
                          a work-from-home set-up!"),
                         tagList("Website:", a("https://www.monster.com.my/", href="https://www.monster.com.my/")),
                         h3(strong("4. MauKerja")),
                         tags$img(src="http://infojimat.com/wp-content/uploads/2021/02/4.png", height=250 , width=500),
                         h5("An exciting solution that benefit both business owners and job seekers is provided by MauKerja."),
                         tagList("Website:", a("https://www.maukerja.my/", href="https://www.maukerja.my/")),
                         h3(strong("5. Wobb.my")),
                         tags$img(src="https://assets.wobbjobs.com/images/faq/img2-a.jpg", height=250 , width=500),
                         h5("Working On Bean Bags (Wobb) lists employers according to their companies, and job hunters are able to have a sneak peek of the
                          interior of the office and also a closer look at the people and working culture before sending in their applications."),
                         tagList("Website:", a("https://my.wobbjobs.com/", href="https://my.wobbjobs.com/")),
                       ), #End box
                       box(
                         width = 12,
                         title = "Interview", status = "primary", solidHeader = TRUE,
                         collapsible = TRUE,
                         img(src = "https://www.thebalancecareers.com/thmb/RFUaH4oaj1t0d_AHaggGJJBiIj8=/1500x1000/filters:no_upscale():max_bytes(150000):strip_icc()/top-interview-tips-2058577_FINAL-5b7339fb46e0fb0050b4b20d.png",
                             height = 350, width = 500),
                       ),
                       box(
                         width = 12,
                         title = "Resume vs CV", status = "primary", solidHeader = TRUE,
                         collapsible = TRUE,
                         img(src = "https://resumegenius.com/wp-content/uploads/2020/06/CV-vs-Resume-Differences.png", height = 450, width = 600),
                       ),
                       box(
                         width = 12,
                         title = "Resume", status = "primary", solidHeader = TRUE,
                         collapsible = TRUE,
                         img(src = "https://trainerkart.com/wp-content/uploads/2018/06/DOs-and-Donts-of-Resume.png", height = 600, width = 400),
                       ),
                       box(
                         width = 12,
                         title = "Curriculum Vitae (CV)", status = "primary", solidHeader = TRUE,
                         collapsible = TRUE,
                         img(src = "https://www.careerexperts.co.uk/wp-content/uploads/2018/03/7-CV-improvememt-tips-info.jpg", height = 700, width = 400),
                       ),
                )
              )
      ),
      # Third tab: summary.Rmd is rendered when the UI object is built.
      tabItem(tabName = "Documentation",
              #summary.Rmd
              hr(),
              fluidRow(
                column(8, offset = 2,
                       includeMarkdown(rmarkdown::render("summary.Rmd"))
                )
              ),
              hr()
      )
    )
  ),
)
# Shiny server: renders the unemployment-rate chart for the selected year,
# plot type and background colour.
server <- function(input, output) {
  output$plot1 <- renderPlot({
    # Column index of `rate` holding the chosen year
    # (3 = Year2017, 4 = Year2018, 5 = Year2019).  A switch replaces the
    # three independent if-statements of the original; selectInput limits
    # input$year to exactly these values, so no default branch is needed.
    i <- switch(input$year,
                "2017" = 3,
                "2018" = 4,
                "2019" = 5)
    y <- rate[, i]
    if (input$plot.type == "Column") {
      # Labelled column chart, one bar per state.
      ggplot(data = rate) +
        geom_text(aes_string(x = "States", y, label = y), vjust = -0.5) +
        geom_col(mapping = aes_string(x = "States", y, fill = "States"),
                 show.legend = FALSE, col = input$radio) +
        labs(x = "States", y = "Unemployment Rate (%)") +
        ggtitle(paste("Column Plot: Unemployment Rate in Malaysia in", input$year)) +
        theme(axis.text.x = element_text(colour = "black", size = 6, face = "bold"),
              axis.text.y = element_text(colour = "black", size = 8, face = "bold"),
              axis.title.x = element_text(size = 15, face = "bold"),
              axis.title.y = element_text(size = 15, face = "bold"),
              plot.background = element_rect(fill = input$radio),
              plot.title = element_text(colour = "black", face = "bold", size = rel(2)))
    } else {
      # Horizontal dot plot with one labelled dot per state.
      ggplot(data = rate) +
        geom_dotplot(binwidth = 0.5,
                     mapping = aes_string(x = "States", y, fill = "States"),
                     show.legend = FALSE, col = input$radio,
                     binaxis = 'y', stackdir = 'center',
                     stackratio = 1, dotsize = 0.5) +
        coord_flip() +
        labs(x = "States", y = "Unemployment Rate (%)") +
        geom_text(aes_string(x = "States", y, label = y)) +
        ggtitle(paste("Dot Plot: Unemployment Rate in Malaysia in", input$year)) +
        theme(axis.text.x = element_text(colour = "black", size = 10, face = "bold"),
              axis.text.y = element_text(colour = "black", size = 8, face = "bold"),
              axis.title.x = element_text(size = 15, face = "bold"),
              axis.title.y = element_text(size = 15, face = "bold"),
              plot.background = element_rect(fill = input$radio),
              plot.title = element_text(colour = "black", face = "bold", size = rel(2)))
    }
  })
}
shinyApp(ui, server)
|
\name{confus}
\alias{confus}
\alias{fuzconfus}
\title{(Fuzzy) Confusion Matrix}
\description{A confusion matrix is a cross-tabulation of actual class
membership with memberships predicted by a discriminant function,
classification tree, or other predictive model.
A fuzzy confusion
matrix is a confusion matrix that corrects for \sQuote{near misses}
in prediction by comparing the similarity of the predicted type to
the actual type and giving credit for the similarity.}
\usage{confus(clustering,model,diss=NULL)}
\arguments{
\item{clustering}{an object of class \sQuote{clustering} or
a vector of (integer or factor) class membership values}
\item{model}{a predictive model of class \sQuote{tree} or \sQuote{randomForest}}
\item{diss}{optionally, a dissimilarity object of class \sQuote{dist} from
\sQuote{dist}, \sQuote{dsvdis}, or \sQuote{vegdist}}
}
\details{Cross-classifies each sample by actual class membership and
predicted membership, computing overall accuracy, and the Kappa
statistic of agreement. If a dissimilarity matrix is passed, calculates a
fuzzy confusion matrix.
In this case, correct predictions are assigned values of 1.0, and other predictions are given the value of the
similarity of the two types and placed on the diagonal.  The dissimilarity of the two types is added off
the diagonal as fuzzy error.
}
\value{produces a list with elements
\item{matrix}{the (fuzzy) cross-tabulation matrix as a data.frame}
\item{correct}{the fraction of (fuzzily) correctly predicted samples}
\item{kappa}{the value of the Kappa statistic}
\item{legend}{the text legend for the cross-tabulation matrix}
}
\references{\url{http://ecology.msu.montana.edu/labdsv/R}}
\note{Confusion matrices are commonly computed in remote sensing
applications, but are equally suited to the evaluation of any
predictive methods of class membership or factors.}
\author{
David W. Roberts
\email{droberts@montana.edu}
\url{http://ecology.msu.montana.edu/labdsv/R}
}
\examples{
data(shoshveg) # returns a data frame of vegetation data
data(shoshsite) # returns a data frame of site data
dis.bc <- dsvdis(shoshveg,'bray')
opt.5 <- optpart(5,dis.bc)
library(tree)
mod <- tree(factor(opt.5$clustering)~ elevation+slope+av,
data=shoshsite)
confus(opt.5,mod)
confus(opt.5,mod,dis.bc)
}
\keyword{htest}
|
/man/confus.Rd
|
no_license
|
cran/optpart
|
R
| false
| false
| 2,335
|
rd
|
\name{confus}
\alias{confus}
\alias{fuzconfus}
\title{(Fuzzy) Confusion Matrix}
\description{A confusion matrix is a cross-tabulation of actual class
membership with memberships predicted by a discriminant function,
classification tree, or other predictive model.
A fuzzy confusion
matrix is a confusion matrix that corrects for \sQuote{near misses}
in prediction by comparing the similarity of the predicted type to
the actual type and giving credit for the similarity.}
\usage{confus(clustering,model,diss=NULL)}
\arguments{
\item{clustering}{an object of class \sQuote{clustering} or
a vector of (integer or factor) class membership values}
\item{model}{a predictive model of class \sQuote{tree} or \sQuote{randomForest}}
\item{diss}{optionally, a dissimilarity object of class \sQuote{dist} from
\sQuote{dist}, \sQuote{dsvdis}, or \sQuote{vegdist}}
}
\details{Cross-classifies each sample by actual class membership and
predicted membership, computing overall accuracy, and the Kappa
statistic of agreement. If a dissimilarity matrix is passed, calculates a
fuzzy confusion matrix.
In this case, correct predictions are assigned values of 1.0, and other predictions are given the value of the
similarity of the two types and placed on the diagonal.  The dissimilarity of the two types is added off
the diagonal as fuzzy error.
}
\value{produces a list with elements
\item{matrix}{the (fuzzy) cross-tabulation matrix as a data.frame}
\item{correct}{the fraction of (fuzzily) correctly predicted samples}
\item{kappa}{the value of the Kappa statistic}
\item{legend}{the text legend for the cross-tabulation matrix}
}
\references{\url{http://ecology.msu.montana.edu/labdsv/R}}
\note{Confusion matrices are commonly computed in remote sensing
applications, but are equally suited to the evaluation of any
predictive methods of class membership or factors.}
\author{
David W. Roberts
\email{droberts@montana.edu}
\url{http://ecology.msu.montana.edu/labdsv/R}
}
\examples{
data(shoshveg) # returns a data frame of vegetation data
data(shoshsite) # returns a data frame of site data
dis.bc <- dsvdis(shoshveg,'bray')
opt.5 <- optpart(5,dis.bc)
library(tree)
mod <- tree(factor(opt.5$clustering)~ elevation+slope+av,
data=shoshsite)
confus(opt.5,mod)
confus(opt.5,mod,dis.bc)
}
\keyword{htest}
|
# nocov start
# tested in tidymodels/extratests#67
# Constructor for a censoring-model object wrapping a fitted censoring fit.
#
# @param formula   Model formula used for the censoring fit.
# @param object    The fitted model object (stored in the `fit` element).
# @param pkgs      Packages required to use the fit at prediction time.
# @param label     Short label for the model type; also forms part of the
#                  S3 class, "censoring_model_<label>".
# @param extra_cls Additional S3 classes appended after "censoring_model".
# @return A classed list with elements formula, fit, label, required_pkgs.
new_reverse_km_fit <-
  function(formula,
           object,
           pkgs = character(0),
           label = character(0),
           extra_cls = character(0)) {
    structure(
      list(formula = formula, fit = object, label = label, required_pkgs = pkgs),
      class = c(paste0("censoring_model_", label), "censoring_model", extra_cls)
    )
  }
# ------------------------------------------------------------------------------
# estimate the reverse km curve for censored regression models
# Fit a reverse Kaplan-Meier curve (the censoring distribution) for a
# censored-regression model specification.
#
# @param obj Model object; only acted on when obj$mode is
#   "censored regression", otherwise an empty list is returned.
# @param eval_env Environment captured at fit time; expected to contain
#   `formula`, `data` and optionally `weights`.
# @return A "censoring_model_reverse_km" object (see new_reverse_km_fit());
#   its `fit` element is a `try-error` when the prodlim call fails.
reverse_km <- function(obj, eval_env) {
  if (obj$mode != "censored regression") {
    return(list())
  }
  rlang::check_installed("prodlim")
  # Note: even when fit_xy() is called, eval_env will still have
  # objects data and formula in them
  f <- eval_env$formula
  # Intercept-only formula: the censoring curve ignores the predictors.
  km_form <- stats::update(f, ~ 1)
  # Build the prodlim::prodlim() call unevaluated so `data` (and `weights`
  # below) are resolved from eval_env only when the call is evaluated.
  cl <-
    rlang::call2(
      "prodlim",
      formula = km_form,
      .ns = "prodlim",
      reverse = TRUE,
      type = "surv",
      x = FALSE,
      data = rlang::expr(eval_env$data)
    )
  if (!is.null(eval_env$weights)) {
    cl <- rlang::call_modify(cl, caseweights = rlang::expr(eval_env$weights))
  }
  # Failures are kept (as a try-error inside the result) rather than raised;
  # downstream code decides how to handle an unusable censoring model.
  rkm <- try(rlang::eval_tidy(cl), silent = TRUE)
  new_reverse_km_fit(f, object = rkm, label = "reverse_km", pkgs = "prodlim")
}
# ------------------------------------------------------------------------------
# Basic S3 methods
#' @export
# Print a one-line description of a censoring model; returns the object
# invisibly, per print-method convention.
print.censoring_model <- function(x, ...) {
  desc <- "model for predicting the probability of censoring\n"
  cat(x$label, desc)
  invisible(x)
}
#' @export
# Fallback predict method: a censoring model without a type-specific
# predict implementation cannot generate predictions, so fail loudly.
predict.censoring_model <- function(object, ...) {
  msg <- paste(
    "Don't know how to predict with a censoring model of type:",
    object$label
  )
  rlang::abort(msg)
  invisible(NULL)
}
#' @export
# Predict the probability of censoring at the requested time(s) from a
# reverse Kaplan-Meier censoring model.
#
# @param object A "censoring_model_reverse_km" object.
# @param new_data Optional data frame of predictors; NULL gives the
#   marginal (covariate-free) curve.
# @param time Numeric vector of evaluation times; NA entries yield NA
#   probabilities in the result.
# @param as_vector Return a bare numeric vector instead of a tibble.
# @return Censoring probabilities (never exactly zero -- see epsilon logic);
#   a tibble with column .prob_censored unless as_vector = TRUE.
predict.censoring_model_reverse_km <- function(object, new_data = NULL, time, as_vector = FALSE, ...) {
  rlang::check_installed("prodlim", version = "2022.10.13")
  rlang::check_installed("censored", version = "0.1.1.9002")
  # Pre-fill with NA so NA time values keep their positions in the output.
  res <- rep(NA_real_, length(time))
  if (length(time) == 0) {
    return(res)
  }
  # Some time values might be NA (for Graf category 2)
  is_na <- which(is.na(time))
  if (length(is_na) > 0) {
    time <- time[-is_na]
  }
  # Evaluate the survival (censoring) curve one time point at a time.
  if (is.null(new_data)) {
    tmp <-
      purrr::map_dbl(time, ~ predict(object$fit, times = .x, type = "surv"))
  } else {
    tmp <-
      purrr::map_dbl(time, ~ predict(object$fit, newdata = new_data, times = .x, type = "surv"))
  }
  zero_prob <- purrr::map_lgl(tmp, ~ !is.na(.x) && .x == 0)
  if (any(zero_prob)) {
    # Don't want censoring probabilities of zero so add an epsilon
    # Either use 1/n or half of the minimum survival probability
    n <- max(object$fit$n.risk)
    half_min_surv_prob <- min(object$fit$surv[object$fit$surv > 0]) / 2
    eps <- min(1 / n, half_min_surv_prob)
    tmp[zero_prob] <- eps
  }
  # Scatter the computed values back around the NA placeholders.
  if (length(is_na) > 0) {
    res[-is_na] <- tmp
  } else {
    res <- tmp
  }
  if (!as_vector) {
    res <- tibble::tibble(.prob_censored = unname(res))
  }
  res
}
# nocov end
|
/R/survival-censoring-model.R
|
permissive
|
tidymodels/parsnip
|
R
| false
| false
| 3,007
|
r
|
# nocov start
# tested in tidymodels/extratests#67
# Constructor for a censoring-model object wrapping a fitted censoring fit.
#
# @param formula   Model formula used for the censoring fit.
# @param object    The fitted model object (stored in the `fit` element).
# @param pkgs      Packages required to use the fit at prediction time.
# @param label     Short label for the model type; also forms part of the
#                  S3 class, "censoring_model_<label>".
# @param extra_cls Additional S3 classes appended after "censoring_model".
# @return A classed list with elements formula, fit, label, required_pkgs.
new_reverse_km_fit <-
  function(formula,
           object,
           pkgs = character(0),
           label = character(0),
           extra_cls = character(0)) {
    structure(
      list(formula = formula, fit = object, label = label, required_pkgs = pkgs),
      class = c(paste0("censoring_model_", label), "censoring_model", extra_cls)
    )
  }
# ------------------------------------------------------------------------------
# estimate the reverse km curve for censored regression models
# Fit a reverse Kaplan-Meier curve (the censoring distribution) for a
# censored-regression model specification.
#
# @param obj Model object; only acted on when obj$mode is
#   "censored regression", otherwise an empty list is returned.
# @param eval_env Environment captured at fit time; expected to contain
#   `formula`, `data` and optionally `weights`.
# @return A "censoring_model_reverse_km" object (see new_reverse_km_fit());
#   its `fit` element is a `try-error` when the prodlim call fails.
reverse_km <- function(obj, eval_env) {
  if (obj$mode != "censored regression") {
    return(list())
  }
  rlang::check_installed("prodlim")
  # Note: even when fit_xy() is called, eval_env will still have
  # objects data and formula in them
  f <- eval_env$formula
  # Intercept-only formula: the censoring curve ignores the predictors.
  km_form <- stats::update(f, ~ 1)
  # Build the prodlim::prodlim() call unevaluated so `data` (and `weights`
  # below) are resolved from eval_env only when the call is evaluated.
  cl <-
    rlang::call2(
      "prodlim",
      formula = km_form,
      .ns = "prodlim",
      reverse = TRUE,
      type = "surv",
      x = FALSE,
      data = rlang::expr(eval_env$data)
    )
  if (!is.null(eval_env$weights)) {
    cl <- rlang::call_modify(cl, caseweights = rlang::expr(eval_env$weights))
  }
  # Failures are kept (as a try-error inside the result) rather than raised;
  # downstream code decides how to handle an unusable censoring model.
  rkm <- try(rlang::eval_tidy(cl), silent = TRUE)
  new_reverse_km_fit(f, object = rkm, label = "reverse_km", pkgs = "prodlim")
}
# ------------------------------------------------------------------------------
# Basic S3 methods
#' @export
print.censoring_model <- function(x, ...) {
  # Print a one-line description of the censoring model and return the
  # object invisibly so it can keep flowing through a pipeline.
  header <- paste(x$label, "model for predicting the probability of censoring\n")
  cat(header)
  invisible(x)
}
#' @export
predict.censoring_model <- function(object, ...) {
  # Fallback predict() method: a censoring model without a more specific
  # predict method is an error, since there is no generic way to compute
  # censoring probabilities for it.
  rlang::abort(
    paste("Don't know how to predict with a censoring model of type:", object$label)
  )
  # NOTE(review): unreachable after abort(); kept only for defensive symmetry.
  invisible(NULL)
}
#' @export
predict.censoring_model_reverse_km <- function(object, new_data = NULL, time, as_vector = FALSE, ...) {
  # Predict the probability of censoring at each value of `time` from a
  # reverse Kaplan-Meier censoring model.
  #
  # object    - a `censoring_model_reverse_km` object
  # new_data  - optional data forwarded to predict() on the prodlim fit
  # time      - numeric vector of evaluation times; may contain NA
  # as_vector - if TRUE return a bare numeric vector, otherwise a one-column
  #             tibble named `.prob_censored`
  rlang::check_installed("prodlim", version = "2022.10.13")
  rlang::check_installed("censored", version = "0.1.1.9002")
  # Pre-fill with NA so positions corresponding to NA times stay NA.
  res <- rep(NA_real_, length(time))
  if (length(time) == 0) {
    return(res)
  }
  # Some time values might be NA (for Graf category 2)
  is_na <- which(is.na(time))
  if (length(is_na) > 0) {
    time <- time[-is_na]
  }
  # Evaluate the censoring survival curve one time point at a time.
  if (is.null(new_data)) {
    tmp <-
      purrr::map_dbl(time, ~ predict(object$fit, times = .x, type = "surv"))
  } else {
    tmp <-
      purrr::map_dbl(time, ~ predict(object$fit, newdata = new_data, times = .x, type = "surv"))
  }
  zero_prob <- purrr::map_lgl(tmp, ~ !is.na(.x) && .x == 0)
  if (any(zero_prob)) {
    # Don't want censoring probabilities of zero so add an epsilon
    # Either use 1/n or half of the minimum survival probability
    n <- max(object$fit$n.risk)
    half_min_surv_prob <- min(object$fit$surv[object$fit$surv > 0]) / 2
    eps <- min(1 / n, half_min_surv_prob)
    tmp[zero_prob] <- eps
  }
  # Re-insert the computed probabilities at their original (non-NA) slots.
  if (length(is_na) > 0) {
    res[-is_na] <- tmp
  } else {
    res <- tmp
  }
  if (!as_vector) {
    res <- tibble::tibble(.prob_censored = unname(res))
  }
  res
}
# nocov end
|
###########################################################
# Big Data Analytics #
# Session 8 - Text mining #
# #
# Student survey analysis #
# Urban Outfitters tweets #
# Sentiment analysis across brands #
# #
# Authors: Nicolas Glady and Michael Surlereaux #
###########################################################
###########################################################
# Disclaimer: this script is used to produce the examples #
# presented during the course Big Data Analytics. The #
# authors are not responsible in any way for any problem #
# encountered during this code execution. #
###########################################################
#################################
#### Student survey analysis ####
#################################
# 1 Data extraction
library(openxlsx)
# Read the raw survey responses from the workbook.
rawSurveydata <- read.xlsx("TQ_BigDataAnalyt.xlsx", sheet = "BigDataAnalyt", startRow = 1, colNames = TRUE)
# Fix: the data is loaded into `rawSurveydata`, but the original script then
# referenced an undefined object `Survey`; use the loaded data frame instead.
View(rawSurveydata[, c(12:15, 26)]) # Show the "open entry" results only
# Concatenate the open-entry columns into one text string per respondent.
Texttomine <- paste(rawSurveydata[, 12], rawSurveydata[, 13], rawSurveydata[, 14], rawSurveydata[, 15], rawSurveydata[, 26], sep = " ")
# 2 Data Visualization with a Wordcloud
library(wordcloud)
word.cloud <- wordcloud(words=paste(Texttomine,collapse=''),scale=c(8,.2),min.freq=1,
max.words=Inf, random.order=FALSE, rot.per=.15, colors=brewer.pal(8,"Dark2"))
# 3.1 Data transformation - Use of the tm package
library(tm)
MyCorpus <- Corpus(VectorSource(Texttomine))
inspect(MyCorpus) # Contains all the different n data entries (here 34) in a "Corpus" (a structure)
tdm <- TermDocumentMatrix(MyCorpus) # A TermDocumentMatrix is more easy to work with
inspect(tdm) # A matrix of n x number of words (here 34 x number of words) with the number of times the words appears in each entry
findFreqTerms(tdm, lowfreq=10) # Words appearing more than 10 times
# 3.2 Remove irrelavant data (data cleaning)
MyCorpus<-tm_map(MyCorpus,stripWhitespace)
MyCorpus<-tm_map(MyCorpus, removePunctuation)
myStopWords <-c(stopwords('english'),'also') # all the words that are irrelevant
MyCorpus<-tm_map(MyCorpus, removeWords,myStopWords) # Remove irrelevant words like (about, etc.)
tdmclean <- TermDocumentMatrix(MyCorpus)
# 4 Data analysis
findFreqTerms(tdmclean, lowfreq=10) # Words appearing more than 10 times
findAssocs(tdmclean,"data",0.5) # Words correlated (more than 50%) with data
findAssocs(tdmclean,"business",0.7) # Words correlated (more than 70%) with business
# 5 Hierarchical cluster analysis
# 5.1 Remove sparse terms
tdm2 <- removeSparseTerms(tdmclean, sparse=0.8) # Let's trim the unfrequent (sparse) words
# 5.2 Compute the distance matrix
tdm2 <- as.matrix(tdm2)
distMatrix <- dist(scale(tdm2))
# 5.3 Operate the Hierarchical clustering
Survey.cluster <- hclust(distMatrix, method="ward.D")
# 5.4 Dendogram
plot(Survey.cluster, cex=0.9, hang=-1, main="Word Cluster Dendogram")
#################################
#### Urban Outfitters tweets ####
#################################
#load("Session8.Rdata")
# 1. Data identification
# Visit http://www.twitter.com/urbanoutfitters
# 2.1 Data extraction (connection)
library(devtools)
install_github("twitteR", username="geoffjentry") # We use a workaround for the connection
library(twitteR)
api_key = "TO BE REPLACED"
api_secret = "TO BE REPLACED"
access_token = "TO BE REPLACED"
access_token_secret = "TO BE REPLACED"
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
# 2.1 Data extraction (parsing)
# Extract 1000 tweets from the Urban Outfitters timelines
UO.tweets <- userTimeline("@UrbanOutfitters", n=1000)
# Transform tweets into a data frame
tweets.df <- twListToDF(UO.tweets)
#save.image(file="Session8.RData")
# 3. Data pre-treatment
# 3.1. Use of the tm package
#load("Session8.RData")
# 3.1.1. Transformation to make it readable
# convert string to vector of words
dattemp <- unlist(strsplit(tweets.df$text, split=", "))
# remove usernames
datatemp<-gsub("@[[:alnum:]]*","",dattemp)
# to ASCII
datatemp <- iconv(datatemp, "latin1", "ASCII", sub="")
datatemp <- str_replace_all(datatemp,"[^[:graph:]]", " ")
# remove punctuation
datatemp<-gsub("[[:punct:]]", "", datatemp)
# remove htpp
datatemp<-gsub("http[[:alnum:]]*","",datatemp)
# remove numbers
datatemp<-gsub("\\d", "",datatemp)
# remove "stop words"
myStopWords <-c(stopwords('english'))
datatemp<-removeWords(datatemp,myStopWords)
# Strip whitespace
datatemp<-stripWhitespace(datatemp)
# to lowercase
datatemp <-tolower(datatemp)
# 3.2. Use of the snowballC package - stemming of the strings (i.e. truncation to stem)
# Creation of a "dictionnary" from all the words
#myCorpusdict<-rownames(TermDocumentMatrix(Corpus(VectorSource(datatemp))))
datacomplete<-datatemp # Will be used to store the stemmed result
library(SnowballC)
k<-length(datatemp)
for (i in 1:k) {
temp<-strsplit(datatemp[[i]]," ") # Selection of the tweet i
temp<-stemDocument(temp[[1]]) # Stemming of the tweet i
#temp<-stemCompletion(temp,dictionary=myCorpusdict)
datacomplete[[i]]<-paste(temp,collapse=" ") # Form it as a sentence
}
myCorpusComplete<-Corpus(VectorSource(datacomplete))
#View(dattemp)
#View(datacomplete)
# 3.3. Transform the corpus into a Term-Document Matrix
tweets.tm <- TermDocumentMatrix(myCorpusComplete)
#inspect(tweets.tm[1:10,1:10])
#4.0 Wordcloud associated to the UO tweets
word.cloud <- wordcloud(words=paste(datacomplete,collapse=''),scale=c(8,.2),max.words=100, random.order=FALSE, rot.per=.15, colors=brewer.pal(8,"Dark2"))
#4.1.1 Frequent terms analysis
findFreqTerms(tweets.tm, lowfreq=25)
#4.1.2 Association analysis
# Find the correlation around the term "omg"
findAssocs(tweets.tm, "omg", 0.2)
# Find the correlation around the term "love"
findAssocs(tweets.tm, "love", 0.2)
#4.2 Hierarchical cluster analysis preparation
# Remove sparse terms
tweets.tm2 <- removeSparseTerms(tweets.tm, sparse=0.98)
tweets.tm2 <- as.matrix(tweets.tm2)
# Compute the distance matrix
distMatrix <- dist(scale(tweets.tm2))
# Operate the Hierarchical clustering
UO.cluster <- hclust(distMatrix, method="ward.D")
#4.2 Hierarchical cluster analysis output
plot(UO.cluster, cex=0.9, hang=-1, main="Word Cluster Dendogram")
# Cut the tree into 5 clusters
rect.hclust(UO.cluster, k=5)
View(tweets.df[,c(3,5,12,15:16)])
#4.3. Temporal analysis
library(ggplot2)
#4.3.1 Plot the number of tweets created by day
ggplot(tweets.df,aes(x=created))+geom_bar(aes(y = (..count..)))
#4.3.2 Plot the number of retweets per day of the week
#First, label your tweets with hour and weekday number
#label a tweet with the hour
tweets.df$hour=sapply(tweets.df$created, function(x) {p=as.POSIXlt(x);p$hour})
#label a tweet with a number corresponding to the day of the week
tweets.df$wday=sapply(tweets.df$created, function(x) {p=as.POSIXlt(x);p$wday})
ggplot(tweets.df,aes(x=wday))+geom_bar(aes(x=wday, y=retweetCount), stat="identity")
#4.3.3 Plot Day/Hour
ggplot(tweets.df)+geom_jitter(aes(x=wday,y=hour))
##########################################
#### Sentiment Analysis across brands ####
##########################################
# 1. Data identification
# Visit http://www.twitter.com/urbanoutfitters
# Visit http://www.twitter.com/Abercrombie
# Visit http://www.twitter.com/Forever21
# 2.1 Data extraction (connection)
# Load the twitteR, tm, ggplot2, wordcloud and snowballC packages
# 2.1 Data extraction (parsing)
# Extract 1000 tweets from Comcast and Forever 21 timelines
CC.tweets=searchTwitter('@comcast', n=1000)
F21.tweets=searchTwitter('@Forever21', n=1000)
# 2.2 Extract text from lexicons
pos.words = scan('positive-words.txt',what='character', comment.char=';')
neg.words = scan('negative-words.txt',what='character', comment.char=';')
# 3.1.2 Write in function to score sentiment
library(plyr)
# Score the sentiment of each sentence as (# positive words) - (# negative
# words), matched against the supplied opinion lexicons.
#
# sentences - character vector of texts to score
# pos.words - character vector of positive terms
# neg.words - character vector of negative terms
# .progress - progress bar style forwarded to plyr::laply()
#
# Returns a data.frame with columns `score` and `text`.
# NOTE(review): require() inside a function only warns when a package is
# missing; library() at the top of the script would fail fast instead.
score.sentiment = function(sentences, pos.words, neg.words, .progress='none')
{ # function to score the sentiments
  require(plyr)
  require(stringr)
  # we got a vector of sentences. plyr will handle a list
  # or a vector as an "l" for us
  # we want a simple array ("a") of scores back, so we use
  # "l" + "a" + "ply" = "laply":
  scores = laply(sentences, function(sentence, pos.words, neg.words) {
    # clean up sentences with R's regex-driven global substitute, gsub():
    sentence = gsub('[[:punct:]]', '', sentence)
    sentence = gsub('[[:cntrl:]]', '', sentence)
    sentence = gsub('\\d+', '', sentence)
    # and convert to lower case:
    sentence = tolower(sentence)
    # split into words. str_split is in the stringr package
    word.list = str_split(sentence, '\\s+')
    # sometimes a list() is one level of hierarchy too much
    words = unlist(word.list)
    # compare our words to the dictionaries of positive & negative terms
    pos.matches = match(words, pos.words)
    neg.matches = match(words, neg.words)
    # match() returns the position of the matched term or NA
    # we just want a TRUE/FALSE:
    pos.matches = !is.na(pos.matches)
    neg.matches = !is.na(neg.matches)
    # and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum():
    score = sum(pos.matches) - sum(neg.matches)
    return(score)
  }, pos.words, neg.words, .progress=.progress )
  scores.df = data.frame(score=scores, text=sentences)
  return(scores.df)
}
# Normalize raw tweets into lower-case, de-noised text ready for sentiment
# scoring: strips usernames, URLs, punctuation, digits, stop words, and
# redundant whitespace.
#
# tweets.df - a list of status objects as returned by twitteR's search/
#             timeline functions (despite the name, NOT yet a data frame)
#
# Returns a character vector of cleaned tweet texts.
clean.tweets <- function(tweets.df){ # Function to clean the data
  twlist<-twListToDF(tweets.df)
  # NOTE(review): splitting on ", " can break a single tweet into several
  # fragments -- confirm this is intended before reusing elsewhere.
  dattemp <- unlist(strsplit(twlist$text, split=", "))
  # remove usernames
  datatemp<-gsub("@[[:alnum:]]*","",dattemp)
  # to ASCII
  datatemp <- iconv(datatemp, "latin1", "ASCII", sub="")
  datatemp <- str_replace_all(datatemp,"[^[:graph:]]", " ")
  # remove punctuation
  datatemp<-gsub("[[:punct:]]", "", datatemp)
  # remove htpp
  datatemp<-gsub("http[[:alnum:]]*","",datatemp)
  # remove numbers
  datatemp<-gsub("\\d", "",datatemp)
  # remove the literal Unicode replacement character (likely an encoding
  # artifact left behind by the ASCII conversion above)
  datatemp<-gsub("�", "",datatemp)
  # remove "stop words"
  myStopWords <-c(stopwords('english'))
  datatemp<-removeWords(datatemp,myStopWords)
  # Strip whitespace
  datatemp<-stripWhitespace(datatemp)
  # to lowercase
  datatemp <-tolower(datatemp)
  return(datatemp)
}
# 4.1 Score tweets' sentiment
UO.score=score.sentiment(clean.tweets(UO.tweets), pos.words, neg.words, .progress='text')
CC.score=score.sentiment(clean.tweets(CC.tweets), pos.words, neg.words, .progress='text')
F21.score=score.sentiment(clean.tweets(F21.tweets), pos.words, neg.words, .progress='text')
# 4.2 Configure colums for further plotting
UO.score$brand="Urban Outfitters"
UO.score$code="UO"
CC.score$brand="Comcast"
CC.score$code="CC"
F21.score$brand="Forever 21"
F21.score$code="F21"
# 4.3 Bind scores for brands
brands.score=rbind(UO.score, CC.score, F21.score)
# 5 Data visualization
# plot of the score by brand
g = ggplot(data=brands.score, mapping=aes(x=score, fill=brand) )
g = g + geom_bar(binwidth=1) # Do a histogram
g = g + facet_grid(brand~.) # Have a different plot for each brand
g = g + theme_bw() + scale_fill_brewer() # Define the colors (blue in a b&w theme)
g
#########################
#### GeoLocalisation ####
#########################
library(maps)
# 1 Data preparation
france<-map(database="france")
departements <- match.map(france,france$names) # Select only the departments index
radius<-'40km' # gross estimate of the average radius of a departement
#create a string of the lat, long, and radius for entry into searchTwitter()
for(i in 1:length(departements)){
france$search.twitter.entry[departements[i]]<-toString(c(france$y[departements[i]],france$x[departements[i]],radius))
}
# take out spaces in the string
france$search.twitter.entry<-gsub(" ","", france$search.twitter.entry ,fixed=TRUE)
# 2 Data extraction
#Search twitter at each location, check how many tweets and put into dataframe
searchtext<-'@fhollande'
maxtweets<-200
for(i in 1:length(departements)){
if(!is.na(france$y[departements[i]])) {
france$number.of.tweets[departements[i]]<-
length(searchTwitter(searchString=searchtext,n=maxtweets,geocode=france$search.twitter.entry[departements[i]]))
}}
france$number.of.tweets[is.na(france$number.of.tweets)]<-0
# 3 Data visualization
gray.colors<-function(n) gray(rev(0:(n-1)/1.5)/n)
colors<-gray.colors(maxtweets)[france$number.of.tweets[departements]]
#making the map of France
map(database="france", fill=TRUE,col=colors,resolution=0)
save.image(file="Session8.Rdata")
|
/BDA_8_RSCRIPT.R
|
no_license
|
kkc-krish/BDA-1
|
R
| false
| false
| 12,661
|
r
|
###########################################################
# Big Data Analytics #
# Session 8 - Text mining #
# #
# Student survey analysis #
# Urban Outfitters tweets #
# Sentiment analysis across brands #
# #
# Authors: Nicolas Glady and Michael Surlereaux #
###########################################################
###########################################################
# Disclaimer: this script is used to produce the examples #
# presented during the course Big Data Analytics. The #
# authors are not responsible in any way for any problem #
# encountered during this code execution. #
###########################################################
#################################
#### Student survey analysis ####
#################################
# 1 Data extraction
library(openxlsx)
# Read the raw survey responses from the workbook.
rawSurveydata <- read.xlsx("TQ_BigDataAnalyt.xlsx", sheet = "BigDataAnalyt", startRow = 1, colNames = TRUE)
# Fix: the data is loaded into `rawSurveydata`, but the original script then
# referenced an undefined object `Survey`; use the loaded data frame instead.
View(rawSurveydata[, c(12:15, 26)]) # Show the "open entry" results only
# Concatenate the open-entry columns into one text string per respondent.
Texttomine <- paste(rawSurveydata[, 12], rawSurveydata[, 13], rawSurveydata[, 14], rawSurveydata[, 15], rawSurveydata[, 26], sep = " ")
# 2 Data Visualization with a Wordcloud
library(wordcloud)
word.cloud <- wordcloud(words=paste(Texttomine,collapse=''),scale=c(8,.2),min.freq=1,
max.words=Inf, random.order=FALSE, rot.per=.15, colors=brewer.pal(8,"Dark2"))
# 3.1 Data transformation - Use of the tm package
library(tm)
MyCorpus <- Corpus(VectorSource(Texttomine))
inspect(MyCorpus) # Contains all the different n data entries (here 34) in a "Corpus" (a structure)
tdm <- TermDocumentMatrix(MyCorpus) # A TermDocumentMatrix is more easy to work with
inspect(tdm) # A matrix of n x number of words (here 34 x number of words) with the number of times the words appears in each entry
findFreqTerms(tdm, lowfreq=10) # Words appearing more than 10 times
# 3.2 Remove irrelavant data (data cleaning)
MyCorpus<-tm_map(MyCorpus,stripWhitespace)
MyCorpus<-tm_map(MyCorpus, removePunctuation)
myStopWords <-c(stopwords('english'),'also') # all the words that are irrelevant
MyCorpus<-tm_map(MyCorpus, removeWords,myStopWords) # Remove irrelevant words like (about, etc.)
tdmclean <- TermDocumentMatrix(MyCorpus)
# 4 Data analysis
findFreqTerms(tdmclean, lowfreq=10) # Words appearing more than 10 times
findAssocs(tdmclean,"data",0.5) # Words correlated (more than 50%) with data
findAssocs(tdmclean,"business",0.7) # Words correlated (more than 70%) with business
# 5 Hierarchical cluster analysis
# 5.1 Remove sparse terms
tdm2 <- removeSparseTerms(tdmclean, sparse=0.8) # Let's trim the unfrequent (sparse) words
# 5.2 Compute the distance matrix
tdm2 <- as.matrix(tdm2)
distMatrix <- dist(scale(tdm2))
# 5.3 Operate the Hierarchical clustering
Survey.cluster <- hclust(distMatrix, method="ward.D")
# 5.4 Dendogram
plot(Survey.cluster, cex=0.9, hang=-1, main="Word Cluster Dendogram")
#################################
#### Urban Outfitters tweets ####
#################################
#load("Session8.Rdata")
# 1. Data identification
# Visit http://www.twitter.com/urbanoutfitters
# 2.1 Data extraction (connection)
library(devtools)
install_github("twitteR", username="geoffjentry") # We use a workaround for the connection
library(twitteR)
api_key = "TO BE REPLACED"
api_secret = "TO BE REPLACED"
access_token = "TO BE REPLACED"
access_token_secret = "TO BE REPLACED"
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
# 2.1 Data extraction (parsing)
# Extract 1000 tweets from the Urban Outfitters timelines
UO.tweets <- userTimeline("@UrbanOutfitters", n=1000)
# Transform tweets into a data frame
tweets.df <- twListToDF(UO.tweets)
#save.image(file="Session8.RData")
# 3. Data pre-treatment
# 3.1. Use of the tm package
#load("Session8.RData")
# 3.1.1. Transformation to make it readable
# convert string to vector of words
dattemp <- unlist(strsplit(tweets.df$text, split=", "))
# remove usernames
datatemp<-gsub("@[[:alnum:]]*","",dattemp)
# to ASCII
datatemp <- iconv(datatemp, "latin1", "ASCII", sub="")
datatemp <- str_replace_all(datatemp,"[^[:graph:]]", " ")
# remove punctuation
datatemp<-gsub("[[:punct:]]", "", datatemp)
# remove htpp
datatemp<-gsub("http[[:alnum:]]*","",datatemp)
# remove numbers
datatemp<-gsub("\\d", "",datatemp)
# remove "stop words"
myStopWords <-c(stopwords('english'))
datatemp<-removeWords(datatemp,myStopWords)
# Strip whitespace
datatemp<-stripWhitespace(datatemp)
# to lowercase
datatemp <-tolower(datatemp)
# 3.2. Use of the snowballC package - stemming of the strings (i.e. truncation to stem)
# Creation of a "dictionnary" from all the words
#myCorpusdict<-rownames(TermDocumentMatrix(Corpus(VectorSource(datatemp))))
datacomplete<-datatemp # Will be used to store the stemmed result
library(SnowballC)
k<-length(datatemp)
for (i in 1:k) {
temp<-strsplit(datatemp[[i]]," ") # Selection of the tweet i
temp<-stemDocument(temp[[1]]) # Stemming of the tweet i
#temp<-stemCompletion(temp,dictionary=myCorpusdict)
datacomplete[[i]]<-paste(temp,collapse=" ") # Form it as a sentence
}
myCorpusComplete<-Corpus(VectorSource(datacomplete))
#View(dattemp)
#View(datacomplete)
# 3.3. Transform the corpus into a Term-Document Matrix
tweets.tm <- TermDocumentMatrix(myCorpusComplete)
#inspect(tweets.tm[1:10,1:10])
#4.0 Wordcloud associated to the UO tweets
word.cloud <- wordcloud(words=paste(datacomplete,collapse=''),scale=c(8,.2),max.words=100, random.order=FALSE, rot.per=.15, colors=brewer.pal(8,"Dark2"))
#4.1.1 Frequent terms analysis
findFreqTerms(tweets.tm, lowfreq=25)
#4.1.2 Association analysis
# Find the correlation around the term "omg"
findAssocs(tweets.tm, "omg", 0.2)
# Find the correlation around the term "love"
findAssocs(tweets.tm, "love", 0.2)
#4.2 Hierarchical cluster analysis preparation
# Remove sparse terms
tweets.tm2 <- removeSparseTerms(tweets.tm, sparse=0.98)
tweets.tm2 <- as.matrix(tweets.tm2)
# Compute the distance matrix
distMatrix <- dist(scale(tweets.tm2))
# Operate the Hierarchical clustering
UO.cluster <- hclust(distMatrix, method="ward.D")
#4.2 Hierarchical cluster analysis output
plot(UO.cluster, cex=0.9, hang=-1, main="Word Cluster Dendogram")
# Cut the tree into 5 clusters
rect.hclust(UO.cluster, k=5)
View(tweets.df[,c(3,5,12,15:16)])
#4.3. Temporal analysis
library(ggplot2)
#4.3.1 Plot the number of tweets created by day
ggplot(tweets.df,aes(x=created))+geom_bar(aes(y = (..count..)))
#4.3.2 Plot the number of retweets per day of the week
#First, label your tweets with hour and weekday number
#label a tweet with the hour
tweets.df$hour=sapply(tweets.df$created, function(x) {p=as.POSIXlt(x);p$hour})
#label a tweet with a number corresponding to the day of the week
tweets.df$wday=sapply(tweets.df$created, function(x) {p=as.POSIXlt(x);p$wday})
ggplot(tweets.df,aes(x=wday))+geom_bar(aes(x=wday, y=retweetCount), stat="identity")
#4.3.3 Plot Day/Hour
ggplot(tweets.df)+geom_jitter(aes(x=wday,y=hour))
##########################################
#### Sentiment Analysis across brands ####
##########################################
# 1. Data identification
# Visit http://www.twitter.com/urbanoutfitters
# Visit http://www.twitter.com/Abercrombie
# Visit http://www.twitter.com/Forever21
# 2.1 Data extraction (connection)
# Load the twitteR, tm, ggplot2, wordcloud and snowballC packages
# 2.1 Data extraction (parsing)
# Extract 1000 tweets from Comcast and Forever 21 timelines
CC.tweets=searchTwitter('@comcast', n=1000)
F21.tweets=searchTwitter('@Forever21', n=1000)
# 2.2 Extract text from lexicons
pos.words = scan('positive-words.txt',what='character', comment.char=';')
neg.words = scan('negative-words.txt',what='character', comment.char=';')
# 3.1.2 Write in function to score sentiment
library(plyr)
score.sentiment = function(sentences, pos.words, neg.words, .progress='none')
{ # function to score the sentiments
require(plyr)
require(stringr)
# we got a vector of sentences. plyr will handle a list
# or a vector as an "l" for us
# we want a simple array ("a") of scores back, so we use
# "l" + "a" + "ply" = "laply":
scores = laply(sentences, function(sentence, pos.words, neg.words) {
# clean up sentences with R's regex-driven global substitute, gsub():
sentence = gsub('[[:punct:]]', '', sentence)
sentence = gsub('[[:cntrl:]]', '', sentence)
sentence = gsub('\\d+', '', sentence)
# and convert to lower case:
sentence = tolower(sentence)
# split into words. str_split is in the stringr package
word.list = str_split(sentence, '\\s+')
# sometimes a list() is one level of hierarchy too much
words = unlist(word.list)
# compare our words to the dictionaries of positive & negative terms
pos.matches = match(words, pos.words)
neg.matches = match(words, neg.words)
# match() returns the position of the matched term or NA
# we just want a TRUE/FALSE:
pos.matches = !is.na(pos.matches)
neg.matches = !is.na(neg.matches)
# and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum():
score = sum(pos.matches) - sum(neg.matches)
return(score)
}, pos.words, neg.words, .progress=.progress )
scores.df = data.frame(score=scores, text=sentences)
return(scores.df)
}
clean.tweets <- function(tweets.df){ # Function to clean the data
twlist<-twListToDF(tweets.df)
dattemp <- unlist(strsplit(twlist$text, split=", "))
# remove usernames
datatemp<-gsub("@[[:alnum:]]*","",dattemp)
# to ASCII
datatemp <- iconv(datatemp, "latin1", "ASCII", sub="")
datatemp <- str_replace_all(datatemp,"[^[:graph:]]", " ")
# remove punctuation
datatemp<-gsub("[[:punct:]]", "", datatemp)
# remove htpp
datatemp<-gsub("http[[:alnum:]]*","",datatemp)
# remove numbers
datatemp<-gsub("\\d", "",datatemp)
# remove unrecognized chars
datatemp<-gsub("�", "",datatemp)
# remove "stop words"
myStopWords <-c(stopwords('english'))
datatemp<-removeWords(datatemp,myStopWords)
# Strip whitespace
datatemp<-stripWhitespace(datatemp)
# to lowercase
datatemp <-tolower(datatemp)
return(datatemp)
}
# 4.1 Score tweets' sentiment
UO.score=score.sentiment(clean.tweets(UO.tweets), pos.words, neg.words, .progress='text')
CC.score=score.sentiment(clean.tweets(CC.tweets), pos.words, neg.words, .progress='text')
F21.score=score.sentiment(clean.tweets(F21.tweets), pos.words, neg.words, .progress='text')
# 4.2 Configure colums for further plotting
UO.score$brand="Urban Outfitters"
UO.score$code="UO"
CC.score$brand="Comcast"
CC.score$code="CC"
F21.score$brand="Forever 21"
F21.score$code="F21"
# 4.3 Bind scores for brands
brands.score=rbind(UO.score, CC.score, F21.score)
# 5 Data visualization
# plot of the score by brand
g = ggplot(data=brands.score, mapping=aes(x=score, fill=brand) )
g = g + geom_bar(binwidth=1) # Do a histogram
g = g + facet_grid(brand~.) # Have a different plot for each brand
g = g + theme_bw() + scale_fill_brewer() # Define the colors (blue in a b&w theme)
g
#########################
#### GeoLocalisation ####
#########################
library(maps)
# 1 Data preparation
france<-map(database="france")
departements <- match.map(france,france$names) # Select only the departments index
radius<-'40km' # gross estimate of the average radius of a departement
#create a string of the lat, long, and radius for entry into searchTwitter()
for(i in 1:length(departements)){
france$search.twitter.entry[departements[i]]<-toString(c(france$y[departements[i]],france$x[departements[i]],radius))
}
# take out spaces in the string
france$search.twitter.entry<-gsub(" ","", france$search.twitter.entry ,fixed=TRUE)
# 2 Data extraction
#Search twitter at each location, check how many tweets and put into dataframe
searchtext<-'@fhollande'
maxtweets<-200
for(i in 1:length(departements)){
if(!is.na(france$y[departements[i]])) {
france$number.of.tweets[departements[i]]<-
length(searchTwitter(searchString=searchtext,n=maxtweets,geocode=france$search.twitter.entry[departements[i]]))
}}
france$number.of.tweets[is.na(france$number.of.tweets)]<-0
# 3 Data visualization
gray.colors<-function(n) gray(rev(0:(n-1)/1.5)/n)
colors<-gray.colors(maxtweets)[france$number.of.tweets[departements]]
#making the map of France
map(database="france", fill=TRUE,col=colors,resolution=0)
save.image(file="Session8.Rdata")
|
#' @export
coxKernelnet <- function(x, y, t, nfolds, stdbeta, alpha)
{
  # Fit a network-penalized Cox model (Coxnet with an RBF-kernel Laplacian
  # as the network penalty), estimate the baseline survival probability at
  # time `t`, and attach cross-validated evaluation metrics.
  #
  # x       - predictor matrix
  # y       - survival outcome; assumed to supply `time` and `status`
  #           columns after cbind() below -- TODO confirm column layout
  # t       - evaluation time horizon
  # nfolds  - number of cross-validation folds
  # stdbeta - forwarded to Coxnet() as `isd` (standardization flag)
  # alpha   - mixing parameter for the "Net" penalty
  # Correlation matrix
  rbf <- as.matrix(getLaplacian(x, "RBF"))
  # cross validation
  foldid<-coxsplit(y, nfolds)
  fit <- Coxnet(x, y, Omega = rbf, penalty="Net", alpha=alpha, foldid=foldid, isd=stdbeta)
  beta <- fit$Beta
  # Sort observations by time (column 1 after cbind).
  dt <- data.frame(cbind(y, x))
  dt <- dt[order(dt[,1]),]
  # Predictor columns only; columns 1-2 are assumed to be time/status.
  z <- dt[,3:ncol(dt)]
  # Events (status == 1) observed up to the horizon t.
  event <- subset(dt, dt$status==1 & dt$time <= t)
  unique_time <- unique(event[,1])
  # Risk sets: row indices of subjects still at risk at each event time.
  R <- sapply(unique_time, function(t){
    which(dt[,1]>=t)})
  # Breslow-style cumulative baseline hazard at t: sum over event times of
  # 1 / sum_{risk set} exp(z * beta).
  h0 <- sum(sapply(1:length(unique_time), function(i){
    temp <- z[R[[i]],]
    1/sum((exp(as.matrix(temp) %*% as.matrix(beta))))
  }))
  s0 <- exp(-h0)
  # Fetch the evaluation metrics
  # NOTE(review): `eval` must be a package-local helper shadowing base::eval
  # -- confirm; calling base::eval with these arguments would error.
  eval_metrics <- eval(x, y, t, beta, h0, s0, foldid, nfolds)
  # Append the evaluation metrics to the Coxnet object and return
  return(c(fit, s0=s0, unlist(eval_metrics)))
}
|
/RegCox/R/cox-KernelNet.R
|
no_license
|
aastha3/RegCox
|
R
| false
| false
| 861
|
r
|
#' @export
coxKernelnet <- function(x, y, t, nfolds, stdbeta, alpha)
{
  # Fit a network-penalized Cox model (Coxnet with an RBF-kernel Laplacian
  # as the network penalty), estimate the baseline survival probability at
  # time `t`, and attach cross-validated evaluation metrics.
  #
  # x       - predictor matrix
  # y       - survival outcome; assumed to supply `time` and `status`
  #           columns after cbind() below -- TODO confirm column layout
  # t       - evaluation time horizon
  # nfolds  - number of cross-validation folds
  # stdbeta - forwarded to Coxnet() as `isd` (standardization flag)
  # alpha   - mixing parameter for the "Net" penalty
  # Correlation matrix
  rbf <- as.matrix(getLaplacian(x, "RBF"))
  # cross validation
  foldid<-coxsplit(y, nfolds)
  fit <- Coxnet(x, y, Omega = rbf, penalty="Net", alpha=alpha, foldid=foldid, isd=stdbeta)
  beta <- fit$Beta
  # Sort observations by time (column 1 after cbind).
  dt <- data.frame(cbind(y, x))
  dt <- dt[order(dt[,1]),]
  # Predictor columns only; columns 1-2 are assumed to be time/status.
  z <- dt[,3:ncol(dt)]
  # Events (status == 1) observed up to the horizon t.
  event <- subset(dt, dt$status==1 & dt$time <= t)
  unique_time <- unique(event[,1])
  # Risk sets: row indices of subjects still at risk at each event time.
  R <- sapply(unique_time, function(t){
    which(dt[,1]>=t)})
  # Breslow-style cumulative baseline hazard at t: sum over event times of
  # 1 / sum_{risk set} exp(z * beta).
  h0 <- sum(sapply(1:length(unique_time), function(i){
    temp <- z[R[[i]],]
    1/sum((exp(as.matrix(temp) %*% as.matrix(beta))))
  }))
  s0 <- exp(-h0)
  # Fetch the evaluation metrics
  # NOTE(review): `eval` must be a package-local helper shadowing base::eval
  # -- confirm; calling base::eval with these arguments would error.
  eval_metrics <- eval(x, y, t, beta, h0, s0, foldid, nfolds)
  # Append the evaluation metrics to the Coxnet object and return
  return(c(fit, s0=s0, unlist(eval_metrics)))
}
|
# Clip rasters by polygon
clip.by.polygon <- function(raster, # Raster object
                            shape   # Polygon object
) {
  # Crop the raster to the polygon's bounding extent, then mask out cells
  # falling outside the polygon itself (rasterize yields NA there, and
  # NA * value propagates NA).
  cropped <- crop(raster, shape)
  poly_mask <- rasterize(shape, cropped)
  cropped * poly_mask
}
|
/F_clip_raster_by_polygon.R
|
no_license
|
tufui57/SAI
|
R
| false
| false
| 263
|
r
|
# Clip rasters by polygon
clip.by.polygon <- function(raster, # Raster object
                            shape   # Polygon object
) {
  # Crop the raster to the polygon's bounding extent, then mask out cells
  # falling outside the polygon itself (rasterize yields NA there, and
  # NA * value propagates NA).
  cropped <- crop(raster, shape)
  poly_mask <- rasterize(shape, cropped)
  cropped * poly_mask
}
|
# simple case density
# q_density = function(x_vec){
# if (x_vec[1] <= 10 & x_vec[1] >= -10 & x_vec[2]>= -10 & x_vec[2]<= 10) {
# tmpphix = (1/3)*exp((-1/2)*sum((x_vec+5)^2)) + (2/3)*exp((-1/2)*sum((x_vec-5)^2))
# return(tmpphix)
# }else if(x_vec[1] <= 11 & x_vec[1] >= 10 & x_vec[2]>= -10 & x_vec[2]<= 10){
# tmpphix = 0.05
# return(tmpphix)
# }else{
# return(0)
# }
# }
# only
# q_density = function(x_vec){
# tmpphix = (1/3)*exp((-1/2)*sum((x_vec+5)^2)) + (2/3)*exp((-1/2)*sum((x_vec-5)^2))
# return(tmpphix)
# }
# only multidimensional
# q_density = function(x_vec){
# tmpphix = exp((-1/2)*sum((x_vec+5)^2))
# return(tmpphix)
# }
# multimode
# q_density = function(x_vec){
# tmpphix = (1/15)*exp((-1/2)*sum((x_vec+1)^2)) + (2/15)*exp((-1/2)*sum((x_vec-2)^2))
# +(3/15)*exp((-1/2)*sum((x_vec+3)^2))+(4/15)*exp((-1/2)*sum((x_vec-4)^2))+(5/15)*exp((-1/2)*sum((x_vec+5)^2))
# return(tmpphix)
# }
# new version that mode is separated
q_density <- function(x_vec) {
  # Unnormalized 5-component isotropic Gaussian mixture density evaluated
  # at a single point `x_vec` (any dimension; components share the mean
  # value on every coordinate).
  component_means <- c(-11, 12, -8, 7, -2)
  component_weights <- seq_len(5) / 15
  kernel_at <- function(mu) exp(-0.5 * sum((x_vec - mu)^2))
  densities <- vapply(component_means, kernel_at, numeric(1))
  sum(component_weights * densities)
}
q_density_est <- function(x_vec, log = TRUE) {
  # Evaluate the unnormalized 5-component Gaussian mixture density on each
  # row of the matrix `x_vec`.
  #
  # x_vec - numeric matrix; one evaluation point per row
  # log   - if TRUE (default), return log-densities; if FALSE, raw
  #         densities. (Fix: the original ignored this argument and always
  #         returned log-densities.)
  #
  # Returns a numeric vector with one value per row of `x_vec`.
  n_rows <- dim(x_vec)[1]
  result1 <- rep(0, n_rows)
  # seq_len() is safe for a zero-row matrix (1:0 would iterate twice).
  for (i in seq_len(n_rows)) {
    x_vec1 <- x_vec[i, ]
    result1[i] <- (1/15)*exp((-1/2)*sum((x_vec1+11)^2)) +
      (2/15)*exp((-1/2)*sum((x_vec1-12)^2)) +
      (3/15)*exp((-1/2)*sum((x_vec1+8)^2)) +
      (4/15)*exp((-1/2)*sum((x_vec1-7)^2)) +
      (5/15)*exp((-1/2)*sum((x_vec1+2)^2))
  }
  # base:: qualifier avoids any confusion with the `log` argument.
  if (log) base::log(result1) else result1
}
### ignore
## compare with MCMC with phi_mix as proposal. -proposal_density(y|x)
# Stand_MCMC = function(Num_sample, proposal_density, proposal_sample_fun, w_star = NULL,Dim_w,dens){
# w_cur <- w_star
# sample_get <- matrix(0,nrow = Num_sample, ncol = Dim_w)
# for (i in 1:Num_sample) {
# w_next <- proposal_sample_fun(w_cur,dens)
# t_num = proposal_density(w_cur,dens)
# t_den = proposal_density(w_next,dens)
# qden_num = q_density(w_next)
# qden_den = q_density(w_cur)
# tmp_accept = (t_num*qden_num)/(t_den*qden_den)
# p_accept = min(tmp_accept,1)
# tmp_compare = runif(1,min = 0,max = 1)
# if (tmp_compare <= p_accept) {
# w_cur = w_next
# }
# sample_get[i, ] <- w_cur
# }
# return(sample_get)
# }
# proposal density function -- phi_mix
proposal_density <- function(w, dens) {
  # Evaluate the fitted mixture density model `dens` at the point `w`.
  #
  # w    - numeric vector, one evaluation point
  # dens - fitted density model whose predict() method accepts a 1-row
  #        matrix with `what`/`logarithm` arguments (mclust-style --
  #        TODO confirm against the object actually passed in)
  #
  # Returns the (non-log) density value at `w`.
  #
  # Fix: removed leftover debug print('w') / print(w) calls that polluted
  # the console on every density evaluation.
  tmpw <- matrix(as.vector(w), nrow = 1)
  d2 <- predict(dens, tmpw, what = c("dens"), logarithm = FALSE)
  d2[1]
}
# proposal sample function -- phi_mix
# Draw one sample from the fitted Gaussian mixture `dens`: the model exposes
# `G` components, mixing proportions in parameters$pro, component means in
# the columns of parameters$mean, and covariances in
# parameters$variance$sigma (mclust-style layout -- TODO confirm against
# the object actually used).
#
# w    - unused; kept for signature compatibility with the MCMC driver
# dens - fitted mixture model
#
# Returns a numeric vector drawn from one randomly chosen component
# (MASS::mvrnorm must be available).
proposal_sample_fun = function(w = NULL,dens){
  component_num = dens$G
  pi_vec = dens$parameters$pro
  # mean_vec = dens$parameters$mean[variable_num,]
  # sigma_vec = c()
  # for (i in 1:component_num) {
  #   tmpsigma = dens$parameters$variance$sigma[,,i][variable_num,variable_num]
  #   sigma_vec = c(sigma_vec,tmpsigma)
  # }
  # Pick a component with probability proportional to its mixing weight.
  tmpsample1 = sample(x =1:component_num ,size = 1,replace = TRUE,prob = pi_vec)
  mean_cur = dens$parameters$mean[,tmpsample1]
  Sigma_cur = dens$parameters$variance
  Sigma_cur = Sigma_cur$sigma[,,tmpsample1]
  # Draw from the selected multivariate-normal component.
  tmpsample2 = mvrnorm(n = 1,mean_cur,Sigma_cur)
  return(tmpsample2)
}
|
/functions_used/simple_case_density.R
|
no_license
|
feiding333/Bayesian-codes-of-the-exoplanet.
|
R
| false
| false
| 3,295
|
r
|
# simple case density
# q_density = function(x_vec){
# if (x_vec[1] <= 10 & x_vec[1] >= -10 & x_vec[2]>= -10 & x_vec[2]<= 10) {
# tmpphix = (1/3)*exp((-1/2)*sum((x_vec+5)^2)) + (2/3)*exp((-1/2)*sum((x_vec-5)^2))
# return(tmpphix)
# }else if(x_vec[1] <= 11 & x_vec[1] >= 10 & x_vec[2]>= -10 & x_vec[2]<= 10){
# tmpphix = 0.05
# return(tmpphix)
# }else{
# return(0)
# }
# }
# only
# q_density = function(x_vec){
# tmpphix = (1/3)*exp((-1/2)*sum((x_vec+5)^2)) + (2/3)*exp((-1/2)*sum((x_vec-5)^2))
# return(tmpphix)
# }
# only multidimensional
# q_density = function(x_vec){
# tmpphix = exp((-1/2)*sum((x_vec+5)^2))
# return(tmpphix)
# }
# multimode
# q_density = function(x_vec){
# tmpphix = (1/15)*exp((-1/2)*sum((x_vec+1)^2)) + (2/15)*exp((-1/2)*sum((x_vec-2)^2))
# +(3/15)*exp((-1/2)*sum((x_vec+3)^2))+(4/15)*exp((-1/2)*sum((x_vec-4)^2))+(5/15)*exp((-1/2)*sum((x_vec+5)^2))
# return(tmpphix)
# }
# new version that mode is separated
# Unnormalized target density: an isotropic Gaussian mixture with five
# well-separated modes (every coordinate of a mode shares the same
# center) and weights 1:5 / 15. Centers follow the (x_vec + c) sign
# convention of the original, i.e. modes sit at -11, 12, -8, 7 and -2.
q_density = function(x_vec){
  shifts <- c(11, -12, 8, -7, 2)
  weights <- (1:5) / 15
  components <- vapply(
    shifts,
    function(s) exp(-0.5 * sum((x_vec + s)^2)),
    numeric(1)
  )
  sum(weights * components)
}
# Evaluate the (unnormalized) five-mode Gaussian-mixture target density
# for each row of a matrix of points.
#
# Args:
#   x_vec: numeric matrix, one point per row.
#   log:   if TRUE (default) return the log-density, otherwise the raw
#          density. (Bug fix: the original declared this argument but
#          ignored it and always returned the log.)
#
# Returns: numeric vector with one (log-)density value per row.
q_density_est = function(x_vec, log = TRUE){
  n_points <- dim(x_vec)[1]
  result1 <- rep(0, n_points)
  for (i in seq_len(n_points)) {
    x_vec1 <- x_vec[i, ]
    # Same mixture as q_density(): modes at -11, 12, -8, 7, -2 with
    # weights 1:5 / 15.
    tmpphix <- (1/15)*exp((-1/2)*sum((x_vec1+11)^2)) + (2/15)*exp((-1/2)*sum((x_vec1-12)^2)) + (3/15)*exp((-1/2)*sum((x_vec1+8)^2)) + (4/15)*exp((-1/2)*sum((x_vec1-7)^2)) + (5/15)*exp((-1/2)*sum((x_vec1+2)^2))
    result1[i] <- tmpphix
  }
  if (log) {
    return(log(result1))
  }
  result1
}
### ignore
## compare with MCMC with phi_mix as proposal. -proposal_density(y|x)
# Stand_MCMC = function(Num_sample, proposal_density, proposal_sample_fun, w_star = NULL,Dim_w,dens){
# w_cur <- w_star
# sample_get <- matrix(0,nrow = Num_sample, ncol = Dim_w)
# for (i in 1:Num_sample) {
# w_next <- proposal_sample_fun(w_cur,dens)
# t_num = proposal_density(w_cur,dens)
# t_den = proposal_density(w_next,dens)
# qden_num = q_density(w_next)
# qden_den = q_density(w_cur)
# tmp_accept = (t_num*qden_num)/(t_den*qden_den)
# p_accept = min(tmp_accept,1)
# tmp_compare = runif(1,min = 0,max = 1)
# if (tmp_compare <= p_accept) {
# w_cur = w_next
# }
# sample_get[i, ] <- w_cur
# }
# return(sample_get)
# }
# proposal density function -- phi_mix
# Evaluate the fitted mixture proposal density at a single point.
#
# Args:
#   w:    numeric vector (or 1-row matrix) giving the evaluation point.
#   dens: a fitted density model whose predict() method accepts
#         what = c("dens") -- presumably a mclust 'densityMclust' fit;
#         verify against the caller.
#
# Returns: the (non-log) density value at w.
proposal_density = function(w,dens){
  # Flatten w and shape it as a single observation (1-row matrix),
  # the layout predict() expects.
  tmpw <- as.vector(w)
  tmpw <- matrix(tmpw, nrow = 1)
  # Fix: removed leftover debug print('w')/print(w) calls that wrote to
  # the console on every density evaluation inside the MCMC loop.
  d2 = predict(dens, tmpw, what = c("dens"), logarithm = FALSE)
  # predict() returns one density value per input row; keep the single entry.
  phi_mix = d2[1]
  return(phi_mix)
}
# proposal sample function -- phi_mix
# Draw one sample from the fitted Gaussian-mixture proposal: choose a
# mixture component with probability proportional to its weight, then
# draw from that component's multivariate normal.
#
# Args:
#   w:    unused; kept so the signature matches the MCMC driver.
#   dens: fitted mixture model exposing $G (number of components),
#         $parameters$pro (weights), $parameters$mean (one column per
#         component) and $parameters$variance$sigma (covariance array).
#
# Returns: one sampled point as a numeric vector
#          (via mvrnorm -- presumably MASS; confirm the package is attached).
proposal_sample_fun = function(w = NULL,dens){
  n_components <- dens$G
  component_weights <- dens$parameters$pro
  # Pick which component generates this draw.
  chosen <- sample(x = 1:n_components, size = 1, replace = TRUE, prob = component_weights)
  chosen_mean <- dens$parameters$mean[, chosen]
  chosen_sigma <- dens$parameters$variance$sigma[, , chosen]
  # Single multivariate-normal draw from the chosen component.
  mvrnorm(n = 1, chosen_mean, chosen_sigma)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{storage_hosts_post}
\alias{storage_hosts_post}
\title{Create a new storage host}
\usage{
storage_hosts_post(provider, bucket, name, s3_options = NULL)
}
\arguments{
\item{provider}{string required. The storage provider.One of: s3.}
\item{bucket}{string required. The bucket for this storage host.}
\item{name}{string required. The human readable name for the storage host.}
\item{s3_options}{list optional. A list containing the following elements:
\itemize{
\item region string, The region for this storage host (ex. "us-east-1")
}}
}
\value{
A list containing the following elements:
\item{id}{integer, The ID of the storage host.}
\item{name}{string, The human readable name for the storage host.}
\item{provider}{string, The storage provider.One of: s3.}
\item{bucket}{string, The bucket for this storage host.}
\item{s3Options}{list, A list containing the following elements:
\itemize{
\item region string, The region for this storage host (ex. "us-east-1")
}}
}
\description{
Create a new storage host
}
|
/man/storage_hosts_post.Rd
|
no_license
|
elsander/civis-r
|
R
| false
| true
| 1,118
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{storage_hosts_post}
\alias{storage_hosts_post}
\title{Create a new storage host}
\usage{
storage_hosts_post(provider, bucket, name, s3_options = NULL)
}
\arguments{
\item{provider}{string required. The storage provider.One of: s3.}
\item{bucket}{string required. The bucket for this storage host.}
\item{name}{string required. The human readable name for the storage host.}
\item{s3_options}{list optional. A list containing the following elements:
\itemize{
\item region string, The region for this storage host (ex. "us-east-1")
}}
}
\value{
A list containing the following elements:
\item{id}{integer, The ID of the storage host.}
\item{name}{string, The human readable name for the storage host.}
\item{provider}{string, The storage provider.One of: s3.}
\item{bucket}{string, The bucket for this storage host.}
\item{s3Options}{list, A list containing the following elements:
\itemize{
\item region string, The region for this storage host (ex. "us-east-1")
}}
}
\description{
Create a new storage host
}
|
best <- function(state, outcome) {
## Read outcome data
outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Check that state and outcome are valid
list_state <- unique(outcome_data$State)
if (!state %in% list_state) stop("invalid state")
list_outcome <- c('heart attack', 'heart failure', 'pneumonia')
if (!outcome %in% list_outcome) stop("invalid outcome")
## Return hospital name in that state with lowest 30-day death
## rate
heart_attack_outcome_by_state <- subset(outcome_data, outcome_data$State == outcome)
retval <- subset(heart_attack_outcome_by_state[2], heart_attack_outcome_by_state[,11] == min(heart_attack_outcome_by_state[,11], na.rm=TRUE))
}
|
/best.R
|
no_license
|
rnugraha/rprog-data-ProgAssignment3-data
|
R
| false
| false
| 723
|
r
|
best <- function(state, outcome) {
  ## Read outcome data
  outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Check that state and outcome are valid
  if (!state %in% unique(outcome_data$State)) stop("invalid state")
  ## Each supported outcome maps to its 30-day mortality column in the
  ## data set. (Bug fix: the original always read column 11 -- heart
  ## attack -- no matter which outcome was requested.)
  outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!outcome %in% names(outcome_cols)) stop("invalid outcome")
  death_col <- outcome_cols[[outcome]]
  ## Keep only hospitals in the requested state. (Bug fix: the original
  ## filtered on State == outcome, which never matches a state code and
  ## so always produced an empty result.)
  state_data <- outcome_data[outcome_data$State == state, ]
  ## The mortality column was read as character; compare numerically.
  ## "Not Available" entries become NA (warnings suppressed).
  rates <- suppressWarnings(as.numeric(state_data[, death_col]))
  ## Return hospital name (column 2) with the lowest 30-day death rate,
  ## breaking ties alphabetically.
  candidates <- state_data[which(rates == min(rates, na.rm = TRUE)), 2]
  sort(candidates)[1]
}
|
#!/usr/bin/Rscript
library('ProjectTemplate')
try(load.project())
logit.fit <- glm(Installed ~ LogDependencyCount +
LogSuggestionCount +
LogImportCount +
LogViewsIncluding +
LogPackagesMaintaining +
CorePackage +
RecommendedPackage,
data = training.data,
family = binomial(link = 'logit'))
summary(logit.fit)
|
/example_model.R
|
no_license
|
m4xl1n/r_recommendation_system
|
R
| false
| false
| 520
|
r
|
#!/usr/bin/Rscript
# Fit a logistic regression predicting whether an R package is installed
# from log-scaled dependency/popularity features plus core/recommended flags.
library('ProjectTemplate')
# load.project() runs the ProjectTemplate data-loading and munging steps;
# wrapped in try() so a partial project layout does not abort the script.
try(load.project())
# Binomial family with a logit link => logistic regression on the binary
# Installed response; training.data is expected to be provided by
# load.project() above -- TODO confirm against the project layout.
logit.fit <- glm(Installed ~ LogDependencyCount +
                   LogSuggestionCount +
                   LogImportCount +
                   LogViewsIncluding +
                   LogPackagesMaintaining +
                   CorePackage +
                   RecommendedPackage,
                 data = training.data,
                 family = binomial(link = 'logit'))
# Print coefficient estimates, significance and deviance.
summary(logit.fit)
|
# Developing Data Products, by Coursera
# Minna Asplund, 2018
#
library(shiny)
shinyServer(function(input, output) {
answer <- reactive({
a <- input$frstNmbr
b <- input$scndNmbr
while (a != b)
{
if (a > b)
{
a <- a - b
}
else
{
b <- b - a
}
}
return(a)
})
output$gcd_answer <- renderText({
paste("The calculated GCD is ", answer())
})
})
|
/server.R
|
no_license
|
Tiitseri/DDP
|
R
| false
| false
| 477
|
r
|
# Developing Data Products, by Coursera
# Minna Asplund, 2018
#
library(shiny)

shinyServer(function(input, output) {

  # Greatest common divisor of the two numeric inputs, computed with the
  # modulo form of Euclid's algorithm.
  # (Improvement: the original repeated-subtraction loop never terminates
  # when an input is zero, and is slow for very unequal inputs; the
  # modulo form returns the same GCD for valid positive integers.)
  answer <- reactive({
    a <- input$frstNmbr
    b <- input$scndNmbr
    while (b != 0) {
      remainder <- a %% b
      a <- b
      b <- remainder
    }
    return(a)
  })

  # Render the computed GCD as text for the UI.
  output$gcd_answer <- renderText({
    paste("The calculated GCD is ", answer())
  })

})
|
# Software Carpentry Workshop
# University of Chicago
# 2016-09-16
# First version of function to calculate a summary statistic, which is the mean
# of the columns (cols) in df.
calc_sum_stat <- function(df, cols) {
df_sub <- df[, cols]
sum_stat <- apply(df_sub, 1, mean)
return(sum_stat)
}
|
/code/calc_sum_stat_v01.R
|
permissive
|
jdblischak/2016-09-15-chicago
|
R
| false
| false
| 298
|
r
|
# Software Carpentry Workshop
# University of Chicago
# 2016-09-16

# Calculate a summary statistic: the per-row mean across the selected
# numeric columns (cols) of df.
#
# Args:
#   df:   data frame with numeric columns.
#   cols: names (or indices) selecting at least two columns of df.
#
# Returns: a numeric vector with one mean per row of df.
calc_sum_stat <- function(df, cols) {
  df_sub <- df[, cols]
  # rowMeans is the idiomatic (and faster) replacement for
  # apply(df_sub, 1, mean); results are identical for numeric columns.
  rowMeans(df_sub)
}
|
outcomeData <- NULL
states <- NULL
outcomes <- c("heart attack", "heart failure", "pneumonia")
outcomeNumber <- c(11, 17, 23)
names(outcomeNumber) <- outcomes
best <- function(state, outcome) {
## Read outcome data
if (is.null(outcomeData)){
outcomeData <<- read.csv("outcome-of-care-measures.csv", colClasses = "character")
states <<- unique(outcomeData$State)
# convert the outcomes columns to numbers
for(i in outcomeNumber) {
outcomeData[, i] <<- as.numeric(outcomeData[, i])
}
}
## Check that state and outcome are valid
if(!state %in% states) {
stop("invalid state")
}
if(!outcome %in% outcomes) {
stop("invalid outcome")
}
# keep only hospitals (rows) which match state, keep
# the hospital name column (2 in the original data)
# and outcomes (columns) that match outcome
stateData <- outcomeData[outcomeData$State ==
state,c(2, outcomeNumber[[outcome]])]
# now sort that data in ascending order of outcome, breaking
# ties on the ascending alphabetical order of the hospital name
stateData <- stateData[order(stateData[ ,2], stateData[ ,1]),]
## Return hospital name in that state with lowest 30-day death
## rate
stateData[1,1]
}
|
/best.R
|
no_license
|
Goatflakes/r-prog-ass3
|
R
| false
| false
| 1,251
|
r
|
# Module-level cache: the CSV is read and coerced once, then reused by
# later calls via <<- assignment inside best().
# NOTE(review): mutating globals with <<- is a known anti-pattern; kept
# as-is here to preserve the caching behavior exactly.
outcomeData <- NULL
states <- NULL
# Supported outcomes and the column holding each outcome's 30-day
# mortality rate in the CSV.
outcomes <- c("heart attack", "heart failure", "pneumonia")
outcomeNumber <- c(11, 17, 23)
names(outcomeNumber) <- outcomes

# Return the name of the hospital in `state` with the lowest 30-day
# death rate for `outcome`, breaking ties alphabetically.
best <- function(state, outcome) {
  ## Read outcome data
  # Lazily populate the module-level cache on first use.
  if (is.null(outcomeData)){
    outcomeData <<- read.csv("outcome-of-care-measures.csv", colClasses = "character")
    states <<- unique(outcomeData$State)
    # convert the outcomes columns to numbers
    # ("Not Available" entries become NA with a coercion warning)
    for(i in outcomeNumber) {
      outcomeData[, i] <<- as.numeric(outcomeData[, i])
    }
  }
  ## Check that state and outcome are valid
  if(!state %in% states) {
    stop("invalid state")
  }
  if(!outcome %in% outcomes) {
    stop("invalid outcome")
  }
  # keep only hospitals (rows) which match state, keep
  # the hospital name column (2 in the original data)
  # and outcomes (columns) that match outcome
  stateData <- outcomeData[outcomeData$State ==
                             state,c(2, outcomeNumber[[outcome]])]
  # now sort that data in ascending order of outcome, breaking
  # ties on the ascending alphabetical order of the hospital name
  # (NAs sort last, so a valid rate wins when one exists)
  stateData <- stateData[order(stateData[ ,2], stateData[ ,1]),]
  ## Return hospital name in that state with lowest 30-day death
  ## rate
  stateData[1,1]
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/additive.R
\name{additive}
\alias{additive}
\title{additive}
\usage{
additive(geno, n, samp, p, pi)
}
\description{
additive
}
|
/man/additive.Rd
|
no_license
|
jyc7385/infolab7
|
R
| false
| true
| 205
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/additive.R
\name{additive}
\alias{additive}
\title{additive}
\usage{
additive(geno, n, samp, p, pi)
}
\description{
additive
}
|
library(Devore7)
### Name: ex13.02
### Title: R Data set: ex13.02
### Aliases: ex13.02
### Keywords: datasets
### ** Examples
data(ex13.02)
str(ex13.02)
|
/data/genthat_extracted_code/Devore7/examples/ex13.02.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 160
|
r
|
library(Devore7)
### Name: ex13.02
### Title: R Data set: ex13.02
### Aliases: ex13.02
### Keywords: datasets
### ** Examples
data(ex13.02)
str(ex13.02)
|
#' Conveniently message dataframe
#'
#' Conveniently message dataframe using sprintf syntax.
#' Use place holder '%s' for data.frame.
#'
#' @param format_string sprintf style format string
#' @param x data.frame
#' @return NULL
#' @examples
#' x <- data.frame(feature_id = c('F001', 'F002'), symbol = c('FEAT1', 'FEAT2'))
#' cmessage_df('\t%s', x)
#'
#' x <- c(rep('PASS', 25), rep('FAIL', 25))
#' cmessage_df(format_string = '%s', table(x))
#' @importFrom magrittr %>%
#' @export
cmessage_df <- function(format_string, x){
format_string %>%
sprintf(capture.output(print(x))) %>%
paste0(collapse = '\n') %>%
enc2utf8() %>%
message()
}
#' Conveniently message
#'
#' Print message to screen with sprintf syntax
#'
#' @param format_string sprintf format string
#' @param ... additional arguments passed to sprintf
#' @examples
#' cmessage('\t%s\t%s', 'Hi', 'there')
#' @importFrom magrittr %>%
#' @export
cmessage <- function(format_string, ...){
format_string %>%
sprintf(...) %>%
message()
}
|
/autonomics.support/R/message.R
|
no_license
|
bhagwataditya/autonomics0
|
R
| false
| false
| 1,042
|
r
|
#' Conveniently message dataframe
#'
#' Emit a data.frame (or any printable object) via message(), applying a
#' sprintf-style template to each printed line. Use the place holder
#' '%s' for the object's printed output.
#'
#' @param format_string sprintf style format string
#' @param x data.frame
#' @return NULL
#' @examples
#' x <- data.frame(feature_id = c('F001', 'F002'), symbol = c('FEAT1', 'FEAT2'))
#' cmessage_df('\t%s', x)
#'
#' x <- c(rep('PASS', 25), rep('FAIL', 25))
#' cmessage_df(format_string = '%s', table(x))
#' @importFrom magrittr %>%
#' @export
cmessage_df <- function(format_string, x){
  # Render the object exactly as print() would, one string per line.
  printed_lines <- capture.output(print(x))
  # Apply the sprintf template to every line, then glue them back
  # together into one UTF-8 message body.
  formatted <- sprintf(format_string, printed_lines)
  message(enc2utf8(paste0(formatted, collapse = '\n')))
}
#' Conveniently message
#'
#' Emit a status message built with sprintf-style formatting.
#'
#' @param format_string sprintf format string
#' @param ... additional arguments passed to sprintf
#' @examples
#' cmessage('\t%s\t%s', 'Hi', 'there')
#' @importFrom magrittr %>%
#' @export
cmessage <- function(format_string, ...){
  message(sprintf(format_string, ...))
}
|
##Reading data
test_activity <- read.table("./UCI HAR Dataset/test/Y_test.txt",header = FALSE)
train_activity <- read.table("./UCI HAR Dataset/train/Y_train.txt",header = FALSE)
test_subject <- read.table("./UCI HAR Dataset/test/subject_test.txt",header = FALSE)
train_subject <- read.table("./UCI HAR Dataset/train/subject_train.txt",header = FALSE)
test_features <- read.table("./UCI HAR Dataset/test/X_test.txt",header = FALSE)
train_features <- read.table("./UCI HAR Dataset/train/X_train.txt",header = FALSE)
##Combining test and train tables
data_activity <- rbind(test_activity,train_activity)
data_subject <- rbind(test_subject,train_subject)
data_features <- rbind(test_features,train_features)
##Naming variables
names(data_activity) <-c("activity")
names(data_subject) <- c("subject")
names_features <- read.table("./UCI HAR Dataset/features.txt",header = FALSE)
names(data_features) <- names_features$V2
##Combining all tables together
alldata <- cbind(data_features,data_subject,data_activity)
##Selecting only mean and std data
selected_features <- names(data_features)[grep("mean\\b|std\\b", names(data_features))]
selected_names <- c(selected_features,names(data_subject),names(data_activity))
data <- subset(alldata,select=selected_names)
##Using descriptive activity names
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt",header = FALSE)
data$activity <- factor(data$activity)
levels(data$activity) <- activity_labels$V2
##Labeling the variables with descriptive names
names(data)<-gsub("^t", "time", names(data))
names(data)<-gsub("^f", "frequency", names(data))
names(data)<-gsub("Acc", "accelerometer", names(data))
names(data)<-gsub("Gyro", "gyroscope", names(data))
names(data)<-gsub("Mag", "magnitude", names(data))
names(data)<-gsub("BodyBody", "body", names(data))
names(data)<-gsub("\\-|\\(|\\)", "", names(data))
names(data)<-tolower(names(data))
##Creating tidy data set
library(dplyr)
tidy<-data%>% group_by(subject,activity)%>%summarise_each(funs(mean))
tidy<-arrange(tidy,subject,activity)
tidy<-ungroup(tidy)
write.table(tidy, file = "tidydata.txt",row.name=FALSE)
|
/run_analysis.R
|
no_license
|
nazymkm/tidydata
|
R
| false
| false
| 2,174
|
r
|
## UCI HAR ("Human Activity Recognition") tidy-data pipeline:
## read raw test/train files, merge, keep mean/std features, label,
## and write a per-subject/per-activity averaged tidy data set.
##Reading data
test_activity <- read.table("./UCI HAR Dataset/test/Y_test.txt",header = FALSE)
train_activity <- read.table("./UCI HAR Dataset/train/Y_train.txt",header = FALSE)
test_subject <- read.table("./UCI HAR Dataset/test/subject_test.txt",header = FALSE)
train_subject <- read.table("./UCI HAR Dataset/train/subject_train.txt",header = FALSE)
test_features <- read.table("./UCI HAR Dataset/test/X_test.txt",header = FALSE)
train_features <- read.table("./UCI HAR Dataset/train/X_train.txt",header = FALSE)
##Combining test and train tables
data_activity <- rbind(test_activity,train_activity)
data_subject <- rbind(test_subject,train_subject)
data_features <- rbind(test_features,train_features)
##Naming variables
names(data_activity) <-c("activity")
names(data_subject) <- c("subject")
# features.txt holds one feature name per measurement column (V2).
names_features <- read.table("./UCI HAR Dataset/features.txt",header = FALSE)
names(data_features) <- names_features$V2
##Combining all tables together
alldata <- cbind(data_features,data_subject,data_activity)
##Selecting only mean and std data
# \b word boundary keeps mean()/std() but drops e.g. meanFreq().
selected_features <- names(data_features)[grep("mean\\b|std\\b", names(data_features))]
selected_names <- c(selected_features,names(data_subject),names(data_activity))
data <- subset(alldata,select=selected_names)
##Using descriptive activity names
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt",header = FALSE)
data$activity <- factor(data$activity)
levels(data$activity) <- activity_labels$V2
##Labeling the variables with descriptive names
# Expand the terse sensor abbreviations into readable words, strip
# punctuation, and lower-case everything.
names(data)<-gsub("^t", "time", names(data))
names(data)<-gsub("^f", "frequency", names(data))
names(data)<-gsub("Acc", "accelerometer", names(data))
names(data)<-gsub("Gyro", "gyroscope", names(data))
names(data)<-gsub("Mag", "magnitude", names(data))
names(data)<-gsub("BodyBody", "body", names(data))
names(data)<-gsub("\\-|\\(|\\)", "", names(data))
names(data)<-tolower(names(data))
##Creating tidy data set
library(dplyr)
# Average every retained feature per (subject, activity) pair.
# NOTE(review): summarise_each()/funs() are deprecated dplyr APIs --
# consider summarise(across(everything(), mean)) when updating.
tidy<-data%>% group_by(subject,activity)%>%summarise_each(funs(mean))
tidy<-arrange(tidy,subject,activity)
tidy<-ungroup(tidy)
# write.table's parameter is row.names; this relies on R's partial
# argument matching.
write.table(tidy, file = "tidydata.txt",row.name=FALSE)
|
# Decision Tree Classification
Titanic_data <- read.csv("/Users/eavy/Downloads/7390/Assignment/Assignment3/Titanic train.csv")
install.packages("rpart")
install.packages("rpart.plot")
library(rpart)
library(rpart.plot)
dTree<-rpart(Survived ~Pclass+Sex+Age+SibSp+Parch+Fare+Embarked, data=Titanic_data, method = "class")
summary(dTree)
rpart.plot(dTree)
# Testing
test_data <- read.csv("/Users/eavy/Downloads/7390/Assignment/Assignment3/Titanic test.csv")
View(test_data)
predict(dTree, test_data,type="class")
# Decision Tree Regression
Energy_data <- read.csv("/Users/eavy/Downloads/7390/Assignment/Assignment3/Energy Efficiency.csv")
Y1_model <- rpart(Y1 ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8, data = Energy_data)
rpart.plot(Y1_model)
Y2_model <- rpart(Y2 ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8, data = Energy_data)
rpart.plot(Y2_model)
|
/DecisionTree.R
|
no_license
|
yuanyuanzho/R_DecisionTree
|
R
| false
| false
| 853
|
r
|
# Decision Tree Classification
# Fit a classification tree predicting Titanic survival from passenger
# attributes, then predict on a held-out test file.
Titanic_data <- read.csv("/Users/eavy/Downloads/7390/Assignment/Assignment3/Titanic train.csv")

# Improvement: install the tree packages only when they are missing,
# instead of reinstalling unconditionally on every run of the script.
if (!requireNamespace("rpart", quietly = TRUE)) install.packages("rpart")
if (!requireNamespace("rpart.plot", quietly = TRUE)) install.packages("rpart.plot")
library(rpart)
library(rpart.plot)

# method = "class" builds a classification tree on the Survived label.
dTree<-rpart(Survived ~Pclass+Sex+Age+SibSp+Parch+Fare+Embarked, data=Titanic_data, method = "class")
summary(dTree)
rpart.plot(dTree)

# Testing
test_data <- read.csv("/Users/eavy/Downloads/7390/Assignment/Assignment3/Titanic test.csv")
View(test_data)
predict(dTree, test_data,type="class")

# Decision Tree Regression
# Separate regression trees for the two energy-efficiency targets Y1/Y2.
Energy_data <- read.csv("/Users/eavy/Downloads/7390/Assignment/Assignment3/Energy Efficiency.csv")
Y1_model <- rpart(Y1 ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8, data = Energy_data)
rpart.plot(Y1_model)
Y2_model <- rpart(Y2 ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8, data = Energy_data)
rpart.plot(Y2_model)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alg_classes.R
\docType{class}
\name{alg-class}
\alias{alg-class}
\title{Abstract optimization algorithm class}
\description{
An S4 class to represent an abstract optimization algorithm.
}
\section{Slots}{
\describe{
\item{\code{name}}{algorithm name}
}}
|
/man/alg-class.Rd
|
no_license
|
minghao2016/greed
|
R
| false
| true
| 334
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alg_classes.R
\docType{class}
\name{alg-class}
\alias{alg-class}
\title{Abstract optimization algorithm class}
\description{
An S4 class to represent an abstract optimization algorithm.
}
\section{Slots}{
\describe{
\item{\code{name}}{algorithm name}
}}
|
# internal function of the ms.commander
sample.pars<-function(x){
k<-sample(nrow(x),nrow(x))
for(i in k){
if(c(as.numeric(x[i,4])+as.numeric(x[i,5]))==0){
next
} else {
samp<-do.call(x[i,6],args=list(1,as.numeric(x[i,4]),as.numeric(x[i,5])),quote=F)
while(samp<=0){
samp<-do.call(x[i,6],args=list(1,as.numeric(x[i,4]),as.numeric(x[i,5])),quote=F)
}
}
x[i,4:5]<-samp
}
return(x)
}
# internal function of the ms.commander
sample.w.cond<-function(par.matrix,cond.matrix){
nam<-rownames(cond.matrix)
x<-sample.pars(par.matrix)
y<-which(cond.matrix=="<", arr.ind=T)
if(nrow(y)!=0){
maior<-list(NULL)
for(i in 1:nrow(y)){
mm<-NULL
for(j in 1:2){
m<-which(par.matrix==nam[y[i,j]])
mm<-c(mm,m)
}
maior[[i]]<-mm
}
while(eval.condition(x,y=maior)>0){
for(j in 1:length(maior)){
x[c(maior[[j]][1],maior[[j]][2]),]<-sample.pars(par.matrix[c(maior[[j]][1],maior[[j]][2]),])
}
}
}
z<-which(cond.matrix=="=", arr.ind=T)
z<-z[order(z[,1]),]
if(nrow(z)!=0){
for(i in 1:nrow(z)){
equal<-NULL
for(j in 1:2){
eq<-which(par.matrix==nam[z[i,j]])
equal<-c(equal,eq)
}
x[equal[1],4:5]<-x[equal[2],4:5]
}
}
return(x)
}
# internal function of the Model Builder
eval.condition<-function(x,y){
value<-NULL
for(i in 1:length(y)){
value[i]<-as.numeric(x[y[[i]][1],4])>as.numeric(x[y[[i]][2],4])
}
return(sum(value))
}
# internal function to generate the locus file
get.locfile<-function(model){
locfile<-NULL
for(i in 1:nrow(model$loci)){
for(j in 1:model$I[1,3]){
locfile<-rbind(locfile,c(model$I[i,1],model$I[i,j+3],j,model$loci[i,2],model$loci[i,4],0))
}
colnames(locfile)<-c("id","n","pop","length","mu","rec")
}
return(locfile)
}
# internal function to generate the locus file
sample.mu.rates<-function(model){
MEAN <- runif(1, as.numeric(model$loci[1,4]), as.numeric(model$loci[1,5]))
SD <- runif(1, as.numeric(model$loci[1,4]), as.numeric(model$loci[1,5]))
rates<-rtnorm(nrow(model$loci), MEAN, SD, 1e-12)
rates<-rep(rates, each=as.numeric(model$I[1,3]))
return(list(rates,c(MEAN,SD)))
}
|
/R/parameter_samplers.R
|
no_license
|
gehara/PipeMaster
|
R
| false
| false
| 2,217
|
r
|
# internal function of the ms.commander
# Draw one value for each parameter row of x, in random order.
# Each row is expected to carry two numeric hyperparameters in columns
# 4:5 and the name of a sampling function in column 6 (e.g. "runif",
# "rnorm") -- TODO confirm the column layout against the caller.
# Rows whose two hyperparameters sum to zero are treated as fixed and
# skipped. Draws are repeated until strictly positive, and the sampled
# value overwrites BOTH columns 4 and 5 of the row.
sample.pars<-function(x){
  # Visit rows in a random permutation.
  k<-sample(nrow(x),nrow(x))
  for(i in k){
    if(c(as.numeric(x[i,4])+as.numeric(x[i,5]))==0){
      next
    } else {
      # Call the sampler named in column 6 with (n = 1, par1, par2).
      samp<-do.call(x[i,6],args=list(1,as.numeric(x[i,4]),as.numeric(x[i,5])),quote=F)
      # Rejection loop: resample until the draw is strictly positive.
      while(samp<=0){
        samp<-do.call(x[i,6],args=list(1,as.numeric(x[i,4]),as.numeric(x[i,5])),quote=F)
      }
    }
    # Store the accepted draw in both hyperparameter columns.
    x[i,4:5]<-samp
  }
  return(x)
}
# internal function of the ms.commander
# Sample all parameters while honoring pairwise constraints declared in
# cond.matrix: "<" entries require the first named parameter to be
# smaller than the second (enforced by rejection/resampling), and "="
# entries force the second parameter's value onto the first.
sample.w.cond<-function(par.matrix,cond.matrix){
  nam<-rownames(cond.matrix)
  # Initial unconstrained draw for every parameter.
  x<-sample.pars(par.matrix)
  # Positions (row, col) of all "<" constraints.
  y<-which(cond.matrix=="<", arr.ind=T)
  if(nrow(y)!=0){
    # For each "<" constraint, map the two constrained parameter names
    # back to their row indices in par.matrix.
    maior<-list(NULL)
    for(i in 1:nrow(y)){
      mm<-NULL
      for(j in 1:2){
        m<-which(par.matrix==nam[y[i,j]])
        mm<-c(mm,m)
      }
      maior[[i]]<-mm
    }
    # Rejection loop: while any "<" constraint is violated, resample the
    # two parameters involved in every constraint pair.
    while(eval.condition(x,y=maior)>0){
      for(j in 1:length(maior)){
        x[c(maior[[j]][1],maior[[j]][2]),]<-sample.pars(par.matrix[c(maior[[j]][1],maior[[j]][2]),])
      }
    }
  }
  # Positions of all "=" constraints, processed in row order.
  z<-which(cond.matrix=="=", arr.ind=T)
  z<-z[order(z[,1]),]
  if(nrow(z)!=0){
    for(i in 1:nrow(z)){
      equal<-NULL
      for(j in 1:2){
        eq<-which(par.matrix==nam[z[i,j]])
        equal<-c(equal,eq)
      }
      # Copy the second parameter's sampled value onto the first.
      x[equal[1],4:5]<-x[equal[2],4:5]
    }
  }
  return(x)
}
# internal function of the Model Builder
# Count how many "<" constraints are currently violated: for each index
# pair in y, a violation means the value in column 4 of the first row
# exceeds the value in column 4 of the second row of x.
eval.condition<-function(x,y){
  violated <- vapply(y, function(pair) {
    as.numeric(x[pair[1], 4]) > as.numeric(x[pair[2], 4])
  }, logical(1))
  sum(violated)
}
# internal function to generate the locus file
# Build a locus table with one row per (locus, population) combination.
# model$loci supplies per-locus length (col 2) and mu (col 4);
# model$I supplies the locus id (col 1), the number of populations
# (col 3 of the first row) and per-population sample sizes (cols 4+).
# Recombination is fixed at 0.
# NOTE(review): rows are appended with rbind() inside the loop, which
# is O(n^2); acceptable for small locus counts.
get.locfile<-function(model){
  locfile<-NULL
  for(i in 1:nrow(model$loci)){
    for(j in 1:model$I[1,3]){
      locfile<-rbind(locfile,c(model$I[i,1],model$I[i,j+3],j,model$loci[i,2],model$loci[i,4],0))
    }
    colnames(locfile)<-c("id","n","pop","length","mu","rec")
  }
  return(locfile)
}
# internal function to sample per-locus mutation rates
# (original header comment said "generate the locus file", which
# describes get.locfile above, not this function).
# Draws a mean and an SD uniformly from the prior bounds in
# model$loci[1, 4:5], then samples one truncated-normal (lower bound
# 1e-12) rate per locus via rtnorm -- presumably from the msm package;
# confirm it is attached. Each rate is repeated once per population
# (model$I[1,3]).
# Returns: list(rates, c(MEAN, SD)).
sample.mu.rates<-function(model){
  MEAN <- runif(1, as.numeric(model$loci[1,4]), as.numeric(model$loci[1,5]))
  SD <- runif(1, as.numeric(model$loci[1,4]), as.numeric(model$loci[1,5]))
  rates<-rtnorm(nrow(model$loci), MEAN, SD, 1e-12)
  rates<-rep(rates, each=as.numeric(model$I[1,3]))
  return(list(rates,c(MEAN,SD)))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lagspec.R
\name{nbetaMT}
\alias{nbetaMT}
\title{Normalized beta probability density function MIDAS weights specification (MATLAB toolbox compatible)
Calculate MIDAS weights according to normalized beta probability density function specification. Compatible with the specification in MATLAB toolbox.}
\usage{
nbetaMT(p, d, m)
}
\arguments{
\item{p}{parameters for normalized beta probability density function}
\item{d}{number of coefficients}
\item{m}{the frequency ratio, currently ignored}
}
\value{
vector of coefficients
}
\description{
Normalized beta probability density function MIDAS weights specification (MATLAB toolbox compatible)
Calculate MIDAS weights according to normalized beta probability density function specification. Compatible with the specification in MATLAB toolbox.
}
\author{
Virmantas Kvedaras, Vaidotas Zemlys
}
|
/man/nbetaMT.Rd
|
no_license
|
englianhu/midasr
|
R
| false
| true
| 921
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lagspec.R
\name{nbetaMT}
\alias{nbetaMT}
\title{Normalized beta probability density function MIDAS weights specification (MATLAB toolbox compatible)
Calculate MIDAS weights according to normalized beta probability density function specification. Compatible with the specification in MATLAB toolbox.}
\usage{
nbetaMT(p, d, m)
}
\arguments{
\item{p}{parameters for normalized beta probability density function}
\item{d}{number of coefficients}
\item{m}{the frequency ratio, currently ignored}
}
\value{
vector of coefficients
}
\description{
Normalized beta probability density function MIDAS weights specification (MATLAB toolbox compatible)
Calculate MIDAS weights according to normalized beta probability density function specification. Compatible with the specification in MATLAB toolbox.
}
\author{
Virmantas Kvedaras, Vaidotas Zemlys
}
|
# -------------------------------------------------------------------------------
# This file is part of Ranger.
#
# Ranger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ranger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ranger. If not, see <http://www.gnu.org/licenses/>.
#
# Written by:
#
# Marvin N. Wright
# Institut fuer Medizinische Biometrie und Statistik
# Universitaet zu Luebeck
# Ratzeburger Allee 160
# 23562 Luebeck
# Germany
#
# http://www.imbs-luebeck.de
# -------------------------------------------------------------------------------
#' Tree information in human readable format
#'
#' Extract tree information of a \code{ranger} object.
#'
#' Node and variable ID's are 0-indexed, i.e., node 0 is the root node.
#' If the formula interface is used in the \code{ranger} call, the variable ID's are usually different to the original data used to grow the tree.
#' Refer to the variable name instead to be sure.
#'
#' Splitting at unordered factors (nominal variables) depends on the option \code{respect.unordered.factors} in the \code{ranger} call.
#' For the "ignore" and "order" approaches, all values smaller or equal the \code{splitval} value go to the left and all values larger go to the right, as usual.
#' However, with "order" the values correspond to the order in \code{object$forest$covariate.levels} instead of the original order (usually alphabetical).
#' In the "partition" mode, the \code{splitval} values for unordered factor are comma separated lists of values, representing the factor levels (in the original order) going to the right.
#'
#' @param object \code{ranger} object.
#' @param tree Number of the tree of interest.
#' @return A data.frame with the columns
#' \tabular{ll}{
#' \code{nodeID} \tab The nodeID, 0-indexed. \cr
#' \code{leftChild} \tab ID of the left child node, 0-indexed. \cr
#' \code{rightChild} \tab ID of the right child node, 0-indexed. \cr
#' \code{splitvarID} \tab ID of the splitting variable, 0-indexed. Caution, the variable order changes if the formula interface is used. \cr
#' \code{splitvarName} \tab Name of the splitting variable. \cr
#' \code{splitval} \tab The splitting value. For numeric or ordinal variables, all values smaller or equal go to the left, larger values to the right. For unordered factor variables see above. \cr
#' \code{terminal} \tab Logical, TRUE for terminal nodes. \cr
#' \code{prediction} \tab One column with the predicted class (factor) for classification and the predicted numerical value for regression. One probability per class for probability estimation in several columns. Nothing for survival, refer to \code{object$forest$chf} for the CHF node predictions. \cr
#' }
#' @examples
#' require(ranger)
#' rf <- ranger(Species ~ ., data = iris)
#' treeInfo(rf, 1)
#' @seealso \code{\link{ranger}}
#' @author Marvin N. Wright
#' @export
treeInfo <- function(object, tree = 1) {
  # Validate the input object and the presence of a saved forest with
  # all the slots this function reads.
  if (!inherits(object, "ranger")) {
    stop("Error: Invalid class of input object.")
  }
  forest <- object$forest
  if (is.null(forest)) {
    stop("Error: No saved forest in ranger object. Please set write.forest to TRUE when calling ranger.")
  }
  if (is.null(forest$dependent.varID) || is.null(forest$num.trees) ||
      is.null(forest$child.nodeIDs) || is.null(forest$split.varIDs) ||
      is.null(forest$split.values) || is.null(forest$independent.variable.names) ||
      is.null(forest$treetype)) {
    stop("Error: Invalid forest object.")
  }
  if (forest$treetype == "Survival" && (is.null(forest$status.varID)  ||
      is.null(forest$chf) || is.null(forest$unique.death.times))) {
    stop("Error: Invalid forest object.")
  }
  if (length(forest$child.nodeIDs) != forest$num.trees || length(forest$child.nodeIDs[[1]]) != 2) {
    stop("Error: Invalid forest object. Is the forest grown in ranger version <0.3.9? Try with the same version the forest was grown.")
  }
  if (tree > forest$num.trees) {
    stop("Error: Requesting tree ", tree, ", but forest has only ", forest$num.trees, " trees.")
  }

  # One row per node; node and variable IDs are 0-indexed (see roxygen).
  result <- data.frame(nodeID = 0:(length(forest$split.values[[tree]]) - 1),
                       leftChild = forest$child.nodeIDs[[tree]][[1]],
                       rightChild = forest$child.nodeIDs[[tree]][[2]],
                       splitvarID = forest$split.varIDs[[tree]],
                       splitvarName = "X",
                       splitval = forest$split.values[[tree]],
                       terminal = FALSE)

  # Child ID 0 encodes "no child": mark those entries NA, flag terminal
  # nodes, and blank out split information on terminals.
  result$leftChild[result$leftChild == 0] <- NA
  result$rightChild[result$rightChild == 0] <- NA
  result$terminal[is.na(result$leftChild)] <- TRUE
  result$splitvarID[result$terminal] <- NA
  result$splitvarName[result$terminal] <- NA
  result$splitval[result$terminal] <- NA

  ## Get names of splitting variables
  # should be -1 for all >= dependent.varID but +1 change for 1-index
  # for survival another -1 if >= status.varID
  independent.varID <- result$splitvarID
  idx <- !is.na(result$splitvarID) & result$splitvarID < forest$dependent.varID
  independent.varID[idx] <- result$splitvarID[idx] + 1
  if (forest$treetype == "Survival") {
    idx <- !is.na(result$splitvarID) & result$splitvarID >= forest$status.varID
    independent.varID[idx] <- independent.varID[idx] - 1
  }
  result$splitvarName <- forest$independent.variable.names[independent.varID]

  ## Unordered splitting
  # For unordered factors the split value is a bitmask; decode it into a
  # comma-separated list of the factor levels going to the right
  # (intToBits extracts the set bit positions).
  idx.unordered <- !result$terminal & !forest$is.ordered[result$splitvarID + 1]
  if (any(idx.unordered)) {
    result$splitval[idx.unordered] <- sapply(result$splitval[idx.unordered], function(x) {
      paste(which(as.logical(intToBits(x))), collapse = ",")
    })
  }

  ## Prediction
  # Terminal-node predictions; for terminals, split.values holds the
  # predicted value instead of a split point.
  if (forest$treetype == "Classification") {
    result$prediction <- forest$split.values[[tree]]
    result$prediction[!result$terminal] <- NA
    if (!is.null(forest$levels)) {
      result$prediction <- factor(result$prediction, levels = forest$class.values, labels = forest$levels)
    }
  } else if (forest$treetype == "Regression") {
    result$prediction <- forest$split.values[[tree]]
    result$prediction[!result$terminal] <- NA
  } else if (forest$treetype == "Probability estimation") {
    # One probability column per class, filled only on terminal nodes.
    predictions <- matrix(nrow = nrow(result), ncol = length(forest$levels))
    predictions[result$terminal, ] <- do.call(rbind, forest$terminal.class.counts[[tree]])
    colnames(predictions) <- paste0("pred.", forest$levels)
    result <- data.frame(result, predictions)
  } else if (forest$treetype == "Survival") {
    # No prediction for survival (CHF too large?)
  } else {
    stop("Error: Unknown tree type.")
  }

  result
}
|
/R/treeInfo.R
|
no_license
|
APN-Pucky/ranger
|
R
| false
| false
| 7,115
|
r
|
# -------------------------------------------------------------------------------
# This file is part of Ranger.
#
# Ranger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ranger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ranger. If not, see <http://www.gnu.org/licenses/>.
#
# Written by:
#
# Marvin N. Wright
# Institut fuer Medizinische Biometrie und Statistik
# Universitaet zu Luebeck
# Ratzeburger Allee 160
# 23562 Luebeck
# Germany
#
# http://www.imbs-luebeck.de
# -------------------------------------------------------------------------------
#' Tree information in human readable format
#'
#' Extract tree information of a \code{ranger} object.
#'
#' Node and variable ID's are 0-indexed, i.e., node 0 is the root node.
#' If the formula interface is used in the \code{ranger} call, the variable ID's are usually different to the original data used to grow the tree.
#' Refer to the variable name instead to be sure.
#'
#' Splitting at unordered factors (nominal variables) depends on the option \code{respect.unordered.factors} in the \code{ranger} call.
#' For the "ignore" and "order" approaches, all values smaller or equal the \code{splitval} value go to the left and all values larger go to the right, as usual.
#' However, with "order" the values correspond to the order in \code{object$forest$covariate.levels} instead of the original order (usually alphabetical).
#' In the "partition" mode, the \code{splitval} values for unordered factor are comma separated lists of values, representing the factor levels (in the original order) going to the right.
#'
#' @param object \code{ranger} object.
#' @param tree Number of the tree of interest.
#' @return A data.frame with the columns
#' \tabular{ll}{
#' \code{nodeID} \tab The nodeID, 0-indexed. \cr
#' \code{leftChild} \tab ID of the left child node, 0-indexed. \cr
#' \code{rightChild} \tab ID of the right child node, 0-indexed. \cr
#' \code{splitvarID} \tab ID of the splitting variable, 0-indexed. Caution, the variable order changes if the formula interface is used. \cr
#' \code{splitvarName} \tab Name of the splitting variable. \cr
#' \code{splitval} \tab The splitting value. For numeric or ordinal variables, all values smaller or equal go to the left, larger values to the right. For unordered factor variables see above. \cr
#' \code{terminal} \tab Logical, TRUE for terminal nodes. \cr
#' \code{prediction} \tab One column with the predicted class (factor) for classification and the predicted numerical value for regression. One probability per class for probability estimation in several columns. Nothing for survival, refer to \code{object$forest$chf} for the CHF node predictions. \cr
#' }
#' @examples
#' require(ranger)
#' rf <- ranger(Species ~ ., data = iris)
#' treeInfo(rf, 1)
#' @seealso \code{\link{ranger}}
#' @author Marvin N. Wright
#' @export
treeInfo <- function(object, tree = 1) {
  ## --- Validate input object and the saved forest structure --------------
  if (!inherits(object, "ranger")) {
    stop("Error: Invalid class of input object.")
  }
  forest <- object$forest
  if (is.null(forest)) {
    stop("Error: No saved forest in ranger object. Please set write.forest to TRUE when calling ranger.")
  }
  if (is.null(forest$dependent.varID) || is.null(forest$num.trees) ||
      is.null(forest$child.nodeIDs) || is.null(forest$split.varIDs) ||
      is.null(forest$split.values) || is.null(forest$independent.variable.names) ||
      is.null(forest$treetype)) {
    stop("Error: Invalid forest object.")
  }
  if (forest$treetype == "Survival" && (is.null(forest$status.varID) ||
                                          is.null(forest$chf) || is.null(forest$unique.death.times))) {
    stop("Error: Invalid forest object.")
  }
  if (length(forest$child.nodeIDs) != forest$num.trees || length(forest$child.nodeIDs[[1]]) != 2) {
    stop("Error: Invalid forest object. Is the forest grown in ranger version <0.3.9? Try with the same version the forest was grown.")
  }
  ## Bug fix: a non-positive tree index previously fell through to the
  ## extractions below and failed with an uninformative subscript error.
  if (tree < 1) {
    stop("Error: Requesting tree ", tree, ", but tree indices start at 1.")
  }
  if (tree > forest$num.trees) {
    stop("Error: Requesting tree ", tree, ", but forest has only ", forest$num.trees, " trees.")
  }

  ## --- One row per node, nodeIDs 0-indexed -------------------------------
  result <- data.frame(nodeID = 0:(length(forest$split.values[[tree]]) - 1),
                       leftChild = forest$child.nodeIDs[[tree]][[1]],
                       rightChild = forest$child.nodeIDs[[tree]][[2]],
                       splitvarID = forest$split.varIDs[[tree]],
                       splitvarName = "X",
                       splitval = forest$split.values[[tree]],
                       terminal = FALSE)
  ## Child ID 0 marks "no child": those nodes are terminal, and split
  ## information is meaningless for them.
  result$leftChild[result$leftChild == 0] <- NA
  result$rightChild[result$rightChild == 0] <- NA
  result$terminal[is.na(result$leftChild)] <- TRUE
  result$splitvarID[result$terminal] <- NA
  result$splitvarName[result$terminal] <- NA
  result$splitval[result$terminal] <- NA

  ## Get names of splitting variables.
  ## Internal split.varIDs count the dependent variable, so shift:
  ## +1 (1-indexing) for IDs below dependent.varID; for survival forests an
  ## additional -1 for IDs at or above status.varID.
  independent.varID <- result$splitvarID
  idx <- !is.na(result$splitvarID) & result$splitvarID < forest$dependent.varID
  independent.varID[idx] <- result$splitvarID[idx] + 1
  if (forest$treetype == "Survival") {
    idx <- !is.na(result$splitvarID) & result$splitvarID >= forest$status.varID
    independent.varID[idx] <- independent.varID[idx] - 1
  }
  result$splitvarName <- forest$independent.variable.names[independent.varID]

  ## Unordered ("partition") splits encode the level set as a bit mask;
  ## decode to a comma separated list of level indices going right.
  idx.unordered <- !result$terminal & !forest$is.ordered[result$splitvarID + 1]
  if (any(idx.unordered)) {
    result$splitval[idx.unordered] <- sapply(result$splitval[idx.unordered], function(x) {
      paste(which(as.logical(intToBits(x))), collapse = ",")
    })
  }

  ## --- Terminal-node predictions (tree-type specific) --------------------
  if (forest$treetype == "Classification") {
    ## For classification trees split.values doubles as the terminal
    ## class prediction; non-terminal rows get NA.
    result$prediction <- forest$split.values[[tree]]
    result$prediction[!result$terminal] <- NA
    if (!is.null(forest$levels)) {
      result$prediction <- factor(result$prediction, levels = forest$class.values, labels = forest$levels)
    }
  } else if (forest$treetype == "Regression") {
    result$prediction <- forest$split.values[[tree]]
    result$prediction[!result$terminal] <- NA
  } else if (forest$treetype == "Probability estimation") {
    ## One probability column per class, filled only for terminal nodes.
    predictions <- matrix(nrow = nrow(result), ncol = length(forest$levels))
    predictions[result$terminal, ] <- do.call(rbind, forest$terminal.class.counts[[tree]])
    colnames(predictions) <- paste0("pred.", forest$levels)
    result <- data.frame(result, predictions)
  } else if (forest$treetype == "Survival") {
    ## No per-node prediction for survival; see object$forest$chf instead.
  } else {
    stop("Error: Unknown tree type.")
  }
  result
}
|
################################
# Importing data
###############################
# Option 1: make a vector
# entering one column of data directly into R (as a vector)
# use c(number, number, number)
# c means combine or concatenate
# NOTE: the bare expressions below (str, summary, mean, sd, pig2, ...)
# auto-print their results when the script is run interactively.
# 10 pigs on diet1
diet1 <- c(60.8, 67, 65, 68.6, 61.7, 69.6, 77.1, 75.2, 71.5, 60.3)
# and other 10 pigs on diet2
diet2 <- c(62.4, 67.8, 61.3, 58.4, 70.1, 68.6, 64.7, 70, 61.6, 69.1)
str(diet1)       # structure: numeric vector, length 10
summary(diet1)   # five-number summary + mean
mean(diet1)
sd(diet1)
summary(diet2)
mean(diet2)
sd(diet2)
#################
# Option 2: import data from spreadsheet using read.csv
# easiest way is to import csv file
# *R always wants your data in long format*, meaning all the numbers in one column and the group labels in the other column
# create new project in R
# put csv file into project directory
# make R script in that directory
# NOTE(review): "pigs.csv" is resolved relative to the working directory /
# project root; the file must be present before running.
pig <- read.csv("pigs.csv", header = TRUE)
str(pig)
summary(pig)
# to call one column use the $ operator
mean(pig$weight)
class(pig$diet)
################
# Option 3: import data from spreadsheet using read_csv
# as opposed to read.csv, read_csv prints data to the screen more beautifully and is better to use for large datasets
# read_csv comes from a package that we will need to install
# install.packages("tidyverse")
library(tidyverse)
pig2 <- read_csv("pigs.csv")
pig2
str(pig2)
summary(pig2)
# to call one column use the $ operator
mean(pig2$weight)
class(pig2$diet)
# calculate mean weight by diet
# use special symbol %>% to mean "then" (the magrittr pipe, loaded via tidyverse)
pig2 %>%
  group_by(diet) %>%
  summarize(meanweight = mean(weight))
|
/02_ImportDataR.R
|
no_license
|
dnm5ca/tutorials
|
R
| false
| false
| 1,545
|
r
|
################################
# Importing data
###############################
# Option 1: make a vector
# entering one column of data directly into R (as a vector)
# use c(number, number, number)
# c means combine or concatenate
# NOTE: the bare expressions below (str, summary, mean, sd, pig2, ...)
# auto-print their results when the script is run interactively.
# 10 pigs on diet1
diet1 <- c(60.8, 67, 65, 68.6, 61.7, 69.6, 77.1, 75.2, 71.5, 60.3)
# and other 10 pigs on diet2
diet2 <- c(62.4, 67.8, 61.3, 58.4, 70.1, 68.6, 64.7, 70, 61.6, 69.1)
str(diet1)       # structure: numeric vector, length 10
summary(diet1)   # five-number summary + mean
mean(diet1)
sd(diet1)
summary(diet2)
mean(diet2)
sd(diet2)
#################
# Option 2: import data from spreadsheet using read.csv
# easiest way is to import csv file
# *R always wants your data in long format*, meaning all the numbers in one column and the group labels in the other column
# create new project in R
# put csv file into project directory
# make R script in that directory
# NOTE(review): "pigs.csv" is resolved relative to the working directory /
# project root; the file must be present before running.
pig <- read.csv("pigs.csv", header = TRUE)
str(pig)
summary(pig)
# to call one column use the $ operator
mean(pig$weight)
class(pig$diet)
################
# Option 3: import data from spreadsheet using read_csv
# as opposed to read.csv, read_csv prints data to the screen more beautifully and is better to use for large datasets
# read_csv comes from a package that we will need to install
# install.packages("tidyverse")
library(tidyverse)
pig2 <- read_csv("pigs.csv")
pig2
str(pig2)
summary(pig2)
# to call one column use the $ operator
mean(pig2$weight)
class(pig2$diet)
# calculate mean weight by diet
# use special symbol %>% to mean "then" (the magrittr pipe, loaded via tidyverse)
pig2 %>%
  group_by(diet) %>%
  summarize(meanweight = mean(weight))
|
# Simulation script: type-I error under H0 for the Farrington-Manning ("fm")
# CI method, scenario 20, 20% dropout, MNAR missingness handled via MICE.
# Relies on project package `nibinom` (sim_cont, fm_ci, mpars, miss_gen_an).
library(dplyr)
# NOTE(review): "ss.bounds.rds" and 'funs/h0.mice.sum.R' (bottom of file) are
# resolved relative to the working directory; run from the project root.
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025        # one-sided significance level
method <- 'fm'
scenario <- 20
param <- 1            # NOTE(review): appears unused in this script
anal_type <- "mice"
# design parameters (p_C, M2, n.arm, ...) for this method + scenario;
# inside filter(), `method` presumably refers to the ss.bounds column,
# masking the global variable above -- verify the column exists.
ss <- ss.bounds%>%
  dplyr::filter(method == "fm", scenario.id == scenario)
do_val <- 0.2         # dropout rate
# 10,000 replicates in parallel; seeds are derived from scenario and
# replicate id (10000*scenario + x), so each replicate is reproducible.
# NOTE(review): library() inside the worker uses T/F instead of TRUE/FALSE.
x1 <- parallel::mclapply(X = 1:10000,
                         mc.cores = parallel::detectCores() - 1,
                         FUN= function(x) {
                           library(tidyr, warn.conflicts = F, quietly = T)
                           library(dplyr, warn.conflicts = F, quietly = T)
                           library(purrr, warn.conflicts = F, quietly = T)
                           library(reshape2, warn.conflicts = F, quietly = T)
                           library(MASS, warn.conflicts = F, quietly = T)
                           library(nibinom)
                           set.seed(10000*scenario + x)
                           #generate full data with desired correlation structure
                           dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
                                           mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
                           # full-data (no missingness) confidence interval as the benchmark
                           ci.full <- dt0%>%fm_ci(ss$M2,'y', alpha)
                           #define missingness parameters and do rates
                           m_param <- mpars(do = do_val, atype = anal_type)
                           #impose missing values and perform analysis
                           # MNAR setting 1 (mu_T = 0.7)
                           ci.miss.mnar1 <- m_param%>%
                             slice(1)%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = fm_ci,
                                                                 sing_anal = F,
                                                                 mice_anal = T,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 seed_mice = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha,
                                                                 n_mi = 2,
                                                                 m_mi = 100,
                                                                 mu_T = 0.7, sd_T = 0.05))%>%
                             dplyr::select(missing, results)
                           # MNAR setting 2: identical to setting 1 except mu_T = 1.55
                           ci.miss.mnar2 <- m_param%>%
                             slice(2)%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = fm_ci,
                                                                 sing_anal = F,
                                                                 mice_anal = T,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 seed_mice = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha,
                                                                 n_mi = 2,
                                                                 m_mi = 100,
                                                                 mu_T = 1.55, sd_T = 0.05))%>%
                             dplyr::select(missing, results)
                           # stack both MNAR settings and tag with scenario metadata
                           ci.miss <- bind_rows(ci.miss.mnar1, ci.miss.mnar2)%>%
                             dplyr::mutate(scenario.id = ss$scenario.id,
                                           p_C = ss$p_C,
                                           M2 = ss$M2,
                                           type = 't.H0',
                                           do = do_val,
                                           sim.id = x)
                           ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
                           return(ci.all)
                         })
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.mice.sum.R')
h0.mice.sum(x1, method = 'fm')
|
/sim_pgms/fm/do20/2xcontH0_sc20_do20_mice.R
|
no_license
|
yuliasidi/nibinom_apply
|
R
| false
| false
| 3,317
|
r
|
# Simulation script: type-I error under H0 for the Farrington-Manning ("fm")
# CI method, scenario 20, 20% dropout, MNAR missingness handled via MICE.
# Relies on project package `nibinom` (sim_cont, fm_ci, mpars, miss_gen_an).
library(dplyr)
# NOTE(review): "ss.bounds.rds" and 'funs/h0.mice.sum.R' (bottom of file) are
# resolved relative to the working directory; run from the project root.
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025        # one-sided significance level
method <- 'fm'
scenario <- 20
param <- 1            # NOTE(review): appears unused in this script
anal_type <- "mice"
# design parameters (p_C, M2, n.arm, ...) for this method + scenario;
# inside filter(), `method` presumably refers to the ss.bounds column,
# masking the global variable above -- verify the column exists.
ss <- ss.bounds%>%
  dplyr::filter(method == "fm", scenario.id == scenario)
do_val <- 0.2         # dropout rate
# 10,000 replicates in parallel; seeds are derived from scenario and
# replicate id (10000*scenario + x), so each replicate is reproducible.
# NOTE(review): library() inside the worker uses T/F instead of TRUE/FALSE.
x1 <- parallel::mclapply(X = 1:10000,
                         mc.cores = parallel::detectCores() - 1,
                         FUN= function(x) {
                           library(tidyr, warn.conflicts = F, quietly = T)
                           library(dplyr, warn.conflicts = F, quietly = T)
                           library(purrr, warn.conflicts = F, quietly = T)
                           library(reshape2, warn.conflicts = F, quietly = T)
                           library(MASS, warn.conflicts = F, quietly = T)
                           library(nibinom)
                           set.seed(10000*scenario + x)
                           #generate full data with desired correlation structure
                           dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
                                           mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
                           # full-data (no missingness) confidence interval as the benchmark
                           ci.full <- dt0%>%fm_ci(ss$M2,'y', alpha)
                           #define missingness parameters and do rates
                           m_param <- mpars(do = do_val, atype = anal_type)
                           #impose missing values and perform analysis
                           # MNAR setting 1 (mu_T = 0.7)
                           ci.miss.mnar1 <- m_param%>%
                             slice(1)%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = fm_ci,
                                                                 sing_anal = F,
                                                                 mice_anal = T,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 seed_mice = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha,
                                                                 n_mi = 2,
                                                                 m_mi = 100,
                                                                 mu_T = 0.7, sd_T = 0.05))%>%
                             dplyr::select(missing, results)
                           # MNAR setting 2: identical to setting 1 except mu_T = 1.55
                           ci.miss.mnar2 <- m_param%>%
                             slice(2)%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = fm_ci,
                                                                 sing_anal = F,
                                                                 mice_anal = T,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 seed_mice = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha,
                                                                 n_mi = 2,
                                                                 m_mi = 100,
                                                                 mu_T = 1.55, sd_T = 0.05))%>%
                             dplyr::select(missing, results)
                           # stack both MNAR settings and tag with scenario metadata
                           ci.miss <- bind_rows(ci.miss.mnar1, ci.miss.mnar2)%>%
                             dplyr::mutate(scenario.id = ss$scenario.id,
                                           p_C = ss$p_C,
                                           M2 = ss$M2,
                                           type = 't.H0',
                                           do = do_val,
                                           sim.id = x)
                           ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
                           return(ci.all)
                         })
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.mice.sum.R')
h0.mice.sum(x1, method = 'fm')
|
"
@Project: Early Warning System
@author: ATPL 1049
@date: May 06 2021
"
# load libraries
library(dplyr)
library(dbplyr)
library(DBI)
library(RMySQL)
library(data.table)
library(reshape2)
library(ggplot2)
library(lubridate)
library(survival)
library(reshape2)   # NOTE(review): reshape2 is loaded twice (also above)
library(Information)
# disable scientific notation
options(scipen = 999)
# source if-else logic (presumably defines the ie()/i()/e() helpers used for
# the bucketing sections below -- confirm against ifElse.R)
# NOTE(review): machine-specific absolute path; make it relative to the
# project root so the script runs on other machines.
source("/Users/arjun/Documents/ATPL1049/Workspace/R/ifElse.R")
source("/Users/arjun/Documents/ATPL1049/Workspace/R/ifElse.R")
# custom functions
`%notin%` <- Negate(`%in%`)
# get customer data
# connect to adhoc schema on pentaho db
conn_adhoc <- DBI::dbConnect(RMySQL::MySQL(), user='pentaho_ro', password='73f1c86914j2P1X',
dbname='adhoc', host='pentahodb.azuga.com')
# using connection connect to table salesforceAccountSync
salesforceAccountSync <- tbl(conn_adhoc, "salesforceAccountSync")
# fetch list of customers along with some metadata
dfAccounts <- salesforceAccountSync %>%
select(`Account Name`,`Account Type`, `Billing Country`, `SIC Code`, `Industry`, `Account Status`,
`Azuga Customer ID`, `Customer Since`, `Revenue Since`, `Account Managed`, `Inactive Date`, `Inactive`,
`Industry_1`, `NAICS Code`, `NAICS Industry Name`, `No. of Revenue(closing) units`, `Number of Vehicles`,
`SIC Industry Name`, `Account Category`, `Total Active Units`, `Rate Plan type`, `Ave. subscription rate`, `ARPU`) %>%
collect()
# remove reference to table, disconnect db and remove connection details
rm(salesforceAccountSync)
dbDisconnect(conn_adhoc)
rm(conn_adhoc)
############################################### ANALYSIS OF CHURN RATE OVER TIME ################################################
# subset revenue customers for our analysis
dfRevenuCustomersCopy <- dfAccounts %>%
  filter(`Account Type` == "Revenue")
# data type conversions
dfRevenuCustomersCopy$`Account Type` <- as.factor(dfRevenuCustomersCopy$`Account Type`)
dfRevenuCustomersCopy$`Account Status` <- as.factor(dfRevenuCustomersCopy$`Account Status`)
dfRevenuCustomersCopy$`Account Managed` <- as.factor(dfRevenuCustomersCopy$`Account Managed`)
# NOTE(review): Inactive is converted to factor here but overwritten with a
# numeric 0/1 by the ifelse() further down -- the factor conversion is moot.
dfRevenuCustomersCopy$Inactive <- as.factor(dfRevenuCustomersCopy$Inactive)
dfRevenuCustomersCopy$`Account Category` <- as.factor(dfRevenuCustomersCopy$`Account Category`)
dfRevenuCustomersCopy$`Customer Since` <- lubridate::as_date(dfRevenuCustomersCopy$`Customer Since`)
dfRevenuCustomersCopy$`Inactive Date` <- lubridate::as_date(dfRevenuCustomersCopy$`Inactive Date`)
dfRevenuCustomersCopy$`Revenue Since` <- lubridate::as_date(dfRevenuCustomersCopy$`Revenue Since`)
dfRevenuCustomersCopy$`Azuga Customer ID` <- as.integer(dfRevenuCustomersCopy$`Azuga Customer ID`)
# data treatment
# remove customers with no azuga id
dfRevenuCustomersCopy <- dfRevenuCustomersCopy[!is.na(dfRevenuCustomersCopy$`Azuga Customer ID`),]
table(dfRevenuCustomersCopy$`Billing Country`)
# as we can see we have customers from Canada, India, Mexico and United States.
# retain only USA and Canada customer
dfRevenuCustomersCopy <- dfRevenuCustomersCopy[dfRevenuCustomersCopy$`Billing Country` %in% c("United States", "Canada"),]
# remove customers with customer since field as null
dfRevenuCustomersCopy <- dfRevenuCustomersCopy[!is.na(dfRevenuCustomersCopy$`Customer Since`),]
table(dfRevenuCustomersCopy$`Account Status`)
# There is one account with the status as Closed. We will get rid of this account to ensure that the analysis is not diluted.
# remove customer with Account Status as Closed
dfRevenuCustomersCopy <- dfRevenuCustomersCopy[dfRevenuCustomersCopy$`Account Status`!="Closed",]
# if Account.Status is Churned/Red, update inactive flag to 1
dfRevenuCustomersCopy$Inactive <- ifelse((dfRevenuCustomersCopy$`Account Status` == "Churned / Red"), 1, 0)
# if Account is not churned, remove Inactive Date
dfRevenuCustomersCopy$`Inactive Date`[dfRevenuCustomersCopy$Inactive == 0] <- NA
# remove customers that churned before Jan, 2016
# (comment fixed: the filter below uses '2016-01-01', not 2019)
dfRevenuCustomersCopy <- dfRevenuCustomersCopy[dfRevenuCustomersCopy$`Account Status` == "Active / Green" |
                                                 dfRevenuCustomersCopy$`Inactive Date` >= '2016-01-01',]
table(dfRevenuCustomersCopy$`Account Status`)
# calculate customer counts over time - active, acquired and lost
yearFrame <- as.data.frame(seq(ymd('2016-01-01'),ymd('2021-04-01'),by='months'))
colnames(yearFrame)[1] <- "YearQrtr"
dataStats <- data.frame()
# Bug fix: iterate over consecutive month *pairs* only. The original looped
# to nrow(yearFrame), so on the final pass `nextYearMonth` was NA and the
# NA comparisons produced meaningless counts for the last month.
for (m in seq_len(nrow(yearFrame) - 1)) {
  yearMonth <- yearFrame$YearQrtr[m]
  nextYearMonth <- yearFrame$YearQrtr[m + 1]
  # active at month start: joined on/before yearMonth and not churned before it
  activeCustomerCount <- length(dfRevenuCustomersCopy$`Azuga Customer ID`[dfRevenuCustomersCopy$`Customer Since` <= yearMonth & (is.na(dfRevenuCustomersCopy$`Inactive Date`) | dfRevenuCustomersCopy$`Inactive Date` >= yearMonth)])
  # acquired during [yearMonth, nextYearMonth)
  newCustomerCount <- length(dfRevenuCustomersCopy$`Azuga Customer ID`[dfRevenuCustomersCopy$`Customer Since` >= yearMonth & dfRevenuCustomersCopy$`Customer Since` < nextYearMonth])
  # churned during [yearMonth, nextYearMonth)
  churnedCustomerCount <- length(dfRevenuCustomersCopy$`Azuga Customer ID`[dfRevenuCustomersCopy$`Inactive Date` >= yearMonth & dfRevenuCustomersCopy$`Inactive Date` < nextYearMonth & !is.na(dfRevenuCustomersCopy$`Inactive Date`)])
  # cbind() of character + integer coerces everything to character (as before)
  dataStats <- rbind(dataStats, cbind(as.character(yearMonth), as.character(nextYearMonth), as.integer(activeCustomerCount), as.integer(newCustomerCount), as.integer(churnedCustomerCount)))
}
# intermediates were inspected interactively and are discarded afterwards
rm(yearFrame)
rm(dataStats)
############################################### ANALYSIS OF CHURN RATE OVER TIME ################################################
############################################### CHURN ANALYSIS - SPECIFIC PERIOD ################################################
# subset revenue customers for our analysis
dfRevenuCustomers <- dfAccounts %>%
  filter(`Account Type` == "Revenue")
# data type conversions
dfRevenuCustomers$`Account Type` <- as.factor(dfRevenuCustomers$`Account Type`)
dfRevenuCustomers$`Account Status` <- as.factor(dfRevenuCustomers$`Account Status`)
dfRevenuCustomers$`Account Managed` <- as.factor(dfRevenuCustomers$`Account Managed`)
# NOTE(review): Inactive is factor-converted here but overwritten with a
# numeric 0/1 by the ifelse() below.
dfRevenuCustomers$Inactive <- as.factor(dfRevenuCustomers$Inactive)
dfRevenuCustomers$`Account Category` <- as.factor(dfRevenuCustomers$`Account Category`)
dfRevenuCustomers$`Customer Since` <- lubridate::as_date(dfRevenuCustomers$`Customer Since`)
dfRevenuCustomers$`Inactive Date` <- lubridate::as_date(dfRevenuCustomers$`Inactive Date`)
dfRevenuCustomers$`Revenue Since` <- lubridate::as_date(dfRevenuCustomers$`Revenue Since`)
dfRevenuCustomers$`Azuga Customer ID` <- as.integer(dfRevenuCustomers$`Azuga Customer ID`)
# data treatment
# remove customers with no azuga id
dfRevenuCustomers <- dfRevenuCustomers[!is.na(dfRevenuCustomers$`Azuga Customer ID`),]
table(dfRevenuCustomers$`Billing Country`)
# as we can see we have customers from Canada, India, Mexico and United States.
# retain only USA and Canada customer
dfRevenuCustomers <- dfRevenuCustomers[dfRevenuCustomers$`Billing Country` %in% c("United States", "Canada"),]
# remove customers with customer since field as null
dfRevenuCustomers <- dfRevenuCustomers[!is.na(dfRevenuCustomers$`Customer Since`),]
table(dfRevenuCustomers$`Account Status`)
# There is one account with the status as Closed. We will get rid of this account to ensure that the analysis is not diluted.
# remove customer with Account Status as Closed
dfRevenuCustomers <- dfRevenuCustomers[dfRevenuCustomers$`Account Status`!="Closed",]
# if Account.Status is Churned/Red, update inactive flag to 1
dfRevenuCustomers$Inactive <- ifelse((dfRevenuCustomers$`Account Status` == "Churned / Red"), 1, 0)
# remove customers who joined after May 1, 2021 (end of study period)
dfRevenuCustomers <- dfRevenuCustomers[dfRevenuCustomers$`Customer Since` < '2021-05-01',]
# remove customers that churned before Jan, 2019
dfRevenuCustomers <- dfRevenuCustomers[dfRevenuCustomers$`Account Status` == "Active / Green" |
                                         dfRevenuCustomers$`Inactive Date` >= '2019-01-01',]
# calculate age of the customer based on joining date
# difference between churn date and joining date
# NOTE: the ifelse() calls below drop the difftime class, so customerAge
# ends up as a plain numeric number of days.
dfRevenuCustomers$customerAge <- difftime(dfRevenuCustomers$`Inactive Date`, dfRevenuCustomers$`Customer Since`, units = 'days')
# if customer age is NA (because of the absence of inactive date), calculate age as the difference between joining date and May 01 (end of study period)
dfRevenuCustomers$customerAge <- ifelse(is.na(dfRevenuCustomers$customerAge),
                                        difftime(as.Date("2021-05-01"), dfRevenuCustomers$`Customer Since`, units = "days"),
                                        dfRevenuCustomers$customerAge)
# if customer age is negative (because this is a customer who has rejoined Azuga and our systems do not refresh the old churn date), compute age as the difference between joining date and May 01 (end of study period)
dfRevenuCustomers$customerAge <- ifelse(dfRevenuCustomers$customerAge<0,
                                        difftime(as.Date("2021-05-01"), dfRevenuCustomers$`Customer Since`, units = "days"),
                                        dfRevenuCustomers$customerAge)
# retain customers that have spent atleast 30 days with Azuga
# dfRevenuCustomers <- dfRevenuCustomers[dfRevenuCustomers$customerAge>=30,]
# if Account is not churned, remove Inactive Date
dfRevenuCustomers$`Inactive Date`[dfRevenuCustomers$Inactive == 0] <- NA
# data bucketing
# ie()/i()/e() come from the sourced ifElse.R helper (vectorized
# if-else-if chain: first matching condition wins, e() is the default).
# categorize customers based on age (tenure in days)
dfRevenuCustomers$customerAgeGroup <- ie(
  i(dfRevenuCustomers$customerAge<=30, 'Less Than a Month'),
  i(dfRevenuCustomers$customerAge>30 & dfRevenuCustomers$customerAge <= 90, '1-3 Months'),
  i(dfRevenuCustomers$customerAge>90 & dfRevenuCustomers$customerAge <= 180, '3-6 Months'),
  i(dfRevenuCustomers$customerAge>180 & dfRevenuCustomers$customerAge <= 365, '6-12 Months'),
  i(dfRevenuCustomers$customerAge>365 & dfRevenuCustomers$customerAge <= 730, '1-2 Years'),
  i(dfRevenuCustomers$customerAge>730, 'More than 2 Years'),
  e('Unknown')
)
# categorize customers based on units billed
dfRevenuCustomers$customerSize <- ie(
  i(dfRevenuCustomers$`Total Active Units`<=10, 'Less than 11 Units'),
  i(dfRevenuCustomers$`Total Active Units`>10 & dfRevenuCustomers$`Total Active Units` <= 25, '11-25 Units'),
  i(dfRevenuCustomers$`Total Active Units`>25 & dfRevenuCustomers$`Total Active Units` <= 50, '26-50 Units'),
  i(dfRevenuCustomers$`Total Active Units`>50 & dfRevenuCustomers$`Total Active Units` <= 109, '51-109 Units'),
  i(dfRevenuCustomers$`Total Active Units`>109 & dfRevenuCustomers$`Total Active Units` <= 250, '110-250 Units'),
  i(dfRevenuCustomers$`Total Active Units`>250 & dfRevenuCustomers$`Total Active Units` <= 500, '251-500 Units'),
  e('More than 500 Units')
)
# exploratory data analysis
# check split of active and churned customers (0 = active, 1 = churned)
table(dfRevenuCustomers$Inactive)
table(dfRevenuCustomers$`Account Status`)
# calculate customer counts over time - active, acquired and lost
yearFrame <- as.data.frame(seq(ymd('2019-01-01'),ymd('2021-05-01'),by='months'))
colnames(yearFrame)[1] <- "YearMonth"
dataStats <- data.frame()
# Bug fix: iterate over consecutive month *pairs* only. The original looped
# to nrow(yearFrame), so on the final pass `nextYearMonth` was NA and the
# NA comparisons produced meaningless counts for the last month.
for (m in seq_len(nrow(yearFrame) - 1)) {
  yearMonth <- yearFrame$YearMonth[m]
  nextYearMonth <- yearFrame$YearMonth[m + 1]
  # active at month start: joined on/before yearMonth and not churned before it
  activeCustomerCount <- length(dfRevenuCustomers$`Azuga Customer ID`[dfRevenuCustomers$`Customer Since` <= yearMonth & (is.na(dfRevenuCustomers$`Inactive Date`) | dfRevenuCustomers$`Inactive Date` >= yearMonth)])
  # acquired during [yearMonth, nextYearMonth)
  newCustomerCount <- length(dfRevenuCustomers$`Azuga Customer ID`[dfRevenuCustomers$`Customer Since` >= yearMonth & dfRevenuCustomers$`Customer Since` < nextYearMonth])
  # churned during [yearMonth, nextYearMonth)
  churnedCustomerCount <- length(dfRevenuCustomers$`Azuga Customer ID`[dfRevenuCustomers$`Inactive Date` >= yearMonth & dfRevenuCustomers$`Inactive Date` < nextYearMonth & !is.na(dfRevenuCustomers$`Inactive Date`)])
  # cbind() of character + integer coerces everything to character (as before)
  dataStats <- rbind(dataStats, cbind(as.character(yearMonth), as.character(nextYearMonth), as.integer(activeCustomerCount), as.integer(newCustomerCount), as.integer(churnedCustomerCount)))
}
# intermediates were inspected interactively and are discarded afterwards
rm(yearFrame)
rm(dataStats)
# Survival Analysis (Kaplan-Meier curve via survival::survfit)
# generate timeline for the survival plot: 0-900 days in 15-day steps
# NOTE(review): `t` masks base::t (transpose) for the rest of the session.
t <- seq(from=0, to=900, by=15)
# change the below dates for the different quarters
# current cohort: customers acquired in 2020 Q4
df <- dfRevenuCustomers[(dfRevenuCustomers$`Customer Since` >= '2020-10-01' &
                           dfRevenuCustomers$`Customer Since` < '2021-01-01'),]
# single-group fit: time = tenure in days, event = churn flag (Inactive)
fit <- survfit(Surv(customerAge, Inactive) ~ 1, data = df)
summary(fit, times=t)
# survival
# survivalObject <- Surv(time = df$customerAge, event = df$Inactive)
# plot(survivalObject)
rm(df, fit)
# impact of customer age on churn (rows = age bucket, cols = 0/1 churn flag)
table(dfRevenuCustomers$customerAgeGroup, dfRevenuCustomers$Inactive)
# impact of customer size on churn
table(dfRevenuCustomers$customerSize, dfRevenuCustomers$Inactive)
# impact of account type on churn
table(dfRevenuCustomers$`Account Managed`, dfRevenuCustomers$Inactive)
# recode NA "Account Managed" as an explicit "Blank" level so it shows in tables
dfRevenuCustomers$`Account Managed` <- as.character(dfRevenuCustomers$`Account Managed`)
dfRevenuCustomers$`Account Managed`[is.na(dfRevenuCustomers$`Account Managed`)] <- "Blank"
table(dfRevenuCustomers$`Account Managed`, dfRevenuCustomers$Inactive)
# impact of plan type on churn
table(dfRevenuCustomers$`Rate Plan type`)
# one customer can have more than one plan types.. So, lets first split it into rows
# Split rows based on plan type: first on ":" then on ";" delimiters,
# keeping the customer id (V1) repeated for each resulting plan string (V2)
tempData <- strsplit(dfRevenuCustomers$`Rate Plan type`, split = ":")
tempData <- data.frame(V1 = rep(dfRevenuCustomers$`Azuga Customer ID`, sapply(tempData, length)), V2 = unlist(tempData))
tempData$V1 <- trimws(tempData$V1)
tempData$V2 <- trimws(tempData$V2)
tempData2 <- strsplit(tempData$V2, split = ";")
tempData2 <- data.frame(V1 = rep(tempData$V1, sapply(tempData2, length)), V2 = unlist(tempData2))
tempData2$V1 <- trimws(tempData2$V1)
tempData2$V2 <- trimws(tempData2$V2)
rm(tempData)
table(tempData2$V2)
length(unique(tempData2$V2))
# bucketing of plans: map raw plan strings into a small set of plan families
# (first matching %like% pattern wins; %like% is from data.table)
tempData2$planType <- ie(
  i(tempData2$V2 %like% 'Package 1', 'Package 1'),
  i(tempData2$V2 %like% 'Package 2', 'Package 2'),
  i(tempData2$V2 %like% 'Package 3', 'Package 3'),
  i(tempData2$V2 %like% 'Package 4', 'Package 4'),
  i(tempData2$V2 %like% 'Azuga G2 Bundled', 'Azuga G2 Bundled'),
  i(tempData2$V2 %like% 'Azuga G2 Flex', 'Azuga G2 Flex'),
  i(tempData2$V2 %like% 'Phlytrac', 'PHLY'),
  i(tempData2$V2 %like% 'PhlyTrack', 'PHLY'),
  i(tempData2$V2 %like% 'PHLY', 'PHLY'),
  i(tempData2$V2 %like% 'SafetyCam', 'SafetyCam'),
  i(tempData2$V2 %like% 'Dual', 'SafetyCam'),
  i(tempData2$V2 %like% 'Azuga Asset Tracker', 'Azuga Asset Tracker'),
  i(tempData2$V2 %like% 'Azuga BasicFleet Bundle', 'Azuga BasicFleet Bundle'),
  i(tempData2$V2 %like% 'BYOT', 'eLogs - BYOT'),
  i(tempData2$V2 %like% 'e-Logs', 'eLogs - NON-BYOT'),
  i(tempData2$V2 %like% 'Azuga Lite', 'Azuga Lite'),
  e('Others')
)
# pivot wide: one row per customer, one 0/1-count column per plan family
# (dcast is from reshape2)
tempData2$val <- 1
tempData <-
  dcast(tempData2, V1 ~ planType, value.var = 'val')
# collapse plan-family columns to a single dominant RatePlan per customer
# (priority order: Phly > SafetyCam > eLogs > Asset Tracker > Bundle > Lite)
tempData$RatePlan <- ie(
  i(tempData$PHLY > 0 , 'Phly'),
  i(tempData$SafetyCam > 0 , 'SafetyCam'),
  i(tempData$`eLogs - BYOT` > 0 | tempData$`eLogs - NON-BYOT` > 0 , 'eLogs'),
  i(tempData$`Azuga Asset Tracker` > 0 , 'Azuga Asset Tracker'),
  i(tempData$`Azuga BasicFleet Bundle` > 0 | tempData$`Azuga G2 Bundled` >0 | tempData$`Azuga G2 Flex` >0 , 'Azuga Bundle'),
  i(tempData$`Azuga Lite` > 0 , 'Azuga Lite'),
  e('Others')
)
tempData$V1 <- as.integer(tempData$V1)
# NOTE(review): c(7,12) are positional indices into dfRevenuCustomers
# (presumably `Azuga Customer ID` and Inactive) -- fragile; prefer names.
tempData <-
  left_join(tempData, dfRevenuCustomers[,c(7,12)], by = c('V1' = 'Azuga Customer ID'))
table(tempData$RatePlan, tempData$Inactive)
table(tempData2$planType)
tempData2$V1 <- as.integer(tempData2$V1)
tempData2 <- tempData2[!is.na(tempData2$V1),]
tempData2 <-
  left_join(tempData2, dfRevenuCustomers[,c(7,12)], by = c('V1' = 'Azuga Customer ID'))
table(tempData2$planType, tempData2$Inactive)
# impact of industry on churn: bucket by the first 2 digits of the SIC code
dfRevenuCustomers$Industry2Digit <- substr(dfRevenuCustomers$`SIC Code`,1,2)
table(dfRevenuCustomers$Industry2Digit, dfRevenuCustomers$Inactive)
# WOE (weight of evidence) per 2-digit industry, via the Information package
dfRevenuCustomers$Industry2Digit <- as.factor(dfRevenuCustomers$Industry2Digit)
# revenueCustomers$Inactive <- as.integer(revenueCustomers$Inactive)
# NOTE(review): c(12,27) are positional column indices (presumably Inactive
# and Industry2Digit) -- fragile if the column set changes; prefer names.
IV <- create_infotables(data=dfRevenuCustomers[,c(12,27)], y="Inactive", bins=10, parallel=FALSE)
IV_Value = data.frame(IV$Tables)
plot_infotables(IV, "Industry2Digit")
# collapse industries into 6 categories by their WOE range
IV_Value$SIC_Category <- ie(
  i(IV_Value$Industry2Digit.WOE == 0, 'SIC_Category 1'),
  i(IV_Value$Industry2Digit.WOE < -0.5 , 'SIC_Category 2'),
  i(IV_Value$Industry2Digit.WOE <0 & IV_Value$Industry2Digit.WOE >= -0.5, 'SIC_Category 3'),
  i(IV_Value$Industry2Digit.WOE >0 & IV_Value$Industry2Digit.WOE <= 0.5, 'SIC_Category 4'),
  i(IV_Value$Industry2Digit.WOE >0.5 & IV_Value$Industry2Digit.WOE <= 1, 'SIC_Category 5'),
  i(IV_Value$Industry2Digit.WOE >1, 'SIC_Category 6'),
  e('Others')
)
rm(IV)
# join back with revenue customers
dfRevenuCustomers <- left_join(dfRevenuCustomers, IV_Value[,c(1,6)], by=c('Industry2Digit' = 'Industry2Digit.Industry2Digit'))
table(dfRevenuCustomers$SIC_Category, dfRevenuCustomers$Inactive)
# count of industries in each category and count of customers in each bracket
dfRevenuCustomers %>%
  group_by(SIC_Category) %>%
  dplyr::summarise(countOfIndustries=n_distinct(Industry2Digit),
                   countOfCustomers=n())
# export mapping for downstream use (written to the working directory)
industryMappingFile <- dfRevenuCustomers[,c(1, 4, 5, 7, 13, 14, 15, 18, 27, 28)]
write.csv(industryMappingFile, file = 'industryMapping.csv')
|
/earlyWarningSystem.R
|
no_license
|
arjunprasanna-azuga/Early-Warning-System
|
R
| false
| false
| 17,312
|
r
|
"
@Project: Early Warning System
@author: ATPL 1049
@date: May 06 2021
"
# load libraries
library(dplyr)
library(dbplyr)
library(DBI)
library(RMySQL)
library(data.table)
library(reshape2)
library(ggplot2)
library(lubridate)
library(survival)
library(reshape2)  # NOTE(review): duplicate of the earlier library(reshape2); harmless
library(Information)
# disable scientific notation
options(scipen = 999)
# source if-else logic (provides the ie()/i()/e() helpers used below)
source("/Users/arjun/Documents/ATPL1049/Workspace/R/ifElse.R")
# custom functions
`%notin%` <- Negate(`%in%`)
# get customer data
# connect to adhoc schema on pentaho db
# NOTE(security): credentials are hardcoded in source -- move them to
# environment variables or a config file outside version control.
conn_adhoc <- DBI::dbConnect(RMySQL::MySQL(), user='pentaho_ro', password='73f1c86914j2P1X',
                             dbname='adhoc', host='pentahodb.azuga.com')
# using connection connect to table salesforceAccountSync
salesforceAccountSync <- tbl(conn_adhoc, "salesforceAccountSync")
# fetch list of customers along with some metadata
# (collect() materializes the lazy dbplyr query into a local tibble)
dfAccounts <- salesforceAccountSync %>%
  select(`Account Name`,`Account Type`, `Billing Country`, `SIC Code`, `Industry`, `Account Status`,
         `Azuga Customer ID`, `Customer Since`, `Revenue Since`, `Account Managed`, `Inactive Date`, `Inactive`,
         `Industry_1`, `NAICS Code`, `NAICS Industry Name`, `No. of Revenue(closing) units`, `Number of Vehicles`,
         `SIC Industry Name`, `Account Category`, `Total Active Units`, `Rate Plan type`, `Ave. subscription rate`, `ARPU`) %>%
  collect()
# remove reference to table, disconnect db and remove connection details
rm(salesforceAccountSync)
dbDisconnect(conn_adhoc)
rm(conn_adhoc)
############################################### ANALYSIS OF CHURN RATE OVER TIME ################################################
# subset revenue customers for our analysis
dfRevenuCustomersCopy <- dfAccounts %>%
  filter(`Account Type` == "Revenue")
# data type conversions
dfRevenuCustomersCopy$`Account Type` <- as.factor(dfRevenuCustomersCopy$`Account Type`)
dfRevenuCustomersCopy$`Account Status` <- as.factor(dfRevenuCustomersCopy$`Account Status`)
dfRevenuCustomersCopy$`Account Managed` <- as.factor(dfRevenuCustomersCopy$`Account Managed`)
dfRevenuCustomersCopy$Inactive <- as.factor(dfRevenuCustomersCopy$Inactive)
dfRevenuCustomersCopy$`Account Category` <- as.factor(dfRevenuCustomersCopy$`Account Category`)
dfRevenuCustomersCopy$`Customer Since` <- lubridate::as_date(dfRevenuCustomersCopy$`Customer Since`)
dfRevenuCustomersCopy$`Inactive Date` <- lubridate::as_date(dfRevenuCustomersCopy$`Inactive Date`)
dfRevenuCustomersCopy$`Revenue Since` <- lubridate::as_date(dfRevenuCustomersCopy$`Revenue Since`)
dfRevenuCustomersCopy$`Azuga Customer ID` <- as.integer(dfRevenuCustomersCopy$`Azuga Customer ID`)
# data treatment
# remove customers with no azuga id
dfRevenuCustomersCopy <- dfRevenuCustomersCopy[!is.na(dfRevenuCustomersCopy$`Azuga Customer ID`),]
table(dfRevenuCustomersCopy$`Billing Country`)
# as we can see we have customers from Canada, India, Mexico and United States.
# retain only USA and Canada customer
dfRevenuCustomersCopy <- dfRevenuCustomersCopy[dfRevenuCustomersCopy$`Billing Country` %in% c("United States", "Canada"),]
# remove customers with customer since field as null
dfRevenuCustomersCopy <- dfRevenuCustomersCopy[!is.na(dfRevenuCustomersCopy$`Customer Since`),]
table(dfRevenuCustomersCopy$`Account Status`)
# There is one account with the status as Closed. We will get rid of this account to ensure that the analysis is not diluted.
# remove customer with Account Status as Closed
dfRevenuCustomersCopy <- dfRevenuCustomersCopy[dfRevenuCustomersCopy$`Account Status`!="Closed",]
# if Account.Status is Churned/Red, update inactive flag to 1
# (NOTE: this overwrites the factor created above with a numeric 0/1)
dfRevenuCustomersCopy$Inactive <- ifelse((dfRevenuCustomersCopy$`Account Status` == "Churned / Red"), 1, 0)
# if Account is not churned, remove Inactive Date
dfRevenuCustomersCopy$`Inactive Date`[dfRevenuCustomersCopy$Inactive == 0] <- NA
# remove customers that churned before Jan, 2016 (the study window starts 2016-01-01)
dfRevenuCustomersCopy <- dfRevenuCustomersCopy[dfRevenuCustomersCopy$`Account Status` == "Active / Green" |
                                                 dfRevenuCustomersCopy$`Inactive Date` >= '2016-01-01',]
table(dfRevenuCustomersCopy$`Account Status`)
# calculate customer counts over time - active, acquired and lost
yearFrame <- as.data.frame(seq(ymd('2016-01-01'),ymd('2021-04-01'),by='months'))
colnames(yearFrame)[1] <- "YearQrtr"
dataStats <- data.frame()
# NOTE(review): on the final iteration nextYearMonth is NA, so the
# comparisons below yield NA and the last month's new/churned counts are
# unreliable -- consider iterating to nrow(yearFrame) - 1.
for (m in 1:nrow(yearFrame)){
  yearMonth <- yearFrame$YearQrtr[m]
  nextYearMonth <- yearFrame$YearQrtr[m+1]
  activeCustomerCount <- length(dfRevenuCustomersCopy$`Azuga Customer ID`[dfRevenuCustomersCopy$`Customer Since` <= yearMonth & (is.na(dfRevenuCustomersCopy$`Inactive Date`) | dfRevenuCustomersCopy$`Inactive Date` >= yearMonth)])
  newCustomerCount <- length(dfRevenuCustomersCopy$`Azuga Customer ID`[dfRevenuCustomersCopy$`Customer Since` >= yearMonth & dfRevenuCustomersCopy$`Customer Since` < nextYearMonth])
  churnedCustomerCount <- length(dfRevenuCustomersCopy$`Azuga Customer ID`[dfRevenuCustomersCopy$`Inactive Date` >= yearMonth & dfRevenuCustomersCopy$`Inactive Date` < nextYearMonth & !is.na(dfRevenuCustomersCopy$`Inactive Date`)])
  dataStats <- rbind(dataStats, cbind(as.character(yearMonth), as.character(nextYearMonth), as.integer(activeCustomerCount), as.integer(newCustomerCount), as.integer(churnedCustomerCount)))
}
rm(yearFrame)
rm(dataStats)
############################################### CHURN ANALYSIS - SPECIFIC PERIOD ################################################
# subset revenue customers for our analysis
dfRevenuCustomers <- dfAccounts %>%
  filter(`Account Type` == "Revenue")
# data type conversions
dfRevenuCustomers$`Account Type` <- as.factor(dfRevenuCustomers$`Account Type`)
dfRevenuCustomers$`Account Status` <- as.factor(dfRevenuCustomers$`Account Status`)
dfRevenuCustomers$`Account Managed` <- as.factor(dfRevenuCustomers$`Account Managed`)
dfRevenuCustomers$Inactive <- as.factor(dfRevenuCustomers$Inactive)
dfRevenuCustomers$`Account Category` <- as.factor(dfRevenuCustomers$`Account Category`)
dfRevenuCustomers$`Customer Since` <- lubridate::as_date(dfRevenuCustomers$`Customer Since`)
dfRevenuCustomers$`Inactive Date` <- lubridate::as_date(dfRevenuCustomers$`Inactive Date`)
dfRevenuCustomers$`Revenue Since` <- lubridate::as_date(dfRevenuCustomers$`Revenue Since`)
dfRevenuCustomers$`Azuga Customer ID` <- as.integer(dfRevenuCustomers$`Azuga Customer ID`)
# data treatment
# remove customers with no azuga id
dfRevenuCustomers <- dfRevenuCustomers[!is.na(dfRevenuCustomers$`Azuga Customer ID`),]
table(dfRevenuCustomers$`Billing Country`)
# as we can see we have customers from Canada, India, Mexico and United States.
# retain only USA and Canada customer
dfRevenuCustomers <- dfRevenuCustomers[dfRevenuCustomers$`Billing Country` %in% c("United States", "Canada"),]
# remove customers with customer since field as null
dfRevenuCustomers <- dfRevenuCustomers[!is.na(dfRevenuCustomers$`Customer Since`),]
table(dfRevenuCustomers$`Account Status`)
# There is one account with the status as Closed. We will get rid of this account to ensure that the analysis is not diluted.
# remove customer with Account Status as Closed
dfRevenuCustomers <- dfRevenuCustomers[dfRevenuCustomers$`Account Status`!="Closed",]
# if Account.Status is Churned/Red, update inactive flag to 1
dfRevenuCustomers$Inactive <- ifelse((dfRevenuCustomers$`Account Status` == "Churned / Red"), 1, 0)
# remove customers who joined after May 1, 2021
dfRevenuCustomers <- dfRevenuCustomers[dfRevenuCustomers$`Customer Since` < '2021-05-01',]
# remove customers that churned before Jan, 2019
dfRevenuCustomers <- dfRevenuCustomers[dfRevenuCustomers$`Account Status` == "Active / Green" |
                                         dfRevenuCustomers$`Inactive Date` >= '2019-01-01',]
# calculate age of the customer based on joining date
# difference between churn date and joining date
dfRevenuCustomers$customerAge <- difftime(dfRevenuCustomers$`Inactive Date`, dfRevenuCustomers$`Customer Since`, units = 'days')
# if customer age is NA (because of the absence of inactive date), calculate age as the difference between joining date and May 01 (end of study period)
# (NOTE: ifelse() strips the difftime class, so customerAge becomes plain numeric days)
dfRevenuCustomers$customerAge <- ifelse(is.na(dfRevenuCustomers$customerAge),
                                        difftime(as.Date("2021-05-01"), dfRevenuCustomers$`Customer Since`, units = "days"),
                                        dfRevenuCustomers$customerAge)
# if customer age is negative (because this is a customer who has rejoined Azuga and our systems do not refresh the old churn date), compute age as the difference between joining date and May 01 (end of study period)
dfRevenuCustomers$customerAge <- ifelse(dfRevenuCustomers$customerAge<0,
                                        difftime(as.Date("2021-05-01"), dfRevenuCustomers$`Customer Since`, units = "days"),
                                        dfRevenuCustomers$customerAge)
# retain customers that have spent atleast 30 days with Azuga
# dfRevenuCustomers <- dfRevenuCustomers[dfRevenuCustomers$customerAge>=30,]
# if Account is not churned, remove Inactive Date
dfRevenuCustomers$`Inactive Date`[dfRevenuCustomers$Inactive == 0] <- NA
# data bucketing
# categorize customers based on age (ie()/i()/e() come from the sourced ifElse.R)
dfRevenuCustomers$customerAgeGroup <- ie(
  i(dfRevenuCustomers$customerAge<=30, 'Less Than a Month'),
  i(dfRevenuCustomers$customerAge>30 & dfRevenuCustomers$customerAge <= 90, '1-3 Months'),
  i(dfRevenuCustomers$customerAge>90 & dfRevenuCustomers$customerAge <= 180, '3-6 Months'),
  i(dfRevenuCustomers$customerAge>180 & dfRevenuCustomers$customerAge <= 365, '6-12 Months'),
  i(dfRevenuCustomers$customerAge>365 & dfRevenuCustomers$customerAge <= 730, '1-2 Years'),
  i(dfRevenuCustomers$customerAge>730, 'More than 2 Years'),
  e('Unknown')
)
# categorize customers based on units billed
# (NOTE(review): the 51-109 boundary looks deliberate but unusual -- confirm)
dfRevenuCustomers$customerSize <- ie(
  i(dfRevenuCustomers$`Total Active Units`<=10, 'Less than 11 Units'),
  i(dfRevenuCustomers$`Total Active Units`>10 & dfRevenuCustomers$`Total Active Units` <= 25, '11-25 Units'),
  i(dfRevenuCustomers$`Total Active Units`>25 & dfRevenuCustomers$`Total Active Units` <= 50, '26-50 Units'),
  i(dfRevenuCustomers$`Total Active Units`>50 & dfRevenuCustomers$`Total Active Units` <= 109, '51-109 Units'),
  i(dfRevenuCustomers$`Total Active Units`>109 & dfRevenuCustomers$`Total Active Units` <= 250, '110-250 Units'),
  i(dfRevenuCustomers$`Total Active Units`>250 & dfRevenuCustomers$`Total Active Units` <= 500, '251-500 Units'),
  e('More than 500 Units')
)
# exploratory data analysis
# check split of active and churned customers
table(dfRevenuCustomers$Inactive)
table(dfRevenuCustomers$`Account Status`)
# calculate customer counts over time - active, acquired and lost
yearFrame <- as.data.frame(seq(ymd('2019-01-01'),ymd('2021-05-01'),by='months'))
colnames(yearFrame)[1] <- "YearMonth"
dataStats <- data.frame()
# NOTE(review): same caveat as the earlier loop -- nextYearMonth is NA on
# the last iteration, making the final month's counts unreliable.
for (m in 1:nrow(yearFrame)){
  yearMonth <- yearFrame$YearMonth[m]
  nextYearMonth <- yearFrame$YearMonth[m+1]
  activeCustomerCount <- length(dfRevenuCustomers$`Azuga Customer ID`[dfRevenuCustomers$`Customer Since` <= yearMonth & (is.na(dfRevenuCustomers$`Inactive Date`) | dfRevenuCustomers$`Inactive Date` >= yearMonth)])
  newCustomerCount <- length(dfRevenuCustomers$`Azuga Customer ID`[dfRevenuCustomers$`Customer Since` >= yearMonth & dfRevenuCustomers$`Customer Since` < nextYearMonth])
  churnedCustomerCount <- length(dfRevenuCustomers$`Azuga Customer ID`[dfRevenuCustomers$`Inactive Date` >= yearMonth & dfRevenuCustomers$`Inactive Date` < nextYearMonth & !is.na(dfRevenuCustomers$`Inactive Date`)])
  dataStats <- rbind(dataStats, cbind(as.character(yearMonth), as.character(nextYearMonth), as.integer(activeCustomerCount), as.integer(newCustomerCount), as.integer(churnedCustomerCount)))
}
rm(yearFrame)
rm(dataStats)
# Survival Analysis
# generate timeline for the survival plot
t <- seq(from=0, to=900, by=15)
# change the below dates for the different quarters
df <- dfRevenuCustomers[(dfRevenuCustomers$`Customer Since` >= '2020-10-01' &
                           dfRevenuCustomers$`Customer Since` < '2021-01-01'),]
# Kaplan-Meier fit: customerAge is time-to-event, Inactive is the event flag.
fit <- survfit(Surv(customerAge, Inactive) ~ 1, data = df)
summary(fit, times=t)
# survival
# survivalObject <- Surv(time = df$customerAge, event = df$Inactive)
# plot(survivalObject)
rm(df, fit)
# impact of customer age on churn
table(dfRevenuCustomers$customerAgeGroup, dfRevenuCustomers$Inactive)
# impact of customer size on churn
table(dfRevenuCustomers$customerSize, dfRevenuCustomers$Inactive)
# impact of account type on churn
table(dfRevenuCustomers$`Account Managed`, dfRevenuCustomers$Inactive)
dfRevenuCustomers$`Account Managed` <- as.character(dfRevenuCustomers$`Account Managed`)
dfRevenuCustomers$`Account Managed`[is.na(dfRevenuCustomers$`Account Managed`)] <- "Blank"
table(dfRevenuCustomers$`Account Managed`, dfRevenuCustomers$Inactive)
# impact of plan type on churn
table(dfRevenuCustomers$`Rate Plan type`)
# one customer can have more than one plan types.. So, lets first split it into rows
# Split rows based on plan type (first on ':' and then on ';')
tempData <- strsplit(dfRevenuCustomers$`Rate Plan type`, split = ":")
tempData <- data.frame(V1 = rep(dfRevenuCustomers$`Azuga Customer ID`, sapply(tempData, length)), V2 = unlist(tempData))
tempData$V1 <- trimws(tempData$V1)
tempData$V2 <- trimws(tempData$V2)
tempData2 <- strsplit(tempData$V2, split = ";")
tempData2 <- data.frame(V1 = rep(tempData$V1, sapply(tempData2, length)), V2 = unlist(tempData2))
tempData2$V1 <- trimws(tempData2$V1)
tempData2$V2 <- trimws(tempData2$V2)
rm(tempData)
table(tempData2$V2)
length(unique(tempData2$V2))
# bucketing of plans
# (%like% from data.table is a case-sensitive regex match; the multiple
# 'Phlytrac'/'PhlyTrack'/'PHLY' patterns catch spelling variants)
tempData2$planType <- ie(
  i(tempData2$V2 %like% 'Package 1', 'Package 1'),
  i(tempData2$V2 %like% 'Package 2', 'Package 2'),
  i(tempData2$V2 %like% 'Package 3', 'Package 3'),
  i(tempData2$V2 %like% 'Package 4', 'Package 4'),
  i(tempData2$V2 %like% 'Azuga G2 Bundled', 'Azuga G2 Bundled'),
  i(tempData2$V2 %like% 'Azuga G2 Flex', 'Azuga G2 Flex'),
  i(tempData2$V2 %like% 'Phlytrac', 'PHLY'),
  i(tempData2$V2 %like% 'PhlyTrack', 'PHLY'),
  i(tempData2$V2 %like% 'PHLY', 'PHLY'),
  i(tempData2$V2 %like% 'SafetyCam', 'SafetyCam'),
  i(tempData2$V2 %like% 'Dual', 'SafetyCam'),
  i(tempData2$V2 %like% 'Azuga Asset Tracker', 'Azuga Asset Tracker'),
  i(tempData2$V2 %like% 'Azuga BasicFleet Bundle', 'Azuga BasicFleet Bundle'),
  i(tempData2$V2 %like% 'BYOT', 'eLogs - BYOT'),
  i(tempData2$V2 %like% 'e-Logs', 'eLogs - NON-BYOT'),
  i(tempData2$V2 %like% 'Azuga Lite', 'Azuga Lite'),
  e('Others')
)
tempData2$val <- 1
# Wide pivot: one row per customer, one column per plan bucket.
# With duplicate rows dcast falls back to length-aggregation, so the cells
# hold counts of plan occurrences (>0 means "has this plan").
tempData <-
  dcast(tempData2, V1 ~ planType, value.var = 'val')
# Collapse multi-plan customers to a single dominant rate-plan label;
# the i() order encodes the priority (Phly first, then SafetyCam, ...).
tempData$RatePlan <- ie(
  i(tempData$PHLY > 0 , 'Phly'),
  i(tempData$SafetyCam > 0 , 'SafetyCam'),
  i(tempData$`eLogs - BYOT` > 0 | tempData$`eLogs - NON-BYOT` > 0 , 'eLogs'),
  i(tempData$`Azuga Asset Tracker` > 0 , 'Azuga Asset Tracker'),
  i(tempData$`Azuga BasicFleet Bundle` > 0 | tempData$`Azuga G2 Bundled` >0 | tempData$`Azuga G2 Flex` >0 , 'Azuga Bundle'),
  i(tempData$`Azuga Lite` > 0 , 'Azuga Lite'),
  e('Others')
)
tempData$V1 <- as.integer(tempData$V1)
tempData <-
  left_join(tempData, dfRevenuCustomers[,c(7,12)], by = c('V1' = 'Azuga Customer ID'))
table(tempData$RatePlan, tempData$Inactive)
table(tempData2$planType)
tempData2$V1 <- as.integer(tempData2$V1)
tempData2 <- tempData2[!is.na(tempData2$V1),]
tempData2 <-
  left_join(tempData2, dfRevenuCustomers[,c(7,12)], by = c('V1' = 'Azuga Customer ID'))
table(tempData2$planType, tempData2$Inactive)
# impact of industry on churn
# First two digits of the SIC code identify the major industry group.
dfRevenuCustomers$Industry2Digit <- substr(dfRevenuCustomers$`SIC Code`,1,2)
table(dfRevenuCustomers$Industry2Digit, dfRevenuCustomers$Inactive)
# WOE (weight of evidence) of the 2-digit industry against churn.
dfRevenuCustomers$Industry2Digit <- as.factor(dfRevenuCustomers$Industry2Digit)
# revenueCustomers$Inactive <- as.integer(revenueCustomers$Inactive)
IV <- create_infotables(data=dfRevenuCustomers[,c(12,27)], y="Inactive", bins=10, parallel=FALSE)
IV_Value = data.frame(IV$Tables)
plot_infotables(IV, "Industry2Digit")
# Bucket industries into six categories by their WOE value.
IV_Value$SIC_Category <- ie(
  i(IV_Value$Industry2Digit.WOE == 0, 'SIC_Category 1'),
  i(IV_Value$Industry2Digit.WOE < -0.5 , 'SIC_Category 2'),
  i(IV_Value$Industry2Digit.WOE <0 & IV_Value$Industry2Digit.WOE >= -0.5, 'SIC_Category 3'),
  i(IV_Value$Industry2Digit.WOE >0 & IV_Value$Industry2Digit.WOE <= 0.5, 'SIC_Category 4'),
  i(IV_Value$Industry2Digit.WOE >0.5 & IV_Value$Industry2Digit.WOE <= 1, 'SIC_Category 5'),
  i(IV_Value$Industry2Digit.WOE >1, 'SIC_Category 6'),
  e('Others')
)
rm(IV)
# join back with revenue customers
dfRevenuCustomers <- left_join(dfRevenuCustomers, IV_Value[,c(1,6)], by=c('Industry2Digit' = 'Industry2Digit.Industry2Digit'))
table(dfRevenuCustomers$SIC_Category, dfRevenuCustomers$Inactive)
# count of industries in each category and count of customers in each bracket
dfRevenuCustomers %>%
  group_by(SIC_Category) %>%
  dplyr::summarise(countOfIndustries=n_distinct(Industry2Digit),
                   countOfCustomers=n())
industryMappingFile <- dfRevenuCustomers[,c(1, 4, 5, 7, 13, 14, 15, 18, 27, 28)]
write.csv(industryMappingFile, file = 'industryMapping.csv')
|
# You might need to run this:
# install.packages("DBI","RSQLite","reshape2","ggplot2","scales")
library(dplyr)
library(DBI)
library(reshape2)
library(ggplot2)
library(scales)
# These next lines will need to be modified for your DB
# and system.
path.to.db <- "C:\\Users\\jchan\\Dropbox\\Teaching\\2018_Fall\\AppliedData\\week_10\\"
db.name <- "Anthony_Layton_WedgeDB.db"
# NOTE(review): "Sales_yb_Owner..." looks like a typo for "Sales_by_Owner..."
# but it must match the actual table name in the DB -- confirm before fixing.
owner.sales.table.name <- "Sales_yb_Owner_by_Year_by_Month"
con <- dbConnect(RSQLite::SQLite(), dbname = paste0(path.to.db,db.name))
owner.tbl <- tbl(con,
                 owner.sales.table.name)
owner.tbl %>%
  head
# What was the total owner sales in 2015? 2016?
# (card_no == 3 appears to be the catch-all non-owner card -- TODO confirm)
owner.tbl %>%
  filter(card_no!=3) %>%
  group_by(year) %>%
  summarize(sum(spend,na.rm=T))
# For fun let's do owner and non-owner by year and plot it
owner.tbl %>%
  mutate(is_owner = if_else(card_no==3,"no","yes")) %>%
  group_by(year,is_owner) %>%
  summarize(total_spend=sum(spend,na.rm=T)) %>%
  filter(year!=2017) %>% #drop partial year
  ggplot(aes(x=year,y=total_spend,
             group=is_owner,color=is_owner)) +
  geom_line() +
  theme_minimal() +
  scale_y_continuous(label=dollar) +
  labs(x="",
       y="Spend",
       title="Spending by Year for Owners and Non-Owners")
# one other trick. Imagine you wanted my spending.
d <- owner.tbl %>%
  filter(card_no==18736)
# how many rows are there?
nrow(d)
# this is NA because dplyr doesn't pull the data
# over to R unless you explicitly ask for it. This
# "lazy evaluation" is useful, but tricky when you
# first encounter it.
d <- owner.tbl %>%
  filter(card_no==18736) %>%
  collect
nrow(d)
# Now your turn. Knock out these other questions. No
# need to do any plotting.
# Which owner had the 50th highest spend in 2015? How much did they spend in 2016?
spend.2015 <- owner.tbl %>%
  filter(year==2015,card_no != 3) %>%
  group_by(card_no) %>%
  summarize(total_spend = sum(spend)) %>%
  arrange(desc(total_spend)) %>%
  collect
spend.2015[50,]
# NOTE(review): bare literal below just echoes the card number found above.
20596
owner.tbl %>%
  filter(year==2016,card_no == 20596) %>%
  group_by(card_no) %>%
  summarize(total_spend = sum(spend))
# 11067
# How many owners spent at least $1000 in 2015 and spent $0 in 2016?
spend.2015 <- owner.tbl %>%
  filter(year==2015,card_no != 3) %>%
  group_by(card_no) %>%
  summarize(total_spend = sum(spend,na.rm=T)) %>%
  filter(total_spend >= 1000) %>%
  collect
spend.2016 <- owner.tbl %>%
  filter(year==2016,card_no != 3) %>%
  group_by(card_no) %>%
  summarize(total_spend = sum(spend,na.rm=T)) %>%
  filter(total_spend != 0) %>%
  collect
# Owners with >= $1000 in 2015 absent from 2016's non-zero spenders
# (owners missing from the 2016 table also count as $0 spend).
spend.2015 %>%
  filter(!(card_no %in% spend.2016$card_no))
# What product had the largest increase in sales (in raw dollars) between 2015 and 2016?
prod.tbl <- tbl(con,
                "Sales_by_Product_by_Year_by_Month")
d <- prod.tbl %>%
  filter(year==2015) %>%
  group_by(description) %>%
  summarise(total_spend = sum(spend)) %>%
  collect
# NOTE(review): the left join keeps only products sold in 2015; products
# introduced in 2016 are dropped from the comparison -- confirm intended.
d <- d %>%
  left_join(prod.tbl %>%
              filter(year==2016) %>%
              group_by(description) %>%
              summarise(total_spend = sum(spend)) %>%
              collect,
            by="description")
names(d) <- c("description","spend.2015","spend.2016")
d <- d %>%
  mutate(increase = spend.2016 - spend.2015) %>%
  arrange(desc(increase))
ggplot(d,
       aes(x=increase)) +
  geom_density()+
  theme_minimal() +
  scale_x_continuous(limits=c(-5000,5000),
                     label=dollar)
|
/dplyr/dplyr_for_sql.R
|
no_license
|
a25murray/ada-master
|
R
| false
| false
| 3,458
|
r
|
# You might need to run this:
# install.packages("DBI","RSQLite","reshape2","ggplot2","scales")
library(dplyr)
library(DBI)
library(reshape2)
library(ggplot2)
library(scales)
# These next lines will need to be modified for your DB
# and system.
path.to.db <- "C:\\Users\\jchan\\Dropbox\\Teaching\\2018_Fall\\AppliedData\\week_10\\"
db.name <- "Anthony_Layton_WedgeDB.db"
# NOTE(review): "Sales_yb_Owner..." looks like a typo for "Sales_by_Owner..."
# but it must match the actual table name in the DB -- confirm before fixing.
owner.sales.table.name <- "Sales_yb_Owner_by_Year_by_Month"
con <- dbConnect(RSQLite::SQLite(), dbname = paste0(path.to.db,db.name))
owner.tbl <- tbl(con,
                 owner.sales.table.name)
owner.tbl %>%
  head
# What was the total owner sales in 2015? 2016?
# (card_no == 3 appears to be the catch-all non-owner card -- TODO confirm)
owner.tbl %>%
  filter(card_no!=3) %>%
  group_by(year) %>%
  summarize(sum(spend,na.rm=T))
# For fun let's do owner and non-owner by year and plot it
owner.tbl %>%
  mutate(is_owner = if_else(card_no==3,"no","yes")) %>%
  group_by(year,is_owner) %>%
  summarize(total_spend=sum(spend,na.rm=T)) %>%
  filter(year!=2017) %>% #drop partial year
  ggplot(aes(x=year,y=total_spend,
             group=is_owner,color=is_owner)) +
  geom_line() +
  theme_minimal() +
  scale_y_continuous(label=dollar) +
  labs(x="",
       y="Spend",
       title="Spending by Year for Owners and Non-Owners")
# one other trick. Imagine you wanted my spending.
d <- owner.tbl %>%
  filter(card_no==18736)
# how many rows are there?
nrow(d)
# this is NA because dplyr doesn't pull the data
# over to R unless you explicitly ask for it. This
# "lazy evaluation" is useful, but tricky when you
# first encounter it.
d <- owner.tbl %>%
  filter(card_no==18736) %>%
  collect
nrow(d)
# Now your turn. Knock out these other questions. No
# need to do any plotting.
# Which owner had the 50th highest spend in 2015? How much did they spend in 2016?
spend.2015 <- owner.tbl %>%
  filter(year==2015,card_no != 3) %>%
  group_by(card_no) %>%
  summarize(total_spend = sum(spend)) %>%
  arrange(desc(total_spend)) %>%
  collect
spend.2015[50,]
# NOTE(review): bare literal below just echoes the card number found above.
20596
owner.tbl %>%
  filter(year==2016,card_no == 20596) %>%
  group_by(card_no) %>%
  summarize(total_spend = sum(spend))
# 11067
# How many owners spent at least $1000 in 2015 and spent $0 in 2016?
spend.2015 <- owner.tbl %>%
  filter(year==2015,card_no != 3) %>%
  group_by(card_no) %>%
  summarize(total_spend = sum(spend,na.rm=T)) %>%
  filter(total_spend >= 1000) %>%
  collect
spend.2016 <- owner.tbl %>%
  filter(year==2016,card_no != 3) %>%
  group_by(card_no) %>%
  summarize(total_spend = sum(spend,na.rm=T)) %>%
  filter(total_spend != 0) %>%
  collect
# Owners with >= $1000 in 2015 absent from 2016's non-zero spenders
# (owners missing from the 2016 table also count as $0 spend).
spend.2015 %>%
  filter(!(card_no %in% spend.2016$card_no))
# What product had the largest increase in sales (in raw dollars) between 2015 and 2016?
prod.tbl <- tbl(con,
                "Sales_by_Product_by_Year_by_Month")
d <- prod.tbl %>%
  filter(year==2015) %>%
  group_by(description) %>%
  summarise(total_spend = sum(spend)) %>%
  collect
# NOTE(review): the left join keeps only products sold in 2015; products
# introduced in 2016 are dropped from the comparison -- confirm intended.
d <- d %>%
  left_join(prod.tbl %>%
              filter(year==2016) %>%
              group_by(description) %>%
              summarise(total_spend = sum(spend)) %>%
              collect,
            by="description")
names(d) <- c("description","spend.2015","spend.2016")
d <- d %>%
  mutate(increase = spend.2016 - spend.2015) %>%
  arrange(desc(increase))
ggplot(d,
       aes(x=increase)) +
  geom_density()+
  theme_minimal() +
  scale_x_continuous(limits=c(-5000,5000),
                     label=dollar)
|
# Auto-generated AFL/valgrind fuzzing harness: replays one recorded input
# against IntervalSurgeon's internal C++ routine rcpp_depth.
# The pts/ends/starts fields are deliberately empty/NULL; the sorted_*
# vectors carry the raw fuzzed integers.
testlist <- list(pts = integer(0), ends = NULL, starts = NULL, sorted_ends = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), sorted_starts = c(0L, -1627389952L, 682962941L, 546746628L, 599542L, 1800501216L, 1649393244L, -1712958485L, -1313178345L, 1470710473L, 2108708770L, -1965616612L, -533569700L, -771330099L, 853833112L, 2030715618L, -1261966754L, -129171080L, -642760964L, 779827246L, 1878602521L))
# Invoke the non-exported function and print the structure of the result.
result <- do.call(IntervalSurgeon:::rcpp_depth,testlist)
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_depth/AFL_rcpp_depth/rcpp_depth_valgrind_files/1609857243-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 713
|
r
|
# Auto-generated AFL/valgrind fuzzing harness: replays one recorded input
# against IntervalSurgeon's internal C++ routine rcpp_depth.
# The pts/ends/starts fields are deliberately empty/NULL; the sorted_*
# vectors carry the raw fuzzed integers.
testlist <- list(pts = integer(0), ends = NULL, starts = NULL, sorted_ends = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), sorted_starts = c(0L, -1627389952L, 682962941L, 546746628L, 599542L, 1800501216L, 1649393244L, -1712958485L, -1313178345L, 1470710473L, 2108708770L, -1965616612L, -533569700L, -771330099L, 853833112L, 2030715618L, -1261966754L, -129171080L, -642760964L, 779827246L, 1878602521L))
# Invoke the non-exported function and print the structure of the result.
result <- do.call(IntervalSurgeon:::rcpp_depth,testlist)
str(result)
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{package}
\name{rmapaus}
\alias{rmapaus}
\alias{rmapaus-package}
\title{rmapaus: mapping Australia}
\description{
The rmapaus package provides spatial boundaries for various Australian regions,
including postcodes and Australian Bureau of Statistics statistical regions.
}
|
/man/rmapaus.Rd
|
no_license
|
TuanAnh207/rmapaus
|
R
| false
| false
| 335
|
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{package}
\name{rmapaus}
\alias{rmapaus}
\alias{rmapaus-package}
\title{rmapaus: mapping Australia}
\description{
The rmapaus package provides spatial boundaries for various Australian regions,
including postcodes and Australian Bureau of Statistics statistical regions.
}
|
# Three chance hypotheses for a coin: biased-heads (0.9), near-fair (0.5),
# biased-tails (0.1), with prior probabilities hb_pr/hn_pr/hw_pr.
hb = 0.9
hn = 0.5
hw = 0.1
hb_pr = .05
hn_pr = .9
hw_pr = 0.05
# eb: prior expected probability of heads; ew: complement (tails).
eb = hb*hb_pr + hn*hn_pr + hw*hw_pr
ew = 1 - eb
# Expected probability of heads under the three-hypothesis model: a
# weighted average of the chance hypotheses (script globals hb, hn, hw)
# using the supplied hypothesis probabilities hbp/hnp/hwp.
getE <- function(hbp, hnp, hwp) {
  sum(c(hb, hn, hw) * c(hbp, hnp, hwp))
}
# Bayesian update: posterior probability of a hypothesis given its prior
# `h`, the likelihood of the observation under it (`chance`), and the
# total probability of the evidence `e` (posterior = h * chance / e).
# NOTE: this masks stats::update() for the rest of the session.
update <- function(h, e, chance) {
  (h * chance) / e
}
# Iterate the Bayesian update three times, each time conditioning on an
# observed head: posterior hypothesis probabilities (pr2, pr3, pr4) and
# the refreshed expected probability of heads (eb2, eb3).
hb_pr2 = update(hb_pr,eb,hb)
hw_pr2 = update(hw_pr,eb,hw)
hn_pr2 = update(hn_pr,eb,hn)
eb2 = getE(hb_pr2,hn_pr2, hw_pr2)
eb2
hb_pr3 = update(hb_pr2,eb2,hb)
hw_pr3 = update(hw_pr2,eb2,hw)
hn_pr3 = update(hn_pr2,eb2,hn)
eb3 = getE(hb_pr3,hn_pr3, hw_pr3)
hb_pr4 = update(hb_pr3,eb3,hb)
hw_pr4 = update(hw_pr3,eb3,hw)
hn_pr4 = update(hn_pr3,eb3,hn)
# Expected-loss contribution of a single state: the probability of that
# state times the squared error between the estimate `expected` and the
# true state value `state`.
exloss <- function(expected, state, prob) {
  prob * (state - expected)^2
}
# Example expected-loss evaluation for the tails-biased hypothesis.
exloss(hw,hb,hw_pr)
# Beta(1, 1) (uniform) prior updated with 500 heads out of 1000 flips:
# posterior is Beta(an, bn) = Beta(501, 501).
a = 1
b = 1
sum_x = 500
n = 1000
an = a + sum_x
bn = b + n - sum_x
# Squared-error loss between the true parameter `theta` and the chosen
# point estimate `action`.
loss_fn <- function(theta, action) {
  (theta - action)^2
}
# Monte Carlo estimate of the posterior expected (squared-error) risk of
# choosing point estimate `action` when theta ~ Beta(a, b).
#
# Fixes vs. the original:
#  * the risk is now returned visibly (the original body ended on an
#    assignment, so the value came back invisibly),
#  * the element-wise loop is vectorized (numerically identical),
#  * the Beta parameters are arguments defaulting to the script globals
#    `an`/`bn`, keeping old call sites working while making the function
#    testable in isolation.
#
# action: candidate point estimate in [0, 1].
# s:      number of Monte Carlo draws.
# a, b:   posterior Beta parameters (default: globals an, bn).
post <- function(action, s = 5000, a = an, b = bn) {
  theta <- rbeta(s, a, b)
  mean((theta - action)^2)
}
# Absolute change in expected loss -- taken as the probability mass on the
# interval (0.495, 0.505) times the squared distance of the Beta mean from
# 0.5 -- between a Beta(a1, b1) prior and its posterior after observing
# x heads in n flips.
# NOTE: `s` is accepted but unused; kept for interface compatibility.
fn <- function(a1, b1, x = 500, n = 500, s = 100000) {
  a2 <- a1 + x
  b2 <- b1 + n - x
  lo <- 0.495
  hi <- 0.505
  prior_mean <- a1 / (a1 + b1)
  post_mean <- a2 / (a2 + b2)
  prior_loss <- (pbeta(hi, a1, b1) - pbeta(lo, a1, b1)) * (0.5 - prior_mean)^2
  post_loss <- (pbeta(hi, a2, b2) - pbeta(lo, a2, b2)) * (0.5 - post_mean)^2
  abs(prior_loss - post_loss)
}
# Discretize [0, 1] into 200 bins of width 0.005; `mid` holds the bin
# midpoints used for the discrete loss computation below.
br = seq(0,1,by=0.005)
mid = (br[1:200] + br[2:201])/2
a1 = 10
b1 = 10
x = 50
n = 50
s = 1000000
# Discretized expected-loss profile for a Beta(a1, b1) distribution:
# draw s samples, bin them into 200 equal-width cells on [0, 1], and
# weight each cell's squared distance from the Beta mean by its empirical
# probability mass. Returns a named table of length 200.
getw <- function(a1, b1, s = 100000) {
  breaks <- seq(0, 1, by = 0.005)
  midpoints <- (breaks[1:200] + breaks[2:201]) / 2
  dist_mean <- a1 / (a1 + b1)
  draws <- rbeta(s, a1, b1)
  bin_mass <- table(cut(draws, breaks = breaks)) / s
  (midpoints - dist_mean)^2 * bin_mass
}
# Compare loss profiles of a weak Beta(2, 2) prior vs. a concentrated
# Beta(52, 52) posterior-like distribution.
a1 = 2
b1 = 2
a2 = 52
b2 = 52
w1 = getw(a1,b1)
w2 = getw(a2,b2)
par(mfrow=c(1,1))
# NOTE(review): xSeq is not defined until further below -- this line only
# works if the script is run out of order. Define xSeq before this plot.
plot(xSeq, dbeta(xSeq,2,2))
w3 = getw(7,2)
w4 = getw(12,2)
w5 = getw(4,2)
plot(w1,xlab="Chance Hypothesis",ylab="Expected Loss",xaxt = "n",yaxt = "n", main="Loss Comparison")
lines(w5,col="yellow")
lines(w3,col="red")
lines(w4,col="blue")
axis(1, at=c(0,100,200), labels=c(0,0.5,1))
legend( 0.3,1.508033e-04, legend=c("Prior","5 heads(j-weight:0.45)","10 heads(j-weight:0.435"), col=c("black","red","blue"),cex=.8, lty=1,bg="lightblue")
par(mfrow=c(2,2))
xSeq = seq(0,1,by=0.005)
plot(xSeq,dbeta(xSeq,a1,b1),type='l', xlab="Chance Hypothesis",ylab="Density",xaxt = "n",yaxt = "n", main="Naive Prior Distribution")
plot(w1,type='l', xlab="Chance Hypothesis",ylab="Expected Loss",xaxt = "n",yaxt = "n", main="Prior Expected Loss")
axis(1, at=c(0,100,200), labels=c(0,0.5,1))
plot(xSeq,dbeta(xSeq,a2,b2),type='l', xlab="Chance Hypothesis",ylab="Density",xaxt = "n",yaxt = "n", main="Posterior Distribution")
plot(w2,type='l', xlab="Chance Hypothesis",ylab="Expected Loss",xaxt = "n",yaxt = "n", main="Posterior Expected Loss")
axis(1, at=c(0,100,200), labels=c(0,0.5,1))
# Joycean weight: total absolute change in the loss profile after i
# additional heads, tracked for both the weak and concentrated priors.
x = 1:100
n = 1:100
times = 50
vw1 = rep(NA,times)
vw2 = rep(NA,times)
for (i in 1:times){
tempw = getw(a1+i,b1)
vw1[i] = sum(abs(tempw-w1))
tempw2 = getw(a2+i,b2)
vw2[i] = sum(abs(tempw2-w2))
}
x = 1:times
# NOTE(review): w1plot() is not defined anywhere and `w` does not exist --
# this line errors; it presumably meant plot(x, vw1). TODO confirm.
w1plot(x,w)
# NOTE(review): the bare `plot` below just prints the function object.
plot
sum(abs(w2-w1))
# NOTE(review): `e1` below is never defined at top level -- these leftover
# lines (inlined getw internals) fail unless run inside a debug session.
r1 = rbeta(s,a1,b1)
c1 = cut(r1,breaks=br)
t1 = table(c1)/s
loss1 = (mid - e1)^2
w1 = loss1*t1
a2 = a1 + x
b2 = b1 + n - x
e2 = a2/(a2+b2)
r2 = rbeta(s,a2,b2)
# Skyrmsian weight: absolute shift of the Beta mean caused by observing
# x successes in n trials, starting from a Beta(a, b) prior.
# Vectorized over x and n.
sweight <- function(a, b, x, n) {
  prior_mean <- a / (a + b)
  posterior_mean <- (a + x) / (a + b + n)
  abs(prior_mean - posterior_mean)
}
# Skyrmsian weight for an extremely concentrated prior (near-zero shift).
sweight(2000,2000,50,50)
# Weight trajectories for 1..50 heads-in-a-row under the weak and
# concentrated priors.
sw1 = sweight(2,2,1:50,1:50)
sw2 = sweight(52,52,1:50,1:50)
par(mfrow=c(1,2))
plot(x,vw1,ylim=c(0,0.05),type='l',col="red",xlab="Number of Heads", ylab="Joycean Weight", main="Joycean Weight Comparison")
lines(vw2,col="blue")
legend( 30,.033, legend=c("Beta(2,2)","Beta(52,52)"), col=c("red","blue"),cex=.8, lty=1,bg="lightblue")
plot(x,sw1,ylim=c(0,0.5),type='l',col="red",xlab="Number of Heads", ylab="Skyrmsian Weight", main="Skyrmsian Weight Comparison")
lines(sw2,col="blue")
legend( 30,.31, legend=c("Beta(2,2)","Beta(52,52)"), col=c("red","blue"),cex=.8, lty=1,bg="lightblue")
plot(1:50,sw1,type='l',col='red',ylim=c(0.5,1))
lines(sw2,type='l',col='blue')
plot(sw2)
# NOTE(review): a1/b1 are overwritten here with vectors 1:100; fn() is
# then applied element-wise across these prior parameters.
a1 = 1:100
b1 = 1:100
y = fn(a1,b1)
plot(a1,y)
x=0:100
# Mean of a Beta(a, b) distribution: a / (a + b). Vectorized over a and b.
thatFn <- function(a, b) {
  a / (a + b)
}
# NOTE(review): the dangling `y1 =` below swallows the following plot()
# call as its right-hand side, so y1 is assigned plot()'s invisible NULL.
# Likely a leftover -- the assignment should probably be deleted.
y1 =
plot(x,thatFn(1+x,1), type='l',col='red',xlab="Hypothethical Data(x out of x)",ylab="Posterior Probability", main="Comparison of Changes in Posterior Probabilities", lty=1)
lines(x,thatFn(20+x,20),col='blue',type='l',lty=2)
lines(x,thatFn(500+x,500),col='black',type='l',lty=5)
legend(70,.8, legend=c("Weak Prior(2,2)","Moderate Prior(50,50)","Ideal Evidence Prior(5000,5000)"), col=c("red","blue","black"),cex=.8, lty=c(1,2,5),bg="lightblue")
# Two identical copies of the all-heads-out-of-100 comparison follow
# (duplicate block -- NOTE(review): the second appears to be an accidental
# copy-paste and can be removed).
plot(x,thatFn(1+x,1+(100-x)), type='l',col='red',xlab="Hypothethical Data(x out of 100)",ylab="Posterior Probability", main="Comparison of Changes in Posterior Probabilities", lty=1)
lines(x,thatFn(50+x,50+(100-x)),col='blue',type='l',lty=2)
lines(x,thatFn(5000+x,5000+(100-x)),col='black',type='l',lty=5)
legend(70,.8, legend=c("Weak Prior(1,1)","Moderate Prior(50,50)","Ideal Evidence Prior(5000,5000)"), col=c("red","blue","black"),cex=.8, lty=c(1,2,5),bg="lightblue")
plot(x,thatFn(1+x,1+(100-x)), type='l',col='red',xlab="Hypothethical Data(x out of 100)",ylab="Posterior Probability", main="Comparison of Changes in Posterior Probabilities", lty=1)
lines(x,thatFn(50+x,50+(100-x)),col='blue',type='l',lty=2)
lines(x,thatFn(5000+x,5000+(100-x)),col='black',type='l',lty=5)
legend(70,.8, legend=c("Weak Prior(1,1)","Moderate Prior(50,50)","Ideal Evidence Prior(5000,5000)"), col=c("red","blue","black"),cex=.8, lty=c(1,2,5),bg="lightblue")
|
/src/expectedlossexample.R
|
no_license
|
Lokgic/diss-repo
|
R
| false
| false
| 5,515
|
r
|
# Three chance hypotheses for a coin: biased-heads (0.9), near-fair (0.5),
# biased-tails (0.1), with prior probabilities hb_pr/hn_pr/hw_pr.
hb = 0.9
hn = 0.5
hw = 0.1
hb_pr = .05
hn_pr = .9
hw_pr = 0.05
# eb: prior expected probability of heads; ew: complement (tails).
eb = hb*hb_pr + hn*hn_pr + hw*hw_pr
ew = 1 - eb
# Expected probability of heads under the three-hypothesis model: a
# weighted average of the chance hypotheses (script globals hb, hn, hw)
# using the supplied hypothesis probabilities hbp/hnp/hwp.
getE <- function(hbp, hnp, hwp) {
  sum(c(hb, hn, hw) * c(hbp, hnp, hwp))
}
# Bayesian update: posterior probability of a hypothesis given its prior
# `h`, the likelihood of the observation under it (`chance`), and the
# total probability of the evidence `e` (posterior = h * chance / e).
# NOTE: this masks stats::update() for the rest of the session.
update <- function(h, e, chance) {
  (h * chance) / e
}
# Iterate the Bayesian update three times, each time conditioning on an
# observed head: posterior hypothesis probabilities (pr2, pr3, pr4) and
# the refreshed expected probability of heads (eb2, eb3).
hb_pr2 = update(hb_pr,eb,hb)
hw_pr2 = update(hw_pr,eb,hw)
hn_pr2 = update(hn_pr,eb,hn)
eb2 = getE(hb_pr2,hn_pr2, hw_pr2)
eb2
hb_pr3 = update(hb_pr2,eb2,hb)
hw_pr3 = update(hw_pr2,eb2,hw)
hn_pr3 = update(hn_pr2,eb2,hn)
eb3 = getE(hb_pr3,hn_pr3, hw_pr3)
hb_pr4 = update(hb_pr3,eb3,hb)
hw_pr4 = update(hw_pr3,eb3,hw)
hn_pr4 = update(hn_pr3,eb3,hn)
# Expected-loss contribution of a single state: the probability of that
# state times the squared error between the estimate `expected` and the
# true state value `state`.
exloss <- function(expected, state, prob) {
  prob * (state - expected)^2
}
# Example expected-loss evaluation for the tails-biased hypothesis.
exloss(hw,hb,hw_pr)
# Beta(1, 1) (uniform) prior updated with 500 heads out of 1000 flips:
# posterior is Beta(an, bn) = Beta(501, 501).
a = 1
b = 1
sum_x = 500
n = 1000
an = a + sum_x
bn = b + n - sum_x
# Squared-error loss between the true parameter `theta` and the chosen
# point estimate `action`.
loss_fn <- function(theta, action) {
  (theta - action)^2
}
# Monte Carlo estimate of the posterior expected (squared-error) risk of
# choosing point estimate `action` when theta ~ Beta(a, b).
#
# Fixes vs. the original:
#  * the risk is now returned visibly (the original body ended on an
#    assignment, so the value came back invisibly),
#  * the element-wise loop is vectorized (numerically identical),
#  * the Beta parameters are arguments defaulting to the script globals
#    `an`/`bn`, keeping old call sites working while making the function
#    testable in isolation.
#
# action: candidate point estimate in [0, 1].
# s:      number of Monte Carlo draws.
# a, b:   posterior Beta parameters (default: globals an, bn).
post <- function(action, s = 5000, a = an, b = bn) {
  theta <- rbeta(s, a, b)
  mean((theta - action)^2)
}
# Absolute change, prior vs posterior, in the expected loss mass placed on
# the narrow interval (0.495, 0.505) around theta = 0.5, for a Beta(a1, b1)
# prior updated with x successes in n trials.  (`s` is accepted but unused,
# as in the original.)
fn <- function(a1, b1, x = 500, n = 500, s = 100000) {
  a2 <- a1 + x
  b2 <- b1 + n - x
  prior_mean <- a1 / (a1 + b1)
  post_mean <- a2 / (a2 + b2)
  upper <- 0.505
  lower <- 0.495
  prior_mass <- pbeta(upper, a1, b1) - pbeta(lower, a1, b1)
  prior_loss <- prior_mass * (0.5 - prior_mean)^2
  post_mass <- pbeta(upper, a2, b2) - pbeta(lower, a2, b2)
  post_loss <- post_mass * (0.5 - post_mean)^2
  abs(prior_loss - post_loss)
}
# 200 equal-width bins over [0, 1] and their midpoints — the grid of chance
# hypotheses used by getw() below:
br = seq(0,1,by=0.005)
mid = (br[1:200] + br[2:201])/2
# Example parameters: Beta(10, 10) agent, 50 heads in 50 tosses, and the
# Monte-Carlo sample size used later:
a1 = 10
b1 = 10
x = 50
n = 50
s = 1000000
# Expected-loss profile over 200 equal-width chance bins for a Beta(a1, b1)
# agent: Monte-Carlo bin probabilities times squared distance of each bin
# midpoint from the distribution's mean.
getw <- function(a1, b1, s = 100000) {
  breaks <- seq(0, 1, by = 0.005)
  midpts <- (breaks[1:200] + breaks[2:201]) / 2
  mean_chance <- a1 / (a1 + b1)
  draws <- rbeta(s, a1, b1)  # single RNG call, as in the original
  bin_prob <- table(cut(draws, breaks = breaks)) / s
  (midpts - mean_chance)^2 * bin_prob
}
# Compare expected-loss profiles for a weak Beta(2,2) prior and a more
# opinionated Beta(52,52) prior.
a1 = 2
b1 = 2
a2 = 52
b2 = 52
w1 = getw(a1,b1)
w2 = getw(a2,b2)
par(mfrow=c(1,1))
# NOTE(review): xSeq is only defined further below; this call works only in
# an interactive session where xSeq already exists — confirm ordering.
plot(xSeq, dbeta(xSeq,2,2))
w3 = getw(7,2)
w4 = getw(12,2)
w5 = getw(4,2)
plot(w1,xlab="Chance Hypothesis",ylab="Expected Loss",xaxt = "n",yaxt = "n", main="Loss Comparison")
lines(w5,col="yellow")
lines(w3,col="red")
lines(w4,col="blue")
axis(1, at=c(0,100,200), labels=c(0,0.5,1))
legend( 0.3,1.508033e-04, legend=c("Prior","5 heads(j-weight:0.45)","10 heads(j-weight:0.435"), col=c("black","red","blue"),cex=.8, lty=1,bg="lightblue")
# Four-panel figure: prior/posterior densities and their expected-loss curves.
par(mfrow=c(2,2))
xSeq = seq(0,1,by=0.005)
plot(xSeq,dbeta(xSeq,a1,b1),type='l', xlab="Chance Hypothesis",ylab="Density",xaxt = "n",yaxt = "n", main="Naive Prior Distribution")
plot(w1,type='l', xlab="Chance Hypothesis",ylab="Expected Loss",xaxt = "n",yaxt = "n", main="Prior Expected Loss")
axis(1, at=c(0,100,200), labels=c(0,0.5,1))
plot(xSeq,dbeta(xSeq,a2,b2),type='l', xlab="Chance Hypothesis",ylab="Density",xaxt = "n",yaxt = "n", main="Posterior Distribution")
plot(w2,type='l', xlab="Chance Hypothesis",ylab="Expected Loss",xaxt = "n",yaxt = "n", main="Posterior Expected Loss")
axis(1, at=c(0,100,200), labels=c(0,0.5,1))
# Joycean weight: total absolute change in the expected-loss profile after i
# additional heads, for each of the two priors.
x = 1:100
n = 1:100
times = 50
vw1 = rep(NA,times)
vw2 = rep(NA,times)
for (i in 1:times){
tempw = getw(a1+i,b1)
vw1[i] = sum(abs(tempw-w1))
tempw2 = getw(a2+i,b2)
vw2[i] = sum(abs(tempw2-w2))
}
x = 1:times
# NOTE(review): the next three lines look like console leftovers — w1plot()
# is not defined anywhere, and the bare `plot` just prints the function.
w1plot(x,w)
plot
sum(abs(w2-w1))
# NOTE(review): this re-derivation of getw()'s internals uses e1, which is
# never defined at the top level (it is local to getw) — verify before running.
r1 = rbeta(s,a1,b1)
c1 = cut(r1,breaks=br)
t1 = table(c1)/s
loss1 = (mid - e1)^2
w1 = loss1*t1
a2 = a1 + x
b2 = b1 + n - x
e2 = a2/(a2+b2)
r2 = rbeta(s,a2,b2)
# Skyrmsian weight: absolute shift of the Beta mean after observing x
# successes in n trials.
sweight <- function(a, b, x, n) {
  prior_mean <- a / (a + b)
  posterior_mean <- (a + x) / (a + b + n)
  abs(posterior_mean - prior_mean)
}
# Skyrmsian weights for the same pair of priors over 1..50 extra heads:
sweight(2000,2000,50,50)
sw1 = sweight(2,2,1:50,1:50)
sw2 = sweight(52,52,1:50,1:50)
# Side-by-side comparison of Joycean vs Skyrmsian weight as evidence grows:
par(mfrow=c(1,2))
plot(x,vw1,ylim=c(0,0.05),type='l',col="red",xlab="Number of Heads", ylab="Joycean Weight", main="Joycean Weight Comparison")
lines(vw2,col="blue")
legend( 30,.033, legend=c("Beta(2,2)","Beta(52,52)"), col=c("red","blue"),cex=.8, lty=1,bg="lightblue")
plot(x,sw1,ylim=c(0,0.5),type='l',col="red",xlab="Number of Heads", ylab="Skyrmsian Weight", main="Skyrmsian Weight Comparison")
lines(sw2,col="blue")
legend( 30,.31, legend=c("Beta(2,2)","Beta(52,52)"), col=c("red","blue"),cex=.8, lty=1,bg="lightblue")
plot(1:50,sw1,type='l',col='red',ylim=c(0.5,1))
lines(sw2,type='l',col='blue')
plot(sw2)
# Sweep fn() over a range of symmetric priors.
# NOTE(review): fn() is written for scalar a1/b1; vector input relies on
# pbeta's recycling — verify this is intended.
a1 = 1:100
b1 = 1:100
y = fn(a1,b1)
plot(a1,y)
x=0:100
# Mean of a Beta(a, b) distribution.
thatFn <- function(a, b) {
  a / (a + b)
}
# NOTE(review): dangling assignment — `y1 =` continues onto the next line,
# so y1 silently captures the NULL returned by plot(); confirm intent.
y1 =
plot(x,thatFn(1+x,1), type='l',col='red',xlab="Hypothethical Data(x out of x)",ylab="Posterior Probability", main="Comparison of Changes in Posterior Probabilities", lty=1)
lines(x,thatFn(20+x,20),col='blue',type='l',lty=2)
lines(x,thatFn(500+x,500),col='black',type='l',lty=5)
# NOTE(review): legend labels say (2,2)/(50,50)/(5000,5000) but the curves
# above use (1,1)/(20,20)/(500,500) — confirm which is intended.
legend(70,.8, legend=c("Weak Prior(2,2)","Moderate Prior(50,50)","Ideal Evidence Prior(5000,5000)"), col=c("red","blue","black"),cex=.8, lty=c(1,2,5),bg="lightblue")
# Posterior mean after x heads out of 100 for three prior strengths
# (this figure is drawn twice in the original):
plot(x,thatFn(1+x,1+(100-x)), type='l',col='red',xlab="Hypothethical Data(x out of 100)",ylab="Posterior Probability", main="Comparison of Changes in Posterior Probabilities", lty=1)
lines(x,thatFn(50+x,50+(100-x)),col='blue',type='l',lty=2)
lines(x,thatFn(5000+x,5000+(100-x)),col='black',type='l',lty=5)
legend(70,.8, legend=c("Weak Prior(1,1)","Moderate Prior(50,50)","Ideal Evidence Prior(5000,5000)"), col=c("red","blue","black"),cex=.8, lty=c(1,2,5),bg="lightblue")
plot(x,thatFn(1+x,1+(100-x)), type='l',col='red',xlab="Hypothethical Data(x out of 100)",ylab="Posterior Probability", main="Comparison of Changes in Posterior Probabilities", lty=1)
lines(x,thatFn(50+x,50+(100-x)),col='blue',type='l',lty=2)
lines(x,thatFn(5000+x,5000+(100-x)),col='black',type='l',lty=5)
legend(70,.8, legend=c("Weak Prior(1,1)","Moderate Prior(50,50)","Ideal Evidence Prior(5000,5000)"), col=c("red","blue","black"),cex=.8, lty=c(1,2,5),bg="lightblue")
|
# Pump-It-Up water-point classification: load the raw tables, keep the
# analysis columns, coerce the categorical fields to factors, and drop
# incomplete rows.  Same behaviour as the original column-by-column version,
# written with a single lapply over the shared factor columns.
require(caret) #select tuning parameters
require(e1071) #SVM
require(MASS)

train <- read.csv('train.csv', as.is = T)
test <- read.csv('test.csv', as.is = T)

# Column subset used throughout; train additionally keeps the label (col 41).
keep_cols <- c(1, 2, 5, 10, 14, 18, 26:38, 40)
train <- train[, c(keep_cols, 41)]
test <- test[, keep_cols]

# Categorical predictors shared by both tables:
factor_cols <- c("extraction_type_group", "extraction_type_class",
                 "management", "management_group", "payment_type", "payment",
                 "water_quality", "quality_group", "quantity",
                 "quantity_group", "source", "source_type", "source_class",
                 "waterpoint_type", "waterpoint_type_group")

train[factor_cols] <- lapply(train[factor_cols], as.factor)
train$status_group <- as.factor(train$status_group)  # outcome (train only)
test[factor_cols] <- lapply(test[factor_cols], as.factor)

train <- na.omit(train)
test <- na.omit(test)
#knn
# Fit k-NN via caret: 10-fold CV over 10 candidate k values on
# centered/scaled predictors.
knn4 <- train(status_group ~ ., data = train, method = "knn",
preProcess = c("center", "scale"), tuneLength = 10,
trControl = trainControl(method = "cv"))
# NOTE(review): the result of update() is not assigned, so the k = 9 refit
# is discarded and the CV-selected knn4 is used below — confirm intent.
update(knn4, list(.k = 9))
knn4_pred <- predict(knn4,newdata = test)
knn4_pred
FINALSUB <-data.frame(test$id, knn4_pred)
write.csv(file = "final1.csv", FINALSUB)
## RF
# NOTE(review): randomForest() is called but library(randomForest) is never
# loaded in this script — this fails unless the package is attached elsewhere.
pd.rf = randomForest(status_group~., data=train, mtry = 4, importance = TRUE)
pd.rf
tree.pred= predict(pd.rf, test, type = "class")
FINALSUB2 <-data.frame(test$id, tree.pred)
write.csv(file = "finalrf.csv", FINALSUB2)
#Neural NEt
# NOTE(review): train was subset to 21 columns above, so train[c(2:22)]
# indexes a 22nd column that does not exist — verify the intended range.
dm.navg <- avNNet(train[c(2:22)], train$status_group, size = 2, rang
= 0.5, decay = 5e-6, maxit = 200, repeats = 50)
# NOTE(review): `dm` and `inTrain` are not defined anywhere in this script.
predict(dm.navg, dm[-
inTrain,], type = "class")
|
/knn-pumpit.R
|
no_license
|
knpraveen/PumpItUp
|
R
| false
| false
| 2,892
|
r
|
require(caret) #select tuning parameters
require(e1071) #SVM
require(MASS)
train <- read.csv('train.csv', as.is = T);
test <- read.csv('test.csv', as.is = T)
train <- train[,c(1,2,5,10,14,18,26,27,28,29,30,31,32,33,34,35,36,37,38,40,41)]
test <- test[,c(1,2,5,10,14,18,26,27,28,29,30,31,32,33,34,35,36,37,38,40)]
train$extraction_type_group <- as.factor(train$extraction_type_group)
train$extraction_type_class <- as.factor(train$extraction_type_class)
train$management <- as.factor(train$management)
train$management_group <- as.factor(train$management_group)
train$payment_type <- as.factor(train$payment_type)
train$payment <- as.factor(train$payment)
train$water_quality <- as.factor(train$water_quality)
train$quality_group <- as.factor(train$quality_group)
train$quantity <- as.factor(train$quantity)
train$quantity_group <- as.factor(train$quantity_group)
train$source <- as.factor(train$source)
train$source_type <- as.factor(train$source_type)
train$source_class <- as.factor(train$source_class)
train$waterpoint_type <- as.factor(train$waterpoint_type)
train$waterpoint_type_group <- as.factor(train$waterpoint_type_group)
train$status_group <- as.factor(train$status_group)
test$extraction_type_group <- as.factor(test$extraction_type_group)
test$extraction_type_class <- as.factor(test$extraction_type_class)
test$management <- as.factor(test$management)
test$management_group <- as.factor(test$management_group)
test$payment_type <- as.factor(test$payment_type)
test$payment <- as.factor(test$payment)
test$water_quality <- as.factor(test$water_quality)
test$quality_group <- as.factor(test$quality_group)
test$quantity <- as.factor(test$quantity)
test$quantity_group <- as.factor(test$quantity_group)
test$source <- as.factor(test$source)
test$source_type <- as.factor(test$source_type)
test$source_class <- as.factor(test$source_class)
test$waterpoint_type <- as.factor(test$waterpoint_type)
test$waterpoint_type_group <- as.factor(test$waterpoint_type_group)
train <- na.omit(train)
test<- na.omit(test)
#knn
knn4 <- train(status_group ~ ., data = train, method = "knn",
preProcess = c("center", "scale"), tuneLength = 10,
trControl = trainControl(method = "cv"))
update(knn4, list(.k = 9))
knn4_pred <- predict(knn4,newdata = test)
knn4_pred
FINALSUB <-data.frame(test$id, knn4_pred)
write.csv(file = "final1.csv", FINALSUB)
## RF
pd.rf = randomForest(status_group~., data=train, mtry = 4, importance = TRUE)
pd.rf
tree.pred= predict(pd.rf, test, type = "class")
FINALSUB2 <-data.frame(test$id, tree.pred)
write.csv(file = "finalrf.csv", FINALSUB2)
#Neural NEt
dm.navg <- avNNet(train[c(2:22)], train$status_group, size = 2, rang
= 0.5, decay = 5e-6, maxit = 200, repeats = 50)
predict(dm.navg, dm[-
inTrain,], type = "class")
|
library(shiny)
# Define UI for app that draws a histogram ----
ui <- fluidPage(
  title = "Cool Maps",
  # Sidebar layout with input and output definitions ----
  # BUG FIX: shiny has no dropdownLayout(); the standard two-panel layout
  # function is sidebarLayout(sidebarPanel(...), mainPanel(...)).
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: Slider for the number of bins ----
      sliderInput(inputId = "bins",
                  label = "Number of bins:",
                  min = 1,
                  max = 50,
                  value = 30)
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: Histogram ----
      plotOutput(outputId = "distPlot")
    )
  )
)
# Define server logic required to draw a histogram ----
server <- function(input, output) {
  # distPlot is reactive: it re-renders whenever input$bins changes.
  output$distPlot <- renderPlot({
    waiting <- faithful$waiting
    # input$bins bins require input$bins + 1 break points.
    breakpts <- seq(min(waiting), max(waiting), length.out = input$bins + 1)
    hist(waiting, breaks = breakpts, col = "#75AADB", border = "white",
         xlab = "Waiting time to next eruption (in mins)",
         main = "Histogram of waiting times")
  })
}
# Create Shiny app ----
shinyApp(ui = ui, server = server)
|
/app.r
|
no_license
|
landon-thompson/spatial-project-college
|
R
| false
| false
| 1,511
|
r
|
library(shiny)
# Define UI for app that draws a histogram ----
ui <- fluidPage(
  title = "Cool Maps",
  # Sidebar layout with input and output definitions ----
  # BUG FIX: shiny has no dropdownLayout(); the standard two-panel layout
  # function is sidebarLayout(sidebarPanel(...), mainPanel(...)).
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: Slider for the number of bins ----
      sliderInput(inputId = "bins",
                  label = "Number of bins:",
                  min = 1,
                  max = 50,
                  value = 30)
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: Histogram ----
      plotOutput(outputId = "distPlot")
    )
  )
)
# Define server logic required to draw a histogram ----
server <- function(input, output) {
  # distPlot is reactive: it re-renders whenever input$bins changes.
  output$distPlot <- renderPlot({
    waiting <- faithful$waiting
    # input$bins bins require input$bins + 1 break points.
    breakpts <- seq(min(waiting), max(waiting), length.out = input$bins + 1)
    hist(waiting, breaks = breakpts, col = "#75AADB", border = "white",
         xlab = "Waiting time to next eruption (in mins)",
         main = "Histogram of waiting times")
  })
}
# Create Shiny app ----
shinyApp(ui = ui, server = server)
|
context("Report Integration")
library(ORFik)
# Make test data
# create.experiment() scans the bundled extdata directory and pairs the data
# files with the given txdb annotation; viewTemplate = FALSE suppresses the
# interactive template viewer.
template <- create.experiment(dir = system.file("extdata", "", package = "ORFik"),
                              exper = "ORFik", txdb = system.file("extdata",
                                                                  "annotations.gtf",
                                                                  package = "ORFik"),
                              viewTemplate = FALSE)
# Fill in the stage/tissue cell (column X5, row 6) that the template leaves
# blank — presumably required before read.experiment() accepts it; confirm.
template$X5[6] <- "heart"
test_that("Experiment class works as intended", {
  # test from example table in orfik
  df <- read.experiment(template)
  expect_equal(ncol(df), 6)
  # load file
  # outputLibs() assigns each library's reads into the global environment
  # under names like ORFik_cage, which the exists() check below asserts.
  outputLibs(df)
  expect_equal(exists("ORFik_cage"), TRUE)
})
|
/tests/testthat/test_report.R
|
permissive
|
lukun06/ORFik
|
R
| false
| false
| 653
|
r
|
context("Report Integration")
library(ORFik)
# Make test data
template <- create.experiment(dir = system.file("extdata", "", package = "ORFik"),
exper = "ORFik", txdb = system.file("extdata",
"annotations.gtf",
package = "ORFik"),
viewTemplate = FALSE)
template$X5[6] <- "heart"
test_that("Experiment class works as intended", {
# test from example table in orfik
df <- read.experiment(template)
expect_equal(ncol(df), 6)
# load file
outputLibs(df)
expect_equal(exists("ORFik_cage"), TRUE)
})
|
##Load all packages ----
library(fossil)
library(adehabitatHR)
library(maps)
library(grid)
library(gridExtra)
library(lubridate)
library(tidyverse)
library(rgdal)
library(rworldmap)
library(rgeos)
library(RODBC) ##connect to data base
library(sp)
select <- dplyr::select
##other packages used in this script: cowplot, data.table
##Load all user defined functions.----
# Euclidean (L2) norm of a numeric vector.
norm_vec <- function(x) {
  sqrt(sum(x * x))
}
# Point at distance `di` from p0, along the direction from p0 towards p1.
new_point <- function(p0, p1, di) {
  v <- p1 - p0
  p0 + di * (v / sqrt(sum(v^2)))
}
## A function to calculate the distances between consecutive points ##
## Output in meters ##
# Great-circle distance (fossil::deg.dist, km converted to m) between each
# point and the point `lag` fixes earlier.  The first `lag` entries are NA.
pt2pt.distance <- function(latitude, longitude, lag = 1){
  require(fossil)
  distance <- rep(NA_real_, length(latitude))
  # BUG FIX: the original loop started at 2 regardless of `lag`, so for
  # lag > 1 it indexed positions <= 0 (and a length-1 track errored).
  if (length(latitude) > lag) {
    for(i in (lag + 1):length(latitude)){
      distance[i] <- deg.dist(long1 = longitude[i-lag], lat1 = latitude[i-lag],
                              long2 = longitude[i], lat2 = latitude[i]) * 1000
    }
  }
  return(distance)
}
## A function to calculate the time increment between consecutive points ##
## Default output in seconds, other options are "auto", "mins", "hours","days", or "weeks" ##
# Returns a numeric vector the same length as `datetime`; element 1 is NA.
pt2pt.duration <- function(datetime, output.units='secs'){
  n <- length(datetime)
  if (n < 2) {
    # BUG FIX: the original `for (i in 2:length(datetime))` ran backwards
    # (2:1) for a single timestamp and errored on datetime[0].
    return(rep(NA_real_, n))
  }
  # Vectorized: one difftime call instead of growing a vector in a loop.
  duration <- c(NA, as.numeric(difftime(datetime[-1], datetime[-n], units = output.units)))
  return(duration)
}
## A function to calculate the speed of movement between consecutive points ##
# Elementwise distance / duration; NA propagates from either input.
pt2pt.speed <- function(distance, duration){
  speed <- distance / duration
  speed
}
###A function that finds the closest points to a regular (e.g. hourly) time series
# For each id in df (needs columns `id` and `time`), builds a regular target
# sequence spanning that id's track (every `int` `unit`s) and keeps, for each
# target time, the GPS fix closest to it.  Requires data.table for the final
# row-bind.
trackSubSamp = function(df, int=1,unit='hours')
{
id.i = unique(df$id)
n.id = length(id.i)
# NOTE(review): list(n.id) is a length-1 list containing the number n.id,
# not a preallocated list of length n.id (vector("list", n.id)); it still
# works because [[i]] assignment extends the list.
df.sub = list(n.id)
timestep = paste(int,unit)
# breakdown to datasets per bird
for (i in 1:n.id)
{
df.i = df[df$id==id.i[i],]
dat.seq <- seq(from = min(df.i$time, na.rm=T), to = max(df.i$time, na.rm=T), by = timestep)
id.sub = sapply(dat.seq, function(x) which.min(abs(difftime(df.i$time, x, units='mins')))) #find gps points minimizing distance to each ts in dat.seq
df.sub[[i]] = unique(df.i[id.sub,])
# the function unique makes sure that the rows in df.i[idx,] are unique - so no duplicate points
}
# Final assignment: the combined table is returned invisibly.
df.sub <- data.table::rbindlist(df.sub)
}
#
# ##Same as trackSubSamp, but usable in a loop
# trackSubSamp.birdyear.loop = function(id.df,int=1,unit='hours'){
# timestep = paste(int,unit)
# dat.seq <- seq(from = min(id.df$ts, na.rm=T), to = max(id.df$ts, na.rm=T), by = timestep)
# id.sub = sapply(dat.seq, function(x) which.min(abs(difftime(df.i$ts, x, units='secs')))) #find gps points minimizing distance to each ts in dat.seq
#
# df.sub[[i]] = unique(df.i[id.sub,])
# # the function unique makes sure that the rows in Dtemp[idx,] are unique - so no duplicate points
# return(df.sub)
# }
#laea.proj <- "+proj=laea +lat_0=34.9 +lon_0=-4.16 +x_0=4321000 +y_0=3210000 +towgs84=0,0,0,0,0,0,0 +units=m"
# Project-wide CRS: Lambert azimuthal equal-area centred on (52N, 10E), in metres.
laea.proj <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +towgs84=0,0,0,0,0,0,0 +units=m"
##defult project projection is lambert equal-area projection, which is centered on the spatialy center of the data (midpoint between most extreme values). This projection uses meters and is recommended for statistic analysis.
# Convert a plain track data.frame (needs id, time, lon, lat, birdyear) into a
# SpatialPointsDataFrame in CRS.in (WGS84 lon/lat by default), then reproject
# to CRS.out (the project's metre-based LAEA projection).
mkSpatial = function(df, CRS.in = "+init=epsg:4326",
CRS.out = laea.proj) {
library(sp)
library(rgdal)
df$all = rep('all',nrow(df)) # constant column: lets callers treat all points as one group
df = SpatialPointsDataFrame(coords = cbind(df$lon,df$lat), data = df[,c('id','all','time', 'lon', 'lat', 'birdyear')],
proj4string = CRS(CRS.in)) # the data is in WSG84 lat-lon
# NOTE(review): the function ends on an assignment, so the reprojected
# object is returned invisibly.
df = spTransform(df, CRS(CRS.out))
}
# Build an empty raster grid (adehabitatHR::ascgen) covering the bounding box
# of the supplied coordinates plus a buffer, for use as a kernelUD grid.
# NOTE(review): despite the lat/lon names, callers pass already-projected
# LAEA coordinates in metres (see the projection default) — confirm.
mkgrid <- function(lat=NULL, lon = NULL, resolution=10000, buffer = 100000,
projection = laea.proj ){
#lat is a vector of latitudes
#lon is a vector of longitudes
#resolution is the cell size of the raster.
#1 = ~ 133X165 km
#.1 = ~ 13.9 x 17.1 km
#.01 ~ 1.39 x 1.72 km .... in WSG84
#buffer adds space beyond the points in the grid. a buffer of 1 should equal ~ 111 km
#projection: projection of the data.
##projection = laea, measured in m
#resolution 1 000 = 1 km2...
xmax <- max(lon)
xmin <- min(lon)
ymax <- max(lat)
ymin <- min(lat)
# Two corner points spanning the buffered bounding box (column-major matrix:
# col 1 = x coords, col 2 = y coords).
extent<- matrix(c(xmin - buffer, xmax + buffer, ymin - buffer, ymax + buffer), ncol=2)
extent <- SpatialPoints(coords = extent, proj4string = CRS(projection)) # the data is in WSG84 lat-lon
# NOTE(review): the function ends on an assignment, so the grid is returned
# invisibly.
rast<- ascgen(extent, cellsize=resolution)
}
# ## Cleaning ----
gull <- readRDS("multitrack_gull_clean.RDS") ##running the above results in different daily.by??
gull <- gull %>% group_by(device_info_serial) %>%
mutate(birdyear = year(date_time-days(152)) - year(min(date_time)), ## a bird year starts on year day 152 (usually jun1, except leap years),
id_birdyear = paste(device_info_serial, birdyear, sep = ".")) %>%
filter(birdyear >=0 & date_time < ymd(paste0(year(max(date_time)),'-06-01'))) %>% #remove points in last year following jun 1 that don't contain migration/winter points
ungroup()
gull <- select(gull, id = device_info_serial, birdyear, id_birdyear, time = date_time, lat = latitude, lon = longitude, alt = altitude,
start_date, start_lat = start_latitude, start_lon = start_longitude, mass, sex, dur, dist, speed.pts,
col = key_name) %>% mutate(year = year(time))
original.n <- c(length(unique(gull$id_birdyear)), length(unique(gull$id)))
gull <- filter(gull, !(id == 757)) ## changed colonies!
gull <- filter(gull, !(id == 4023)) ## Doesn't migrate!
gull <- filter(gull, !(id_birdyear %in% c("325.1", "497.1","540.2"))) ## no arrival to colony (gps didn't finish downloading )
gull$col <- factor(gull$col)
gull.meta <- gull %>% select(id, start_date, start_lat, start_lon, mass, sex, col) %>% distinct()
gull <- gull %>% select(-mass, -sex, -col)
##Calculate distance to colony
gull <- gull %>% mutate(d2_col = deg.dist(lon, lat, start_lon, start_lat))
#Find first (=arrival time) and last (=departure time) visit to colony in a bird year
arrival.date <- gull %>% filter(year != year(start_date) & #no arrival in first year
d2_col <= 10) %>% ##select points within the 10k buffer of colony
group_by(id, year) %>%
summarise(col.arrival = min(time)) %>% ungroup() ##Find the first point within 10k of colony
departure.date <- gull %>% group_by(id) %>% mutate(end.year = max(year)) %>%
ungroup() %>%
filter(year != end.year &
d2_col <= 10) %>% ##select points within the 10k buffer of colony
group_by(id, year) %>%
summarise(col.depart = max(time)) %>% ungroup() ##Find the first point within 10k of colony
##join with gull to re-specify birdyear
arrival.date <- gull.meta %>% select(id, start_date) %>% full_join(arrival.date) %>%
mutate(birdyear = year(col.arrival) - year(start_date) -1) %>% ## -1 because bird year of arrival corresponds to previous years departure
select(id, col.arrival, birdyear)
departure.date <- gull.meta %>% select(id, start_date) %>% full_join(departure.date) %>%
mutate(birdyear = year(col.depart) - year(start_date)) %>% select(id, col.depart, birdyear)
mig.date <- full_join(arrival.date, departure.date) %>% group_by(id) %>%
arrange(birdyear) %>%
mutate(year.start = (col.depart - lag(col.arrival))/2 + lag(col.arrival)) %>%
left_join(gull.meta) %>%
mutate(year.start = as.POSIXct(ifelse(birdyear == 0, start_date, year.start),
origin = ymd("1970-01-01"))) %>%
select(-col, -start_date, - start_lat, -start_lon) %>%
mutate(year.end = lead(year.start)) %>%
mutate(year.end = as.POSIXct(ifelse(birdyear == max(birdyear), col.arrival, year.end),
origin = ymd("1970-01-01"))) %>% ungroup() %>%
filter(!is.na(year.start) & !is.na(year.end)) ##remove birds with missing departures/arrivals (e.g. year long gaps, no return data, etc. )
gull <- gull %>% select(-start_lon, -start_lat, -birdyear, -id_birdyear, -start_date)
gull <- gull %>% right_join(mig.date) %>%
filter(time <= year.end & time >= year.start) %>%
mutate(id_birdyear = paste(id, birdyear, sep = ".")) %>%
select(-year.start, -year.end)
nopair <- names(table((gull %>% select(id, birdyear) %>% distinct())$id))[which(table((gull %>% select(id, birdyear) %>% distinct())$id)<2)]
gull <- filter(gull, ! id %in% nopair)
nonbreed <- gull %>%
filter((time >= floor_date(col.depart, unit = "days")) &
(time <= ceiling_date(col.arrival, unit = "days"))) %>% ##use rounded start and end dates to ensure arrival points are included in data. This is important for identifying gaps at the end of the year
select(-col.depart, -col.arrival)
gull <- select(gull, -col.arrival, -col.depart)
rm(gull, mig.date, nopair)
## gull.glm, birdyear summary info----
tmp <- full_join(departure.date, arrival.date)
gull.glm <- left_join(gull.meta, tmp) %>% rename(start = start_date)
rm(gull.meta)
## KDEs, daily ----
#subsample data
nonbreed.d <- as.data.frame(trackSubSamp(nonbreed,12)) ## 2 points per day
## point2point
nonbreed.d <- nonbreed.d %>% group_by(id_birdyear) %>%
mutate(dur = pt2pt.duration(time), dist = pt2pt.distance(lat, lon), speed.pts = pt2pt.speed(dist, dur)) %>%
ungroup()
##daily id
##Find birdyears with 1 pt per day
daily.by <- nonbreed %>% group_by(id, birdyear) %>%
summarise(max.gap = max(dur, na.rm=T)/3600) %>%
filter(max.gap <= 24) ##131 birdyears with no gaps!!
id.v <- unique(daily.by$id)
#Conver data to spatial points df
snonbreed.d <- mkSpatial(nonbreed.d)
#create grid
snonbreed.pt <- as.data.frame(snonbreed.d@coords)
ud.grid <- mkgrid(lat=snonbreed.pt$coords.x2,
lon = snonbreed.pt$coords.x1,
resolution = 10000,
buffer = 450000,
projection = laea.proj)
# get map
map <- getMap("coarse")
laea.map <- spTransform(map, CRS(laea.proj))
laea.map <- fortify(laea.map)
map <- fortify(map)
##KDEs for ids with daily points
##calculate utilization distribution for each individual/year
multi.yr <- lapply(1:length(id.v), function(i) {
id.d <- filter(nonbreed.d, id == id.v[i]) ## get all points for one id
sid <- mkSpatial(id.d) ##turn into spatial df
ud <- kernelUD(sid['birdyear'], h = 100000, grid = ud.grid)}) ##calculate UD per birdyear (sid)
## get 50% UD
multi.yr.50 <- lapply(1:length(id.v), function(i){
name <- paste("ver50", id.v[i], sep = ".")
assign(name, getverticeshr(multi.yr[[i]], 50))
})
#Split 50% core area into seperate polygons
multi.yr.50p <- lapply(1:length(multi.yr.50), function(i) {
p <- disaggregate(multi.yr.50[[i]])
p@data$birdyear <- p@data$id #clarify variable name
p@data$id <- rownames(p@data) #create id variable of rownames so data can be merged with fortified polygon
p
})
## Gap: determin threshold and remove ----
##Filter points outside of any core area
#or breeding colony-
#this is so when a bird moves outside of the core area polygon
#(e.g. long central placed foraging trip),
#and returns to the same polygon, it does not count as a new visit to this core area.
nonbreed.core <- data.frame()
for (i in 1:length(daily.by$id)) {
id.n <- daily.by[i,]$id
by <- daily.by[i,]$birdyear
pts <- nonbreed.d[nonbreed.d$id_birdyear %in% paste(id.n, by, sep = "."),]
pol <- multi.yr.50p[[which(id.n == id.v)]]
s.pts <- mkSpatial(pts)
pol.y <- pol[pol@data$birdyear == by,]
overlap <- s.pts[pol.y,] ##id and year matched
overlap@data$poly <- over(overlap, pol.y)$id #which polygon it overlapped with
r.le <- rle(overlap@data$poly) #number of points in each polygon per each temporal visit
overlap@data$p.visit <- rep(seq(1:length(r.le$lengths)), r.le$lengths) #each temporally distinct visit to a polygon has a unique id (e.g. if moved from poly 1, to poly 2, then back to poly 1, the corresponding p.visits will be 1, 2, and 3 )
nonbreed.core <- rbind(nonbreed.core, overlap@data) ##overlap points from one id
}
## Select first and last point in each new polygon
core.ee <- nonbreed.core %>%
group_by(id, birdyear, poly, p.visit) %>%
mutate(ee = ifelse(time == min(time), "en", ifelse (time == max(time), "ex", NA))) %>% ## a few p.visits only have 1 point in their last p visit (considered entrance). This helps filter the right points to calculate gap length
filter(time == min(time) | time == max(time)) %>%
ungroup()
##Select birdyears with 1 pt per day
poly.dur <- core.ee %>% select(id, time, ee, poly, p.visit, birdyear) %>% spread(ee, time) %>%
mutate(dur = difftime(ex,en, units = "days")) %>% group_by(id, birdyear, poly) %>%
summarise(total_dur = as.numeric(sum(dur, na.rm = T)))
poly.dur %>% arrange(total_dur) ##shortest duration = 21 days (one of zero because migrated through fall poly in spring)
#ids with gap > 21 days
id.w.gap <- filter(nonbreed.d, dur >= 3600*24*21) %>% select(id_birdyear) %>%
distinct() %>% pull(id_birdyear)
## remove ids with gap, and ids that were then left without a pair
nonbreed.nogap.d <- filter(nonbreed.d, !(id_birdyear %in% id.w.gap))
pair.miss <- nonbreed.nogap.d %>% select(id, birdyear) %>% distinct() %>% group_by(id) %>% summarise(n=n()) %>% filter(n==1) %>% pull(id) #an additional 20 rows lost
nonbreed.nogap.d <- filter(nonbreed.nogap.d, ! (id %in% pair.miss)) ## 84 birds remain, with 237 years
id.v <- unique(nonbreed.nogap.d$id)
nonbreed.nogap <- filter(nonbreed, !(id_birdyear %in% id.w.gap))
pair.miss <- nonbreed.nogap %>% select(id, birdyear) %>% distinct() %>% group_by(id) %>% summarise(n=n()) %>% filter(n==1) %>% pull(id) #an additional 20 rows lost
nonbreed.nogap <- filter(nonbreed.nogap, ! (id %in% pair.miss)) ## 84 birds remain, with 237 years
saveRDS(nonbreed, "nonbreed.RDS")
rm(nonbreed, nonbreed.d)
## KDEs - no b.y. with gap ----
## create new KDE list
nogap_multi.yr<- lapply(1:length(id.v), function(i) {
id.h <- filter(nonbreed.nogap.d, id == id.v[i])
sid <- mkSpatial(id.h)
ud <- kernelUD(sid['birdyear'], h = 100000, grid = ud.grid)})
names(nogap_multi.yr) <- id.v
nogap_multi.yr.50 <- lapply(1:length(id.v), function(i){
name <- paste("ver50", id.v[i], sep = ".")
assign(name, getverticeshr(nogap_multi.yr[[i]], 50))
}) ##core areas
nogap_multi.yr.50p <- lapply(1:length(nogap_multi.yr.50), function(i) {
p <- disaggregate(nogap_multi.yr.50[[i]])
p@data$birdyear <- p@data$id #clarify variable name
p@data$id <- rownames(p@data) #create id variable of rownames so data can be merged with fortified polygon
p
}) #Split 50% core area into seperate polygons
##nonbreeding Overlap----
##BA overlap of 95% KDE
UD_overlap <- lapply(1:length(nogap_multi.yr), function(i){
kerneloverlaphr(nogap_multi.yr[[i]], meth = "BA", percent = 95, conditional = T)
})
UD_overlap_range <- data.frame(matrix(ncol = 2, nrow = length(UD_overlap)))
for(i in 1:length(UD_overlap)){
overlap <- UD_overlap[[i]]
for(j in 1:length(overlap[1,])){ ###remove overlap within same years
overlap[j,j] <- NA
}
range <- range(overlap, na.rm = T)
UD_overlap_range[i,] <- range
}
colnames(UD_overlap_range) <- c("min_overlap", "max_overlap")
UD_overlap_range$id <- names(nogap_multi.yr) ##order in loop
## max is 0.95 (overlap with)
UD_overlap_mn <- data.frame(matrix(ncol = 1, nrow = length(UD_overlap)))
for(i in 1:length(UD_overlap)){
overlap <- UD_overlap[[i]]
for(j in 1:length(overlap[1,])){ ###remove overlap within same years
overlap[j,j] <- NA
}
mean.overlap <- mean(overlap, na.rm = T)
UD_overlap_mn[i,] <- mean.overlap
}
colnames(UD_overlap_mn) <- c("mn_overlap")
UD_overlap_mn$id <- names(nogap_multi.yr) ##order in loop
## Gull.glm, add overlap ----
UD_overlap_range <- left_join(UD_overlap_range, UD_overlap_mn)
UD_overlap_range$id <- as.numeric(UD_overlap_range$id)
gull.glm <- left_join(gull.glm, UD_overlap_range)
## Seperate Core Areas ----
## convert polygons into a dataframe for plotting with ggplot
nogap_multi.yr.50p <- lapply(nogap_multi.yr.50p,
function(x) spTransform(x, CRS("+init=epsg:4326")))
nogap_multi.yr.50df <- lapply(1:length(nogap_multi.yr.50p), function(i) {
df <- fortify(nogap_multi.yr.50p[[i]]) ##convert to dataframe
merge(df, nogap_multi.yr.50p[[i]]@data, by = "id") #add original data
}) ## convert polygons into a dataframe for plotting with ggplot, each id is one element of list.
nogap_multi.yr.50df <- mapply(cbind, nogap_multi.yr.50df, "bird_id"=id.v, SIMPLIFY=F)
##Fragmented winter polygons
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 395)]], birdyear == 0), aes(col = birdyear, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
coord_fixed(xlim = c(-21, 17.8),
ylim = c(10, 58.1))
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 5296)]], birdyear == 0), aes(col = id, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
coord_fixed(xlim = c(-21, 17.8),
ylim = c(10, 58.1))
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 5296)]], birdyear == 1), aes(col = id, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
coord_fixed(xlim = c(-21, 17.8),
ylim = c(10, 58.1))
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 5335)]], birdyear == 1), aes(col = id, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
coord_fixed(xlim = c(-21, 17.8),
ylim = c(10, 58.1))
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 5535)]], birdyear == 1), aes(col = id, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
coord_fixed(xlim = c(-21, 17.8),
ylim = c(10, 58.1))
# The following have fragmented polygons in 1 year that are joined in others. These years need to be merged:
#
# 395.0: p1 + p2
# 5296.0: p1 + p2 + p3
# 5296.1: p5 + p6
# 5335.1: p3 + p4
# 5535.1: p3 + p4
##fragmented WA poly
nogap_multi.yr.50df[[which(id.v == 395)]][nogap_multi.yr.50df[[which(id.v == 395)]]$id == 2,]$id <- 1
nogap_multi.yr.50df[[which(id.v == 5296)]][nogap_multi.yr.50df[[which(id.v == 5296)]]$id %in% c(2,3),]$id <- 1
nogap_multi.yr.50df[[which(id.v == 5296)]][nogap_multi.yr.50df[[which(id.v == 5296)]]$id == 6,]$id <- 5
nogap_multi.yr.50df[[which(id.v == 5335)]][nogap_multi.yr.50df[[which(id.v == 5335)]]$id == 4,]$id <- 3
nogap_multi.yr.50df[[which(id.v == 5535)]][nogap_multi.yr.50df[[which(id.v == 5535)]]$id == 4,]$id <- 3
### Create 1 dataframe for all polygons (one row per polygon vertex, all birds)
poly.df <- lapply(1:length(id.v), function(i){
df <- nogap_multi.yr.50df[[i]]
df <- df %>% select( polygon = id,id = bird_id, birdyear, lat, lon = long, order)
})
poly.df <- data.table::rbindlist(poly.df)
## unique polygon key: birdID.birdyear.polygonNumber
poly.df$uniq.p <- paste(poly.df$id, poly.df$birdyear, poly.df$polygon, sep = ".")
## create a list of sp::Polygon objects, one per core area (instead of a list per year)
poly.p <- lapply(split(poly.df[, c("lat","lon")], poly.df[, "uniq.p"]), Polygon)
## Find centroid of each core area (Polygon@labpt is the label point / centroid)
## NOTE(review): coords were split as c("lat","lon"), so labpt[1] is latitude here
centroid <- lapply(1:length(poly.p), function(i){
lat <- poly.p[[i]]@labpt[1]
lon <- poly.p[[i]]@labpt[2]
uniq.p <- split(poly.df[, 7], poly.df[, 7])[[i]][1]
df <- data.frame(lat, lon, uniq.p)
df
})
centroid <- data.table::rbindlist(centroid)
## attach polygon/bird/year identifiers back onto each centroid row
tmp <- poly.df %>% select(-lat, -lon, -order) %>% distinct()
centroid <- left_join(centroid, tmp)
#centroid$birdyear <- as.numeric(centroid$birdyear) - 1 #birdyear was renumber starting at 1 instead of 0, so change back to join
## add colony (start) coordinates per bird for distance calculations below
centroid <- gull.glm %>% select(id, start_lat, start_lon) %>% distinct() %>% right_join(centroid)
## Define Winter area ----
## distance (km, via fossil::deg.dist) between colony and each polygon centroid
centroid <- mutate(centroid, mig.dist = deg.dist(lon, lat, start_lon, start_lat))
# Calculate enter and exit times per polygon
## select only the GPS fixes that fall inside each core-area polygon
gc()
remove(poly.pts)
poly.p <- poly.p[centroid$uniq.p] ## reorder poly.p to match the centroid table
poly.pts <- data.frame()
for (i in 1:length(centroid$uniq.p)) { ## find points within each core area
ID <- centroid[i,]$id
by <- centroid[i,]$birdyear
p <- centroid[i,]$uniq.p
pts <- nonbreed.nogap[nonbreed.nogap$id == ID & nonbreed.nogap$birdyear == by,] ## select points for the right id and bird year
pts <- select(pts, id, time, lat, lon, birdyear, dur)
pol <- poly.p[[p]] ## select the core area polygon
## coords are stored (lat, lon), hence the column swap in the arguments below
overlap.log <- point.in.polygon(pts$lon, pts$lat, pol@coords[,2], pol@coords[,1]) ## Find points overlapping the polygon
overlap.log <- overlap.log == 1 ## 1 = strictly inside (edges/vertices excluded)
overlap <- pts[overlap.log,] ## overlapping points for this polygon in i's individual birdyear
overlap$poly <- p
poly.pts <- rbind(poly.pts, overlap) #### list of all points in polygons.
}
poly.pts <- ungroup(poly.pts)
poly.pts <- poly.pts %>% group_by(id) %>% arrange(time, .by_group = TRUE) %>% ungroup() # order points by time
## p.index numbers consecutive runs of the same polygon, so repeat visits to a
## polygon get separate enter/exit events
poly.pts$p.index <- rep(1:length(rle(poly.pts$poly)$lengths), rle(poly.pts$poly)$lengths)
# Find first and last point in a polygon (per visit)
en.ex <- poly.pts %>% group_by(id, birdyear, poly, p.index) %>% summarise(enter = min(time), exit = max(time))
en.ex$time.in.poly <- difftime(en.ex$exit, en.ex$enter, units = "days") ## duration of each polygon visit
#
# Selection of wintering area:
# the polygon with the longest occupancy during the winter window (1 Dec - 1 Mar)
## Clip enter/exit to the winter window: dates outside Dec 1 (yday 335) - Mar 1
## (yday 90) are snapped to the nearest window edge, then days-of-year < 152 are
## shifted by +365 so the Dec-Mar span is numerically continuous across New Year.
winter.time.poly <- en.ex %>% mutate(enter = if_else(yday(enter) > 335 | yday(enter) < 90, yday(enter),
if_else(yday(enter) > 152, 335, 90)),
exit = if_else(yday(exit) > 335 | yday(exit) < 90, yday(exit),
if_else(yday(exit) > 152, 335, 90))) %>%
mutate(enter = ifelse(enter < 152, enter+365, enter), exit = ifelse(exit < 152, exit +365, exit), wtime = exit-enter) %>% group_by(id, birdyear)%>% filter(wtime == max(wtime))
winter.poly.v <- winter.time.poly$poly
## 89 days is the maximum possible duration (number of days from Dec 1 - Mar 1)
## Alternative definition: the polygon furthest from the colony.
far_poly <- centroid %>% #mig.dist in km
group_by(id, birdyear) %>% filter(mig.dist == max(mig.dist)) %>%
ungroup() %>% select(id, birdyear, polygon, mig.dist, uniq.p)
far.poly.v <- far_poly$uniq.p
# the furthest polygon was the one used between December - March in all but 2 cases (from same individual).
#table(far.poly.v %in% winter.poly.v)
## Based on maps, the results of time during winter make more sense - individual spent autumn on coast of Portugal (furthest poly), moving to central Spain for the winter months.
## Select centroid from polygons in winter.poly.v, to use as migration distance
## Migration distance ----
winter_poly <- centroid %>% group_by(id, birdyear) %>% filter(uniq.p %in% winter.poly.v) %>%
ungroup() %>% select(id, birdyear, polygon, mig.dist, uniq.p)
## Winter arrival & Departure ----
## Select polygon of wintering area per bird-year
winter.poly.p<- poly.p[winter_poly$uniq.p] ##In most cases, two polygons in a similar region, so no change in migration distance (but see 4032.1).
## select only points inside the wintering-area polygon in each year
## (nonbreed.nogap.d is the subsampled point set — suffix meaning assumed; confirm)
remove(winter.poly.pts)
winter.poly.pts <- data.frame()
nonbreed.nogap.d <- ungroup(nonbreed.nogap.d)
for (i in 1:length(winter_poly$id)) {
ID <- winter_poly[i,]$id
by <- winter_poly[i,]$birdyear
pts <- nonbreed.nogap.d[nonbreed.nogap.d$id == ID & nonbreed.nogap.d$birdyear == by,]
pol <- winter.poly.p[[i]]
## coords stored (lat, lon): columns swapped in the call, as above
overlap.log <- point.in.polygon(pts$lon, pts$lat, pol@coords[,2], pol@coords[,1])
overlap.log <- overlap.log == 1
overlap <- pts[overlap.log,] ##overlapping points from the wintering polygon in i individual birdyear
winter.poly.pts <- rbind(winter.poly.pts, overlap)
}
## Select first and last point in each winter area (times at which a polygon was either entered or exited)
winter.poly.ee <- winter.poly.pts %>%
group_by(id, birdyear) %>%
mutate(ee = ifelse(time == min(time), "en", ifelse (time == max(time), "ex", NA))) %>%
filter(!is.na(ee)) %>%
ungroup()
## winter departure = last point in winter area
ex <- filter(winter.poly.ee, ee == "ex") %>% select(id, birdyear, winter.depart = time)
## Winter arrival = first point in winter area
en <- filter(winter.poly.ee, ee == "en") %>% select(id, birdyear, winter.arrive = time)
winter_poly$birdyear <- as.numeric(as.character(winter_poly$birdyear))
winter_poly<- left_join(winter_poly, ex) %>% left_join(en)
tmp <- winter_poly %>% select(id, birdyear, mig.dist, winter.arrive, winter.depart)
## Gull.glm, add mig.dist & winter time ----
gull.glm <- left_join(gull.glm, tmp)
### more obs in gull.glm than in winter_poly because data with gaps can still be used in analysis of arrival and departure from colony (if mig.dist not included as a fixed factor) ##
## Check whether any arrival/departure estimate falls inside a tracking gap (> 2 days) ----
gap <- nonbreed.nogap %>% group_by(id, birdyear) %>% mutate(start.gap = lag(time)) %>% ungroup() %>%
filter(dur > 60*60*24*2) %>% select(id, birdyear, start.gap, end.gap = time)
gap <- left_join(gap, select(gull.glm, id, birdyear, winter.arrive, winter.depart, col.depart, col.arrival))
## the four filters below are printed interactively to identify affected bird-years
filter(gap, winter.arrive >= start.gap & winter.arrive <= end.gap)
filter(gap, winter.depart >= start.gap & winter.depart <= end.gap)
filter(gap, col.depart >= start.gap & col.depart <= end.gap)
filter(gap, col.arrival >= start.gap & col.arrival <= end.gap)
## 5215 by 0 col.depart, 5554 by 0 winter.arrive, 459 by 1 winter arrive
# removal done in .rmd
## Trajectory df ----
# For trajectories: we want all 'travel points' between colony and wintering area (autumn and spring).
# Core areas en route will be replaced with their polygon centroid so that the trajectory is smoothed through these areas.
id.v <- unique(nonbreed.nogap.d$id)
### we want the original polygons (not the merged fragments above), because joins are otherwise wonky
nogap_multi.yr.50df.noagg <- lapply(1:length(nogap_multi.yr.50p), function(i) {
df <- fortify(nogap_multi.yr.50p[[i]]) ##convert SpatialPolygonsDataFrame to dataframe
merge(df, nogap_multi.yr.50p[[i]]@data, by = "id") #add original attribute data
}) ## convert polygons into a dataframe for plotting with ggplot, each id is one element of list.
nogap_multi.yr.50df.noagg <- mapply(cbind, nogap_multi.yr.50df.noagg, "bird_id"=id.v, SIMPLIFY=F)
### Create 1 dataframe for all (non-aggregated) polygons
poly.df.noagg <- lapply(1:length(id.v), function(i){
df <- nogap_multi.yr.50df.noagg[[i]]
df <- df %>% select( polygon = id,id = bird_id, birdyear, lat, lon = long, order)
})
poly.df.noagg <- data.table::rbindlist(poly.df.noagg)
poly.df.noagg$uniq.p <- paste(poly.df.noagg$id, poly.df.noagg$birdyear, poly.df.noagg$polygon, sep = ".")
## one sp::Polygon per non-aggregated core area, keyed by uniq.p
poly.p.noagg <- lapply(split(poly.df.noagg[, c("lat","lon")], poly.df.noagg[, "uniq.p"]), Polygon)
## centroids of the non-aggregated polygons (same structure as `centroid` above)
centroid.noagg <- lapply(1:length(poly.p.noagg), function(i){
lat <- poly.p.noagg[[i]]@labpt[1]
lon <- poly.p.noagg[[i]]@labpt[2]
uniq.p <- split(poly.df.noagg[, 7], poly.df.noagg[, 7])[[i]][1]
df <- data.frame(lat, lon, uniq.p)
df
})
centroid.noagg <- data.table::rbindlist(centroid.noagg)
tmp <- poly.df.noagg %>% select(-lat, -lon, -order) %>% distinct()
centroid.noagg <- left_join(centroid.noagg, tmp)
centroid.noagg$birdyear <- as.numeric(centroid.noagg$birdyear) #- 1 #birdyear was re-numbered starting at 1 instead of 0, so change back to join
## select only points in the (non-aggregated) polygons
nonbreed.nogap <- ungroup(nonbreed.nogap) ### use non-subsampled data for duration calculation.
gc()
remove(poly.pts.noagg)
poly.p.noagg <- poly.p.noagg[centroid.noagg$uniq.p] ## reorder poly.p.noagg to match centroid.noagg
poly.pts.noagg<- data.frame()
for (i in 1:length(centroid.noagg$uniq.p)) {
ID <- centroid.noagg[i,]$id
by <- centroid.noagg[i,]$birdyear
p <- centroid.noagg[i,]$uniq.p
pts <- nonbreed.nogap[nonbreed.nogap$id == ID & nonbreed.nogap$birdyear == by,]
pts <- select(pts, id, time, lat, lon, birdyear, dur)
## poly.p.noagg was reordered above, so position i corresponds to uniq.p p
pol <- poly.p.noagg[[i]]
overlap.log <- point.in.polygon(pts$lon, pts$lat, pol@coords[,2], pol@coords[,1])
overlap.log <- overlap.log == 1
overlap <- pts[overlap.log,] ## points inside this polygon for individual i's birdyear
overlap$poly <- p
poly.pts.noagg <- rbind(poly.pts.noagg, overlap) #### list of all points in polygons.
}
poly.pts.noagg <- ungroup(poly.pts.noagg)
poly.pts.noagg <- poly.pts.noagg %>% group_by(id) %>% arrange(time, .by_group = TRUE) %>% ungroup()
## number consecutive runs of the same polygon (separate visits get separate indices)
poly.pts.noagg$p.index <- rep(1:length(rle(poly.pts.noagg$poly)$lengths), rle(poly.pts.noagg$poly)$lengths)
# find first and last point in each core-area visit
en.ex.noagg <- poly.pts.noagg %>% group_by(id, birdyear, poly, p.index) %>% summarise(enter = min(time), exit = max(time))
en.ex.noagg$time.in.poly <- difftime(en.ex.noagg$exit, en.ex.noagg$enter, units = "days")
## Replace points recorded while a bird was inside a core area with that area's centroid.
# This stops distance from accumulating during central-place foraging, and cleans up the actual migratory track.
gull.glm$id_birdyear <- paste(gull.glm$id, gull.glm$birdyear, sep = ".")
## points (hourly resolution) that fall strictly between a core-area enter and exit time
smooth_pts <- filter(nonbreed.nogap, id_birdyear %in% gull.glm$id_birdyear) %>% ##hourly
full_join(en.ex.noagg) %>%
filter(time > enter & time < exit) %>%
select(id, birdyear, time, poly)
tmp <- centroid.noagg %>% select(c.lat = lat, c.lon = lon, uniq.p)
## DF of all points; coordinates of in-core-area points replaced by the polygon centroid
nonbreed.UDcentroid <- left_join(nonbreed.nogap, smooth_pts) %>%
left_join(tmp, by = c("poly" = "uniq.p")) %>%
mutate(lat = if_else(is.na(poly), lat, c.lat),
lon = if_else(is.na(poly), lon, c.lon))
## Keep points between colony exit and winter entrance (autumn), and winter exit and colony entrance (spring)
all_traj <- nonbreed.UDcentroid %>% left_join(gull.glm) %>%
mutate(direction = if_else(time >= col.depart & time <= winter.arrive, "Autumn",
if_else(time >= winter.depart & time <= col.arrival, "Spring", "NA"))) %>%
filter(!direction=="NA") %>% arrange(time) %>%
select(id, lon, lat, birdyear, id_birdyear, direction, time, poly) %>%
distinct()
## Only use trajectories with at least 1 point per day during travel periods (outside of core areas)
all_traj <- all_traj %>% group_by(id_birdyear, direction) %>%
mutate(dur = as.numeric(as.character(difftime(time, lag(time), units = "secs")))) %>% ungroup()
id.w.gap <- all_traj %>% filter(is.na(poly) & dur >= 3600*24) %>% select(id_birdyear, direction) %>% #points outside polygons have no polygon name (NA)
distinct() %>% mutate(gap = T) #if there is a dur > 24 h, label this traj with gap = T
all_traj <- all_traj %>% left_join(id.w.gap) %>% filter(is.na(gap)) %>% select(-gap) #remove points from traj where gap = T
pair.miss <- all_traj %>% select(id, birdyear,direction) %>% distinct() %>% ## Find trajectories left with no same-bird pair
group_by(id, direction) %>% summarise(n=n()) %>% filter(n==1) %>%
select(id, direction) %>% mutate(gap = T) #an additional 18 rows lost
all_traj <- all_traj %>% left_join(pair.miss) %>% filter(is.na(gap)) %>% select(-gap) # Remove trajectories with no pair
saveRDS(all_traj, "all_traj.RDS")
## N core areas & core area overlap ----
## find birds whose wintering areas in different years do NOT spatially overlap
### (i.e. the bird changed wintering area between years)
## Apply the same fragment merges as earlier, but on the SpatialPolygons @data slots
nogap_multi.yr.50p[[which(id.v == 395)]]@data[2,]$id <- "1"
nogap_multi.yr.50p[[which(id.v == 5296)]]@data[c(2,3),]$id <- "1"
nogap_multi.yr.50p[[which(id.v == 5296)]]@data[6,]$id <- "5"
nogap_multi.yr.50p[[which(id.v == 5335)]]@data[4,]$id <- "3"
nogap_multi.yr.50p[[which(id.v == 5535)]]@data[4,]$id <- "3"
no.wa.overlap <- vector()
for(i in 1:length(nogap_multi.yr.50p)){
id.i <- id.v[i] #bird id for list element i
sp <- nogap_multi.yr.50p[[i]] ## spatial polygon list of all core areas for a bird id
p <- nogap_multi.yr.50p[[i]]@data ### all core areas across years
p$poly <- paste(rep(id.i, times = length(p$id)) , p$birdyear, p$id, sep = ".")
wa.id <- filter(winter.time.poly, id == id.i) %>% pull(poly) ## list of wintering-area polys for that id
for(j in 1:length(wa.id)){ ##for each wintering-area polygon
p1 <- sp[which(p$poly == wa.id[j]),]
other.wa <- wa.id[which(wa.id != wa.id[j])]
## NOTE(review): if a bird has only one wintering-area polygon, other.wa is empty
## and 1:length(other.wa) becomes 1:0 — presumably all birds here have >= 2 years;
## confirm, otherwise use seq_along(other.wa).
no.overlap.v <- logical(length(other.wa))
for(k in 1:length(other.wa)){ ## test overlap against the other years' wintering areas
p2 <-sp[which(p$poly == other.wa[k]),]
int <- gIntersection(p1,p2)
no.overlap.v[k] <- is.null(int)
## NULL intersection = no overlap between this pair of wintering areas
}
if(!all(!no.overlap.v)){ ## equivalent to any(no.overlap.v): at least one non-overlapping year pair
no.wa.overlap <- rbind(no.wa.overlap, id.i)
}
}
}
no.wa.overlap <- unique(no.wa.overlap)
### 534 spent year 3 in the UK only
## Diagnostic map for bird 606, year 1 (visual check of polygon overlap)
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 606)]], birdyear == 1), aes(col = id, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
coord_fixed(xlim = c(-21, 17.8),
ylim = c(10, 58.1))
## Stopover fidelity: find stopover core areas (non-wintering polygons) that do not
## overlap ANY core area from a different year of the same bird.
no.so.overlap <- data.frame()
for (i in 1:length(nogap_multi.yr.50p)){
id.i <- id.v[i] #bird id for list element i
sp <- nogap_multi.yr.50p[[i]] ## spatial polygon list of all core areas for a bird id
p <- nogap_multi.yr.50p[[i]]@data ### all core areas across years
p$poly <- paste(rep(id.i, times = length(p$id)) , p$birdyear, p$id, sep = ".")
wa.id <- filter(winter.time.poly, id == id.i) %>% pull(poly) ## wintering-area polys for that id
so.poly <- p[which(!(p$poly %in% wa.id)),] ## stopover polys = everything that is not a wintering area
if(length(so.poly$id)==0) next ## if no stopovers, skip to next id
for(j in 1:length(so.poly$id)){
p.id <- so.poly[j,1]
by <- so.poly[j,3]
## list all polygons from the OTHER years of this bird
id.pair <- as.numeric(filter(p, birdyear != by) %>% pull(id))
no.overlap.v <- logical(length(id.pair))
p1<-sp[sp$id == p.id,]
for(k in 1:length(id.pair)){
p2<-sp[sp$id == id.pair[k],]
int <- gIntersection(p1,p2)
no.overlap.v[k] <- is.null(int)
}
if(all(no.overlap.v)){
x<-data.frame(id.i, by, p.id)
no.so.overlap <- rbind(no.so.overlap, x) ## stopover core area with no overlap in any other year.
}
}
}
## one flag per bird-year: this bird-year contains at least one non-overlapping stopover
no.overlap <- select(no.so.overlap, id=id.i, birdyear=by) %>% distinct() %>% mutate(p.no.overlap = T)
## how many core-area polygons per birdyear?
remove(n.winter.area)
n.winter.area <- data.frame()
for (i in 1:length(nogap_multi.yr.50p)){
year <- unique(nogap_multi.yr.50p[[i]]@data$birdyear) #vector of year ids
id <- id.v[i] #bird id for list element i
p <- nogap_multi.yr.50p[[i]]@data
p <- select(p, id, birdyear) %>% distinct()
## run-length over birdyear gives the count of distinct polygons per year
n <- as.numeric(rle(as.numeric(p$birdyear))$lengths)
df <- data.frame(id, year, n)
n.winter.area <- rbind(n.winter.area, df)
}
#
n.winter.area$year <- as.numeric(as.character(n.winter.area$year))
names(n.winter.area) <- c("id","year","n.winter.p")
## Gull.glm, add n core areas + overlap flags ----
gull.glm <- left_join(gull.glm, n.winter.area, by = c("id", "birdyear" = "year"))
no.overlap <- no.overlap %>% mutate(birdyear = as.numeric(as.character(birdyear)))
gull.glm <- left_join(gull.glm, no.overlap)
## change.wa = TRUE if the bird's wintering areas do not overlap across years
gull.glm$change.wa <- ifelse(gull.glm$id %in% no.wa.overlap[,1], T, F)
## tracking summary ----
# migration (non-breeding) period duration per bird-year, in days
mig.dur <- nonbreed.nogap.d %>% group_by(id, birdyear) %>%
summarise(min = min(time), max = max(time), mig.dur = as.numeric(difftime(max, min, units = "days")))
# number of calendar days with at least one fix, per birdyear
days.w.fix <- nonbreed.nogap.d %>% mutate(yday = yday(time)) %>% select(id, birdyear, yday) %>% distinct() %>%
group_by(id, birdyear) %>% summarise(days.with.fix = n())
days.w.fix <- days.w.fix %>% left_join(mig.dur) %>% mutate(p.days.w.fix = days.with.fix/mig.dur) %>% ungroup()
## printed summary: proportion of tracked days with a fix
days.w.fix %>% summarise(mean = mean(p.days.w.fix), min = min(p.days.w.fix),
max = max(p.days.w.fix), median = median(p.days.w.fix))
## largest gap (days) between consecutive fixes, per birdyear
days.w.fix <- nonbreed.nogap.d %>% group_by(id, birdyear) %>%
summarise(max.gap = max(as.numeric(difftime(time, lag(time), units = "days")), na.rm = T)) %>% left_join(days.w.fix) %>%
select(id, birdyear, mig.dur, p.days.w.fix, max.gap) %>% ungroup()
days.w.fix %>% summarise(mean = mean(max.gap), min = min(max.gap),
max = max(max.gap), median = median(max.gap))
## WA site fidelity ----
library(raster)
nonbreed <- readRDS("nonbreed.RDS")
nonbreed <- ungroup(nonbreed)
#
## collect all fixes between winter arrival and departure, per gull.glm row
wa.pts <- data.frame()
for (i in 1:length(gull.glm$id)) {
s <- gull.glm[i,]$winter.arrive
e <- gull.glm[i,]$winter.depart
id.i <- gull.glm[i,]$id
by.i <- gull.glm[i,]$birdyear
wa <- filter(nonbreed,id == id.i& birdyear == by.i& time >= s & time <= e)
wa.pts <- rbind(wa.pts, wa)
}
rm(nonbreed)
med.step <- quantile(wa.pts$dist, c(.5)) #50% = 43m, 90% = 1984m, 95% = 4602m
## wintering-area points per id and birdyear (mkSpatial is a project helper — presumably
## converts lat/lon fixes to a projected SpatialPointsDataFrame; confirm its definition)
id.v <- unique(wa.pts$id_birdyear)
wa.laea.l <- lapply(id.v, function(x){
mkSpatial(wa.pts[wa.pts$id_birdyear == x,])
})
## all winter points of 1 individual (years pooled)
idu.v <- unique(wa.pts$id)
wau.laea.l <- lapply(idu.v, function(idu.v) mkSpatial(wa.pts[wa.pts$id == idu.v,]))
# make adehabitatLT trajectories, one burst per birdyear
wau.traj.l <- lapply(wau.laea.l, function(wa.laea){
gc()
adehabitatLT::as.ltraj(as.data.frame(wa.laea@coords), date = wa.laea@data$time,
id = wa.laea@data$birdyear,
slsp = "missing", proj4string = CRS(proj4string(wa.laea)))})
saveRDS(wau.laea.l, "wau.laea.l.RDS")
saveRDS(wau.traj.l, "wau.traj.l.RDS")
save.image("ln831.RData")
## workspace is cleared to free memory before the memory-heavy BRB step;
## only the two RDS lists are reloaded
rm(list = ls())
wau.laea.l <- readRDS("wau.laea.l.RDS")
wau.traj.l <- readRDS("wau.traj.l.RDS")
## Lambert azimuthal equal-area projection (metres), centred on Europe
laea.proj <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +towgs84=0,0,0,0,0,0,0 +units=m"
mkgrid <- function(lat = NULL, lon = NULL, resolution = 10000, buffer = 100000,
                   projection = laea.proj) {
  # Build an empty raster grid (via adehabitatMA::ascgen) covering the supplied
  # coordinates plus a margin, for use as a BRB estimation grid.
  #
  # lat, lon:    vectors of projected y / x coordinates (here LAEA, in metres).
  # resolution:  cell size in projection units (1000 = ~1 km cells under LAEA).
  #              For WGS84 degrees: 1 = ~133x165 km, .1 = ~13.9x17.1 km,
  #              .01 = ~1.39x1.72 km.
  # buffer:      extra space beyond the points on every side, in projection
  #              units (under WGS84, a buffer of 1 degree is ~111 km).
  # projection:  proj4 string describing the coordinates' CRS.
  #
  # Returns the grid invisibly (as the original assignment-as-last-expression did).
  x_rng <- range(lon)
  y_rng <- range(lat)
  # two opposite corners of the buffered bounding box: (xmin, ymin), (xmax, ymax)
  corners <- matrix(c(x_rng[1] - buffer, x_rng[2] + buffer,
                      y_rng[1] - buffer, y_rng[2] + buffer), ncol = 2)
  corner_pts <- SpatialPoints(coords = corners, proj4string = CRS(projection))
  invisible(ascgen(corner_pts, cellsize = resolution))
}
## make one estimation grid per individual (first batch: individuals 1-41)
brbu.grid <- lapply(wau.laea.l[1:41], function(wa.grid) {
gc()
mkgrid(lat = wa.grid@coords[,2], lon = wa.grid@coords[,1],
resolution = 500, buffer = 2500)
})
## diffusion coefficients for the Biased Random Bridge kernel
Du.l <- lapply(wau.traj.l[1:41], function(traj) BRB.D(traj, Tmax = 3*3600, Lmin= 20)) ## one bird has a point every 3 hours, hence Tmax = 3 h
wa_overlap.1 <- vector(mode = "list", length = 41)
## i = 19 is skipped in both loops below: it exceeds memory and is handled
## manually further down (see the id-19 section)
for(i in 1:18){
brb.i <- BRB(wau.traj.l[[i]],Du.l[[i]], filtershort = F, Tmax = 3*3600, Lmin= 20, hmin =150, type = "UD", grid = brbu.grid[[i]])
gc()
## Bhattacharyya affinity between this bird's yearly UDs (95% isopleth)
wa_overlap.1[[i]] <- kerneloverlaphr(brb.i, meth = "BA", percent = 95, conditional = T)
}
for(i in 20:41){
brb.i <- BRB(wau.traj.l[[i]],Du.l[[i]], filtershort = F, Tmax = 3*3600, Lmin= 20, hmin =150, type = "UD", grid = brbu.grid[[i]])
gc()
wa_overlap.1[[i]] <- kerneloverlaphr(brb.i, meth = "BA", percent = 95, conditional = T)
}
saveRDS(wa_overlap.1, "wa_overlap.1.RDS")
## second batch: individuals 42-82 (grids, diffusion coefficients, BRB UDs)
brbu.grid <- lapply(wau.laea.l[42:82], function(wa.grid) {
gc()
mkgrid(lat = wa.grid@coords[,2], lon = wa.grid@coords[,1],
resolution = 500, buffer = 2500)
})
## diffusion coefficients for batch 2; Du.l[[i]] now corresponds to wau.traj.l[[41+i]]
Du.l <- lapply(wau.traj.l[42:82], function(traj) BRB.D(traj, Tmax = 3*3600, Lmin= 20)) ## one bird has a point every 3 hours
wa_overlap.2 <- vector(mode = "list", length = 41)
for(i in 1:41){
brb.i <- BRB(wau.traj.l[[41+i]],Du.l[[i]], filtershort = F, Tmax = 3*3600, Lmin= 20, hmin =150, type = "UD", grid = brbu.grid[[i]])
gc()
wa_overlap.2[[i]] <- kerneloverlaphr(brb.i, meth = "BA", percent = 95, conditional = T)
}
## save environment (ln861_2506.RData)
saveRDS(wa_overlap.2, "wa_overlap.2.RDS")
## clear workspace again before the memory-heavy id-19 rerun
rm(list = ls())
wau.laea.l <- readRDS("wau.laea.l.RDS")
wau.traj.l <- readRDS("wau.traj.l.RDS")
## re-declare the projection after the workspace wipe
laea.proj <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +towgs84=0,0,0,0,0,0,0 +units=m"
## NOTE(review): duplicate of the mkgrid defined earlier in this script; it has to be
## re-declared here because rm(list = ls()) above removed the original definition.
mkgrid <- function(lat=NULL, lon = NULL, resolution=10000, buffer = 100000,
projection = laea.proj ){
#lat is a vector of latitudes
#lon is a vector of longitudes
#resolution is the cell size of the raster.
#1 = ~ 133X165 km
#.1 = ~ 13.9 x 17.1 km
#.01 ~ 1.39 x 1.72 km .... in WGS84
#buffer adds space beyond the points in the grid. a buffer of 1 should equal ~ 111 km
#projection: projection of the data.
##projection = laea, measured in m
#resolution 1 000 = 1 km2...
xmax <- max(lon)
xmin <- min(lon)
ymax <- max(lat)
ymin <- min(lat)
# two opposite corners of the buffered bounding box: (xmin, ymin) and (xmax, ymax)
extent<- matrix(c(xmin - buffer, xmax + buffer, ymin - buffer, ymax + buffer), ncol=2)
extent <- SpatialPoints(coords = extent, proj4string = CRS(projection)) # coordinates are already projected (LAEA, metres)
# ascgen builds the empty raster grid; the assignment returns it invisibly
rast<- ascgen(extent, cellsize=resolution)
}
## skipped 19 b/c takes too much memory!!!
## individual 19 is rerun here with a smaller buffer (500 instead of 2500)
brbu.grid <- mkgrid(lat = wau.laea.l[[19]]@coords[,2], lon = wau.laea.l[[19]]@coords[,1],
resolution = 500, buffer = 500)
Du.l <- BRB.D(wau.traj.l[[19]], Tmax = 3*3600, Lmin= 20) ## one bird has a point every 3 hours
gc()
x <- BRB(wau.traj.l[[19]],Du.l, filtershort = F, Tmax = 3*3600, Lmin= 20, hmin =150, type = "UD", grid = brbu.grid)
gc()
## null slot left in wa_overlap.1 b/c 19 was skipped in the batch loop
## If memory limit is hit, open the kerneloverlaphr function and run through its body manually,
## which is what the block below does (copied from adehabitatHR::kerneloverlaphr, meth = "BA").
#wa_overlap.19 <- kerneloverlaphr(x, meth = "BA", percent = 95, conditional = T)
## NOTE(review): `percent` is used below but never assigned in this script — it was an
## argument of the commented-out kerneloverlaphr call. Run `percent <- 95` before this
## section, otherwise the isopleth thresholding below errors on an undefined object.
vol <- getvolumeUD(x)
## sort cells of each UD identically so the grids align element-wise
x <- lapply(x, function(y) {
coo <- coordinates(y)
y[order(coo[, 1], coo[, 2]), ]
})
vol <- lapply(vol, function(y) {
coo <- coordinates(y)
y[order(coo[, 1], coo[, 2]), ]
})
gp <- gridparameters(vol[[1]])
res <- matrix(0, ncol = length(x), nrow = length(x))
for (i in 1:length(x)) {
for (j in 1:i) {
vi <- x[[i]][[1]]
vj <- x[[j]][[1]]
ai <- vol[[i]][[1]]
aj <- vol[[j]][[1]]
## binary mask: 1 inside the `percent` isopleth, 0 outside (conditional overlap)
ai[ai <= percent] <- 1
ai[ai > percent] <- 0
aj[aj <= percent] <- 1
aj[aj > percent] <- 0
vi <- vi * ai
vj <- vj * aj
## Bhattacharyya affinity: sum over cells of sqrt(UD1 * UD2) * cell area
res[j, i] <- res[i, j] <- sum(sqrt(vi) * sqrt(vj)) *
(gp[1, 2]^2)
}
}
rownames(res) <- names(x)
colnames(res) <- names(x)
wa_overlap.19 <- res
## slot the manual result back into position 19 and combine both batches
wa_overlap.1 <- readRDS("wa_overlap.1.RDS")
wa_overlap.1[[19]] <- wa_overlap.19
wa_overlap.2 <- readRDS("wa_overlap.2.RDS")
wa_overlap <- c(wa_overlap.1,wa_overlap.2)
saveRDS(wa_overlap, "wa_overlap_3035.RDS")
wa_overlap <- readRDS("wa_overlap_3035.RDS")
sf_range <- data.frame(matrix(ncol = 4, nrow = length(idu.v)))
for(i in 1:length(wa_overlap)){
wa_overlap.i <- wa_overlap[[i]]
##get range
for(j in 1:length(wa_overlap.i[1,])){ ###remove overlap within same years
wa_overlap.i[j,j] <- NA
}
range <- range(wa_overlap.i, na.rm = T)
sf_range[i,c(1,2)] <- range
sf_range[i,3] <- mean(wa_overlap.i, na.rm = T)
sf_range[i,4] <- idu.v[i]
}
colnames(sf_range) <- c("sf_min_overlap", "sf_max_overlap", "sf_mn_overlap", "id")
saveRDS(sf_range, "wa_overlap_range_3035.RDS")
gull.glm <- left_join(gull.glm, sf_range)
## Gull.glm export, add days with fix ----
gull.glm <- left_join(gull.glm, days.w.fix) %>% arrange(mig.dist)
saveRDS(gull.glm, "gull.glm.RDS")
save.image("ln964.RData")
## Overlap of random pairs ----
##### polygon overlap for randomization tests
##### Find pairings of DIFFERENT individuals with nearby colonies and wintering areas
start <- nonbreed.nogap.d %>% left_join(gull.glm) %>%
select(id, id_birdyear, birdyear, slat = start_lat, slon = start_lon) %>% distinct()
end <- centroid %>% group_by(id, birdyear) %>% filter(uniq.p %in% winter.poly.v) %>%
ungroup() %>% select(id, birdyear, elat = lat, elon =lon) %>%
mutate(id_birdyear = paste(id, birdyear, sep = "."), birdyear = as.numeric(as.character(birdyear)))
## find distance between each start point, if id != id (between-individual pairs only)
focal <- full_join(start, end)
pair <- focal %>% rename(p.id = id, p.slon = slon, p.slat = slat, p.birdyear = birdyear,
p.id_birdyear = id_birdyear, p.elon = elon, p.elat = elat)
## all candidate (focal row, pair row) index combinations
rand.pair.ol <- expand.grid(1:length(focal$id), 1:length(pair$p.id))
r.pair.ol <- data.frame()
for (i in seq_along(rand.pair.ol$Var1)){
x <- cbind(focal[rand.pair.ol[i,1],], pair[rand.pair.ol[i,2],])
if(x$id == x$p.id) { ## never pair an individual with itself
next
}
x$s.dist <- deg.dist(x$slon, x$slat, x$p.slon, x$p.slat)
x$e.dist <- deg.dist(x$elon, x$elat, x$p.elon, x$p.elat)
if(x$s.dist > 250 | x$e.dist > 250){ ## start and end need to be within 250 km
next
}
x <- select(x, id, birdyear, id_birdyear, p.id, p.birdyear, p.id_birdyear)
r.pair.ol <- rbind(r.pair.ol, x)
}
## Between-individual UD pairs
## drop mirror duplicates (A-B vs B-A) by sorting each row before duplicated()
r.pair.ol <- r.pair.ol[!duplicated(t(apply(r.pair.ol, 1, sort))), ]
r.pair.ol.norep <- r.pair.ol %>% select(-id_birdyear, -p.id_birdyear) %>%
group_by(id, p.id) %>% ##for each unique id pair & direction
sample_n(1) %>% ##randomly select 1 birdyear route per id pair
ungroup() %>% mutate(id_birdyear = paste(id, birdyear, sep = "."),
p.id_birdyear = paste(p.id, p.birdyear, sep = "."))
### calculate between-individual overlaps from the per-birdyear UDs
#readRDS("nogap_multi.yr.RDS")
by.poly <- unlist(nogap_multi.yr)
by.poly.v <- names(by.poly)
rand.UD_overlap <- lapply(1:length(r.pair.ol.norep$id), function(i){
f.poly <- by.poly[[which(by.poly.v == r.pair.ol.norep[i,]$id_birdyear)]]
p.poly <- by.poly[[which(by.poly.v == r.pair.ol.norep[i,]$p.id_birdyear)]]
ol <- list(f.poly, p.poly)
names(ol) <- c(r.pair.ol.norep[i,]$id_birdyear,r.pair.ol.norep[i,]$p.id_birdyear)
## coerce to estUDm so kerneloverlaphr accepts the hand-built pair
class(ol) <- "estUDm"
kerneloverlaphr(ol, meth = "BA", percent = 95, conditional = T)
})
saveRDS(rand.UD_overlap,"rand_overlap.RDS")
saveRDS(r.pair.ol.norep, "r.pair.ol.RDS")
save.image("ln1021.RData")
## Migration route variation ----
### trajectory averaging
## based on Freeman et al.
## df of single-direction trajectories, multiples identified by birdyear
## df includes id, birdyear, lon, lat, direction, date_time
## points within wintering area polygons (i.e. stopovers) were replaced with the polygon centroid to aid in
# spacing points equally along the trajectory
all_traj <- ungroup(all_traj) %>% arrange(id, time)
n_check <- all_traj %>% select(-time, -dur) %>% distinct() %>% arrange(id_birdyear)
## each trajectory needs more than 1 distinct point to form a line;
## drop single-point trajectories per direction
table(table((n_check %>% filter(direction == "Spring") %>% select(id_birdyear, direction, lon, lat) %>% distinct())$id_birdyear)<=1)
tmp <- n_check %>% filter(direction == "Spring") %>% select(id_birdyear, direction, lon, lat) %>% distinct()
nopath <- rle(tmp$id_birdyear)$values[which(rle(tmp$id_birdyear)$lengths <=1)]
all_traj <- filter(all_traj, !(id_birdyear %in% nopath & direction == "Spring"))
table(table((n_check %>% filter(direction == "Autumn") %>% select(id_birdyear, direction, lon, lat) %>% distinct())$id_birdyear)<=1)
tmp <- n_check %>% filter(direction == "Autumn") %>% select(id_birdyear, direction, lon, lat) %>% distinct()
nopath <- rle(tmp$id_birdyear)$values[which(rle(tmp$id_birdyear)$lengths <=1)]
all_traj <- filter(all_traj, !(id_birdyear %in% nopath & direction == "Autumn"))
## the three removed all still had multiple trajectories
## drop (id, direction) combinations left with only one year: averaging needs >= 2
pair.miss <- all_traj %>% select(id, id_birdyear, direction) %>% distinct()
pair.miss <- pair.miss %>% group_by(id, direction) %>% summarise(n.years = n()) %>% filter(n.years <=1)
all_traj <- all_traj %>% left_join(pair.miss) %>% filter(is.na(n.years)) %>% select(-n.years)
id.dir <- all_traj %>% select(id, direction) %>% distinct() ##all trajectories to average
id.dir <- filter(id.dir, ! id %in% c(1402, 606, 5027, 5593, 534)) ## 534 - wa poly doesn't overlap, but explored that area
for(a in 1:length(id.dir$id)){
# for(a in a:length(id.dir$id)){
traj <- filter(all_traj, id == id.dir[a,]$id & direction == id.dir[a,]$direction)
###
###This creates a list of equally spaced points (currently n=11, specified in t1_eq), with each trajectory having it's own list element.
ntraj <- unique(traj$birdyear) #trajectory id
equi_n <- 500 ### number of points along the trajectory
traj_l <- list()
name <- c()
for(i in 1:length(ntraj)){
t1 <- filter(traj, birdyear == ntraj[i]) %>%
select(lon, lat) %>% distinct()
if (length(t1$lon)<=10){ next }
t1_l <- SpatialLines(list(Lines(list(Line(cbind(t1$lon, t1$lat))), "id")))
t1_eq <- spsample(t1_l, equi_n, type = "regular") ## 250 point equally placed along length of line
traj_l[[i]] <- t1_eq@coords
name <- c(name, ntraj[[i]])
}
if(length(name) <= 1 | is.null(name)) {next}
if(length(name) != length(traj_l)) { traj_l <- traj_l[-which(sapply(traj_l, is.null))]}
names(traj_l) <- name
###
###Create starting 'thread' for mean trajectory between start and end midpoints
start <- cbind(x = mean(sapply(traj_l, '[[',1,1)), y = mean(sapply(traj_l, '[[',1,2))) ##mean of first elements in each list
end <- cbind(x = mean(sapply(traj_l, '[[',equi_n,1)), y = mean(sapply(traj_l, '[[',equi_n,2)))
###
###Create starting 'thread' from mean trajectory points
tmean <- data.frame(matrix(ncol = 2, nrow = 0))
colnames(tmean) <- c("x","y")
for(i in 1:equi_n){
tmean[i,1] = mean(sapply(traj_l, '[[',i,1))
tmean[i,2] = mean(sapply(traj_l, '[[',i,2))
}
###
##Averaging
iter_n <- 0
repeat{
iter_n <- iter_n + 1
for(i in seq_along(tmean$x)){
pmean <- as.numeric(tmean[i,]) ## select each point
nn <- data.frame()
for(j in seq_along(traj_l)){
x <- traj_l[[j]]
x <- matrix(x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,], ncol = 2)
if (length(x) == 0) { x <- traj_l[[j]]}
dist <- NA
for(k in seq_along(x[,1])){
point2 <- as.numeric(x[k,])
dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
}
nearest_neighbour1 <- x[dist == min(dist),]
nn <- rbind(nn,nearest_neighbour1)
}
tmean[i,1] <- mean(nn[,1])
tmean[i,2] <- mean(nn[,2])
}
tmean <- unique(tmean)
tmean <- rbind(start, tmean, end)
dist <- pt2pt.distance(tmean$y, tmean$x) ## dist in m
dist_log <- dist > 25000 #25 km
dist_log[1] <- FALSE
if(iter_n == 100){ ###this will stop repeating after 100 iterations. option 2 is to stop repeating if distinces between tmean are all less than e.g. 25
break
}
tmean1 <- data.frame()
for(i in 1:length(dist_log)){
if(dist_log[i]== T){
newpt <- new_point(tmean[i-1,], tmean[i,], di = norm_vec(tmean[i-1,]- tmean[i,])/2) #new point at midpoint
tmean1 <- rbind(tmean1, newpt) ## order matters
}
tmean1 <- rbind(tmean1, tmean[i,])
}
tmean <- tmean1
}
tmean <- SpatialLines(list(Lines(list(Line(cbind(tmean$x, tmean$y))), "id")))
tmean <- spsample(tmean, equi_n, type = "regular") ## 500 point equally placed along length of line
tmean <- tmean@coords
traj_l <- lapply(traj_l, FUN = data.frame)
for(i in 1:length(traj_l)){
names(traj_l[[i]]) <- c("lon", "lat")
}
nn_dist <- NA
for(l in seq_along(traj_l)){
x <- traj_l[[l]]
for(i in seq_along(tmean[,1])){
point <- tmean[i,]
dist <- NA
for(j in seq_along(x[,1])){
point2 <- x[j,]
dist[j] <- deg.dist(point[1], point[2], point2[1,1], point2[1,2])
}
nn_dist[i] <- min(dist)
}
tmean <- cbind(tmean, nn_dist)
}
tmean <- data.frame(tmean)
tmean$within.var <- rowSums(tmean[,3:ncol(tmean)])/(ncol(tmean)-2-1) ##var = sum of (x-x.mn)/n-1. here n = num traj = (ncol - 2(=tmean coords))
tmean <- tmean[,c(1:2,ncol(tmean))]
names(tmean) <- c("mn.lon", "mn.lat", "within.var")
tmean$id <- id.dir[[a, "id"]]
tmean$direction <- id.dir[[a, "direction"]]
tmean$n.years <- length(name)
mn_traj <- rbind(mn_traj, tmean)
}
saveRDS(mn_traj, "mn_traj_limitlat.RDS")
## Route variation of random pairs ----
##### Pairing of different individuals with close start and end points
## first and last point of each bird-year trajectory, per direction
start <- all_traj %>% group_by(id, id_birdyear, direction) %>% filter(row_number()==1) %>%
ungroup() %>% arrange(direction, id, id_birdyear) %>%
rename(slon = lon, slat = lat) %>% select(-time, -poly, -dur)
end <- all_traj %>% group_by(id, id_birdyear, direction) %>% filter(row_number()== n()) %>%
ungroup() %>% arrange(direction, id, id_birdyear)%>% rename(elon = lon, elat = lat) %>%
select(-time, -poly, -dur)
## drop trajectories with 10 or fewer points (too short to resample)
x <- all_traj %>% group_by(id, id_birdyear, direction) %>% summarise(n= n()) %>% filter(n <= 10)
all_traj <- left_join(all_traj, x) %>% filter(is.na(n)) %>% select(-n)
start <-left_join(start, x) %>% filter(is.na(n)) %>% select(-n)
end <-left_join(end, x) %>% filter(is.na(n)) %>% select(-n)
##find distance between each start point, if id != id. grouped within direction.
## Build all candidate pairs of routes from different birds travelling in the
## same season whose start AND end points both lie within 250 km of each other.
focal <- full_join(start, end) # start and end points for each bird-year
# Copy of focal with "p."-prefixed names so focal/pair columns can sit side by side
pair <- focal %>% rename(p.id = id, p.slon = slon, p.slat = slat, p.birdyear = birdyear,
                         p.id_birdyear = id_birdyear, p.direction = direction,
                         p.elon = elon, p.elat = elat)
## Every possible focal x pair index combination (self-pairs and reversed
## duplicates are filtered out below).
rand.pair <- expand.grid(seq_len(nrow(focal)), seq_len(nrow(pair)))
# Accumulate qualifying rows in a pre-allocated list and bind once at the end
# (avoids the O(n^2) cost of growing a data.frame with rbind inside the loop).
pair_rows <- vector("list", nrow(rand.pair))
for (i in seq_len(nrow(rand.pair))) {
  x <- cbind(focal[rand.pair[i, 1], ], pair[rand.pair[i, 2], ])
  # Only pairs of different birds migrating in the same direction qualify
  if (x$id == x$p.id || x$direction != x$p.direction) {
    next
  }
  # Great-circle distances (km, deg.dist) between the two starts / two ends
  x$s.dist <- deg.dist(x$slon, x$slat, x$p.slon, x$p.slat)
  x$e.dist <- deg.dist(x$elon, x$elat, x$p.elon, x$p.elat)
  # Both endpoints must be within 250 km of each other
  if (x$s.dist > 250 || x$e.dist > 250) { next }
  pair_rows[[i]] <- select(x, id, birdyear, p.id, p.birdyear, direction)
}
pair_rows <- pair_rows[!vapply(pair_rows, is.null, logical(1))]
r.pair <- if (length(pair_rows) > 0) do.call(rbind, pair_rows) else data.frame()
## Remove duplicated unordered pairs (A-B vs B-A): sorting each row's values
## makes the two orderings identical, so duplicated() catches the reversal.
r.pair <- r.pair[!duplicated(t(apply(r.pair, 1, sort))),]
###for each id pair, only one bird year should be used
set.seed(15)  # fixed seed so the random bird-year choice per pair is reproducible
r.pair.norep <- r.pair %>% group_by(id, p.id, direction) %>% ##for each unique id pair & direction
  sample_n(1) %>% ##randomly select 1 birdyear route per id
  ungroup()
rm(r.pair)
## calculate average trajectory and variance from paired roots.
## For each retained random pair of routes, compute a mean trajectory by
## iterative nearest-neighbour averaging, then the variation (mean distance
## from the mean trajectory to each route) along it.
rand_traj <- data.frame(matrix(ncol = 8, nrow = 0))
names(rand_traj) <- c("mn.lon", "mn.lat", "within.var", "id","birdyear", "p.id", "p.birdyear", "direction")
for(a in seq_along(r.pair.norep$id)){
  # for(a in a:length(id.dir$id)){
  # The two routes of the a-th pair: focal bird-year and its paired bird-year
  traj <- filter(all_traj, id == r.pair.norep[a,]$id & birdyear == r.pair.norep[a,]$birdyear &
                   direction == r.pair.norep[a,]$direction)
  p.traj <- filter(all_traj, id == r.pair.norep[a,]$p.id & birdyear == r.pair.norep[a,]$p.birdyear & direction == r.pair.norep[a,]$direction)
  if(length(traj$id) == 0 | length(p.traj$id) == 0) {next}
  ###
  ###This creates a list of equally spaced points (n = equi_n, specified below), with each trajectory having it's own list element.
  equi_n <- 500 ### number of points along the trajectory
  traj_l <- list()
  t1 <- traj %>% select(lon, lat) %>% distinct()
  t1_l <- SpatialLines(list(Lines(list(Line(cbind(t1$lon, t1$lat))), "id")))
  t1_eq <- spsample(t1_l, equi_n, type = "regular") ## equi_n points equally placed along length of line
  traj_l[[1]] <- t1_eq@coords
  t1 <- p.traj %>% select(lon, lat) %>% distinct()
  t1_l <- SpatialLines(list(Lines(list(Line(cbind(t1$lon, t1$lat))), "id")))
  t1_eq <- spsample(t1_l, equi_n, type = "regular") ## equi_n points equally placed along length of line
  traj_l[[2]] <- t1_eq@coords
  names(traj_l) <- c(1,2)
  ###
  ###Create starting 'thread' for mean trajectory between start and end midpoints
  start <- cbind(x = mean(sapply(traj_l, '[[',1,1)), y = mean(sapply(traj_l, '[[',1,2))) ##mean of first elements in each list
  end <- cbind(x = mean(sapply(traj_l, '[[',equi_n,1)), y = mean(sapply(traj_l, '[[',equi_n,2)))
  ###
  ###Create starting 'thread' from mean trajectory points
  tmean <- data.frame(matrix(ncol = 2, nrow = length(1:equi_n)))
  colnames(tmean) <- c("x","y")
  for(i in 1:equi_n){
    tmean[i,1] = mean(sapply(traj_l, '[[',i,1))
    tmean[i,2] = mean(sapply(traj_l, '[[',i,2))
  }
  ###
  ##Averaging: repeatedly replace each thread point with the mean of its nearest
  ##neighbours on the two routes, inserting midpoints wherever consecutive
  ##thread points drift more than 25 km apart.
  iter_n <- 0
  repeat{
    iter_n <- iter_n + 1
    for(i in seq_along(tmean$x)){
      pmean <- as.numeric(tmean[i,]) ## select each point
      nn <- data.frame()
      for(j in seq_along(traj_l)){
        x <- traj_l[[j]]
        # restrict candidates to a +/- 1 degree latitude window around the thread point
        x <- matrix(x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,], ncol = 2)
        if (length(x) == 0 ) { x <- traj_l[[j]]}  # fall back to whole route if window is empty
        dist <- NA
        for(k in seq_along(x[,1])){
          point2 <- as.numeric(x[k,])
          dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
        }
        nearest_neighbour1 <- x[dist == min(dist),]
        nn <- rbind(nn, nearest_neighbour1)
      }
      tmean[i,1] <- mean(nn[,1])
      tmean[i,2] <- mean(nn[,2])
    }
    tmean <- unique(tmean)
    tmean <- rbind(start, tmean, end)
    dist <- pt2pt.distance(tmean$y, tmean$x) ## dist in m
    dist_log <- dist > 25000  # TRUE where consecutive thread points are > 25 km apart
    dist_log[1] <- FALSE
    if(iter_n == 100){ ###this will stop repeating after 100 iterations. option 2 is to stop repeating if distinces between tmean are all less than e.g. 25
      break
    }
    # densify: insert a midpoint wherever consecutive thread points are too far apart
    tmean1 <- data.frame()
    for(i in 1:length(dist_log)){
      if(dist_log[i]== T){
        newpt <- new_point(tmean[i-1,], tmean[i,], di = norm_vec(tmean[i-1,]- tmean[i,])/2) #new point at midpoint
        tmean1 <- rbind(tmean1, newpt) ## order matters
      }
      tmean1 <- rbind(tmean1, tmean[i,])
    }
    tmean <- tmean1
  }
  # resample the converged thread back to equi_n equally spaced points
  tmean <- SpatialLines(list(Lines(list(Line(cbind(tmean$x, tmean$y))), "id")))
  tmean <- spsample(tmean, equi_n, type = "regular") ## equi_n points equally placed along length of line
  tmean <- tmean@coords
  traj_l <- lapply(traj_l, FUN = data.frame)
  for(i in 1:length(traj_l)){
    names(traj_l[[i]]) <- c("lon", "lat")
  }
  # distance from each mean-trajectory point to its nearest neighbour on each
  # route; one nn_dist column is appended to tmean per route
  nn_dist <- NA
  for(l in seq_along(traj_l)){
    x <- traj_l[[l]]
    for(i in seq_along(tmean[,1])){
      point <- tmean[i,]
      dist <- NA
      for(j in seq_along(x[,1])){
        point2 <- x[j,]
        dist[j] <- deg.dist(point[1], point[2], point2[1,1], point2[1,2])
      }
      nn_dist[i] <- min(dist)
    }
    tmean <- cbind(tmean, nn_dist)
  }
  tmean <- data.frame(tmean)
  tmean$within.var <- rowSums(tmean[,3:ncol(tmean)])/(ncol(tmean)-2-1) ##var = sum of (x-x.mn)/n-1. here n = num traj = (ncol - 2(=tmean coords))
  tmean <- tmean[,c(1:2,ncol(tmean))]
  names(tmean) <- c("mn.lon", "mn.lat", "within.var")
  tmean$id <- r.pair.norep[[a, "id"]]
  tmean$birdyear <- r.pair.norep[[a, "birdyear"]]
  tmean$p.id <- r.pair.norep[[a, "p.id"]]
  tmean$p.birdyear <- r.pair.norep[[a, "p.birdyear"]]
  tmean$direction <- r.pair.norep[[a, "direction"]]
  rand_traj <- rbind(rand_traj,tmean)
}
saveRDS(rand_traj, "rand_traj_limlat.RDS")
#find variance
## One mean variation value per route pair (averaged over all points along the
## mean trajectory); labelled "Between" for the between-individual comparison.
rand_traj_sum <- rand_traj %>% group_by(id, birdyear, p.id, p.birdyear, direction) %>%
  summarise(mn_var = mean(within.var)) %>% ungroup()
rm(rand_traj)
rand_traj_sum$pair <- "Between"
saveRDS(rand_traj_sum, "rand_traj_sum.RDS") ##Randomization test carried out in results_d4
saveRDS(r.pair.norep, "r_pair_traj.RDS")
##Route var examples ----
## Example maps of within-individual route variation: raw yearly tracks (black)
## overlaid with the mean trajectory coloured by within-route variation (km).
library(cowplot)
map.world <- rworldmap::getMap("low")
map.world <- fortify(map.world)
example.id <- c(5554,833,608, 782, 537, 4024,5337,540, 5060,5134,344)
mn.ex <- filter(mn_traj, id %in% example.id)
ID <- example.id[1]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID) %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID)
pv5554<- ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) +
  #geom_polygon(data = p50, aes(fill = id,col = id), size = 1, alpha = .1) +
  geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
  geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
  geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
  theme(legend.position = c(.85,.3), legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  scale_colour_viridis_c() +
  labs(col = "Variation \n (km)") +
  ggtitle("ID = 5554, Mean Variation = 13 km")+
  coord_fixed(xlim = c(-10, 10),
              ylim = c(38.5, 53),ratio = 1.2) ## add core areas
ID <- example.id[10]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Autumn") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Autumn")
pv5134<- ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) +
  # geom_polygon(data = p50, aes(fill = id,col = id), size = 1, alpha = .1) +
  geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
  geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
  geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
  theme(legend.position = c(.85,.3),legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  scale_colour_viridis_c() +
  ggtitle("ID = 5134, Mean Variation = 20 km") +
  labs(col = "Variation \n (km)") +
  coord_fixed(xlim = c(-7.5, 7.5),
              ylim = c(40.5, 51.5),ratio = 1.2) ## add core areas
## pv833, pv608, pv537: further within-individual route-variation examples
## (same layout as pv5554 above; only ID, direction, title and extent differ).
ID <- example.id[2]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Autumn") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Autumn")
pv833<- ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) +
  # geom_polygon(data = p50, aes(fill = id,col = id), size = 1, alpha = .1) +
  geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
  geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
  geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
  theme(legend.position = c(.85,.3),legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  scale_colour_viridis_c() +
  ggtitle("ID = 833, Mean Variation = 17 km") +
  labs(col = "Variation \n (km)") +
  coord_fixed(xlim = c(-30, 22),
              ylim = c(14, 52),ratio = 1.2) ## add core areas
ID <- example.id[3]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Spring") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Spring")
pv608<- ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) +
  # geom_polygon(data = p50, aes(fill = id,col = id), size = 1, alpha = .1) +
  geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
  geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
  geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
  theme(legend.position = c(.85,.3),legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  scale_colour_viridis_c() +
  ggtitle("ID = 608, Mean Variation = 13 km") +
  labs(col = "Variation \n (km)") +
  coord_fixed(xlim = c(-2, 5),
              ylim = c(49, 54),ratio = 1.2) ## add core areas
ID <- example.id[5]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Autumn") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Autumn")
pv537<- ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) +
  geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
  geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
  geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
  theme(legend.position = c(.13,.3), legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  scale_colour_viridis_c() +
  ggtitle("ID = 537, Mean Variation = 33 km") +
  labs(col = "Variation \n (km)") +
  coord_fixed(xlim = c(-2, 5),
              ylim = c(49, 54),ratio = 1.2) ## add core areas
## pv5337 and pv540: remaining route-variation examples, then the assembled
## 3x2 figure written to tvar_ex.png.
ID <- example.id[7]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Autumn") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Autumn")
pv5337<- ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) +
  # geom_polygon(data = p50, aes(fill = id,col = id), size = 1, alpha = .1) +
  geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
  geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
  geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
  theme(legend.position = c(.85,.3), legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  # NOTE(review): no fill aesthetic is mapped in this plot, so the fill scale
  # below has no effect — presumably left over from a commented-out UD layer.
  scale_colour_viridis_c() + scale_fill_viridis_d(begin = .1, end = .8) +
  ggtitle("ID = 5337, Mean Variation = 106 km") +
  labs(col = "Variation \n (km)") +
  coord_fixed(xlim = c(-10, 5),
              ylim = c(40, 51),ratio = 1.2) ## add core areas
ID <- example.id[8]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Spring") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Spring")
pv540<- ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) +
  geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
  geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
  geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
  theme(legend.position = c(.85,.3), legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  scale_colour_viridis_c() +
  ggtitle("ID = 540, Mean Variation = 111 km") +
  labs(col = "Variation \n (km)") +
  coord_fixed(xlim = c(-26, 22),
              ylim = c(18, 53),ratio = 1.2) ## add core areas
#plot_grid(pv537, pv608, pv5337, pv5554, pv540, pv833, nrow = 3)
png("tvar_ex.png", width = 20, height = 30, units = "cm", res = 400)
plot_grid(pv537, pv608, pv5337, pv5134, pv540, pv833, nrow = 3)
dev.off()
## Maps - overlap ----
## Example maps of between-year space-use overlap (UD contours per bird-year).
library(cowplot)
theme_set(theme_bw())
# Candidate example birds; note only a subset is used in ol.page below.
example.id <- c(478,608,1400,483,5027,5296, 871,860,4047,5068)
## 478
## Overlap example for one bird: yearly 25/50/75/95% UD contours plus daily
## positions, coloured by bird-year; title reports the mean overlap and the
## number of tracked seasons.
ID <- example.id[1]
##UDs
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
##points
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p478 <-
  ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) + ## map
  ##UDS per year: outline-only contours. The constant fill = NA is set outside
  ##aes(); mapping it inside aes() would send NA through the fill scale, which
  ##paints polygons with the scale's na.value (grey) instead of leaving them
  ##transparent.
  geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
  geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
  geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
  geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
  #points
  geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
  ##appearance
  scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .8) +
  theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  coord_fixed(xlim = c(-4, 4.5),
              ylim = c(49,55) ,ratio = 1.2) +
  ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
                 gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
## 608
## Overlap example maps (same layout as p478). Constant fill = NA moved outside
## aes() for the p75/p25 layers: a constant mapped inside aes() goes through
## scale_fill_viridis_d and is rendered with the scale's na.value (grey)
## rather than transparent, as the correctly written p95 layer shows.
ID <- example.id[2]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p608 <-
  ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) + ## map
  geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
  geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
  geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
  geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
  geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
  scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
  theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  coord_fixed(xlim = c(-5, 3.5),
              ylim = c(49,55) ,ratio = 1.2) +
  ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
                 gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
#1400
ID <- example.id[3]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p1400 <-
  ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) + ## map
  geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
  geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
  geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
  geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
  geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
  scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
  theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  coord_fixed(xlim = c(-14, 7),
              ylim = c(36,52) ,ratio = 1.2) +
  ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
                 gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
#483
## Overlap example maps (same layout as p478). Constant fill = NA moved outside
## aes() for the p75/p25 layers so the contours stay transparent (a constant
## mapped inside aes() is rendered with the fill scale's na.value instead).
ID <- example.id[4]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p483 <-
  ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) + ## map
  geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
  geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
  geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
  geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
  geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
  scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
  theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  coord_fixed(xlim = c(-15, 7),
              ylim = c(37,54) ,ratio = 1.2) +
  ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
                 gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
#5027
ID <- example.id[5]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p5027 <-
  ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) + ## map
  geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
  geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
  geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
  geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
  geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
  scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
  theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  coord_fixed(xlim = c(-20, 11),
              ylim = c(32,55.5) ,ratio = 1.2) +
  ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
                 gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
## 5296
## Overlap example maps (same layout as p478). Constant fill = NA moved outside
## aes() for the p75/p25 layers so the contours stay transparent (a constant
## mapped inside aes() is rendered with the fill scale's na.value instead).
ID <- example.id[6]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p5296 <-
  ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) + ## map
  geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
  geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
  geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
  geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
  geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
  scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
  theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  coord_fixed(xlim = c(-32, 17),
              ylim = c(17,55) ,ratio = 1.2) +
  ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
                 gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
## 871
ID <- example.id[7]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p871 <-
  ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) + ## map
  geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
  geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
  geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
  geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
  geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
  scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
  theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  coord_fixed(xlim = c(-5, 3.5),
              ylim = c(49,55) ,ratio = 1.2) +
  ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
                 gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
#5068
## Overlap example maps (same layout as p478). Constant fill = NA moved outside
## aes() for the p75/p25 layers so the contours stay transparent (a constant
## mapped inside aes() is rendered with the fill scale's na.value instead).
ID <- example.id[10]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p5068 <-
  ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) + ## map
  geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
  geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
  geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
  geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
  geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
  scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
  theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  coord_fixed(xlim = c(-20, 11),
              ylim = c(30,53.5) ,ratio = 1.2) +
  ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
                 gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
#860
ID <- example.id[8]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p860 <-
  ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) + ## map
  geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
  geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
  geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
  geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
  geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
  scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
  theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  coord_fixed(xlim = c(-15, 7),
              ylim = c(37,54) ,ratio = 1.2) +
  ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
                 gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
## Assemble and export the overlap example figure.
## NOTE(review): p483 and p5296 are built above but not included here —
## presumably dropped deliberately; confirm the intended panel set.
ol.page <- plot_grid(p478,p871,p1400,p860,p5027,p5068, nrow = 3)
png("ol_ex.png", width = 20, height = 30, units = "cm", res = 400)
ol.page
dev.off()
## Mean traj computation example ----
## Worked example (bird 5524, autumn) illustrating how the mean trajectory is
## derived from two yearly routes; SFa shows the raw tracks over the 50% UD.
all_traj <- ungroup(all_traj) %>% arrange(id, time)
traj5524 <- filter(all_traj, id == 5524 & direction == "Autumn")
p5524 <- nogap_multi.yr.50p[[74]]  # 50% UD polygons for bird 5524 (list position 74)
p5524 <- spTransform(p5524, CRS("+init=epsg:4326"))
p5524 <- fortify(p5524)
p5524.0 <- filter(p5524, id %in% c("1","2"))  # polygons of bird-year 0
p5524.1 <- filter(p5524, id %in% c("3","4"))  # polygons of bird-year 1
### first need plot with UD + real points
t.0 <- filter(traj5524, birdyear == 0) %>% select(lon, lat)
t.1<- filter(traj5524, birdyear == 1) %>% select(lon, lat)
SFa <- ggplot(map.world, aes(long, lat, group = group)) +
  geom_polygon(fill = "white", col = "black", size = .25) +
  geom_polygon(data = p5524.0, fill = "#0D0887FF",col = "#0D0887FF", size = 1, alpha = .1) +
  geom_polygon(data = p5524.1, fill = "#CC4678FF",col = "#CC4678FF", size = 1, alpha = .1) +
  geom_path(data=t.0, aes(lon, lat, group = NA), col = "#0D0887FF", size = 1) +
  geom_path(data = t.1, aes(lon, lat,group = NA),col = "#CC4678FF", size = 1) +
  geom_point(data = t.0, aes(lon, lat,group = NA), col = "#0D0887FF", size = 2) +
  geom_point(data = t.1, aes(lon, lat,group = NA),col = "#CC4678FF", size = 2) +
  annotate(xmin = -11, xmax = -1, ymin = 37, ymax = 44,
           geom = "rect", alpha = 0, col = "black") +  # inset box shown in later panels
  theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
        panel.grid.major = element_line(colour= "white")) +
  coord_fixed(xlim = c(-15, 14),
              ylim = c(32.5, 53),ratio = 1.2) ## add core areas
##df of single direction trajectories, multiples identified by birdyear
## df includes id, birdyear, lon, lat, direction, date_time
## points within wintering area polygons (i.e. stopovers) were replaced with the polygon centroid to aid in
#spacing points equally along the trajectory
id.dir <- traj5524 %>% select(id, direction) %>% distinct()
mn_traj <- data.frame(matrix(ncol = 5, nrow = 0))
names(mn_traj) <- c("mn.lon", "mn.lat", "within.var", "id", "direction")
a <- 1
traj <- filter(traj5524, id == id.dir[a,]$id & direction == id.dir[a,]$direction)
###This creates a list of equally spaced points (n = equi_n, specified below), with each trajectory having it's own list element.
ntraj <- unique(traj$birdyear) #trajectory id
equi_n <- 100 ### number of points along the trajectory
traj_l <- list()
for(i in 1:length(ntraj)){
t1 <- filter(traj, birdyear == ntraj[i]) %>% select(lon, lat) %>% distinct()
if (length(t1$lon)<=10){ ntraj <- ntraj[-i]
} else {
t1_l <- SpatialLines(list(Lines(list(Line(cbind(t1$lon, t1$lat))), "id")))
t1_eq <- spsample(t1_l, equi_n, type = "regular") ## 250 point equally placed along length of line
traj_l[[i]] <- t1_eq@coords
}
}
names(traj_l) <- ntraj
traj_l_sample <- lapply(traj_l, function(x) {
x <- as.data.frame(x)
x$seq <- 1:equi_n
filter(x, coords.x1 >= -12 & coords.x1 <=0 & coords.x2 >= 36 & coords.x2 <= 45)
})
t0.0 <- data.frame(traj_l_sample[[1]])
t0.1 <- data.frame(traj_l_sample[[2]])
###Create starting 'thread' for mean trajectory between start and end midpoints
start <- cbind(x = mean(sapply(traj_l, '[[',1,1)), y = mean(sapply(traj_l, '[[',1,2))) ##mean of first elements in each list
end <- cbind(x = mean(sapply(traj_l, '[[',equi_n,1)), y = mean(sapply(traj_l, '[[',equi_n,2)))
###Create starting 'thread' from mean trajectory points
tmean <- data.frame(matrix(ncol = 2, nrow = 0))
colnames(tmean) <- c("x","y")
for(i in 1:equi_n){
tmean[i,1] = mean(sapply(traj_l, '[[',i,1))
tmean[i,2] = mean(sapply(traj_l, '[[',i,2))
}
t1.0 <- tmean %>% mutate(seq = 1:equi_n) %>% filter(x > -12 & x < 0 & y > 36 & y < 45)
SFb <- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
geom_path(data=t0.0, aes(coords.x1, coords.x2, group = NA), size = 1, alpha = .5, linetype = '21') +
geom_path(data = t0.1, aes(coords.x1, coords.x2,group = NA), size = 1, alpha = .5, linetype = '21') +
geom_point(data = t0.0, aes(coords.x1, coords.x2,group = NA, fill = seq), size = 2, shape = 21) +
geom_point(data = t0.1, aes(coords.x1, coords.x2,group = NA, fill = seq), size = 2, shape = 21) +
geom_path(data = t1.0, aes(x, y,group = NA), size = 1) +
geom_point(data = t1.0, aes(x, y,group = NA, fill = seq), size = 2, shape = 21) +
geom_point(data = t0.0[t0.0$seq %in% c(50,59,64,81),], aes(coords.x1, coords.x2,group = NA, fill = seq), size = 4, shape = 21) +
geom_point(data = t0.1[t0.1$seq %in% c(50,59,64,81),], aes(coords.x1, coords.x2,group = NA, fill = seq), size = 4, shape = 21) +
geom_point(data = t1.0[t1.0$seq %in% c(50,59,64,81),], aes(x, y,group = NA, fill = seq), size = 4, shape = 21,) +
scale_fill_viridis_c(option = "C") + theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-11, -1),
ylim = c(37, 44),ratio = 1.2)
### nearest neighbour points for 50,59,64,81:
## For four illustrative mean-trajectory points, find the nearest point on
## each resampled trajectory (within +-1 degree latitude band) and record both
## the neighbours and their mean — these are highlighted in panel SFc.
nn.ex <- data.frame()
for(i in c(50,59,64,81)){
pmean <- as.numeric(tmean[i,]) ## select each point
nn <- data.frame()
for(j in seq_along(traj_l)){
x <- traj_l[[j]]
x<- data.frame(x)
x$seq <- 1:equi_n
x <- x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,]
## Fall back to the full trajectory if no point lies in the latitude band.
if (length(x) == 0) { x <- traj_l[[j]]}
dist <- NA
for(k in seq_along(x[,1])){
point2 <- as.numeric(x[k,])
dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
}
nearest_neighbour1 <- x[dist == min(dist),]
nn <- rbind(nn,nearest_neighbour1)
}
names(nn) <- c("x", "y", "seq")
x <- mean(nn[,1])
y <- mean(nn[,2])
seq <- i
nn <- rbind(nn, data.frame(x,y, seq))
nn.ex <- rbind(nn, nn.ex)
}
##Averaging
## One manual iteration of the averaging step (for illustration): each mean
## point is replaced by the mean of its nearest neighbours on every track.
iter_n <- 0
iter_n <- iter_n + 1
for(i in seq_along(tmean$x)){
pmean <- as.numeric(tmean[i,]) ## select each point
nn <- data.frame()
for(j in seq_along(traj_l)){
x <- traj_l[[j]]
x <- matrix(x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,], ncol = 2)
if (length(x) == 0) { x <- traj_l[[j]]}
dist <- NA
for(k in seq_along(x[,1])){
point2 <- as.numeric(x[k,])
dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
}
nearest_neighbour1 <- x[dist == min(dist),]
nn <- rbind(nn,nearest_neighbour1)
}
tmean[i,1] <- mean(nn[,1])
tmean[i,2] <- mean(nn[,2])
}
tmean <- unique(tmean)
tmean <- rbind(start, tmean, end)
t1.1 <- tmean %>% mutate(seq = 1:length(tmean$x)) %>% filter(x > -12 & x < 0 & y > 36 & y < 45)
## Densify the thread: insert a midpoint wherever consecutive mean points are
## more than 25 (units of pt2pt.distance output) apart.
dist <- pt2pt.distance(tmean$y, tmean$x) ## dist in m
dist_log <- dist > 25
dist_log[1] <- FALSE
tmean1 <- data.frame()
for(i in 1:length(dist_log)){
if(dist_log[i]== T){
newpt <- new_point(tmean[i-1,], tmean[i,], di = norm_vec(tmean[i-1,]- tmean[i,])/2) #new point at midpoint
tmean1 <- rbind(tmean1, newpt) ## order matters
}
tmean1 <- rbind(tmean1, tmean[i,])
}
new.pts <- anti_join(tmean1, tmean)
tmean <- tmean1
SFc <- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
geom_path(data=t0.0, aes(coords.x1, coords.x2, group = NA), size = 1, alpha = .5, linetype = '21') +
geom_path(data = t0.1, aes(coords.x1, coords.x2,group = NA), size = 1, alpha = .5, linetype = '21') +
geom_point(data = t0.0, aes(coords.x1, coords.x2,group = NA, fill = seq), size = 2, shape = 21) +
geom_point(data = t0.1, aes(coords.x1, coords.x2,group = NA, fill = seq), size = 2, shape = 21) +
geom_path(data = t1.0, aes(x, y,group = NA), size = 1, alpha = .25) +
geom_point(data = t1.0, aes(x, y,group = NA, fill = seq), size = 2, shape = 21, alpha = .25) +
geom_path(data = t1.1, aes(x, y,group = NA), size = 1) +
geom_point(data = new.pts, aes(x, y,group = NA), fill = '#20A387FF', size = 2, shape = 21) +
geom_point(data = t1.1, aes(x, y,group = NA, fill = seq), size = 2, shape = 21) +
geom_point(data = nn.ex,aes(x, y,group = NA, fill = seq), size = 4, shape = 21) +
scale_fill_viridis_c(option = "C") + theme(legend.position = "none", axis.title = element_blank(),
panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-11, -1),
ylim = c(37, 44),ratio = 1.2)
###
## Full mean-trajectory computation: re-initialise the thread, run the
## nearest-neighbour averaging to convergence (capped at 100 iterations),
## then compute the within-individual variance along the mean trajectory and
## draw panel SFd; finally assemble panels SFa-SFd into FigS3.png.
###Create starting 'thread' for mean trajectory between start and end midpoints
start <- cbind(x = mean(sapply(traj_l, '[[',1,1)), y = mean(sapply(traj_l, '[[',1,2))) ##mean of first elements in each list
end <- cbind(x = mean(sapply(traj_l, '[[',equi_n,1)), y = mean(sapply(traj_l, '[[',equi_n,2)))
#tmean <- data.frame(x = seq(start[,1], end[,1], length.out = 250), y = seq(start[,2], end[,2], length.out = 250))
###
###Create starting 'thread' from mean trajectory points
tmean <- data.frame(matrix(ncol = 2, nrow = 0))
colnames(tmean) <- c("x","y")
for(i in 1:equi_n){
tmean[i,1] = mean(sapply(traj_l, '[[',i,1))
tmean[i,2] = mean(sapply(traj_l, '[[',i,2))
}
###
##Averaging
iter_n <- 0
repeat{
iter_n <- iter_n + 1
for(i in seq_along(tmean$x)){
pmean <- as.numeric(tmean[i,]) ## select each point
nn <- data.frame()
for(j in seq_along(traj_l)){
x <- traj_l[[j]]
x <- matrix(x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,], ncol = 2)
if (length(x) == 0) { x <- traj_l[[j]]}
dist <- NA
for(k in seq_along(x[,1])){
point2 <- as.numeric(x[k,])
dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
}
nearest_neighbour1 <- x[dist == min(dist),]
nn <- rbind(nn,nearest_neighbour1)
}
tmean[i,1] <- mean(nn[,1])
tmean[i,2] <- mean(nn[,2])
}
tmean <- unique(tmean)
tmean <- rbind(start, tmean, end)
dist <- pt2pt.distance(tmean$y, tmean$x) ## dist in m
dist_log <- dist > 25
dist_log[1] <- FALSE
if(iter_n == 100){ ###this will stop repeating after 100 iterations. option 2 is to stop repeating if distinces between tmean are all less than e.g. 25
break
}
tmean1 <- data.frame()
for(i in 1:length(dist_log)){
if(dist_log[i]== T){
newpt <- new_point(tmean[i-1,], tmean[i,], di = norm_vec(tmean[i-1,]- tmean[i,])/2) #new point at midpoint
tmean1 <- rbind(tmean1, newpt) ## order matters
}
tmean1 <- rbind(tmean1, tmean[i,])
}
tmean <- tmean1
}
## Resample the converged thread back to equi_n regular points.
tmean <- SpatialLines(list(Lines(list(Line(cbind(tmean$x, tmean$y))), "id")))
tmean <- spsample(tmean, equi_n, type = "regular") ## 100 point equally placed along length of line
tmean <- tmean@coords
traj_l <- lapply(traj_l, FUN = data.frame)
for(i in 1:length(traj_l)){
names(traj_l[[i]]) <- c("lon", "lat")
}
## Highlighted example points on the final mean trajectory for panel SFd.
ex2 <- c(52,59,65,79)
nn.ex2 <- data.frame()
for(i in ex2){
pmean <- as.numeric(tmean[i,]) ## select each point
nn <- data.frame()
for(j in seq_along(traj_l)){
x <- traj_l[[j]]
x <-x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,]
if (length(x$lon) == 0) { x <- traj_l[[j]]}
dist <- NA
for(k in seq_along(x[,1])){
point2 <- as.numeric(x[k,])
dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
}
nearest_neighbour1 <- x[dist == min(dist),]
nn <- rbind(nn,nearest_neighbour1)
}
names(nn) <- c("x", "y")
nn$mn_seq <- i
nn.ex2 <- rbind(nn, nn.ex2)
}
## For each trajectory, the distance from each mean point to its nearest
## trajectory point becomes one column of tmean; these feed the variance.
nn_dist <- NA
for(l in seq_along(traj_l)){
x <- traj_l[[l]]
for(i in seq_along(tmean[,1])){
point <- tmean[i,]
dist <- NA
for(j in seq_along(x[,1])){
point2 <- x[j,]
dist[j] <- deg.dist(point[1], point[2], point2[1,1], point2[1,2])
}
nn_dist[i] <- min(dist)
}
tmean <- cbind(tmean, nn_dist)
}
tmean <- data.frame(tmean)
tmean$within.var <- rowSums(tmean[,3:ncol(tmean)])/(ncol(tmean)-2-1) ##var = sum of (x-x.mn)/n-1. here n = num traj = (ncol - 2(=tmean coords))
tmean <- tmean[,c(1:2,ncol(tmean))]
names(tmean) <- c("mn.lon", "mn.lat", "within.var")
nn.ex2$wvar <- rep(tmean[rev(ex2),]$within.var, each = length(traj_l))
SFd<- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
geom_path(data=t0.0, aes(coords.x1, coords.x2, group = NA), size = 1, alpha = .5, linetype = '21') +
geom_path(data = t0.1, aes(coords.x1, coords.x2,group = NA), size = 1, alpha = .5, linetype = '21') +
geom_point(data = t0.0, aes(coords.x1, coords.x2,group = NA), size = 2, shape = 21, fill = "black") +
geom_point(data = t0.1, aes(coords.x1, coords.x2,group = NA), size = 2, shape = 21, fill = "black") +
geom_point(data = nn.ex2, aes(x,y,group = NA, fill = wvar), size = 4, shape = 21) +
geom_path(data = tmean, aes(mn.lon, mn.lat, group = NA), size = 1) +
geom_point(data = tmean, aes(mn.lon, mn.lat, group = NA, fill = within.var), size = 2, shape = 21) +
geom_point(data = tmean[ex2,], aes(mn.lon, mn.lat, group = NA, fill = within.var), size = 4, shape = 21) +
scale_fill_viridis_c(name = "Variance \n (km)") + theme(axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white"), legend.position = c(.68,.33)) +
coord_fixed(xlim = c(-11, -1),
ylim = c(37, 44),ratio = 1.2)
png("FigS3.png", width = 20, height = 16, units = "cm", res = 400)
cowplot::plot_grid(SFa,SFb,SFc,SFd, nrow = 2)
dev.off()
|
/LBBG_plasticity_pub.R
|
no_license
|
jbrow247/LBBG_MigrationPlasticity_Pub
|
R
| false
| false
| 97,831
|
r
|
##Load all packages ----
library(fossil)
library(adehabitatHR)
library(maps)
library(grid)
library(gridExtra)
library(lubridate)
library(tidyverse)
library(rgdal)
library(rworldmap)
library(rgeos)
library(RODBC) ##connect to data base
library(sp)
select <- dplyr::select
##other packages used in this script: cowplot, data.table
##Load all user defined functions.----
norm_vec <- function(x) sqrt(sum(x^2))
# Return the point at distance `di` from `p0`, moving along the direction
# from `p0` towards `p1` (the direction vector is normalised to unit length).
new_point <- function(p0, p1, di) {
  v <- p1 - p0
  p0 + v / sqrt(sum(v^2)) * di
}
## A function to calculate the distances between consecutive points ##
## Output in meters (fossil::deg.dist returns km; scaled by 1000). ##
## `lag` is how many fixes back each point is compared against; the first
## `lag` positions have no predecessor and are returned as NA.
## Fixes over the original: preallocated result, no `require()` side effect,
## and guards so length < lag+1 input returns all-NA instead of erroring
## (the old `2:length(latitude)` loop indexed out of range).
pt2pt.distance <- function(latitude, longitude, lag = 1){
  n <- length(latitude)
  stopifnot(n == length(longitude), lag >= 1)
  distance <- rep(NA_real_, n)
  if (n > lag) {
    for (i in (lag + 1):n) {
      distance[i] <- fossil::deg.dist(long1 = longitude[i - lag], lat1 = latitude[i - lag],
                                      long2 = longitude[i], lat2 = latitude[i]) * 1000
    }
  }
  distance
}
## A function to calculate the time increment between consecutive points ##
## Default output in seconds, other options are "auto", "mins", "hours","days", or "weeks" ##
## Returns a numeric vector the same length as `datetime`; the first element
## is NA (no predecessor). Fixes over the original: vectorized difftime over
## consecutive pairs instead of a growing loop, and a guard so length < 2
## input returns all-NA (the old `2:length(datetime)` loop errored on it).
pt2pt.duration <- function(datetime, output.units = "secs"){
  n <- length(datetime)
  if (n < 2) {
    return(rep(NA_real_, n))
  }
  c(NA_real_, as.numeric(difftime(datetime[-1], datetime[-n], units = output.units)))
}
## Elementwise speed between consecutive points: distance divided by duration
## (units follow whatever pt2pt.distance / pt2pt.duration produced).
pt2pt.speed <- function(distance, duration) {
  distance / duration
}
###A function that finds the closest points to a regular (e.g. hourly) time series
## For each bird id in `df`, builds a regular time grid from its first to last
## fix (step = `int` `unit`) and keeps the GPS point closest in time to each
## grid timestamp; per-bird results are combined with data.table::rbindlist.
## Assumes `df` has columns `id` and `time` (POSIXct) — TODO confirm callers.
## NOTE(review): `list(n.id)` creates a 1-element list, not a preallocated
## list of length n.id; assignment by [[i]] grows it, so the result is still
## correct but the preallocation intent is not realised.
trackSubSamp = function(df, int=1,unit='hours')
{
id.i = unique(df$id)
n.id = length(id.i)
df.sub = list(n.id)
timestep = paste(int,unit)
# breakdown to datasets per bird
for (i in 1:n.id)
{
df.i = df[df$id==id.i[i],]
dat.seq <- seq(from = min(df.i$time, na.rm=T), to = max(df.i$time, na.rm=T), by = timestep)
id.sub = sapply(dat.seq, function(x) which.min(abs(difftime(df.i$time, x, units='mins')))) #find gps points minimizing distance to each ts in dat.seq
df.sub[[i]] = unique(df.i[id.sub,])
# the function unique makes sure that the rows in df.i[idx,] are unique - so no duplicate points
}
df.sub <- data.table::rbindlist(df.sub)
}
#
# ##Same as trackSubSamp, but usable in a loop
# trackSubSamp.birdyear.loop = function(id.df,int=1,unit='hours'){
# timestep = paste(int,unit)
# dat.seq <- seq(from = min(id.df$ts, na.rm=T), to = max(id.df$ts, na.rm=T), by = timestep)
# id.sub = sapply(dat.seq, function(x) which.min(abs(difftime(df.i$ts, x, units='secs')))) #find gps points minimizing distance to each ts in dat.seq
#
# df.sub[[i]] = unique(df.i[id.sub,])
# # the function unique makes sure that the rows in Dtemp[idx,] are unique - so no duplicate points
# return(df.sub)
# }
## Project-wide proj4 string used for all metric spatial work.
#laea.proj <- "+proj=laea +lat_0=34.9 +lon_0=-4.16 +x_0=4321000 +y_0=3210000 +towgs84=0,0,0,0,0,0,0 +units=m"
laea.proj <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +towgs84=0,0,0,0,0,0,0 +units=m"
##default project projection is a Lambert azimuthal equal-area projection, centered on the spatial center of the data (midpoint between most extreme values). This projection uses meters and is recommended for statistical analysis.
## Convert a plain data frame of fixes into a projected SpatialPointsDataFrame.
## Expects columns id, time, lon, lat, birdyear; coordinates are read as
## CRS.in (default WGS84 lon/lat) and reprojected to CRS.out (default the
## project LAEA projection in meters).
## NOTE(review): library(sp)/library(rgdal) inside a function is a side
## effect on the search path — consider namespaced calls (sp::, rgdal::)
## instead; left unchanged here to avoid altering attach behavior.
mkSpatial = function(df, CRS.in = "+init=epsg:4326",
CRS.out = laea.proj) {
library(sp)
library(rgdal)
## dummy constant column, used elsewhere as an "all points" grouping factor
df$all = rep('all',nrow(df))
df = SpatialPointsDataFrame(coords = cbind(df$lon,df$lat), data = df[,c('id','all','time', 'lon', 'lat', 'birdyear')],
proj4string = CRS(CRS.in)) # the data is in WSG84 lat-lon
df = spTransform(df, CRS(CRS.out))
}
## Build a raster grid (adehabitatHR::ascgen) covering the extent of the
## supplied coordinates plus `buffer`, at cell size `resolution`, in the
## given projection. Used as the estimation grid for kernelUD.
mkgrid <- function(lat=NULL, lon = NULL, resolution=10000, buffer = 100000,
projection = laea.proj ){
#lat is a vector of latitudes
#lon is a vector of longitudes
#resolution is the cell size of the raster.
#1 = ~ 133X165 km
#.1 = ~ 13.9 x 17.1 km
#.01 ~ 1.39 x 1.72 km .... in WSG84
#buffer adds space beyond the points in the grid. a buffer of 1 should equal ~ 111 km
#projection: projection of the data.
##projection = laea, measured in m
#resolution 1 000 = 1 km2...
xmax <- max(lon)
xmin <- min(lon)
ymax <- max(lat)
ymin <- min(lat)
## two corner points (min-buffer, max+buffer) define the grid extent
extent<- matrix(c(xmin - buffer, xmax + buffer, ymin - buffer, ymax + buffer), ncol=2)
extent <- SpatialPoints(coords = extent, proj4string = CRS(projection)) # the data is in WSG84 lat-lon
rast<- ascgen(extent, cellsize=resolution)
}
# ## Cleaning ----
## Load the cleaned tracking data, define bird-years (starting year day 152),
## drop problem individuals/years, compute colony arrival/departure dates and
## use them to delimit each non-breeding bird-year.
gull <- readRDS("multitrack_gull_clean.RDS") ##running the above results in different daily.by??
gull <- gull %>% group_by(device_info_serial) %>%
mutate(birdyear = year(date_time-days(152)) - year(min(date_time)), ## a bird year starts on year day 152 (usually jun1, except leap years),
id_birdyear = paste(device_info_serial, birdyear, sep = ".")) %>%
filter(birdyear >=0 & date_time < ymd(paste0(year(max(date_time)),'-06-01'))) %>% #remove points in last year following jun 1 that don't contain migration/winter points
ungroup()
gull <- select(gull, id = device_info_serial, birdyear, id_birdyear, time = date_time, lat = latitude, lon = longitude, alt = altitude,
start_date, start_lat = start_latitude, start_lon = start_longitude, mass, sex, dur, dist, speed.pts,
col = key_name) %>% mutate(year = year(time))
original.n <- c(length(unique(gull$id_birdyear)), length(unique(gull$id)))
gull <- filter(gull, !(id == 757)) ## changed colonies!
gull <- filter(gull, !(id == 4023)) ## Doesn't migrate!
gull <- filter(gull, !(id_birdyear %in% c("325.1", "497.1","540.2"))) ## no arrival to colony (gps didn't finish downloading )
gull$col <- factor(gull$col)
gull.meta <- gull %>% select(id, start_date, start_lat, start_lon, mass, sex, col) %>% distinct()
gull <- gull %>% select(-mass, -sex, -col)
##Calculate distance to colony
gull <- gull %>% mutate(d2_col = deg.dist(lon, lat, start_lon, start_lat))
#Find first (=arrival time) and last (=departure time) visit to colony in a bird year
arrival.date <- gull %>% filter(year != year(start_date) & #no arrival in first year
d2_col <= 10) %>% ##select points within the 10k buffer of colony
group_by(id, year) %>%
summarise(col.arrival = min(time)) %>% ungroup() ##Find the first point within 10k of colony
departure.date <- gull %>% group_by(id) %>% mutate(end.year = max(year)) %>%
ungroup() %>%
filter(year != end.year &
d2_col <= 10) %>% ##select points within the 10k buffer of colony
group_by(id, year) %>%
summarise(col.depart = max(time)) %>% ungroup() ##Find the first point within 10k of colony
##join with gull to re-specify birdyear
arrival.date <- gull.meta %>% select(id, start_date) %>% full_join(arrival.date) %>%
mutate(birdyear = year(col.arrival) - year(start_date) -1) %>% ## -1 because bird year of arrival corresponds to previous years departure
select(id, col.arrival, birdyear)
departure.date <- gull.meta %>% select(id, start_date) %>% full_join(departure.date) %>%
mutate(birdyear = year(col.depart) - year(start_date)) %>% select(id, col.depart, birdyear)
## Bird-year boundaries: midpoint between the previous arrival and the next
## departure; first year starts at deployment, last year ends at final arrival.
mig.date <- full_join(arrival.date, departure.date) %>% group_by(id) %>%
arrange(birdyear) %>%
mutate(year.start = (col.depart - lag(col.arrival))/2 + lag(col.arrival)) %>%
left_join(gull.meta) %>%
mutate(year.start = as.POSIXct(ifelse(birdyear == 0, start_date, year.start),
origin = ymd("1970-01-01"))) %>%
select(-col, -start_date, - start_lat, -start_lon) %>%
mutate(year.end = lead(year.start)) %>%
mutate(year.end = as.POSIXct(ifelse(birdyear == max(birdyear), col.arrival, year.end),
origin = ymd("1970-01-01"))) %>% ungroup() %>%
filter(!is.na(year.start) & !is.na(year.end)) ##remove birds with missing departures/arrivals (e.g. year long gaps, no return data, etc. )
gull <- gull %>% select(-start_lon, -start_lat, -birdyear, -id_birdyear, -start_date)
gull <- gull %>% right_join(mig.date) %>%
filter(time <= year.end & time >= year.start) %>%
mutate(id_birdyear = paste(id, birdyear, sep = ".")) %>%
select(-year.start, -year.end)
## Drop ids with fewer than two bird-years (need repeat years for the analysis).
nopair <- names(table((gull %>% select(id, birdyear) %>% distinct())$id))[which(table((gull %>% select(id, birdyear) %>% distinct())$id)<2)]
gull <- filter(gull, ! id %in% nopair)
nonbreed <- gull %>%
filter((time >= floor_date(col.depart, unit = "days")) &
(time <= ceiling_date(col.arrival, unit = "days"))) %>% ##use rounded start and end dates to ensure arrival points are included in data. This is important for identifying gaps at the end of the year
select(-col.depart, -col.arrival)
gull <- select(gull, -col.arrival, -col.depart)
## NOTE(review): gull is recomputed on the previous line and then removed
## here — the select() result is never used; confirm this is intentional.
rm(gull, mig.date, nopair)
## gull.glm, birdyear summary info----
tmp <- full_join(departure.date, arrival.date)
gull.glm <- left_join(gull.meta, tmp) %>% rename(start = start_date)
rm(gull.meta)
## KDEs, daily ----
## Subsample to ~2 fixes/day, build per-individual/year utilisation
## distributions (kernelUD, 50% core areas), then determine the data-gap
## threshold (21 days) and drop bird-years with larger gaps.
#subsample data
nonbreed.d <- as.data.frame(trackSubSamp(nonbreed,12)) ## 2 points per day
## point2point
nonbreed.d <- nonbreed.d %>% group_by(id_birdyear) %>%
mutate(dur = pt2pt.duration(time), dist = pt2pt.distance(lat, lon), speed.pts = pt2pt.speed(dist, dur)) %>%
ungroup()
##daily id
##Find birdyears with 1 pt per day
daily.by <- nonbreed %>% group_by(id, birdyear) %>%
summarise(max.gap = max(dur, na.rm=T)/3600) %>%
filter(max.gap <= 24) ##131 birdyears with no gaps!!
id.v <- unique(daily.by$id)
#Conver data to spatial points df
snonbreed.d <- mkSpatial(nonbreed.d)
#create grid
snonbreed.pt <- as.data.frame(snonbreed.d@coords)
ud.grid <- mkgrid(lat=snonbreed.pt$coords.x2,
lon = snonbreed.pt$coords.x1,
resolution = 10000,
buffer = 450000,
projection = laea.proj)
# get map
map <- getMap("coarse")
laea.map <- spTransform(map, CRS(laea.proj))
laea.map <- fortify(laea.map)
map <- fortify(map)
##KDEs for ids with daily points
##calculate utilization distribution for each individual/year
multi.yr <- lapply(1:length(id.v), function(i) {
id.d <- filter(nonbreed.d, id == id.v[i]) ## get all points for one id
sid <- mkSpatial(id.d) ##turn into spatial df
ud <- kernelUD(sid['birdyear'], h = 100000, grid = ud.grid)}) ##calculate UD per birdyear (sid)
## get 50% UD
multi.yr.50 <- lapply(1:length(id.v), function(i){
name <- paste("ver50", id.v[i], sep = ".")
assign(name, getverticeshr(multi.yr[[i]], 50))
})
#Split 50% core area into seperate polygons
multi.yr.50p <- lapply(1:length(multi.yr.50), function(i) {
p <- disaggregate(multi.yr.50[[i]])
p@data$birdyear <- p@data$id #clarify variable name
p@data$id <- rownames(p@data) #create id variable of rownames so data can be merged with fortified polygon
p
})
## Gap: determin threshold and remove ----
##Filter points outside of any core area
#or breeding colony-
#this is so when a bird moves outside of the core area polygon
#(e.g. long central placed foraging trip),
#and returns to the same polygon, it does not count as a new visit to this core area.
nonbreed.core <- data.frame()
for (i in 1:length(daily.by$id)) {
id.n <- daily.by[i,]$id
by <- daily.by[i,]$birdyear
pts <- nonbreed.d[nonbreed.d$id_birdyear %in% paste(id.n, by, sep = "."),]
pol <- multi.yr.50p[[which(id.n == id.v)]]
s.pts <- mkSpatial(pts)
pol.y <- pol[pol@data$birdyear == by,]
overlap <- s.pts[pol.y,] ##id and year matched
overlap@data$poly <- over(overlap, pol.y)$id #which polygon it overlapped with
r.le <- rle(overlap@data$poly) #number of points in each polygon per each temporal visit
overlap@data$p.visit <- rep(seq(1:length(r.le$lengths)), r.le$lengths) #each temporally distinct visit to a polygon has a unique id (e.g. if moved from poly 1, to poly 2, then back to poly 1, the corresponding p.visits will be 1, 2, and 3 )
nonbreed.core <- rbind(nonbreed.core, overlap@data) ##overlap points from one id
}
## Select first and last point in each new polygon
core.ee <- nonbreed.core %>%
group_by(id, birdyear, poly, p.visit) %>%
mutate(ee = ifelse(time == min(time), "en", ifelse (time == max(time), "ex", NA))) %>% ## a few p.visits only have 1 point in their last p visit (considered entrance). This helps filter the right points to calculate gap length
filter(time == min(time) | time == max(time)) %>%
ungroup()
##Select birdyears with 1 pt per day
poly.dur <- core.ee %>% select(id, time, ee, poly, p.visit, birdyear) %>% spread(ee, time) %>%
mutate(dur = difftime(ex,en, units = "days")) %>% group_by(id, birdyear, poly) %>%
summarise(total_dur = as.numeric(sum(dur, na.rm = T)))
poly.dur %>% arrange(total_dur) ##shortest duration = 21 days (one of zero because migrated through fall poly in spring)
#ids with gap > 21 days
id.w.gap <- filter(nonbreed.d, dur >= 3600*24*21) %>% select(id_birdyear) %>%
distinct() %>% pull(id_birdyear)
## remove ids with gap, and ids that were then left without a pair
nonbreed.nogap.d <- filter(nonbreed.d, !(id_birdyear %in% id.w.gap))
pair.miss <- nonbreed.nogap.d %>% select(id, birdyear) %>% distinct() %>% group_by(id) %>% summarise(n=n()) %>% filter(n==1) %>% pull(id) #an additional 20 rows lost
nonbreed.nogap.d <- filter(nonbreed.nogap.d, ! (id %in% pair.miss)) ## 84 birds remain, with 237 years
id.v <- unique(nonbreed.nogap.d$id)
nonbreed.nogap <- filter(nonbreed, !(id_birdyear %in% id.w.gap))
pair.miss <- nonbreed.nogap %>% select(id, birdyear) %>% distinct() %>% group_by(id) %>% summarise(n=n()) %>% filter(n==1) %>% pull(id) #an additional 20 rows lost
nonbreed.nogap <- filter(nonbreed.nogap, ! (id %in% pair.miss)) ## 84 birds remain, with 237 years
saveRDS(nonbreed, "nonbreed.RDS")
rm(nonbreed, nonbreed.d)
## KDEs - no b.y. with gap ----
## Recompute UDs and 50% core-area polygons on the gap-free dataset only.
## create new KDE list
nogap_multi.yr<- lapply(1:length(id.v), function(i) {
id.h <- filter(nonbreed.nogap.d, id == id.v[i])
sid <- mkSpatial(id.h)
ud <- kernelUD(sid['birdyear'], h = 100000, grid = ud.grid)})
names(nogap_multi.yr) <- id.v
nogap_multi.yr.50 <- lapply(1:length(id.v), function(i){
name <- paste("ver50", id.v[i], sep = ".")
assign(name, getverticeshr(nogap_multi.yr[[i]], 50))
}) ##core areas
nogap_multi.yr.50p <- lapply(1:length(nogap_multi.yr.50), function(i) {
p <- disaggregate(nogap_multi.yr.50[[i]])
p@data$birdyear <- p@data$id #clarify variable name
p@data$id <- rownames(p@data) #create id variable of rownames so data can be merged with fortified polygon
p
}) #Split 50% core area into seperate polygons
##nonbreeding Overlap----
##BA overlap of 95% KDE
## Bhattacharyya's affinity between each individual's yearly 95% UDs;
## within-year (diagonal) overlaps are blanked before range/mean summaries.
UD_overlap <- lapply(1:length(nogap_multi.yr), function(i){
kerneloverlaphr(nogap_multi.yr[[i]], meth = "BA", percent = 95, conditional = T)
})
UD_overlap_range <- data.frame(matrix(ncol = 2, nrow = length(UD_overlap)))
for(i in 1:length(UD_overlap)){
overlap <- UD_overlap[[i]]
for(j in 1:length(overlap[1,])){ ###remove overlap within same years
overlap[j,j] <- NA
}
range <- range(overlap, na.rm = T)
UD_overlap_range[i,] <- range
}
colnames(UD_overlap_range) <- c("min_overlap", "max_overlap")
UD_overlap_range$id <- names(nogap_multi.yr) ##order in loop
## max is 0.95 (overlap with)
UD_overlap_mn <- data.frame(matrix(ncol = 1, nrow = length(UD_overlap)))
for(i in 1:length(UD_overlap)){
overlap <- UD_overlap[[i]]
for(j in 1:length(overlap[1,])){ ###remove overlap within same years
overlap[j,j] <- NA
}
mean.overlap <- mean(overlap, na.rm = T)
UD_overlap_mn[i,] <- mean.overlap
}
colnames(UD_overlap_mn) <- c("mn_overlap")
UD_overlap_mn$id <- names(nogap_multi.yr) ##order in loop
## Gull.glm, add overlap ----
UD_overlap_range <- left_join(UD_overlap_range, UD_overlap_mn)
UD_overlap_range$id <- as.numeric(UD_overlap_range$id)
gull.glm <- left_join(gull.glm, UD_overlap_range)
## Seperate Core Areas ----
## convert polygons into a dataframe for plotting with ggplot
nogap_multi.yr.50p <- lapply(nogap_multi.yr.50p,
function(x) spTransform(x, CRS("+init=epsg:4326")))
nogap_multi.yr.50df <- lapply(1:length(nogap_multi.yr.50p), function(i) {
df <- fortify(nogap_multi.yr.50p[[i]]) ##convert to dataframe
merge(df, nogap_multi.yr.50p[[i]]@data, by = "id") #add original data
}) ## convert polygons into a dataframe for plotting with ggplot, each id is one element of list.
nogap_multi.yr.50df <- mapply(cbind, nogap_multi.yr.50df, "bird_id"=id.v, SIMPLIFY=F)
##Fragmented winter polygons
## Visual checks of specific individuals whose winter core area was split
## into several polygons in one year but contiguous in others.
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 395)]], birdyear == 0), aes(col = birdyear, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
coord_fixed(xlim = c(-21, 17.8),
ylim = c(10, 58.1))
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 5296)]], birdyear == 0), aes(col = id, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
coord_fixed(xlim = c(-21, 17.8),
ylim = c(10, 58.1))
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 5296)]], birdyear == 1), aes(col = id, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
coord_fixed(xlim = c(-21, 17.8),
ylim = c(10, 58.1))
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 5335)]], birdyear == 1), aes(col = id, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
coord_fixed(xlim = c(-21, 17.8),
ylim = c(10, 58.1))
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 5535)]], birdyear == 1), aes(col = id, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
coord_fixed(xlim = c(-21, 17.8),
ylim = c(10, 58.1))
# The following have fragmented polygons in 1 year that are joined in others. These years need to be merged:
#
# 395.0: p1 + p2
# 5296.0: p1 + p2 + p3
# 5296.1: p5 + p6
# 5335.1: p3 + p4
# 5535.1: p3 + p4
##fragmented WA poly
## Manual merges decided from the plots above: relabel fragment ids so each
## fragmented winter area counts as a single polygon.
nogap_multi.yr.50df[[which(id.v == 395)]][nogap_multi.yr.50df[[which(id.v == 395)]]$id == 2,]$id <- 1
nogap_multi.yr.50df[[which(id.v == 5296)]][nogap_multi.yr.50df[[which(id.v == 5296)]]$id %in% c(2,3),]$id <- 1
nogap_multi.yr.50df[[which(id.v == 5296)]][nogap_multi.yr.50df[[which(id.v == 5296)]]$id == 6,]$id <- 5
nogap_multi.yr.50df[[which(id.v == 5335)]][nogap_multi.yr.50df[[which(id.v == 5335)]]$id == 4,]$id <- 3
nogap_multi.yr.50df[[which(id.v == 5535)]][nogap_multi.yr.50df[[which(id.v == 5535)]]$id == 4,]$id <- 3
###Create 1 dataframe for all polygons
poly.df <- lapply(1:length(id.v), function(i){
df <- nogap_multi.yr.50df[[i]]
df <- df %>% select( polygon = id,id = bird_id, birdyear, lat, lon = long, order)
})
poly.df <- data.table::rbindlist(poly.df)
poly.df$uniq.p <- paste(poly.df$id, poly.df$birdyear, poly.df$polygon, sep = ".")
##create a list of polygons per core area (instead of a list per year)
poly.p <- lapply(split(poly.df[, c("lat","lon")], poly.df[, "uniq.p"]), Polygon)
##Find centroid of each core area
centroid <- lapply(1:length(poly.p), function(i){
lat <- poly.p[[i]]@labpt[1]
lon <- poly.p[[i]]@labpt[2]
uniq.p <- split(poly.df[, 7], poly.df[, 7])[[i]][1]
df <- data.frame(lat, lon, uniq.p)
df
})
centroid <- data.table::rbindlist(centroid)
tmp <- poly.df %>% select(-lat, -lon, -order) %>% distinct()
centroid <- left_join(centroid, tmp)
#centroid$birdyear <- as.numeric(centroid$birdyear) - 1 #birdyear was renumber starting at 1 instead of 0, so change back to join
centroid <- gull.glm %>% select(id, start_lat, start_lon) %>% distinct() %>% right_join(centroid)
## Define Winter area ----
## distance between colony and polygon centroid
centroid <- mutate(centroid, mig.dist = deg.dist(lon, lat, start_lon, start_lat))
#Calculate enter and exit times per polygon
##select only points in the polygons
gc()
remove(poly.pts)
poly.p <- poly.p[centroid$uniq.p] ##orders poly.p
poly.pts <- data.frame()
for (i in 1:length(centroid$uniq.p)) { ##find points within each core area
ID <- centroid[i,]$id
by <- centroid[i,]$birdyear
p <- centroid[i,]$uniq.p
pts <- nonbreed.nogap[nonbreed.nogap$id == ID & nonbreed.nogap$birdyear == by,] ## select points for the right id and bird year
pts <- select(pts, id, time, lat, lon, birdyear, dur)
pol <- poly.p[[p]] ## select the core area polygon
overlap.log <- point.in.polygon(pts$lon, pts$lat, pol@coords[,2], pol@coords[,1]) ## Find points overlaping the polygon
overlap.log <- overlap.log == 1 ##1 = overlaping
overlap <- pts[overlap.log,] ##overlapping points from the furthest polygon in i individual birdyear
overlap$poly <- p
poly.pts <- rbind(poly.pts, overlap) ####list of all points in polygons.
}
poly.pts <- ungroup(poly.pts)
poly.pts <- poly.pts %>% group_by(id) %>% arrange(time, .by_group = TRUE) %>% ungroup() #order points by time
poly.pts$p.index <- rep(1:length(rle(poly.pts$poly)$lengths), rle(poly.pts$poly)$lengths)
#Find first and last point in a polygon
en.ex <- poly.pts %>% group_by(id, birdyear, poly, p.index) %>% summarise(enter = min(time), exit = max(time))
en.ex$time.in.poly <- difftime(en.ex$exit, en.ex$enter, units = "days") ## Find duration in polygon
#
# Selection of wintering area:
# duration during the winter
## Clamp each bout's enter/exit to the winter window (Dec 1 = yday 335 to Mar 1 = yday 90);
## bouts entirely outside the window collapse to zero duration. Days before Jun 1
## (yday < 152) are shifted by +365 so the winter spans one continuous axis, then the
## polygon with the longest winter occupancy is kept per bird-year.
winter.time.poly <- en.ex %>% mutate(enter = if_else(yday(enter) > 335 | yday(enter) < 90, yday(enter),
                                                     if_else(yday(enter) > 152, 335, 90)),
                                     exit = if_else(yday(exit) > 335 | yday(exit) < 90, yday(exit),
                                                    if_else(yday(exit) > 152, 335, 90))) %>%
  mutate(enter = ifelse(enter < 152, enter+365, enter), exit = ifelse(exit < 152, exit +365, exit), wtime = exit-enter) %>% group_by(id, birdyear)%>% filter(wtime == max(wtime))
winter.poly.v <- winter.time.poly$poly
##89 days is the maximum possible duration (number of days from Dec 1 - Mar 1)
## furthest polygon.
far_poly <- centroid %>% #in km
  group_by(id, birdyear) %>% filter(mig.dist == max(mig.dist)) %>%
  ungroup() %>% select(id, birdyear, polygon, mig.dist, uniq.p)
far.poly.v <- far_poly$uniq.p
#was the primary used between December - March in all but 2 cases (from same individual).
#table(far.poly.v %in% winter.poly.v)
## Based on maps, the results of time during winter make more sense - individual spent autumn on coast of portugal (furthest poly), moving to central spain for the winter months.
## Select centroid from polygons in winter.poly.v, to use as migration distance
## Migration distance ----
winter_poly <- centroid %>% group_by(id, birdyear) %>% filter(uniq.p %in% winter.poly.v) %>%
  ungroup() %>% select(id, birdyear, polygon, mig.dist, uniq.p)
##Winter arrival & Departure ----
## Select polygon of wintering area
winter.poly.p<- poly.p[winter_poly$uniq.p] ##In most cases, two polygons in a similar region, so no change in migration distance (but see 4032.1).
##select only points in wintering area polygon in each year
remove(winter.poly.pts) ## NOTE(review): warns (not errors) if the object does not exist yet — intentional reset for interactive reruns
winter.poly.pts <- data.frame()
nonbreed.nogap.d <- ungroup(nonbreed.nogap.d)
## Same point-in-polygon assignment as above, but restricted to each bird-year's
## chosen wintering-area polygon (daily-subsampled fixes).
for (i in 1:length(winter_poly$id)) {
ID <- winter_poly[i,]$id
by <- winter_poly[i,]$birdyear
pts <- nonbreed.nogap.d[nonbreed.nogap.d$id == ID & nonbreed.nogap.d$birdyear == by,]
pol <- winter.poly.p[[i]]
overlap.log <- point.in.polygon(pts$lon, pts$lat, pol@coords[,2], pol@coords[,1])
overlap.log <- overlap.log == 1
overlap <- pts[overlap.log,] ##overlapping points from the furthest polygon in i individual birdyear
winter.poly.pts <- rbind(winter.poly.pts, overlap)
}
## Select first and last point in each winter area (times at which a polygon was either entered or exited)
winter.poly.ee <- winter.poly.pts %>%
  group_by(id, birdyear) %>%
  mutate(ee = ifelse(time == min(time), "en", ifelse (time == max(time), "ex", NA))) %>%
  filter(!is.na(ee)) %>%
  ungroup()
##winter departure = last point in winter area
ex <- filter(winter.poly.ee, ee == "ex") %>% select(id, birdyear, winter.depart = time)
##Winter arrival = first point in winter area
en <- filter(winter.poly.ee, ee == "en") %>% select(id, birdyear, winter.arrive = time)
winter_poly$birdyear <- as.numeric(as.character(winter_poly$birdyear))
winter_poly<- left_join(winter_poly, ex) %>% left_join(en)
tmp <- winter_poly %>% select(id, birdyear, mig.dist, winter.arrive, winter.depart)
## Gull.glm, add mig.dist & winter time ----
gull.glm <- left_join(gull.glm, tmp)
###more obs in gull.glm than in winter_poly because data with gaps can still be used in analysis of arrival and departure from colony (if mig.dist not included as a fixed factor) ##
## remove arrivals and departures that occur during gap ----
## Gap = > 2 days between consecutive fixes; the four filter() calls below are
## interactive checks printing any phenology dates that fall inside a gap.
gap <- nonbreed.nogap %>% group_by(id, birdyear) %>% mutate(start.gap = lag(time)) %>% ungroup() %>%
  filter(dur > 60*60*24*2) %>% select(id, birdyear, start.gap, end.gap = time)
gap <- left_join(gap, select(gull.glm, id, birdyear, winter.arrive, winter.depart, col.depart, col.arrival))
filter(gap, winter.arrive >= start.gap & winter.arrive <= end.gap)
filter(gap, winter.depart >= start.gap & winter.depart <= end.gap)
filter(gap, col.depart >= start.gap & col.depart <= end.gap)
filter(gap, col.arrival >= start.gap & col.arrival <= end.gap)
##5215 by 0 col.depart, 5554 by 0 winter.arrive, 459 by 1 winter arrive
#removal done in .rmd
## Trajectory df ----
#For trajectories: we want all 'travel points' between colony and wintering area (autumn and spring).
#Core areas enroute will be replaced with their polygon centroid so that the trajectory is smoothed through these areas.
id.v <- unique(nonbreed.nogap.d$id)
###want to use original polygons (not combined WA), because joins are otherwise wonky
nogap_multi.yr.50df.noagg <- lapply(1:length(nogap_multi.yr.50p), function(i) {
  df <- fortify(nogap_multi.yr.50p[[i]]) ##convert to dataframe
  merge(df, nogap_multi.yr.50p[[i]]@data, by = "id") #add original data
}) ## convert polygons into a dataframe for plotting with ggplot, each id is one element of list.
nogap_multi.yr.50df.noagg <- mapply(cbind, nogap_multi.yr.50df.noagg, "bird_id"=id.v, SIMPLIFY=F)
###Create 1 dataframe for all polygons
poly.df.noagg <- lapply(1:length(id.v), function(i){
  df <- nogap_multi.yr.50df.noagg[[i]]
  df <- df %>% select( polygon = id,id = bird_id, birdyear, lat, lon = long, order)
})
poly.df.noagg <- data.table::rbindlist(poly.df.noagg)
poly.df.noagg$uniq.p <- paste(poly.df.noagg$id, poly.df.noagg$birdyear, poly.df.noagg$polygon, sep = ".")
## One sp::Polygon per unique polygon id; centroid taken from the @labpt slot.
poly.p.noagg <- lapply(split(poly.df.noagg[, c("lat","lon")], poly.df.noagg[, "uniq.p"]), Polygon)
centroid.noagg <- lapply(1:length(poly.p.noagg), function(i){
  lat <- poly.p.noagg[[i]]@labpt[1]
  lon <- poly.p.noagg[[i]]@labpt[2]
  uniq.p <- split(poly.df.noagg[, 7], poly.df.noagg[, 7])[[i]][1] # column 7 = uniq.p; relies on split() ordering matching poly.p.noagg
  df <- data.frame(lat, lon, uniq.p)
  df
})
centroid.noagg <- data.table::rbindlist(centroid.noagg)
tmp <- poly.df.noagg %>% select(-lat, -lon, -order) %>% distinct()
centroid.noagg <- left_join(centroid.noagg, tmp)
centroid.noagg$birdyear <- as.numeric(centroid.noagg$birdyear) #- 1 #birdyear was re-numbered starting at 1 instead of 0, so change back to join
##select only points in the polygons
nonbreed.nogap <- ungroup(nonbreed.nogap) ### use non subsampled for duration calculation.
gc()
remove(poly.pts.noagg)
poly.p.noagg <- poly.p.noagg[centroid.noagg$uniq.p] ##orders poly.p.noagg to match centroid.noagg rows, so pol can be indexed by i below
poly.pts.noagg<- data.frame()
## Same core-area point assignment as the aggregated version above, but on the
## un-aggregated polygons (hourly fixes).
for (i in 1:length(centroid.noagg$uniq.p)) {
ID <- centroid.noagg[i,]$id
by <- centroid.noagg[i,]$birdyear
p <- centroid.noagg[i,]$uniq.p
pts <- nonbreed.nogap[nonbreed.nogap$id == ID & nonbreed.nogap$birdyear == by,]
pts <- select(pts, id, time, lat, lon, birdyear, dur)
pol <- poly.p.noagg[[i]]
overlap.log <- point.in.polygon(pts$lon, pts$lat, pol@coords[,2], pol@coords[,1])
overlap.log <- overlap.log == 1
overlap <- pts[overlap.log,] ##overlapping points from the furthest polygon in i individual birdyear
overlap$poly <- p
poly.pts.noagg <- rbind(poly.pts.noagg, overlap) ####list of all points in polygons.
}
poly.pts.noagg <- ungroup(poly.pts.noagg)
poly.pts.noagg <- poly.pts.noagg %>% group_by(id) %>% arrange(time, .by_group = TRUE) %>% ungroup()
poly.pts.noagg$p.index <- rep(1:length(rle(poly.pts.noagg$poly)$lengths), rle(poly.pts.noagg$poly)$lengths)
#find first and last point in core area
en.ex.noagg <- poly.pts.noagg %>% group_by(id, birdyear, poly, p.index) %>% summarise(enter = min(time), exit = max(time))
en.ex.noagg$time.in.poly <- difftime(en.ex.noagg$exit, en.ex.noagg$enter, units = "days")
##Replace points during time bird was in core area, replace with centroid.
#this stops distance from accumulating during central point foraging, as well as cleans up the actual migratory track.
gull.glm$id_birdyear <- paste(gull.glm$id, gull.glm$birdyear, sep = ".")
smooth_pts <- filter(nonbreed.nogap, id_birdyear %in% gull.glm$id_birdyear) %>% ##hourly
  full_join(en.ex.noagg) %>%
  filter(time > enter & time < exit) %>%
  select(id, birdyear, time, poly)
tmp <- centroid.noagg %>% select(c.lat = lat, c.lon = lon, uniq.p)
nonbreed.UDcentroid <- left_join(nonbreed.nogap, smooth_pts) %>% ##DF of points, core areas replaced by centroid
  left_join(tmp, by = c("poly" = "uniq.p")) %>%
  mutate(lat = if_else(is.na(poly), lat, c.lat),
         lon = if_else(is.na(poly), lon, c.lon))
##Find point between colony exit and winter entrance (autumn), and winter exit and colony entrance (spring)
all_traj <- nonbreed.UDcentroid %>% left_join(gull.glm) %>%
  mutate(direction = if_else(time >= col.depart & time <= winter.arrive, "Autumn",
                             if_else(time >= winter.depart & time <= col.arrival, "Spring", "NA"))) %>%
  filter(!direction=="NA") %>% arrange(time) %>%
  select(id, lon, lat, birdyear, id_birdyear, direction, time, poly) %>%
  distinct()
##Only use trajectories with 1 point per day during travel periods (outside of core areas)
all_traj <- all_traj %>% group_by(id_birdyear, direction) %>%
  mutate(dur = as.numeric(as.character(difftime(time, lag(time), units = "secs")))) %>% ungroup()
id.w.gap <- all_traj %>% filter(is.na(poly) & dur >= 3600*24) %>% select(id_birdyear, direction) %>% #points outside polygons have no polygon name (NA)
  distinct() %>% mutate(gap = T) #if there is a dur > 24 h, label this traj with gap = T
all_traj <- all_traj %>% left_join(id.w.gap) %>% filter(is.na(gap)) %>% select(-gap) #remove points from traj where gap = T
pair.miss <- all_traj %>% select(id, birdyear,direction) %>% distinct() %>% ## Find traj left with no pair
  group_by(id, direction) %>% summarise(n=n()) %>% filter(n==1) %>%
  select(id, direction) %>% mutate(gap = T) #an additional 18 rows lost
all_traj <- all_traj %>% left_join(pair.miss) %>% filter(is.na(gap)) %>% select(-gap) # Remove traj with no pair
saveRDS(all_traj, "all_traj.RDS")
## N core areas & core area overlap ----
##core areas with no overlap
### find winter areas with no overlap
##Fragmented winter area p
## Manual merges: relabel fragmented winter-area polygons so fragments share one
## polygon id before the overlap test (bird ids 395, 5296, 5335, 5535).
nogap_multi.yr.50p[[which(id.v == 395)]]@data[2,]$id <- "1"
nogap_multi.yr.50p[[which(id.v == 5296)]]@data[c(2,3),]$id <- "1"
nogap_multi.yr.50p[[which(id.v == 5296)]]@data[6,]$id <- "5"
nogap_multi.yr.50p[[which(id.v == 5335)]]@data[4,]$id <- "3"
nogap_multi.yr.50p[[which(id.v == 5535)]]@data[4,]$id <- "3"
## For each bird, test (gIntersection) whether each winter-area polygon overlaps any
## winter-area polygon from another year; birds with at least one non-overlapping
## winter area are collected in no.wa.overlap.
no.wa.overlap <- vector()
for(i in 1:length(nogap_multi.yr.50p)){
  id.i <- id.v[i] #vector of bird ids
  sp <- nogap_multi.yr.50p[[i]] ## spatial polygon list of all core areas for a bird id
  p <- nogap_multi.yr.50p[[i]]@data ### all core areas across years
  p$poly <- paste(rep(id.i, times = length(p$id)) , p$birdyear, p$id, sep = ".")
  wa.id <- filter(winter.time.poly, id == id.i) %>% pull(poly) ## list of wa polys for that id
  for(j in 1:length(wa.id)){ ##for each wa polygon
    p1 <- sp[which(p$poly == wa.id[j]),]
    other.wa <- wa.id[which(wa.id != wa.id[j])]
    no.overlap.v <- logical(length(other.wa))
    for(k in 1:length(other.wa)){ ## see if it overlaps with other wa polygons
      p2 <-sp[which(p$poly == other.wa[k]),]
      int <- gIntersection(p1,p2)
      no.overlap.v[k] <- is.null(int) ## NULL intersection = no overlap
      ## if there is 1 T, then wa in 1 year doesn't overlap with wa in another year
    }
    if(!all(!no.overlap.v)){ ## if there is 1 or more True values (i.e. a wa that doesn't overlap with a wa in another year)
      no.wa.overlap <- rbind(no.wa.overlap, id.i)
    }
  }
}
no.wa.overlap <- unique(no.wa.overlap)
###534 spent year 3 in uk only
## Interactive visual check of one bird's first-year polygons.
ggplot(map, aes(long, lat, group = group)) + geom_polygon(fill = "white", col = "black") +
  geom_polygon(data = filter(nogap_multi.yr.50df[[which(id.v == 606)]], birdyear == 1), aes(col = id, fill = id, group = id), alpha = .3) + ##birdyear = year, id = polygon number
  coord_fixed(xlim = c(-21, 17.8),
              ylim = c(10, 58.1))
## Stopover (non-winter-area) polygons: flag each stopover that overlaps NO polygon
## from any other year of the same bird.
no.so.overlap <- data.frame()
for (i in 1:length(nogap_multi.yr.50p)){
  id.i <- id.v[i] #vector of bird ids
  sp <- nogap_multi.yr.50p[[i]] ## spatial polygon list of all core areas for a bird id
  p <- nogap_multi.yr.50p[[i]]@data ### all core areas across years
  p$poly <- paste(rep(id.i, times = length(p$id)) , p$birdyear, p$id, sep = ".")
  wa.id <- filter(winter.time.poly, id == id.i) %>% pull(poly) ## list of wa polys for that id
  so.poly <- p[which(!(p$poly %in% wa.id)),] ## stopovers = core areas that are not winter areas
  if(length(so.poly$id)==0) next ## if no stopovers, skip to next id
  for(j in 1:length(so.poly$id)){
    p.id <- so.poly[j,1]
    by <- so.poly[j,3]
    ## list all polygons in different years
    id.pair <- as.numeric(filter(p, birdyear != by) %>% pull(id))
    no.overlap.v <- logical(length(id.pair))
    p1<-sp[sp$id == p.id,]
    for(k in 1:length(id.pair)){
      p2<-sp[sp$id == id.pair[k],]
      int <- gIntersection(p1,p2)
      no.overlap.v[k] <- is.null(int)
    }
    if(all(no.overlap.v)){
      x<-data.frame(id.i, by, p.id)
      no.so.overlap <- rbind(no.so.overlap, x) ##core area with no overlap in any year.
    }
  }
}
no.overlap <- select(no.so.overlap, id=id.i, birdyear=by) %>% distinct() %>% mutate(p.no.overlap = T)
## how many polygons per birdyear?
remove(n.winter.area) ## NOTE(review): warns (not errors) if not present — interactive reset
n.winter.area <- data.frame()
for (i in 1:length(nogap_multi.yr.50p)){
  year <- unique(nogap_multi.yr.50p[[i]]@data$birdyear) #vector of year ids
  id <- id.v[i] #vector of bird ids
  p <- nogap_multi.yr.50p[[i]]@data
  p <- select(p, id, birdyear) %>% distinct()
  n <- as.numeric(rle(as.numeric(p$birdyear))$lengths) # count of polygons per consecutive birdyear run
  df <- data.frame(id, year, n)
  n.winter.area <- rbind(n.winter.area, df)
}
#
n.winter.area$year <- as.numeric(as.character(n.winter.area$year))
names(n.winter.area) <- c("id","year","n.winter.p")
## Gull.glm, add n core areas ----
gull.glm <- left_join(gull.glm, n.winter.area, by = c("id", "birdyear" = "year"))
no.overlap <- no.overlap %>% mutate(birdyear = as.numeric(as.character(birdyear)))
gull.glm <- left_join(gull.glm, no.overlap)
gull.glm$change.wa <- ifelse(gull.glm$id %in% no.wa.overlap[,1], T, F) # TRUE = bird switched winter area between years
## tracking summary ----
#migration duration
mig.dur <- nonbreed.nogap.d %>% group_by(id, birdyear) %>%
  summarise(min = min(time), max = max(time), mig.dur = as.numeric(difftime(max, min, units = "days")))
#days with fix by birdyear
days.w.fix <- nonbreed.nogap.d %>% mutate(yday = yday(time)) %>% select(id, birdyear, yday) %>% distinct() %>%
  group_by(id, birdyear) %>% summarise(days.with.fix = n())
days.w.fix <- days.w.fix %>% left_join(mig.dur) %>% mutate(p.days.w.fix = days.with.fix/mig.dur) %>% ungroup()
## Interactive summary printouts (proportion of tracked days with a fix).
days.w.fix %>% summarise(mean = mean(p.days.w.fix), min = min(p.days.w.fix),
                         max = max(p.days.w.fix), median = median(p.days.w.fix))
days.w.fix <- nonbreed.nogap.d %>% group_by(id, birdyear) %>%
  summarise(max.gap = max(as.numeric(difftime(time, lag(time), units = "days")), na.rm = T)) %>% left_join(days.w.fix) %>%
  select(id, birdyear, mig.dur, p.days.w.fix, max.gap) %>% ungroup()
days.w.fix %>% summarise(mean = mean(max.gap), min = min(max.gap),
                         max = max(max.gap), median = median(max.gap))
## WA site fidelity ----
library(raster)
nonbreed <- readRDS("nonbreed.RDS")
nonbreed <- ungroup(nonbreed)
#
## Collect all fixes inside each bird-year's winter residence window
## (winter.arrive .. winter.depart).
wa.pts <- data.frame()
for (i in 1:length(gull.glm$id)) {
s <- gull.glm[i,]$winter.arrive
e <- gull.glm[i,]$winter.depart
id.i <- gull.glm[i,]$id
by.i <- gull.glm[i,]$birdyear
wa <- filter(nonbreed,id == id.i& birdyear == by.i& time >= s & time <= e)
wa.pts <- rbind(wa.pts, wa)
}
rm(nonbreed) # free memory: the full nonbreed dataset is no longer needed
med.step <- quantile(wa.pts$dist, c(.5)) #50% = 43m, 90% = 1984m, 95% = 4602m
## wa points per id and by
id.v <- unique(wa.pts$id_birdyear)
## mkSpatial is a project helper defined elsewhere — presumably projects points to LAEA; confirm upstream.
wa.laea.l <- lapply(id.v, function(x){
  mkSpatial(wa.pts[wa.pts$id_birdyear == x,])
})
## all winter points of 1 individual
idu.v <- unique(wa.pts$id)
wau.laea.l <- lapply(idu.v, function(idu.v) mkSpatial(wa.pts[wa.pts$id == idu.v,]))
#make ltraj (one ltraj per bird, bursts = birdyear)
wau.traj.l <- lapply(wau.laea.l, function(wa.laea){
  gc()
  adehabitatLT::as.ltraj(as.data.frame(wa.laea@coords), date = wa.laea@data$time,
                         id = wa.laea@data$birdyear,
                         slsp = "missing", proj4string = CRS(proj4string(wa.laea)))})
saveRDS(wau.laea.l, "wau.laea.l.RDS")
saveRDS(wau.traj.l, "wau.traj.l.RDS")
save.image("ln831.RData")
rm(list = ls()) # deliberate workflow checkpoint: clear memory, then reload only what is needed
laea.proj <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +towgs84=0,0,0,0,0,0,0 +units=m"
mkgrid <- function(lat=NULL, lon = NULL, resolution=10000, buffer = 100000,
projection = laea.proj ){
#lat is a vector of latitudes
#lon is a vector of longitudes
#resolution is the cell size of the raster.
#1 = ~ 133X165 km
#.1 = ~ 13.9 x 17.1 km
#.01 ~ 1.39 x 1.72 km .... in WSG84
#buffer adds space beyond the points in the grid. a buffer of 1 should equal ~ 111 km
#projection: projection of the data.
##projection = laea, measured in m
#resolution 1 000 = 1 km2...
xmax <- max(lon)
xmin <- min(lon)
ymax <- max(lat)
ymin <- min(lat)
extent<- matrix(c(xmin - buffer, xmax + buffer, ymin - buffer, ymax + buffer), ncol=2)
extent <- SpatialPoints(coords = extent, proj4string = CRS(projection)) # the data is in WSG84 lat-lon
rast<- ascgen(extent, cellsize=resolution)
}
##make grid
## Batch 1: birds 1-41. Biased Random Bridge (BRB) UDs per bird-year, then
## Bhattacharyya's affinity (BA) overlap between years, conditional on the 95% UD.
brbu.grid <- lapply(wau.laea.l[1:41], function(wa.grid) {
  gc()
  mkgrid(lat = wa.grid@coords[,2], lon = wa.grid@coords[,1],
         resolution = 500, buffer = 2500)
})
## diffusion coef
Du.l <- lapply(wau.traj.l[1:41], function(traj) BRB.D(traj, Tmax = 3*3600, Lmin= 20)) ## one bird has a point every 3 hours
wa_overlap.1 <- vector(mode = "list", length = 41)
## Bird 19 is skipped here (memory limit) and handled manually further below,
## leaving a NULL slot in wa_overlap.1.
for(i in 1:18){
  brb.i <- BRB(wau.traj.l[[i]],Du.l[[i]], filtershort = F, Tmax = 3*3600, Lmin= 20, hmin =150, type = "UD", grid = brbu.grid[[i]])
  gc()
  wa_overlap.1[[i]] <- kerneloverlaphr(brb.i, meth = "BA", percent = 95, conditional = T)
}
for(i in 20:41){
  brb.i <- BRB(wau.traj.l[[i]],Du.l[[i]], filtershort = F, Tmax = 3*3600, Lmin= 20, hmin =150, type = "UD", grid = brbu.grid[[i]])
  gc()
  wa_overlap.1[[i]] <- kerneloverlaphr(brb.i, meth = "BA", percent = 95, conditional = T)
}
saveRDS(wa_overlap.1, "wa_overlap.1.RDS")
##individual brbs
##make grid
## Batch 2: birds 42-82. Note the index offset below: wau.traj.l[[41+i]] pairs with
## Du.l[[i]] / brbu.grid[[i]] because Du.l and brbu.grid were recomputed over 42:82.
brbu.grid <- lapply(wau.laea.l[42:82], function(wa.grid) {
  gc()
  mkgrid(lat = wa.grid@coords[,2], lon = wa.grid@coords[,1],
         resolution = 500, buffer = 2500)
})
## diffusion coef
Du.l <- lapply(wau.traj.l[42:82], function(traj) BRB.D(traj, Tmax = 3*3600, Lmin= 20)) ## one bird has a point every 3 hours
wa_overlap.2 <- vector(mode = "list", length = 41)
for(i in 1:41){
  brb.i <- BRB(wau.traj.l[[41+i]],Du.l[[i]], filtershort = F, Tmax = 3*3600, Lmin= 20, hmin =150, type = "UD", grid = brbu.grid[[i]])
  gc()
  wa_overlap.2[[i]] <- kerneloverlaphr(brb.i, meth = "BA", percent = 95, conditional = T)
}
##save environment (ln861_2506.RData)
saveRDS(wa_overlap.2, "wa_overlap.2.RDS")
rm(list = ls()) # deliberate workflow checkpoint before the memory-heavy bird-19 run
laea.proj <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +towgs84=0,0,0,0,0,0,0 +units=m"
mkgrid <- function(lat=NULL, lon = NULL, resolution=10000, buffer = 100000,
projection = laea.proj ){
#lat is a vector of latitudes
#lon is a vector of longitudes
#resolution is the cell size of the raster.
#1 = ~ 133X165 km
#.1 = ~ 13.9 x 17.1 km
#.01 ~ 1.39 x 1.72 km .... in WSG84
#buffer adds space beyond the points in the grid. a buffer of 1 should equal ~ 111 km
#projection: projection of the data.
##projection = laea, measured in m
#resolution 1 000 = 1 km2...
xmax <- max(lon)
xmin <- min(lon)
ymax <- max(lat)
ymin <- min(lat)
extent<- matrix(c(xmin - buffer, xmax + buffer, ymin - buffer, ymax + buffer), ncol=2)
extent <- SpatialPoints(coords = extent, proj4string = CRS(projection)) # the data is in WSG84 lat-lon
rast<- ascgen(extent, cellsize=resolution)
}
##skipped 19 b/c takes too much memory!!!
brbu.grid <- mkgrid(lat = wau.laea.l[[19]]@coords[,2], lon = wau.laea.l[[19]]@coords[,1],
                    resolution = 500, buffer = 500)
Du.l <- BRB.D(wau.traj.l[[19]], Tmax = 3*3600, Lmin= 20) ## one bird has a point every 3 hours
gc()
x <- BRB(wau.traj.l[[19]],Du.l, filtershort = F, Tmax = 3*3600, Lmin= 20, hmin =150, type = "UD", grid = brbu.grid)
gc()
##null slot in overlap.1 b/c missing 19
##If memory limit, open kerneloverlaphr function and run through loop manually.
#wa_overlap.19 <- kerneloverlaphr(x, meth = "BA", percent = 95, conditional = T)
## Manual unrolling of adehabitatHR::kerneloverlaphr(meth = "BA", conditional = TRUE)
## for bird 19 only: restrict each UD to its 95% volume contour, then compute
## Bhattacharyya's affinity sum(sqrt(UDi * UDj)) * cellarea for every year pair.
percent <- 95 ## BUGFIX: `percent` was used below but never defined; the commented-out
              ## kerneloverlaphr() call it replaces used percent = 95.
vol <- getvolumeUD(x)
## Sort both the UDs and their volume rasters into identical cell order so the
## elementwise products below line up cell-for-cell.
x <- lapply(x, function(y) {
  coo <- coordinates(y)
  y[order(coo[, 1], coo[, 2]), ]
})
vol <- lapply(vol, function(y) {
  coo <- coordinates(y)
  y[order(coo[, 1], coo[, 2]), ]
})
gp <- gridparameters(vol[[1]]) # gp[1, 2] = cell size; cell area = gp[1, 2]^2
res <- matrix(0, ncol = length(x), nrow = length(x))
for (i in 1:length(x)) {
  for (j in 1:i) {
    vi <- x[[i]][[1]]
    vj <- x[[j]][[1]]
    ai <- vol[[i]][[1]]
    aj <- vol[[j]][[1]]
    ## 0/1 mask: keep only cells inside the `percent` volume contour (conditional = T).
    ai[ai <= percent] <- 1
    ai[ai > percent] <- 0
    aj[aj <= percent] <- 1
    aj[aj > percent] <- 0
    vi <- vi * ai
    vj <- vj * aj
    ## Bhattacharyya's affinity; symmetric, so fill both halves of the matrix.
    res[j, i] <- res[i, j] <- sum(sqrt(vi) * sqrt(vj)) *
      (gp[1, 2]^2)
  }
}
rownames(res) <- names(x)
colnames(res) <- names(x)
wa_overlap.19 <- res
load("ln831.RData")
wa_overlap <- readRDS("wa_overlap_3035.RDS")
sf_range <- data.frame(matrix(ncol = 4, nrow = length(idu.v)))
for(i in 1:length(wa_overlap)){
wa_overlap.i <- wa_overlap[[i]]
##get range
for(j in 1:length(wa_overlap.i[1,])){ ###remove overlap within same years
wa_overlap.i[j,j] <- NA
}
range <- range(wa_overlap.i, na.rm = T)
sf_range[i,c(1,2)] <- range
sf_range[i,3] <- mean(wa_overlap.i, na.rm = T)
sf_range[i,4] <- idu.v[i]
}
colnames(sf_range) <- c("sf_min_overlap", "sf_max_overlap", "sf_mn_overlap", "id")
saveRDS(sf_range, "wa_overlap_range_3035.RDS")
gull.glm <- left_join(gull.glm, sf_range)
## Gull.glm export, add days with fix ----
gull.glm <- left_join(gull.glm, days.w.fix) %>% arrange(mig.dist)
saveRDS(gull.glm, "gull.glm.RDS")
save.image("ln964.RData")
## Overlap of random pairs ----
##### polygon overlap for randomization tests
#####Find pairings
start <- nonbreed.nogap.d %>% left_join(gull.glm) %>%
select(id, id_birdyear, birdyear, slat = start_lat, slon = start_lon) %>% distinct()
end <- centroid %>% group_by(id, birdyear) %>% filter(uniq.p %in% winter.poly.v) %>%
ungroup() %>% select(id, birdyear, elat = lat, elon =lon) %>%
mutate(id_birdyear = paste(id, birdyear, sep = "."), birdyear = as.numeric(as.character(birdyear)))
##find distance between each start point, if id != id. grouped within direction.
focal <- full_join(start, end)
pair <- focal %>% rename(p.id = id, p.slon = slon, p.slat = slat, p.birdyear = birdyear,
p.id_birdyear = id_birdyear, p.elon = elon, p.elat = elat)
rand.pair.ol <- expand.grid(1:length(focal$id), 1:length(pair$p.id))
r.pair.ol <- data.frame()
for (i in seq_along(rand.pair.ol$Var1)){
x <- cbind(focal[rand.pair.ol[i,1],], pair[rand.pair.ol[i,2],])
if(x$id == x$p.id) {
next
}
x$s.dist <- deg.dist(x$slon, x$slat, x$p.slon, x$p.slat)
x$e.dist <- deg.dist(x$elon, x$elat, x$p.elon, x$p.elat)
if(x$s.dist > 250 | x$e.dist > 250){ ##start and end need to be within 250k
next
}
x <- select(x, id, birdyear, id_birdyear, p.id, p.birdyear, p.id_birdyear)
r.pair.ol <- rbind(r.pair.ol, x)
}
## Between individual UD pairs
r.pair.ol <- r.pair.ol[!duplicated(t(apply(r.pair.ol, 1, sort))), ]
r.pair.ol.norep <- r.pair.ol %>% select(-id_birdyear, -p.id_birdyear) %>%
group_by(id, p.id) %>% ##for each unique id pair & direction
sample_n(1) %>% ##randomly select 1 birdyear route per id
ungroup() %>% mutate(id_birdyear = paste(id, birdyear, sep = "."),
p.id_birdyear = paste(p.id, p.birdyear, sep = "."))
###calculate between id overlaps
#readRDS("nogap_multi.yr.RDS")
by.poly <- unlist(nogap_multi.yr)
by.poly.v <- names(by.poly)
rand.UD_overlap <- lapply(1:length(r.pair.ol.norep$id), function(i){
f.poly <- by.poly[[which(by.poly.v == r.pair.ol.norep[i,]$id_birdyear)]]
p.poly <- by.poly[[which(by.poly.v == r.pair.ol.norep[i,]$p.id_birdyear)]]
ol <- list(f.poly, p.poly)
names(ol) <- c(r.pair.ol.norep[i,]$id_birdyear,r.pair.ol.norep[i,]$p.id_birdyear)
class(ol) <- "estUDm"
kerneloverlaphr(ol, meth = "BA", percent = 95, conditional = T)
})
saveRDS(rand.UD_overlap,"rand_overlap.RDS")
saveRDS(r.pair.ol.norep, "r.pair.ol.RDS")
save.image("ln1021.RData")
## Migration route variation ----
### trajectory averaging
##based on freeman et al.
##df of single direction trajectories, multiples identified by birdyear
## df includes id, birdyear, lon, lat, direction, date_time
## points within wintering area polygons (i.e. stopovers) were replaced with the polygon centroid to aid in
#spacing points equally along the trajectory
all_traj <- ungroup(all_traj) %>% arrange(id, time)
n_check <- all_traj %>% select(-time, -dur) %>% distinct() %>% arrange(id_birdyear)
##traj needs to be greater than 1 point
## Drop Spring trajectories with <= 1 distinct coordinate (cannot form a path).
table(table((n_check %>% filter(direction == "Spring") %>% select(id_birdyear, direction, lon, lat) %>% distinct())$id_birdyear)<=1)
tmp <- n_check %>% filter(direction == "Spring") %>% select(id_birdyear, direction, lon, lat) %>% distinct()
nopath <- rle(tmp$id_birdyear)$values[which(rle(tmp$id_birdyear)$lengths <=1)]
all_traj <- filter(all_traj, !(id_birdyear %in% nopath & direction == "Spring"))
## Same check/removal for Autumn trajectories.
table(table((n_check %>% filter(direction == "Autumn") %>% select(id_birdyear, direction, lon, lat) %>% distinct())$id_birdyear)<=1)
tmp <- n_check %>% filter(direction == "Autumn") %>% select(id_birdyear, direction, lon, lat) %>% distinct()
nopath <- rle(tmp$id_birdyear)$values[which(rle(tmp$id_birdyear)$lengths <=1)]
all_traj <- filter(all_traj, !(id_birdyear %in% nopath & direction == "Autumn"))
## the three removed all still had multiple traj
## Averaging needs >= 2 years per bird+direction; drop any now-unpaired trajectories.
pair.miss <- all_traj %>% select(id, id_birdyear, direction) %>% distinct()
pair.miss <- pair.miss %>% group_by(id, direction) %>% summarise(n.years = n()) %>% filter(n.years <=1)
all_traj <- all_traj %>% left_join(pair.miss) %>% filter(is.na(n.years)) %>% select(-n.years)
id.dir <- all_traj %>% select(id, direction) %>% distinct() ##all traj
id.dir <- filter(id.dir, ! id %in% c(1402, 606, 5027, 5593, 534)) ## 534 - wa poly doesn't overlap, but explored that area
## Iterative trajectory averaging (after Freeman et al.): for each bird x direction,
## resample every yearly route to `equi_n` equally spaced points, seed a mean route,
## then repeatedly pull each mean point toward the nearest neighbours on every route
## (max 100 iterations), inserting midpoints where consecutive mean points are > 25 km
## apart. Within-individual route variation = mean nearest-neighbour distance.
mn_traj <- data.frame(matrix(ncol = 6, nrow = 0))
names(mn_traj) <- c("mn.lon", "mn.lat", "within.var", "id", "direction", "n.years")
for(a in 1:length(id.dir$id)){
  # for(a in a:length(id.dir$id)){
  traj <- filter(all_traj, id == id.dir[a,]$id & direction == id.dir[a,]$direction)
  ###
  ###This creates a list of equally spaced points (currently n=11, specified in t1_eq), with each trajectory having it's own list element.
  ntraj <- unique(traj$birdyear) #trajectory id
  equi_n <- 500 ### number of points along the trajectory
  traj_l <- list()
  name <- c()
  for(i in 1:length(ntraj)){
    t1 <- filter(traj, birdyear == ntraj[i]) %>%
      select(lon, lat) %>% distinct()
    if (length(t1$lon)<=10){ next } # skip routes with too few distinct points to resample
    t1_l <- SpatialLines(list(Lines(list(Line(cbind(t1$lon, t1$lat))), "id")))
    t1_eq <- spsample(t1_l, equi_n, type = "regular") ## equi_n points equally placed along length of line
    traj_l[[i]] <- t1_eq@coords
    name <- c(name, ntraj[[i]])
  }
  if(length(name) <= 1 | is.null(name)) {next} # need >= 2 usable yearly routes to average
  if(length(name) != length(traj_l)) { traj_l <- traj_l[-which(sapply(traj_l, is.null))]} # drop NULL slots from skipped years
  names(traj_l) <- name
  ###
  ###Create starting 'thread' for mean trajectory between start and end midpoints
  start <- cbind(x = mean(sapply(traj_l, '[[',1,1)), y = mean(sapply(traj_l, '[[',1,2))) ##mean of first elements in each list
  end <- cbind(x = mean(sapply(traj_l, '[[',equi_n,1)), y = mean(sapply(traj_l, '[[',equi_n,2)))
  ###
  ###Create starting 'thread' from mean trajectory points
  tmean <- data.frame(matrix(ncol = 2, nrow = 0))
  colnames(tmean) <- c("x","y")
  for(i in 1:equi_n){
    tmean[i,1] = mean(sapply(traj_l, '[[',i,1))
    tmean[i,2] = mean(sapply(traj_l, '[[',i,2))
  }
  ###
  ##Averaging
  iter_n <- 0
  repeat{
    iter_n <- iter_n + 1
    for(i in seq_along(tmean$x)){
      pmean <- as.numeric(tmean[i,]) ## select each point
      nn <- data.frame()
      for(j in seq_along(traj_l)){
        x <- traj_l[[j]]
        x <- matrix(x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,], ncol = 2) # restrict search to +/- 1 deg latitude band around the mean point
        if (length(x) == 0) { x <- traj_l[[j]]} # fall back to the whole route if the band is empty
        dist <- NA
        for(k in seq_along(x[,1])){
          point2 <- as.numeric(x[k,])
          dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
        }
        nearest_neighbour1 <- x[dist == min(dist),]
        nn <- rbind(nn,nearest_neighbour1)
      }
      tmean[i,1] <- mean(nn[,1]) # move the mean point to the centroid of its nearest neighbours
      tmean[i,2] <- mean(nn[,2])
    }
    tmean <- unique(tmean)
    tmean <- rbind(start, tmean, end) # re-pin the fixed start/end midpoints each iteration
    dist <- pt2pt.distance(tmean$y, tmean$x) ## dist in m
    dist_log <- dist > 25000 #25 km
    dist_log[1] <- FALSE
    if(iter_n == 100){ ###this will stop repeating after 100 iterations. option 2 is to stop repeating if distances between tmean are all less than e.g. 25
      break
    }
    ## Densify: insert a midpoint wherever consecutive mean points are > 25 km apart.
    tmean1 <- data.frame()
    for(i in 1:length(dist_log)){
      if(dist_log[i]== T){
        newpt <- new_point(tmean[i-1,], tmean[i,], di = norm_vec(tmean[i-1,]- tmean[i,])/2) #new point at midpoint
        tmean1 <- rbind(tmean1, newpt) ## order matters
      }
      tmean1 <- rbind(tmean1, tmean[i,])
    }
    tmean <- tmean1
  }
  ## Resample the converged mean route back to equi_n equally spaced points.
  tmean <- SpatialLines(list(Lines(list(Line(cbind(tmean$x, tmean$y))), "id")))
  tmean <- spsample(tmean, equi_n, type = "regular") ## equi_n points equally placed along length of line
  tmean <- tmean@coords
  traj_l <- lapply(traj_l, FUN = data.frame)
  for(i in 1:length(traj_l)){
    names(traj_l[[i]]) <- c("lon", "lat")
  }
  ## Nearest-neighbour distance (km) from each mean point to every yearly route;
  ## one nn_dist column appended per route.
  nn_dist <- NA
  for(l in seq_along(traj_l)){
    x <- traj_l[[l]]
    for(i in seq_along(tmean[,1])){
      point <- tmean[i,]
      dist <- NA
      for(j in seq_along(x[,1])){
        point2 <- x[j,]
        dist[j] <- deg.dist(point[1], point[2], point2[1,1], point2[1,2])
      }
      nn_dist[i] <- min(dist)
    }
    tmean <- cbind(tmean, nn_dist)
  }
  tmean <- data.frame(tmean)
  tmean$within.var <- rowSums(tmean[,3:ncol(tmean)])/(ncol(tmean)-2-1) ##var = sum of (x-x.mn)/n-1. here n = num traj = (ncol - 2(=tmean coords))
  tmean <- tmean[,c(1:2,ncol(tmean))]
  names(tmean) <- c("mn.lon", "mn.lat", "within.var")
  tmean$id <- id.dir[[a, "id"]]
  tmean$direction <- id.dir[[a, "direction"]]
  tmean$n.years <- length(name)
  mn_traj <- rbind(mn_traj, tmean)
}
saveRDS(mn_traj, "mn_traj_limitlat.RDS")
## Route variation of random pairs ----
#####Pairing of individuals with close start and end points
## First/last fix of every trajectory = start/end anchors for pairing.
start <- all_traj %>% group_by(id, id_birdyear, direction) %>% filter(row_number()==1) %>%
  ungroup() %>% arrange(direction, id, id_birdyear) %>%
  rename(slon = lon, slat = lat) %>% select(-time, -poly, -dur)
end <- all_traj %>% group_by(id, id_birdyear, direction) %>% filter(row_number()== n()) %>%
  ungroup() %>% arrange(direction, id, id_birdyear)%>% rename(elon = lon, elat = lat) %>%
  select(-time, -poly, -dur)
## Exclude trajectories with <= 10 fixes from all three tables.
x <- all_traj %>% group_by(id, id_birdyear, direction) %>% summarise(n= n()) %>% filter(n <= 10)
all_traj <- left_join(all_traj, x) %>% filter(is.na(n)) %>% select(-n)
start <-left_join(start, x) %>% filter(is.na(n)) %>% select(-n)
end <-left_join(end, x) %>% filter(is.na(n)) %>% select(-n)
##find distance between each start point, if id != id. grouped within direction.
focal <- full_join(start, end) #start and end points for each bird year
pair <- focal %>% rename(p.id = id, p.slon = slon, p.slat = slat, p.birdyear = birdyear, #copy of focal, renamed with pair
                         p.id_birdyear = id_birdyear, p.direction = direction,
                         p.elon = elon, p.elat = elat)
rand.pair <- expand.grid(1:length(focal$id), 1:length(pair$p.id)) ## create df size of every possible id_birdyear pair
## Keep only different-individual, same-direction pairs whose start AND end points
## are within 250 km of each other.
r.pair <- data.frame()
for (i in seq_along(rand.pair$Var1)){
  x <- cbind(focal[rand.pair[i,1],], pair[rand.pair[i,2],])
  if(x$id == x$p.id | x$direction != x$p.direction) {
    next
  }
  x$s.dist <- deg.dist(x$slon, x$slat, x$p.slon, x$p.slat)
  x$e.dist <- deg.dist(x$elon, x$elat, x$p.elon, x$p.elat)
  if(x$s.dist > 250 | x$e.dist > 250){ next }
  x <- select(x, id, birdyear, p.id, p.birdyear, direction)
  r.pair<- rbind(r.pair,x)
}
r.pair <- r.pair[!duplicated(t(apply(r.pair, 1, sort))),] # drop mirror-image duplicates (A-B vs B-A)
###for each id pair, only one bird year should be used
set.seed(15) # fixed seed so the random pair selection is reproducible
r.pair.norep <- r.pair %>% group_by(id, p.id, direction) %>% ##for each unique id pair & direction
  sample_n(1) %>% ##randomly select 1 birdyear route per id
  ungroup()
rm(r.pair)
rand_traj <- data.frame(matrix(ncol = 8, nrow = 0))
names(rand_traj) <- c("mn.lon", "mn.lat", "within.var", "id","birdyear", "p.id", "p.birdyear", "direction")
for(a in seq_along(r.pair.norep$id)){
# for(a in a:length(id.dir$id)){
traj <- filter(all_traj, id == r.pair.norep[a,]$id & birdyear == r.pair.norep[a,]$birdyear &
direction == r.pair.norep[a,]$direction)
p.traj <- filter(all_traj, id == r.pair.norep[a,]$p.id & birdyear == r.pair.norep[a,]$p.birdyear & direction == r.pair.norep[a,]$direction)
if(length(traj$id) == 0 | length(p.traj$id) == 0) {next}
###
###This creates a list of equally spaced points (currently n=11, specified in t1_eq), with each trajectory having it's own list element.
equi_n <- 500 ### number of points along the trajectory
traj_l <- list()
t1 <- traj %>% select(lon, lat) %>% distinct()
t1_l <- SpatialLines(list(Lines(list(Line(cbind(t1$lon, t1$lat))), "id")))
t1_eq <- spsample(t1_l, equi_n, type = "regular") ## 250 point equally placed along length of line
traj_l[[1]] <- t1_eq@coords
t1 <- p.traj %>% select(lon, lat) %>% distinct()
t1_l <- SpatialLines(list(Lines(list(Line(cbind(t1$lon, t1$lat))), "id")))
t1_eq <- spsample(t1_l, equi_n, type = "regular") ## 250 point equally placed along length of line
traj_l[[2]] <- t1_eq@coords
names(traj_l) <- c(1,2)
###
###Create starting 'thread' for mean trajectory between start and end midpoints
start <- cbind(x = mean(sapply(traj_l, '[[',1,1)), y = mean(sapply(traj_l, '[[',1,2))) ##mean of first elements in each list
end <- cbind(x = mean(sapply(traj_l, '[[',equi_n,1)), y = mean(sapply(traj_l, '[[',equi_n,2)))
###
###Create starting 'thread' from mean trajectory points
tmean <- data.frame(matrix(ncol = 2, nrow = length(1:equi_n)))
colnames(tmean) <- c("x","y")
for(i in 1:equi_n){
tmean[i,1] = mean(sapply(traj_l, '[[',i,1))
tmean[i,2] = mean(sapply(traj_l, '[[',i,2))
}
###
##Averaging
iter_n <- 0
repeat{
iter_n <- iter_n + 1
for(i in seq_along(tmean$x)){
pmean <- as.numeric(tmean[i,]) ## select each point
nn <- data.frame()
for(j in seq_along(traj_l)){
x <- traj_l[[j]]
x <- matrix(x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,], ncol = 2)
if (length(x) == 0 ) { x <- traj_l[[j]]}
dist <- NA
for(k in seq_along(x[,1])){
point2 <- as.numeric(x[k,])
dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
}
nearest_neighbour1 <- x[dist == min(dist),]
nn <- rbind(nn, nearest_neighbour1)
}
tmean[i,1] <- mean(nn[,1])
tmean[i,2] <- mean(nn[,2])
}
tmean <- unique(tmean)
tmean <- rbind(start, tmean, end)
dist <- pt2pt.distance(tmean$y, tmean$x) ## dist in m
dist_log <- dist > 25000
dist_log[1] <- FALSE
if(iter_n == 100){ ###this will stop repeating after 100 iterations. option 2 is to stop repeating if distinces between tmean are all less than e.g. 25
break
}
tmean1 <- data.frame()
for(i in 1:length(dist_log)){
if(dist_log[i]== T){
newpt <- new_point(tmean[i-1,], tmean[i,], di = norm_vec(tmean[i-1,]- tmean[i,])/2) #new point at midpoint
tmean1 <- rbind(tmean1, newpt) ## order matters
}
tmean1 <- rbind(tmean1, tmean[i,])
}
tmean <- tmean1
}
tmean <- SpatialLines(list(Lines(list(Line(cbind(tmean$x, tmean$y))), "id")))
tmean <- spsample(tmean, equi_n, type = "regular") ## 100 point equally placed along length of line
tmean <- tmean@coords
traj_l <- lapply(traj_l, FUN = data.frame)
for(i in 1:length(traj_l)){
names(traj_l[[i]]) <- c("lon", "lat")
}
nn_dist <- NA
for(l in seq_along(traj_l)){
x <- traj_l[[l]]
for(i in seq_along(tmean[,1])){
point <- tmean[i,]
dist <- NA
for(j in seq_along(x[,1])){
point2 <- x[j,]
dist[j] <- deg.dist(point[1], point[2], point2[1,1], point2[1,2])
}
nn_dist[i] <- min(dist)
}
tmean <- cbind(tmean, nn_dist)
}
tmean <- data.frame(tmean)
tmean$within.var <- rowSums(tmean[,3:ncol(tmean)])/(ncol(tmean)-2-1) ##var = sum of (x-x.mn)/n-1. here n = num traj = (ncol - 2(=tmean coords))
tmean <- tmean[,c(1:2,ncol(tmean))]
names(tmean) <- c("mn.lon", "mn.lat", "within.var")
tmean$id <- r.pair.norep[[a, "id"]]
tmean$birdyear <- r.pair.norep[[a, "birdyear"]]
tmean$p.id <- r.pair.norep[[a, "p.id"]]
tmean$p.birdyear <- r.pair.norep[[a, "p.birdyear"]]
tmean$direction <- r.pair.norep[[a, "direction"]]
rand_traj <- rbind(rand_traj,tmean)
}
saveRDS(rand_traj, "rand_traj_limlat.RDS")
#find variance
rand_traj_sum <- rand_traj %>% group_by(id, birdyear, p.id, p.birdyear, direction) %>%
summarise(mn_var = mean(within.var)) %>% ungroup()
rm(rand_traj)
rand_traj_sum$pair <- "Between"
saveRDS(rand_traj_sum, "rand_traj_sum.RDS") ##Randomization test carried out in results_d4
saveRDS(r.pair.norep, "r_pair_traj.RDS")
##Route var examples ----
## Example maps of within-individual route variation: each panel overlays all
## yearly trajectories of one bird with its mean trajectory, mean-trajectory
## points coloured by per-point within-individual variation (km).
## NOTE(review): the panel titles hardcode mean-variation values ("13 km" etc.)
## — confirm they still match the current data before publishing.
library(cowplot)
map.world <- rworldmap::getMap("low")
map.world <- fortify(map.world)
example.id <- c(5554,833,608, 782, 537, 4024,5337,540, 5060,5134,344)
mn.ex <- filter(mn_traj, id %in% example.id)
## --- Panel: bird 5554, all seasons ---
ID <- example.id[1]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID) %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID)
pv5554<- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
#geom_polygon(data = p50, aes(fill = id,col = id), size = 1, alpha = .1) +
geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
theme(legend.position = c(.85,.3), legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
scale_colour_viridis_c() +
labs(col = "Variation \n (km)") +
ggtitle("ID = 5554, Mean Variation = 13 km")+
coord_fixed(xlim = c(-10, 10),
ylim = c(38.5, 53),ratio = 1.2) ## add core areas
## --- Panel: bird 5134, autumn only ---
ID <- example.id[10]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Autumn") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Autumn")
pv5134<- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
# geom_polygon(data = p50, aes(fill = id,col = id), size = 1, alpha = .1) +
geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
theme(legend.position = c(.85,.3),legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
scale_colour_viridis_c() +
ggtitle("ID = 5134, Mean Variation = 20 km") +
labs(col = "Variation \n (km)") +
coord_fixed(xlim = c(-7.5, 7.5),
ylim = c(40.5, 51.5),ratio = 1.2) ## add core areas
## --- Panel: bird 833, autumn only ---
ID <- example.id[2]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Autumn") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Autumn")
pv833<- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
# geom_polygon(data = p50, aes(fill = id,col = id), size = 1, alpha = .1) +
geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
theme(legend.position = c(.85,.3),legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
scale_colour_viridis_c() +
ggtitle("ID = 833, Mean Variation = 17 km") +
labs(col = "Variation \n (km)") +
coord_fixed(xlim = c(-30, 22),
ylim = c(14, 52),ratio = 1.2) ## add core areas
## --- Panel: bird 608, spring only ---
ID <- example.id[3]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Spring") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Spring")
pv608<- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
# geom_polygon(data = p50, aes(fill = id,col = id), size = 1, alpha = .1) +
geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
theme(legend.position = c(.85,.3),legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
scale_colour_viridis_c() +
ggtitle("ID = 608, Mean Variation = 13 km") +
labs(col = "Variation \n (km)") +
coord_fixed(xlim = c(-2, 5),
ylim = c(49, 54),ratio = 1.2) ## add core areas
## --- Panel: bird 537, autumn only (legend moved left to avoid overplotting) ---
ID <- example.id[5]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Autumn") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Autumn")
pv537<- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
theme(legend.position = c(.13,.3), legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
scale_colour_viridis_c() +
ggtitle("ID = 537, Mean Variation = 33 km") +
labs(col = "Variation \n (km)") +
coord_fixed(xlim = c(-2, 5),
ylim = c(49, 54),ratio = 1.2) ## add core areas
## --- Panel: bird 5337, autumn only ---
ID <- example.id[7]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Autumn") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Autumn")
pv5337<- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
# geom_polygon(data = p50, aes(fill = id,col = id), size = 1, alpha = .1) +
geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
theme(legend.position = c(.85,.3), legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
scale_colour_viridis_c() + scale_fill_viridis_d(begin = .1, end = .8) +
ggtitle("ID = 5337, Mean Variation = 106 km") +
labs(col = "Variation \n (km)") +
coord_fixed(xlim = c(-10, 5),
ylim = c(40, 51),ratio = 1.2) ## add core areas
## --- Panel: bird 540, spring only ---
ID <- example.id[8]
##UDs
# UD <- nogap_multi.yr[[as.character(ID)]]
# p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts <- filter(all_traj, id == ID & direction == "Spring") %>% mutate(birdyear = factor(birdyear))
mn <- filter(mn.ex, id == ID & direction == "Spring")
pv540<- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
geom_path(data=pts, aes(lon, lat, group = birdyear), size = 1) +
geom_point(data=pts, aes(lon, lat, group = NA), size = 1) +
geom_point(data=mn, aes(mn.lon, mn.lat, group = NA, col = within.var), size = 1) +
theme(legend.position = c(.85,.3), legend.background = element_rect(fill="white"),axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
scale_colour_viridis_c() +
ggtitle("ID = 540, Mean Variation = 111 km") +
labs(col = "Variation \n (km)") +
coord_fixed(xlim = c(-26, 22),
ylim = c(18, 53),ratio = 1.2) ## add core areas
#plot_grid(pv537, pv608, pv5337, pv5554, pv540, pv833, nrow = 3)
## Write the 3x2 panel of trajectory-variation examples to file.
## FIX: wrap the plot in print() — a bare top-level ggplot/cowplot object is
## only auto-printed at an interactive prompt, so under source() the device
## would be closed with nothing drawn, leaving a blank png.
png("tvar_ex.png", width = 20, height = 30, units = "cm", res = 400)
print(plot_grid(pv537, pv608, pv5337, pv5134, pv540, pv833, nrow = 3))
dev.off()
## Maps - overlap ----
## Example maps of between-year space-use overlap: per-bird utilisation
## distributions (25/50/75/95% isopleths) per birdyear plus daily positions.
library(cowplot)
theme_set(theme_bw())
# Birds chosen as illustrative high/low overlap examples
example.id <- c(478,608,1400,483,5027,5296, 871,860,4047,5068)
## 478 — per-birdyear UD isopleths + daily points for bird 478.
## FIX: `fill = NA` is a constant, so it belongs OUTSIDE aes(); mapped inside
## aes() it goes through scale_fill_viridis_d and the rings render with the
## scale's na.value (grey) instead of being hollow. The p95 layer also had
## fill = NA duplicated inside and outside aes().
ID <- example.id[1]
##UDs
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
##points
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p478 <-
ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) + ## map
##UDS per year (hollow rings except the shaded 50% core)
geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
#points
geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
##appearance
scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .8) +
theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-4, 4.5),
ylim = c(49,55) ,ratio = 1.2) +
ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
## 608 — per-birdyear UD isopleths + daily points for bird 608.
## FIX: moved the constant `fill = NA` out of aes() on the 75%/25% layers so
## the rings are hollow (inside aes() it is mapped through the fill scale and
## drawn with the scale's grey na.value).
ID <- example.id[2]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p608 <-
ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) + ## map
geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-5, 3.5),
ylim = c(49,55) ,ratio = 1.2) +
ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
#1400 — per-birdyear UD isopleths + daily points for bird 1400.
## FIX: constant `fill = NA` moved outside aes() on the 75%/25% layers
## (inside aes() it is mapped through the fill scale -> grey na.value fill).
ID <- example.id[3]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p1400 <-
ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) + ## map
geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-14, 7),
ylim = c(36,52) ,ratio = 1.2) +
ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
#483 — per-birdyear UD isopleths + daily points for bird 483.
## FIX: constant `fill = NA` moved outside aes() on the 75%/25% layers
## (inside aes() it is mapped through the fill scale -> grey na.value fill).
ID <- example.id[4]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p483 <-
ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) + ## map
geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-15, 7),
ylim = c(37,54) ,ratio = 1.2) +
ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
#5027 — per-birdyear UD isopleths + daily points for bird 5027.
## FIX: constant `fill = NA` moved outside aes() on the 75%/25% layers
## (inside aes() it is mapped through the fill scale -> grey na.value fill).
ID <- example.id[5]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p5027 <-
ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) + ## map
geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-20, 11),
ylim = c(32,55.5) ,ratio = 1.2) +
ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
## 5296 — per-birdyear UD isopleths + daily points for bird 5296.
## FIX: constant `fill = NA` moved outside aes() on the 75%/25% layers
## (inside aes() it is mapped through the fill scale -> grey na.value fill).
ID <- example.id[6]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p5296 <-
ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) + ## map
geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-32, 17),
ylim = c(17,55) ,ratio = 1.2) +
ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
## 871 — per-birdyear UD isopleths + daily points for bird 871.
## FIX: constant `fill = NA` moved outside aes() on the 75%/25% layers
## (inside aes() it is mapped through the fill scale -> grey na.value fill).
ID <- example.id[7]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p871 <-
ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) + ## map
geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-5, 3.5),
ylim = c(49,55) ,ratio = 1.2) +
ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
#5068 — per-birdyear UD isopleths + daily points for bird 5068.
## FIX: constant `fill = NA` moved outside aes() on the 75%/25% layers
## (inside aes() it is mapped through the fill scale -> grey na.value fill).
ID <- example.id[10]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p5068 <-
ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) + ## map
geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-20, 11),
ylim = c(30,53.5) ,ratio = 1.2) +
ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
#860 — per-birdyear UD isopleths + daily points for bird 860.
## FIX: constant `fill = NA` moved outside aes() on the 75%/25% layers
## (inside aes() it is mapped through the fill scale -> grey na.value fill).
ID <- example.id[8]
UD <- nogap_multi.yr[[as.character(ID)]]
p25 <- fortify(spTransform(getverticeshr(UD, 25), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p50 <- fortify(spTransform(getverticeshr(UD, 50), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p75 <- fortify(spTransform(getverticeshr(UD, 75), CRS("+init=epsg:4326"))) ##@data$id is birdyear
p95 <- fortify(spTransform(getverticeshr(UD, 95), CRS("+init=epsg:4326"))) ##@data$id is birdyear
pts.d <- filter(nonbreed.nogap.d, id == ID) %>% select(lat, lon, time, birdyear) %>% mutate(birdyear = factor(birdyear))
OL <- gull.glm %>% filter(id == ID) %>% select(mn_overlap) %>% distinct() %>% pull(mn_overlap)
p860 <-
ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) + ## map
geom_polygon(data = p95, aes(col = id), size = .2, fill = NA) +
geom_polygon(data = p75, aes(col = id), size = .5, fill = NA) +
geom_polygon(data = p50, aes(fill = id, col = id), size = 1.2, alpha = .2) +
geom_polygon(data = p25, aes(col = id), size = .5, fill = NA) +
geom_point(data = pts.d, aes(lon, lat, group = birdyear, col = birdyear), size = .8)+
scale_colour_viridis_d(end = .9, option = "C") + scale_fill_viridis_d(begin = .1, end = .7) +
theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-15, 7),
ylim = c(37,54) ,ratio = 1.2) +
ggtitle(paste( " ID = ", ID, ", Overlap = ", round(OL, digits = 2), ", Seasons = ",
gull.glm %>% filter(id == ID & !is.na(mig.dist)) %>% summarise(n = n()) %>% pull(n), sep = ""))
## Assemble the 3x2 overlap-example page and write it to file.
ol.page <- plot_grid(p478,p871,p1400,p860,p5027,p5068, nrow = 3)
png("ol_ex.png", width = 20, height = 30, units = "cm", res = 400)
## FIX: explicit print() — a bare object at top level is only auto-printed at
## an interactive prompt; under source() the png would be written blank.
print(ol.page)
dev.off()
## Mean traj computation example ----
## Worked illustration (bird 5524, autumn) of how the mean trajectory is built
## from repeated yearly trajectories: SFa shows the raw tracks + 50% UD cores.
all_traj <- ungroup(all_traj) %>% arrange(id, time)
traj5524 <- filter(all_traj, id == 5524 & direction == "Autumn")
## NOTE(review): [[74]] is a positional magic index into nogap_multi.yr.50p —
## presumably the element for bird 5524; confirm it still matches if the list
## upstream changes.
p5524 <- nogap_multi.yr.50p[[74]]
p5524 <- spTransform(p5524, CRS("+init=epsg:4326"))
p5524 <- fortify(p5524)
# split the 50% UD polygons by birdyear (polygon ids "1","2" vs "3","4")
p5524.0 <- filter(p5524, id %in% c("1","2"))
p5524.1 <- filter(p5524, id %in% c("3","4"))
### first need plot with UD + real points
t.0 <- filter(traj5524, birdyear == 0) %>% select(lon, lat)
t.1<- filter(traj5524, birdyear == 1) %>% select(lon, lat)
## SFa: both yearly tracks over their UD cores; the rectangle marks the zoom
## window used by SFb/SFc below.
SFa <- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
geom_polygon(data = p5524.0, fill = "#0D0887FF",col = "#0D0887FF", size = 1, alpha = .1) +
geom_polygon(data = p5524.1, fill = "#CC4678FF",col = "#CC4678FF", size = 1, alpha = .1) +
geom_path(data=t.0, aes(lon, lat, group = NA), col = "#0D0887FF", size = 1) +
geom_path(data = t.1, aes(lon, lat,group = NA),col = "#CC4678FF", size = 1) +
geom_point(data = t.0, aes(lon, lat,group = NA), col = "#0D0887FF", size = 2) +
geom_point(data = t.1, aes(lon, lat,group = NA),col = "#CC4678FF", size = 2) +
annotate(xmin = -11, xmax = -1, ymin = 37, ymax = 44,
geom = "rect", alpha = 0, col = "black") +
theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-15, 14),
ylim = c(32.5, 53),ratio = 1.2) ## add core areas
##df of single direction trajectories, multiples identified by birdyear
## df includes id, birdyear, lon, lat, direction, date_time
## points within wintering area polygons (i.e. stopovers) were replaced with the polygon centroid to aid in
#spacing points equally along the trajectory
id.dir <- traj5524 %>% select(id, direction) %>% distinct()
# accumulator for the mean-trajectory output (same shape as the main analysis)
mn_traj <- data.frame(matrix(ncol = 5, nrow = 0))
names(mn_traj) <- c("mn.lon", "mn.lat", "within.var", "id", "direction")
a <- 1
traj <- filter(traj5524, id == id.dir[a,]$id & direction == id.dir[a,]$direction)
###This creates a list of equally spaced points (currently n=11, specified in t1_eq), with each trajectory having it's own list element.
ntraj <- unique(traj$birdyear) #trajectory id
equi_n <- 100 ### number of points along the trajectory
## Resample each yearly trajectory to equi_n equally spaced points along its path.
## BUG FIX: the original dropped short trajectories with `ntraj <- ntraj[-i]`
## while iterating over 1:length(ntraj); after the first removal the indices
## shift so the wrong ids could be dropped, `traj_l[[i]] <- ...` left NULL
## holes, and `names(traj_l) <- ntraj` could fail on a length mismatch.
## Instead flag kept trajectories and subset ntraj once after the loop.
traj_l <- list()
keep <- logical(length(ntraj))
for(i in seq_along(ntraj)){
t1 <- filter(traj, birdyear == ntraj[i]) %>% select(lon, lat) %>% distinct()
if (length(t1$lon) > 10){ # need more than 10 distinct points to fit a line
t1_l <- SpatialLines(list(Lines(list(Line(cbind(t1$lon, t1$lat))), "id")))
t1_eq <- spsample(t1_l, equi_n, type = "regular") ## equi_n points equally placed along length of line
traj_l[[length(traj_l) + 1]] <- t1_eq@coords
keep[i] <- TRUE
}
}
ntraj <- ntraj[keep]
names(traj_l) <- ntraj
## Restrict each resampled trajectory to the zoom window (lon -12..0, lat 36..45)
## for plotting; `seq` records each point's position along the trajectory.
traj_l_sample <- lapply(traj_l, function(x) {
x <- as.data.frame(x)
x$seq <- 1:equi_n
filter(x, coords.x1 >= -12 & coords.x1 <=0 & coords.x2 >= 36 & coords.x2 <= 45)
})
t0.0 <- data.frame(traj_l_sample[[1]])
t0.1 <- data.frame(traj_l_sample[[2]])
###Create starting 'thread' for mean trajectory between start and end midpoints
## sapply(traj_l, '[[', i, j) extracts element [i, j] of each coords matrix,
## so start/end are the mean first/last resampled point across trajectories.
start <- cbind(x = mean(sapply(traj_l, '[[',1,1)), y = mean(sapply(traj_l, '[[',1,2))) ##mean of first elements in each list
end <- cbind(x = mean(sapply(traj_l, '[[',equi_n,1)), y = mean(sapply(traj_l, '[[',equi_n,2)))
###Create starting 'thread' from mean trajectory points
# tmean: pointwise mean of all trajectories at each of the equi_n positions
tmean <- data.frame(matrix(ncol = 2, nrow = 0))
colnames(tmean) <- c("x","y")
for(i in 1:equi_n){
tmean[i,1] = mean(sapply(traj_l, '[[',i,1))
tmean[i,2] = mean(sapply(traj_l, '[[',i,2))
}
# initial thread restricted to the zoom window for plotting
t1.0 <- tmean %>% mutate(seq = 1:equi_n) %>% filter(x > -12 & x < 0 & y > 36 & y < 45)
## SFb: the two resampled trajectories (dashed) with the initial mean thread,
## points coloured by position along the route; four example positions enlarged.
## FIX: removed the trailing comma in the last enlarged-points layer
## (`shape = 21,)`) — the resulting empty argument in `...` errors when the
## layer's parameter list is evaluated.
SFb <- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
geom_path(data=t0.0, aes(coords.x1, coords.x2, group = NA), size = 1, alpha = .5, linetype = '21') +
geom_path(data = t0.1, aes(coords.x1, coords.x2,group = NA), size = 1, alpha = .5, linetype = '21') +
geom_point(data = t0.0, aes(coords.x1, coords.x2,group = NA, fill = seq), size = 2, shape = 21) +
geom_point(data = t0.1, aes(coords.x1, coords.x2,group = NA, fill = seq), size = 2, shape = 21) +
geom_path(data = t1.0, aes(x, y,group = NA), size = 1) +
geom_point(data = t1.0, aes(x, y,group = NA, fill = seq), size = 2, shape = 21) +
geom_point(data = t0.0[t0.0$seq %in% c(50,59,64,81),], aes(coords.x1, coords.x2,group = NA, fill = seq), size = 4, shape = 21) +
geom_point(data = t0.1[t0.1$seq %in% c(50,59,64,81),], aes(coords.x1, coords.x2,group = NA, fill = seq), size = 4, shape = 21) +
geom_point(data = t1.0[t1.0$seq %in% c(50,59,64,81),], aes(x, y,group = NA, fill = seq), size = 4, shape = 21) +
scale_fill_viridis_c(option = "C") + theme(legend.position = "none", axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-11, -1),
ylim = c(37, 44),ratio = 1.2)
### nearest neighbour points for 50,59,64,81:
## For each of four example thread points, find each trajectory's nearest
## neighbour (searching within +/-1 degree latitude of the point) and append
## the neighbours plus their mean (tagged with seq = i) to nn.ex for plotting.
nn.ex <- data.frame()
for(i in c(50,59,64,81)){
pmean <- as.numeric(tmean[i,]) ## select each point
nn <- data.frame()
for(j in seq_along(traj_l)){
x <- traj_l[[j]]
x<- data.frame(x)
x$seq <- 1:equi_n
x <- x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,]
## FIX: the fallback tested length(x) == 0, but x is a data.frame so
## length() is the column count (always 3) and the fallback never fired;
## with zero rows min(dist) was NA and x[NA,] injected NA rows. Test row
## count instead and rebuild the seq column for the full trajectory.
if (nrow(x) == 0) {
x <- data.frame(traj_l[[j]])
x$seq <- 1:equi_n
}
dist <- NA
for(k in seq_along(x[,1])){
point2 <- as.numeric(x[k,])
dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
}
nearest_neighbour1 <- x[dist == min(dist),]
nn <- rbind(nn,nearest_neighbour1)
}
names(nn) <- c("x", "y", "seq")
# append the mean of the neighbours, tagged with the thread position i
x <- mean(nn[,1])
y <- mean(nn[,2])
seq <- i
nn <- rbind(nn, data.frame(x,y, seq))
nn.ex <- rbind(nn, nn.ex)
}
##Averaging
## One hand-unrolled iteration of the averaging algorithm (for illustration):
## pull each thread point to the mean of its per-trajectory nearest
## neighbours, then densify any over-long gaps with midpoints.
iter_n <- 0
iter_n <- iter_n + 1
for(i in seq_along(tmean$x)){
pmean <- as.numeric(tmean[i,]) ## select each point
nn <- data.frame()
for(j in seq_along(traj_l)){
x <- traj_l[[j]]
# candidate points within +/-1 degree latitude of the thread point;
# here x is a matrix, so length(x)==0 correctly detects "no candidates"
x <- matrix(x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,], ncol = 2)
if (length(x) == 0) { x <- traj_l[[j]]}
dist <- NA
for(k in seq_along(x[,1])){
point2 <- as.numeric(x[k,])
dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
}
nearest_neighbour1 <- x[dist == min(dist),]
nn <- rbind(nn,nearest_neighbour1)
}
tmean[i,1] <- mean(nn[,1])
tmean[i,2] <- mean(nn[,2])
}
tmean <- unique(tmean)
tmean <- rbind(start, tmean, end)
# thread after one iteration, restricted to the zoom window
t1.1 <- tmean %>% mutate(seq = 1:length(tmean$x)) %>% filter(x > -12 & x < 0 & y > 36 & y < 45)
## NOTE(review): comment says "dist in m" but the threshold here is 25 (the
## full runs above use 25000) — confirm pt2pt.distance units and that 25 is
## intentional for this example.
dist <- pt2pt.distance(tmean$y, tmean$x) ## dist in m
dist_log <- dist > 25
dist_log[1] <- FALSE
# insert a midpoint before every over-long gap (order of rbind matters)
tmean1 <- data.frame()
for(i in 1:length(dist_log)){
if(dist_log[i]== T){
newpt <- new_point(tmean[i-1,], tmean[i,], di = norm_vec(tmean[i-1,]- tmean[i,])/2) #new point at midpoint
tmean1 <- rbind(tmean1, newpt) ## order matters
}
tmean1 <- rbind(tmean1, tmean[i,])
}
new.pts <- anti_join(tmean1, tmean) # the midpoints just inserted (for plotting)
tmean <- tmean1
## SFc: one averaging step — old thread faded, updated thread solid, inserted
## midpoints in green, example nearest-neighbour sets enlarged.
SFc <- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
geom_path(data=t0.0, aes(coords.x1, coords.x2, group = NA), size = 1, alpha = .5, linetype = '21') +
geom_path(data = t0.1, aes(coords.x1, coords.x2,group = NA), size = 1, alpha = .5, linetype = '21') +
geom_point(data = t0.0, aes(coords.x1, coords.x2,group = NA, fill = seq), size = 2, shape = 21) +
geom_point(data = t0.1, aes(coords.x1, coords.x2,group = NA, fill = seq), size = 2, shape = 21) +
geom_path(data = t1.0, aes(x, y,group = NA), size = 1, alpha = .25) +
geom_point(data = t1.0, aes(x, y,group = NA, fill = seq), size = 2, shape = 21, alpha = .25) +
geom_path(data = t1.1, aes(x, y,group = NA), size = 1) +
geom_point(data = new.pts, aes(x, y,group = NA), fill = '#20A387FF', size = 2, shape = 21) +
geom_point(data = t1.1, aes(x, y,group = NA, fill = seq), size = 2, shape = 21) +
geom_point(data = nn.ex,aes(x, y,group = NA, fill = seq), size = 4, shape = 21) +
scale_fill_viridis_c(option = "C") + theme(legend.position = "none", axis.title = element_blank(),
panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white")) +
coord_fixed(xlim = c(-11, -1),
ylim = c(37, 44),ratio = 1.2)
###
###Create starting 'thread' for mean trajectory between start and end midpoints
## start/end: average of the first/last resampled point over all trajectories
start <- cbind(x = mean(vapply(traj_l, function(m) m[[1, 1]], numeric(1))),
y = mean(vapply(traj_l, function(m) m[[1, 2]], numeric(1))))
end <- cbind(x = mean(vapply(traj_l, function(m) m[[equi_n, 1]], numeric(1))),
y = mean(vapply(traj_l, function(m) m[[equi_n, 2]], numeric(1))))
#tmean <- data.frame(x = seq(start[,1], end[,1], length.out = 250), y = seq(start[,2], end[,2], length.out = 250))
###
###Create starting 'thread' from mean trajectory points
## tmean: pointwise mean across trajectories at each of the equi_n positions
tmean <- data.frame(
x = vapply(seq_len(equi_n), function(i) mean(vapply(traj_l, function(m) m[[i, 1]], numeric(1))), numeric(1)),
y = vapply(seq_len(equi_n), function(i) mean(vapply(traj_l, function(m) m[[i, 2]], numeric(1))), numeric(1))
)
###
##Averaging
iter_n <- 0
repeat{
iter_n <- iter_n + 1
for(i in seq_along(tmean$x)){
pmean <- as.numeric(tmean[i,]) ## select each point
nn <- data.frame()
for(j in seq_along(traj_l)){
x <- traj_l[[j]]
x <- matrix(x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,], ncol = 2)
if (length(x) == 0) { x <- traj_l[[j]]}
dist <- NA
for(k in seq_along(x[,1])){
point2 <- as.numeric(x[k,])
dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
}
nearest_neighbour1 <- x[dist == min(dist),]
nn <- rbind(nn,nearest_neighbour1)
}
tmean[i,1] <- mean(nn[,1])
tmean[i,2] <- mean(nn[,2])
}
tmean <- unique(tmean)
tmean <- rbind(start, tmean, end)
dist <- pt2pt.distance(tmean$y, tmean$x) ## dist in m
dist_log <- dist > 25
dist_log[1] <- FALSE
if(iter_n == 100){ ###this will stop repeating after 100 iterations. option 2 is to stop repeating if distinces between tmean are all less than e.g. 25
break
}
tmean1 <- data.frame()
for(i in 1:length(dist_log)){
if(dist_log[i]== T){
newpt <- new_point(tmean[i-1,], tmean[i,], di = norm_vec(tmean[i-1,]- tmean[i,])/2) #new point at midpoint
tmean1 <- rbind(tmean1, newpt) ## order matters
}
tmean1 <- rbind(tmean1, tmean[i,])
}
tmean <- tmean1
}
## Resample the converged thread to equi_n points regularly spaced along its
## length (sp::spsample; note 'regular' may not return exactly equi_n points).
tmean <- SpatialLines(list(Lines(list(Line(cbind(tmean$x, tmean$y))), "id")))
tmean <- spsample(tmean, equi_n, type = "regular") ## 100 point equally placed along length of line
tmean <- tmean@coords
traj_l <- lapply(traj_l, FUN = data.frame)
for(i in 1:length(traj_l)){
names(traj_l[[i]]) <- c("lon", "lat")
}
## Example thread positions used for the highlighted variance points in SFd.
ex2 <- c(52,59,65,79)
## For each example position, collect the nearest neighbour on every trajectory.
nn.ex2 <- data.frame()
for(i in ex2){
pmean <- as.numeric(tmean[i,]) ## select each point
nn <- data.frame()
for(j in seq_along(traj_l)){
x <- traj_l[[j]]
x <-x[x[,2] < pmean[2]+1 & x[,2] > pmean[2]-1,]
if (length(x$lon) == 0) { x <- traj_l[[j]]}
dist <- NA
for(k in seq_along(x[,1])){
point2 <- as.numeric(x[k,])
dist[k] <- deg.dist(pmean[1], pmean[2], point2[1], point2[2])
}
nearest_neighbour1 <- x[dist == min(dist),]
nn <- rbind(nn,nearest_neighbour1)
}
names(nn) <- c("x", "y")
nn$mn_seq <- i
## Prepended, so nn.ex2 ends up in reverse ex2 order (see rev(ex2) below).
nn.ex2 <- rbind(nn, nn.ex2)
}
## For every trajectory, append a column of nearest-neighbour distances from
## each thread point to that trajectory.
nn_dist <- NA
for(l in seq_along(traj_l)){
x <- traj_l[[l]]
for(i in seq_along(tmean[,1])){
point <- tmean[i,]
dist <- NA
for(j in seq_along(x[,1])){
point2 <- x[j,]
dist[j] <- deg.dist(point[1], point[2], point2[1,1], point2[1,2])
}
nn_dist[i] <- min(dist)
}
tmean <- cbind(tmean, nn_dist)
}
tmean <- data.frame(tmean)
## Within-trajectory variance per thread point: sum of nearest-neighbour
## distances over the trajectory columns, divided by (n trajectories - 1).
tmean$within.var <- rowSums(tmean[,3:ncol(tmean)])/(ncol(tmean)-2-1) ##var = sum of (x-x.mn)/n-1. here n = num traj = (ncol - 2(=tmean coords))
tmean <- tmean[,c(1:2,ncol(tmean))]
names(tmean) <- c("mn.lon", "mn.lat", "within.var")
## nn.ex2 is in reverse ex2 order (built by prepending), hence rev(ex2) here.
nn.ex2$wvar <- rep(tmean[rev(ex2),]$within.var, each = length(traj_l))
## Panel SFd: mean trajectory coloured by within-trajectory variance, with the
## example positions (ex2) and their contributing neighbours enlarged.
SFd<- ggplot(map.world, aes(long, lat, group = group)) +
geom_polygon(fill = "white", col = "black", size = .25) +
geom_path(data=t0.0, aes(coords.x1, coords.x2, group = NA), size = 1, alpha = .5, linetype = '21') +
geom_path(data = t0.1, aes(coords.x1, coords.x2,group = NA), size = 1, alpha = .5, linetype = '21') +
geom_point(data = t0.0, aes(coords.x1, coords.x2,group = NA), size = 2, shape = 21, fill = "black") +
geom_point(data = t0.1, aes(coords.x1, coords.x2,group = NA), size = 2, shape = 21, fill = "black") +
geom_point(data = nn.ex2, aes(x,y,group = NA, fill = wvar), size = 4, shape = 21) +
geom_path(data = tmean, aes(mn.lon, mn.lat, group = NA), size = 1) +
geom_point(data = tmean, aes(mn.lon, mn.lat, group = NA, fill = within.var), size = 2, shape = 21) +
geom_point(data = tmean[ex2,], aes(mn.lon, mn.lat, group = NA, fill = within.var), size = 4, shape = 21) +
scale_fill_viridis_c(name = "Variance \n (km)") + theme(axis.title = element_blank(), panel.background = element_rect(fill = "gray93"),
panel.grid.major = element_line(colour= "white"), legend.position = c(.68,.33)) +
coord_fixed(xlim = c(-11, -1),
ylim = c(37, 44),ratio = 1.2)
## Assemble the 4-panel supplementary figure (SFa/SFb defined earlier in file).
png("FigS3.png", width = 20, height = 16, units = "cm", res = 400)
cowplot::plot_grid(SFa,SFb,SFc,SFd, nrow = 2)
dev.off()
|
# options(error=recover)
# options(show.error.locations=TRUE)
library(shiny)
library(ggplot2)
library(reshape2)
library(scales)
library(grid)
library(gridExtra)
library(plyr)
library(Cairo)
#upload size for file set to 40mb
options(shiny.maxRequestSize = 40*1024^2)
# Force Eastern time for all date handling in the app.
Sys.setenv(TZ='EST')
# Default inter-event time (hours) and minimum event total used by the app.
DEFAULT.INTEREVENT <- 12
DEFAULT.MIN.TOTAL <- 0.1
#function to render the ecdf plot
#d: data frame of observations; must contain a `quantile` column (percentage),
#   a `source` column (series identifier) and the level column named by `column`
#column.label: human-readable y-axis label prefix
#column: name (string) of the level column to plot
#xaxis.range: length-2 numeric vector, min/max percentage shown on the x axis
#anno.num: numeric vector of levels to draw as horizontal annotation lines
#anno.txt: text pasted in front of each annotation value in its label
#anno.ecdf: ecdf proportion for each annotation level (shown as a percentage)
#returns: a ggplot object
plot_ecdf <- function(d, column.label, column, xaxis.range, anno.num, anno.txt,anno.ecdf) {
#sets up the y axis to have breaks at intervals of 5, rounded up past the data max
flow.y.axis <- local({
interval <- 5
axis.start <- 0
axis.end <- ((max(d[[column]], na.rm=TRUE) + interval) %/% interval) * interval
scale_y_continuous(breaks=seq(axis.start, axis.end, interval))
})
#sets the x axis to the range of the values
x.axis.size <- diff(xaxis.range)
#sets the break size for the x axis based on the range of values
x.axis.break.size <- if (x.axis.size > 50) 10 else if (x.axis.size > 25) 5 else 1
#subsets the data for the plot to only include the percentage of data within the user-determined min and max ranges
d <- d[d$quantile >= xaxis.range[1] & d$quantile <= xaxis.range[2], ]
# max.observation <- by(d, INDICES=list(d$source), FUN=function(g) {
# g[which.max(g$quantile), ]
# })
# max.observation <- do.call(rbind, max.observation)
#single row with the largest quantile across all sources; labelled on the plot
max.observation <- d[which.max(d$quantile),]
ylab.text <- paste(column.label, '(ft, NAVD88)')
#renders the ecdf plot
p <- ggplot(data=d, aes_string(x='quantile', y=column, shape='source', colour='source')) +
scale_x_continuous(breaks=seq(xaxis.range[1], xaxis.range[2], by=x.axis.break.size), limits=xaxis.range) +
flow.y.axis +
geom_step(size=1) +
geom_point() +
geom_point(data=max.observation, size=4) +
annotate('text', x=max.observation$quantile, y=max.observation[[column]],
label=round(max.observation[[column]], 2), size=4, vjust=-.7, fontface='bold') + # vjust .7
theme_bw() +
theme(legend.position='bottom',
legend.title=element_blank(),
axis.title.y=element_text(vjust=-.1),
axis.title.x=element_text(vjust=-.1),
plot.margin=unit(c(0,1,1,1), 'lines')
) +
guides(col=guide_legend(ncol=1)) +
ylab(ylab.text) +
xlab('Percentage of time less than equal corresponding level')
#add annotation lines: one horizontal line per requested level, labelled with
#the level and its ecdf percentage, anchored at the left edge of the data
if (length(anno.num) > 0) {
annotations <- data.frame(yintercept=anno.num, x=rep(min(d$quantile), length(anno.num)), labels=paste(anno.txt, anno.num, 'ft (',paste0(round(100*anno.ecdf, 2), "%"),')'))
p <- p +geom_hline(data=annotations, aes(yintercept=yintercept)) +
annotate('text', x=annotations$x, y=annotations$yintercept, label=annotations$labels, size=4, vjust=-1, hjust=0,fontface='bold')
}
return(p)
}
|
/global.r
|
no_license
|
codingbanana/CDFplot
|
R
| false
| false
| 2,919
|
r
|
# options(error=recover)
# options(show.error.locations=TRUE)
library(shiny)
library(ggplot2)
library(reshape2)
library(scales)
library(grid)
library(gridExtra)
library(plyr)
library(Cairo)
#upload size for file set to 40mb
options(shiny.maxRequestSize = 40*1024^2)
# Force Eastern time for all date handling in the app.
Sys.setenv(TZ='EST')
# Default inter-event time (hours) and minimum event total used by the app.
DEFAULT.INTEREVENT <- 12
DEFAULT.MIN.TOTAL <- 0.1
#function to render the ecdf plot
#d: data frame of observations; must contain a `quantile` column (percentage),
#   a `source` column (series identifier) and the level column named by `column`
#column.label: human-readable y-axis label prefix
#column: name (string) of the level column to plot
#xaxis.range: length-2 numeric vector, min/max percentage shown on the x axis
#anno.num: numeric vector of levels to draw as horizontal annotation lines
#anno.txt: text pasted in front of each annotation value in its label
#anno.ecdf: ecdf proportion for each annotation level (shown as a percentage)
#returns: a ggplot object
plot_ecdf <- function(d, column.label, column, xaxis.range, anno.num, anno.txt,anno.ecdf) {
#sets up the y axis to have breaks at intervals of 5, rounded up past the data max
flow.y.axis <- local({
interval <- 5
axis.start <- 0
axis.end <- ((max(d[[column]], na.rm=TRUE) + interval) %/% interval) * interval
scale_y_continuous(breaks=seq(axis.start, axis.end, interval))
})
#sets the x axis to the range of the values
x.axis.size <- diff(xaxis.range)
#sets the break size for the x axis based on the range of values
x.axis.break.size <- if (x.axis.size > 50) 10 else if (x.axis.size > 25) 5 else 1
#subsets the data for the plot to only include the percentage of data within the user-determined min and max ranges
d <- d[d$quantile >= xaxis.range[1] & d$quantile <= xaxis.range[2], ]
# max.observation <- by(d, INDICES=list(d$source), FUN=function(g) {
# g[which.max(g$quantile), ]
# })
# max.observation <- do.call(rbind, max.observation)
#single row with the largest quantile across all sources; labelled on the plot
max.observation <- d[which.max(d$quantile),]
ylab.text <- paste(column.label, '(ft, NAVD88)')
#renders the ecdf plot
p <- ggplot(data=d, aes_string(x='quantile', y=column, shape='source', colour='source')) +
scale_x_continuous(breaks=seq(xaxis.range[1], xaxis.range[2], by=x.axis.break.size), limits=xaxis.range) +
flow.y.axis +
geom_step(size=1) +
geom_point() +
geom_point(data=max.observation, size=4) +
annotate('text', x=max.observation$quantile, y=max.observation[[column]],
label=round(max.observation[[column]], 2), size=4, vjust=-.7, fontface='bold') + # vjust .7
theme_bw() +
theme(legend.position='bottom',
legend.title=element_blank(),
axis.title.y=element_text(vjust=-.1),
axis.title.x=element_text(vjust=-.1),
plot.margin=unit(c(0,1,1,1), 'lines')
) +
guides(col=guide_legend(ncol=1)) +
ylab(ylab.text) +
xlab('Percentage of time less than equal corresponding level')
#add annotation lines: one horizontal line per requested level, labelled with
#the level and its ecdf percentage, anchored at the left edge of the data
if (length(anno.num) > 0) {
annotations <- data.frame(yintercept=anno.num, x=rep(min(d$quantile), length(anno.num)), labels=paste(anno.txt, anno.num, 'ft (',paste0(round(100*anno.ecdf, 2), "%"),')'))
p <- p +geom_hline(data=annotations, aes(yintercept=yintercept)) +
annotate('text', x=annotations$x, y=annotations$yintercept, label=annotations$labels, size=4, vjust=-1, hjust=0,fontface='bold')
}
return(p)
}
|
# Install the JDK first.
# Then, in the console:
#install.packages("remotes")
# Once that is installed:
#remotes::install_github('haven-jeon/KoNLP', upgrade = "never", INSTALL_opts=c("--no-multiarch"))
# Download the "잠이 오질 않네요.txt" file into the Documents folder.
install.packages("stringi")
getwd() # current working directory
install.packages("wordcloud2") # install the package
Sys.setenv(JAVA_HOME='C:\\Program Files\\Java\\jdk1.8.0_271') #':\\program files\\java\\jdk1.8.0_191')
library(KoNLP) # load library
library(wordcloud2)# load library
useSejongDic() # load the Korean (Sejong) dictionary
텍스트파일자체<-file("잠이 오질 않네요.txt", encoding = "UTF-8") # open the lyrics .txt file as UTF-8
텍스트파일내용 <- readLines(텍스트파일자체) # read line by line
close(텍스트파일자체)# close the file connection
명사만<-sapply(텍스트파일내용, extractNoun, USE.NAMES = F) # extract nouns only
명사만 <- unlist(명사만)# flatten the list into a vector
head(명사만, 30)# print the first 30
중복횟수 <- table(명사만) # count how many times each noun in the
# vector above occurs (frequency table)
head(sort(중복횟수, decreasing = T), 30) # print the 30 most frequent words
# Generate the word cloud from the top 30 words.
wordcount2 <- head(sort(중복횟수, decreasing=T),30)
wordcloud2(wordcount2,gridSize=10,size=1,shape="circle")
|
/SourceCode/1일차/20922 천정윤.R
|
no_license
|
cksldfj/SD_R
|
R
| false
| false
| 1,455
|
r
|
# Install the JDK first.
# Then, in the console:
#install.packages("remotes")
# Once that is installed:
#remotes::install_github('haven-jeon/KoNLP', upgrade = "never", INSTALL_opts=c("--no-multiarch"))
# Download the "잠이 오질 않네요.txt" file into the Documents folder.
install.packages("stringi")
getwd() # current working directory
install.packages("wordcloud2") # install the package
Sys.setenv(JAVA_HOME='C:\\Program Files\\Java\\jdk1.8.0_271') #':\\program files\\java\\jdk1.8.0_191')
library(KoNLP) # load library
library(wordcloud2)# load library
useSejongDic() # load the Korean (Sejong) dictionary
텍스트파일자체<-file("잠이 오질 않네요.txt", encoding = "UTF-8") # open the lyrics .txt file as UTF-8
텍스트파일내용 <- readLines(텍스트파일자체) # read line by line
close(텍스트파일자체)# close the file connection
명사만<-sapply(텍스트파일내용, extractNoun, USE.NAMES = F) # extract nouns only
명사만 <- unlist(명사만)# flatten the list into a vector
head(명사만, 30)# print the first 30
중복횟수 <- table(명사만) # count how many times each noun in the
# vector above occurs (frequency table)
head(sort(중복횟수, decreasing = T), 30) # print the 30 most frequent words
# Generate the word cloud from the top 30 words.
wordcount2 <- head(sort(중복횟수, decreasing=T),30)
wordcloud2(wordcount2,gridSize=10,size=1,shape="circle")
#Get BestGrad5 from smoothing results
#
# Select which 5-year graduation series to use, based on the age-ratio score
# of the original 5-year data, the score of its 2-term moving average, and
# the number of education years available.
#
# AgeRatioScore_orig: age-ratio score of the straight 5-year data
# AgeRatioScore_mav2: age-ratio score of the 2-term moving average
# EduYrs:             years of education data available
# subgroup:           "adult" or "child" (defaults to "adult")
#
# Returns: 1 (straight 5-year data), 2 (mav2), or 4 (mav4; adults only).
getBestGrad5 <- function(AgeRatioScore_orig, AgeRatioScore_mav2, EduYrs, subgroup = c("adult","child")) {
  # BUG FIX: the original compared `subgroup == "adult"` directly. With the
  # default (a length-2 vector), `if` on a length-2 condition is an error in
  # R >= 4.2 and silently used only the first element before that.
  # match.arg() collapses the default to "adult" and validates user input.
  subgroup <- match.arg(subgroup)
  # Series 1 (straight 5-year data) is always preferred when the original
  # age-ratio score is acceptable (< 4) or >= 4 years of education data exist.
  if (AgeRatioScore_orig < 4 || EduYrs >= 4) {
    return(1)
  }
  if (subgroup == "adult") {
    # Adults: fall back to mav2 if it scores acceptably, otherwise mav4.
    if (AgeRatioScore_mav2 < 4) 2 else 4
  } else {
    # Children: only the mav2 fallback is available.
    2
  }
}
|
/census_workflow_getBestGrad5.R
|
no_license
|
Shelmith-Kariuki/ddharmony
|
R
| false
| false
| 797
|
r
|
#Get BestGrad5 from smoothing results
#
# Select which 5-year graduation series to use, based on the age-ratio score
# of the original 5-year data, the score of its 2-term moving average, and
# the number of education years available.
#
# AgeRatioScore_orig: age-ratio score of the straight 5-year data
# AgeRatioScore_mav2: age-ratio score of the 2-term moving average
# EduYrs:             years of education data available
# subgroup:           "adult" or "child" (defaults to "adult")
#
# Returns: 1 (straight 5-year data), 2 (mav2), or 4 (mav4; adults only).
getBestGrad5 <- function(AgeRatioScore_orig, AgeRatioScore_mav2, EduYrs, subgroup = c("adult","child")) {
  # BUG FIX: the original compared `subgroup == "adult"` directly. With the
  # default (a length-2 vector), `if` on a length-2 condition is an error in
  # R >= 4.2 and silently used only the first element before that.
  # match.arg() collapses the default to "adult" and validates user input.
  subgroup <- match.arg(subgroup)
  # Series 1 (straight 5-year data) is always preferred when the original
  # age-ratio score is acceptable (< 4) or >= 4 years of education data exist.
  if (AgeRatioScore_orig < 4 || EduYrs >= 4) {
    return(1)
  }
  if (subgroup == "adult") {
    # Adults: fall back to mav2 if it scores acceptably, otherwise mav4.
    if (AgeRatioScore_mav2 < 4) 2 else 4
  } else {
    # Children: only the mav2 fallback is available.
    2
  }
}
|
##' @title Creates grid over the study area.
##'
##' @description If the argument thegrid of DetectClustersModel() is null, this function is
##' used to create a rectangular grid with a given step.
##' If step is NULL the step used is equal to 0.2*radius.
##' The grid contains the coordinates of the centers of the clusters explored.
##'
##' @param stfdf spatio-temporal class object containing the data.
##' @param radius maximum radius of the clusters.
##' @param step step of the grid.
##'
##' @return two columns matrix where each row represents a point of the grid.
##'
CreateGridDClusterm <- function(stfdf, radius, step) {
# Return: thegrid
if(is.null(step)) {
step <- .2 * radius
}
# Grid extent is the bounding box of the region centroids.
coordx <- (coordinates(stfdf@sp))[,1]
coordy <- (coordinates(stfdf@sp))[,2]
xgrid <- seq(min(coordx), max(coordx), by = step)
ygrid <- seq(min(coordy), max(coordy), by = step)
xlen <- length(xgrid)
ylen <- length(ygrid)
npoints <- xlen * ylen
# BUG FIX: the original line read `thegrid < -matrix(...)`, i.e. a `<`
# comparison against a negated matrix whose result was discarded, so
# `thegrid` was never created and the next line failed with
# "object 'thegrid' not found". It must be the assignment operator `<-`.
thegrid <- matrix(rep(NA, 2 * npoints), ncol = 2)
# Column 1 cycles through the x values for each y; column 2 repeats each y
# xlen times, giving every (x, y) combination in row-major order.
thegrid[, 1] <- rep(xgrid, times = ylen)
thegrid[, 2] <- rep(ygrid, each = xlen)
return(thegrid)
}
##' @title Obtains the clusters with the maximum log-likelihood ratio or minimum DIC
##' for each center and start and end dates.
##'
##' @description This function explores all possible clusters changing their center and start
##' and end dates. For each center and time periods, it obtains the cluster with
##' the maximum log-likelihood ratio or minimum DIC so that the maximum fraction
##' of the total population inside the cluster is less than fractpop,
##' and the maximum distance to the center is less than radius.
##'
##' @param thegrid grid with the coordinates of the centers of the clusters
##' explored.
##' @param CalcStatClusterGivenCenter function to obtain the cluster with the
##' maximum log-likelihood ratio of all the clusters with the same center and
##' start and end dates
##' @param stfdf spatio-temporal class object containing the data.
##' @param rr square of the maximum radius of the cluster.
##' @param typeCluster type of clusters to be detected. "ST" for spatio-temporal
##' clusters or "S" spatial clusters.
##' @param sortDates sorted vector of the times where disease cases occurred.
##' @param idMinDateCluster index of the closest date to the start date of the
##' cluster in the vector sortDates
##' @param idMaxDateCluster index of the closest date to the end date of the
##' cluster in the vector sortDates
##' @param fractpop maximum fraction of the total population inside the cluster.
##' @param model0 Initial model (including covariates).
##' This can be "glm" for generalized linear models (\link{glm} {stats}),
##' "glmer" for generalized linear mixed model (\link{glmer} {lme4}),
##' "zeroinfl" for zero-inflated models (zeroinfl), or
##' "inla" for generalized linear, generalized linear mixed or zero-inflated models fitted with \code{inla}.
##' @param ClusterSizeContribution Variable used to check the fraction of the
##' population at risk in the cluster
##' @param numCPUS Number of cpus used when using parallel to run the method.
##' If parallel is not used numCPUS is NULL.
##'
##' @return data frame with information of the clusters with the maximum
##' log-likelihood ratio or minimum DIC for each center and start and end dates.
##' It contains the coordinates of the center, the size, the start and end dates,
##' the log-likelihood ratio or DIC, the p-value and the risk of each of the clusters.
##'
CalcStatsAllClusters <- function(thegrid, CalcStatClusterGivenCenter, stfdf,
rr, typeCluster, sortDates, idMinDateCluster, idMaxDateCluster, fractpop,
model0, ClusterSizeContribution, numCPUS){
# Temporal dimension here, spatial dimension inside glmAndZIP.iscluster
if(typeCluster == "ST"){
statsAllClusters <- NULL
# Spatio-temporal search: every ordered pair (i, j) with i <= j of candidate
# start/end dates is scanned; for each window, every grid row is evaluated
# as a potential cluster centre (sequentially or with parApply).
for (i in idMinDateCluster:idMaxDateCluster) {
for (j in i:idMaxDateCluster) {
if(is.null(numCPUS)) {
statClusterGivenCenter <- apply(thegrid, 1,
CalcStatClusterGivenCenter, stfdf, rr,
minDateCluster = sortDates[i],
maxDateCluster = sortDates[j], fractpop, model0, ClusterSizeContribution)
} else {
#SNOWFALL
#FIXME: REmove this commented code
# statClusterGivenCenter <- sfApply(thegrid, 1,
# CalcStatClusterGivenCenter, stfdf, rr,
# minDateCluster = sortDates[i], maxDateCluster = sortDates[j],
# fractpop, model0)
#PARALLEL
# cl = NULL uses the default cluster registered with the parallel package.
statClusterGivenCenter <- parApply(cl = NULL, thegrid, 1,
CalcStatClusterGivenCenter, stfdf, rr,
minDateCluster = sortDates[i], maxDateCluster = sortDates[j],
fractpop, model0, ClusterSizeContribution)
}
# Stack the one-row results for every centre into the running output.
statsAllClusters <-
rbind(statsAllClusters, do.call(rbind, statClusterGivenCenter))
# Progress indicator: current (start, end) date-index pair.
print(c(i, j))
}
}
}
if(typeCluster == "S") {
# Purely spatial search: a single time window spanning the whole period.
i <- idMinDateCluster
j <- idMaxDateCluster
if(is.null(numCPUS)) {
statsAllClusters <- apply(thegrid, 1, CalcStatClusterGivenCenter, stfdf,
rr, minDateCluster = sortDates[i], maxDateCluster = sortDates[j],
fractpop, model0, ClusterSizeContribution)
} else {
#SNOWFALL
#FIXME: Remove this commented code
# statsAllClusters <- sfApply(thegrid, 1, CalcStatClusterGivenCenter, stfdf,
# rr, minDateCluster = sortDates[i], maxDateCluster = sortDates[j],
# fractpop, model0)
#PARALLEL
statsAllClusters <- parApply(cl = NULL, thegrid, 1,
CalcStatClusterGivenCenter, stfdf,
rr, minDateCluster = sortDates[i], maxDateCluster = sortDates[j],
fractpop, model0, ClusterSizeContribution)
}
statsAllClusters <- do.call(rbind, statsAllClusters)
print(c(i, j))
}
names(statsAllClusters) <- c("x", "y", "size", "minDateCluster",
"maxDateCluster", "statistic", "pvalue", "risk")
return(as.data.frame(statsAllClusters))
}
##' @title Calls the function to obtain the cluster with the maximum log-likelihood ratio
##' or minimum DIC of all the clusters with the same center and start and end dates.
##'
##' @description Regions are ranked by their squared distance to the supplied
##' centre and those farther than sqrt(rr) are discarded. The remaining ordered
##' regions are passed to glmAndZIP.iscluster(), which returns the cluster with
##' the maximum log-likelihood ratio or minimum DIC among all clusters sharing
##' this centre and these start/end dates, subject to the constraint that the
##' fraction of the total population inside the cluster is less than fractpop.
##'
##' @param point vector with the coordinates of the center of the cluster.
##' @param stfdf spatio-temporal class object containing the data.
##' @param rr square of the maximum radius of the cluster.
##' @param minDateCluster start date of the cluster.
##' @param maxDateCluster end date of the cluster.
##' @param fractpop maximum fraction of the total population inside the cluster.
##' @param model0 Initial model (including covariates).
##' @param ClusterSizeContribution Variable used to check the fraction of the
##' population at risk in the cluster
##' This can be "glm" for generalized linear models (\link{glm} {stats}),
##' "glmer" for generalized linear mixed model (\link{glmer} {lme4}),
##' "zeroinfl" for zero-inflated models (zeroinfl), or
##' "inla" for generalized linear, generalized linear mixed or zero-inflated models fitted with \code{inla}.
##'
##' @return vector containing the coordinates of the center, the size,
##' the start and end dates, the log-likelihood ratio or DIC, the p-value and
##' the risk of the cluster with the maximum log-likelihood ratio or minimum DIC.
##'
CalcStatClusterGivenCenter <- function(point, stfdf, rr, minDateCluster,
maxDateCluster, fractpop, model0, ClusterSizeContribution) {
  # Squared Euclidean distance from every region centroid to the candidate centre.
  centroids <- coordinates(stfdf@sp)
  dx <- centroids[, 1] - point[1]
  dy <- centroids[, 2] - point[2]
  sq.dist <- dx * dx + dy * dy
  # Rank regions by proximity, then keep only those within the maximum radius.
  # rr is already the squared radius, so no square root is required.
  idxorder <- order(sq.dist)
  idxorder <- idxorder[sq.dist[idxorder] <= rr]
  # Best cluster (by log-likelihood ratio or DIC) for this centre and window.
  cl <- glmAndZIP.iscluster(stfdf = stfdf, idxorder = idxorder,
    minDateCluster, maxDateCluster, fractpop, model0, ClusterSizeContribution)
  # Prepend the centre coordinates so results can be stacked across centres.
  cbind(data.frame(x = point[1], y = point[2]), cl)
}
|
/R/Functions2.R
|
no_license
|
cran/DClusterm
|
R
| false
| false
| 8,606
|
r
|
##' @title Creates grid over the study area.
##'
##' @description If the argument thegrid of DetectClustersModel() is null, this function is
##' used to create a rectangular grid with a given step.
##' If step is NULL the step used is equal to 0.2*radius.
##' The grid contains the coordinates of the centers of the clusters explored.
##'
##' @param stfdf spatio-temporal class object containing the data.
##' @param radius maximum radius of the clusters.
##' @param step step of the grid.
##'
##' @return two columns matrix where each row represents a point of the grid.
##'
CreateGridDClusterm <- function(stfdf, radius, step) {
# Return: thegrid
if(is.null(step)) {
step <- .2 * radius
}
# Grid extent is the bounding box of the region centroids.
coordx <- (coordinates(stfdf@sp))[,1]
coordy <- (coordinates(stfdf@sp))[,2]
xgrid <- seq(min(coordx), max(coordx), by = step)
ygrid <- seq(min(coordy), max(coordy), by = step)
xlen <- length(xgrid)
ylen <- length(ygrid)
npoints <- xlen * ylen
# BUG FIX: the original line read `thegrid < -matrix(...)`, i.e. a `<`
# comparison against a negated matrix whose result was discarded, so
# `thegrid` was never created and the next line failed with
# "object 'thegrid' not found". It must be the assignment operator `<-`.
thegrid <- matrix(rep(NA, 2 * npoints), ncol = 2)
# Column 1 cycles through the x values for each y; column 2 repeats each y
# xlen times, giving every (x, y) combination in row-major order.
thegrid[, 1] <- rep(xgrid, times = ylen)
thegrid[, 2] <- rep(ygrid, each = xlen)
return(thegrid)
}
##' @title Obtains the clusters with the maximum log-likelihood ratio or minimum DIC
##' for each center and start and end dates.
##'
##' @description This function explores all possible clusters changing their center and start
##' and end dates. For each center and time periods, it obtains the cluster with
##' the maximum log-likelihood ratio or minimum DIC so that the maximum fraction
##' of the total population inside the cluster is less than fractpop,
##' and the maximum distance to the center is less than radius.
##'
##' @param thegrid grid with the coordinates of the centers of the clusters
##' explored.
##' @param CalcStatClusterGivenCenter function to obtain the cluster with the
##' maximum log-likelihood ratio of all the clusters with the same center and
##' start and end dates
##' @param stfdf spatio-temporal class object containing the data.
##' @param rr square of the maximum radius of the cluster.
##' @param typeCluster type of clusters to be detected. "ST" for spatio-temporal
##' clusters or "S" spatial clusters.
##' @param sortDates sorted vector of the times where disease cases occurred.
##' @param idMinDateCluster index of the closest date to the start date of the
##' cluster in the vector sortDates
##' @param idMaxDateCluster index of the closest date to the end date of the
##' cluster in the vector sortDates
##' @param fractpop maximum fraction of the total population inside the cluster.
##' @param model0 Initial model (including covariates).
##' This can be "glm" for generalized linear models (\link{glm} {stats}),
##' "glmer" for generalized linear mixed model (\link{glmer} {lme4}),
##' "zeroinfl" for zero-inflated models (zeroinfl), or
##' "inla" for generalized linear, generalized linear mixed or zero-inflated models fitted with \code{inla}.
##' @param ClusterSizeContribution Variable used to check the fraction of the
##' population at risk in the cluster
##' @param numCPUS Number of cpus used when using parallel to run the method.
##' If parallel is not used numCPUS is NULL.
##'
##' @return data frame with information of the clusters with the maximum
##' log-likelihood ratio or minimum DIC for each center and start and end dates.
##' It contains the coordinates of the center, the size, the start and end dates,
##' the log-likelihood ratio or DIC, the p-value and the risk of each of the clusters.
##'
CalcStatsAllClusters <- function(thegrid, CalcStatClusterGivenCenter, stfdf,
rr, typeCluster, sortDates, idMinDateCluster, idMaxDateCluster, fractpop,
model0, ClusterSizeContribution, numCPUS){
# Temporal dimension here, spatial dimension inside glmAndZIP.iscluster
if(typeCluster == "ST"){
statsAllClusters <- NULL
# Spatio-temporal search: every ordered pair (i, j) with i <= j of candidate
# start/end dates is scanned; for each window, every grid row is evaluated
# as a potential cluster centre (sequentially or with parApply).
for (i in idMinDateCluster:idMaxDateCluster) {
for (j in i:idMaxDateCluster) {
if(is.null(numCPUS)) {
statClusterGivenCenter <- apply(thegrid, 1,
CalcStatClusterGivenCenter, stfdf, rr,
minDateCluster = sortDates[i],
maxDateCluster = sortDates[j], fractpop, model0, ClusterSizeContribution)
} else {
#SNOWFALL
#FIXME: REmove this commented code
# statClusterGivenCenter <- sfApply(thegrid, 1,
# CalcStatClusterGivenCenter, stfdf, rr,
# minDateCluster = sortDates[i], maxDateCluster = sortDates[j],
# fractpop, model0)
#PARALLEL
# cl = NULL uses the default cluster registered with the parallel package.
statClusterGivenCenter <- parApply(cl = NULL, thegrid, 1,
CalcStatClusterGivenCenter, stfdf, rr,
minDateCluster = sortDates[i], maxDateCluster = sortDates[j],
fractpop, model0, ClusterSizeContribution)
}
# Stack the one-row results for every centre into the running output.
statsAllClusters <-
rbind(statsAllClusters, do.call(rbind, statClusterGivenCenter))
# Progress indicator: current (start, end) date-index pair.
print(c(i, j))
}
}
}
if(typeCluster == "S") {
# Purely spatial search: a single time window spanning the whole period.
i <- idMinDateCluster
j <- idMaxDateCluster
if(is.null(numCPUS)) {
statsAllClusters <- apply(thegrid, 1, CalcStatClusterGivenCenter, stfdf,
rr, minDateCluster = sortDates[i], maxDateCluster = sortDates[j],
fractpop, model0, ClusterSizeContribution)
} else {
#SNOWFALL
#FIXME: Remove this commented code
# statsAllClusters <- sfApply(thegrid, 1, CalcStatClusterGivenCenter, stfdf,
# rr, minDateCluster = sortDates[i], maxDateCluster = sortDates[j],
# fractpop, model0)
#PARALLEL
statsAllClusters <- parApply(cl = NULL, thegrid, 1,
CalcStatClusterGivenCenter, stfdf,
rr, minDateCluster = sortDates[i], maxDateCluster = sortDates[j],
fractpop, model0, ClusterSizeContribution)
}
statsAllClusters <- do.call(rbind, statsAllClusters)
print(c(i, j))
}
names(statsAllClusters) <- c("x", "y", "size", "minDateCluster",
"maxDateCluster", "statistic", "pvalue", "risk")
return(as.data.frame(statsAllClusters))
}
##' @title Calls the function to obtain the cluster with the maximum log-likelihood ratio
##' or minimum DIC of all the clusters with the same center and start and end dates.
##'
##' @description Regions are ranked by their squared distance to the supplied
##' centre and those farther than sqrt(rr) are discarded. The remaining ordered
##' regions are passed to glmAndZIP.iscluster(), which returns the cluster with
##' the maximum log-likelihood ratio or minimum DIC among all clusters sharing
##' this centre and these start/end dates, subject to the constraint that the
##' fraction of the total population inside the cluster is less than fractpop.
##'
##' @param point vector with the coordinates of the center of the cluster.
##' @param stfdf spatio-temporal class object containing the data.
##' @param rr square of the maximum radius of the cluster.
##' @param minDateCluster start date of the cluster.
##' @param maxDateCluster end date of the cluster.
##' @param fractpop maximum fraction of the total population inside the cluster.
##' @param model0 Initial model (including covariates).
##' @param ClusterSizeContribution Variable used to check the fraction of the
##' population at risk in the cluster
##' This can be "glm" for generalized linear models (\link{glm} {stats}),
##' "glmer" for generalized linear mixed model (\link{glmer} {lme4}),
##' "zeroinfl" for zero-inflated models (zeroinfl), or
##' "inla" for generalized linear, generalized linear mixed or zero-inflated models fitted with \code{inla}.
##'
##' @return vector containing the coordinates of the center, the size,
##' the start and end dates, the log-likelihood ratio or DIC, the p-value and
##' the risk of the cluster with the maximum log-likelihood ratio or minimum DIC.
##'
CalcStatClusterGivenCenter <- function(point, stfdf, rr, minDateCluster,
maxDateCluster, fractpop, model0, ClusterSizeContribution) {
  # Squared Euclidean distance from every region centroid to the candidate centre.
  centroids <- coordinates(stfdf@sp)
  dx <- centroids[, 1] - point[1]
  dy <- centroids[, 2] - point[2]
  sq.dist <- dx * dx + dy * dy
  # Rank regions by proximity, then keep only those within the maximum radius.
  # rr is already the squared radius, so no square root is required.
  idxorder <- order(sq.dist)
  idxorder <- idxorder[sq.dist[idxorder] <= rr]
  # Best cluster (by log-likelihood ratio or DIC) for this centre and window.
  cl <- glmAndZIP.iscluster(stfdf = stfdf, idxorder = idxorder,
    minDateCluster, maxDateCluster, fractpop, model0, ClusterSizeContribution)
  # Prepend the centre coordinates so results can be stacked across centres.
  cbind(data.frame(x = point[1], y = point[2]), cl)
}
|
## Complicated gastritis/duodenitis, re-preparing GBD 2017 data with GBD 2019 data-preparation methods
rm(list=ls())
## Set up working environment
if (Sys.info()["sysname"] == "Linux") {
j <- "FILEPATH_J"
h <- "FILEPATH_H"
l <- "FILEPATH_L"
} else {
j <- "FILEPATH_J"
h <- "FILEPATH_H"
l <- "FILEPATH_L"
}
my.lib <- paste0(h, "R/")
central_fxn <- paste0(j, "FILEPATH_CENTRAL_FXNS")
out_path <- paste0(j, "FILEPATH_OUT")
date <- gsub("-", "_", Sys.Date())
pacman::p_load(data.table, ggplot2, openxlsx, readxl, readr, RMySQL, stringr, tidyr, plyr, dplyr, mvtnorm)
install.packages("msm", lib = my.lib)
library("msm", lib.loc = my.lib)
## Source central functions
source(paste0(central_fxn, "get_age_metadata.R"))
source(paste0(central_fxn, "get_location_metadata.R"))
source(paste0(central_fxn, "save_bundle_version.R"))
source(paste0(central_fxn, "get_bundle_version.R"))
source(paste0(central_fxn, "save_crosswalk_version.R"))
source(paste0(central_fxn, "get_bundle_data.R"))
source(paste0(central_fxn, "upload_bundle_data.R"))
## Source other functions
source(paste0(h, "code/getrawdata.R"))
source(paste0(h, "code/sexratio.R"))
source(paste0(h, "code/datascatters.R"))
source(paste0(h, "code/samplematching_wageaggregation.R"))
source(paste0(h, "code/prepmatchesforMRBRT.R"))
source(paste0(j, "FILEPATH/mr_brt_functions.R"))
source(paste0(h, "code/applycrosswalks.R"))
source(paste0(h, "code/outlierbyMAD.R"))
## Get metadata
loc_dt <- get_location_metadata(location_set_id = 22)
all_fine_ages <- as.data.table(get_age_metadata(age_group_set_id=12))
not_babies <- all_fine_ages[!age_group_id %in% c(2:4)]
not_babies[, age_end := age_group_years_end-1]
all_fine_babies <- as.data.table(get_age_metadata(age_group_set_id=18))
group_babies <- all_fine_babies[age_group_id %in% c(28)]
age_dt <- rbind(not_babies, group_babies, fill=TRUE)
age_dt[, age_start := age_group_years_start]
age_dt[age_group_id==28, age_end := 0.999]
## Get raw data and create covariates for marking different groups of clinical informatics data, subset to the measure being crosswalked
#Use argument bundle_version = 0 if you want to create a new bundle version
compgd_dt <- get_raw_data(3200, 'step2', bundle_version = 0)
#Bundle version ID: 7943
## Store bundle columns for later
bundle_columns <- names(compgd_dt)
## List of cvs that are useful tags for manipulating data, but not bias variables to crosswalk
cv_manip <- c("cv_marketscan_data")
## List of cvs that positively identify reference data
cv_ref <- c("cv_admin")
## Combined list of cvs to drop in match-finding (but keep in raw data)
cv_drop <- c(cv_manip, cv_ref)
## List of cvs that positively identify components of alternative case defs
cv_alts <- c("cv_marketscan_2000", "cv_marketscan_other")
## Fill out cases, sample size, standard error using Epi Uploader formulae; standardize the naming of case definitions; add IDs for higher-order geography
get_cases_sample_size(compgd_dt)
get_se(compgd_dt)
## Add variables for use in data-prep that will later need to be dropped to pass Uploader validations
compgd_dt <- get_definitions(compgd_dt)
compgd_dt <- merge(compgd_dt, loc_dt[ , c("location_id", "super_region_id", "region_id")], by = "location_id")
## Prep raw data for match-finding, crosswalking and transformation
compgd_findmatch <- copy(compgd_dt)
compgd_findmatch <- subnat_to_nat(compgd_findmatch)
compgd_findmatch <- calc_year(compgd_findmatch)
scatter_bydef(compgd_findmatch, upper = 0.025)
## Find matches
age_dts <- get_age_combos(compgd_findmatch)
compgd_findmatch <- age_dts[[1]]
age_match_dt <- age_dts[[2]]
pairs <- combn(compgd_findmatch[, unique(definition)], 2)
#combn function generates all combinations of the elements of vector x (in this case, a vector of the unique values found in the definition column of the data.table compgd_findmatch) taken m (in this case, 2) at a time
matches <- rbindlist(lapply(1:dim(pairs)[2], function(x) get_matches(n = x, pair_dt = compgd_findmatch)))
matches[ , nid_comb:=paste0(nid, nid2)]
## Write output of match-finding and ratio calculation for human review
write.xlsx(matches, paste0(out_path, "/3200_matches_", date, ".xlsx"), col.names=TRUE)
## Prep and perform meta-regression
#***comment out reading in the data table if you are running full pipeline uninterrupted, bring back in if running MR-BRT on previously prepped data
#ratios <- as.data.table(read_excel(paste0(out_path, "/3200_matches_", date, ".xlsx")))
## Calculate ratios and their standard errors, drop unneeded variables, calculate dummies, visualize ratios, take their logs and calc standard error of their logs
#Log-ratios are no longer the preferred input variable for MR-BRT modeling, but useful visualization
ratios <- calc_ratio(matches)
#This line is for dropping matches with infinite or zero values for the ratios
ratios <- drop_stuff(ratios)
ratios <- add_compdumminter(ratios)
histo_ratios(ratios, bins = 0.2)
lratios <- logtrans_ratios(ratios)
## Calculate logits, logit differences, and standard errors to go into meta-regression
logit_differences <- calc_logitdf(lratios)
## Take list of cvs for components of alternative case definitions, drop the alts that don't have enough direct or indirect matches to reference, and make into a list MR-BRT will understand
# cv_nomatch <- c(***)
# cv_alts <- setdiff(cv_alts, cv_nomatch)
## Build the MR-BRT covariate specification list: one "X"-type network
## covariate per alternative case definition, in the order given by cv_alts.
cov_list <- lapply(cv_alts, function(alt_cv) cov_info(alt_cv, "X", "network"))
## Ran various MR-BRT models to decide on best fit
#Final choice of model to carry forward
compgd_fit <- run_mr_brt(
output_dir = paste0(j, "FILEPATH"),
model_label = paste0("compgd_xwmodel_", date),
data = logit_differences,
mean_var = "diff_logit",
se_var = "se_diff_logit",
covs = cov_list,
remove_x_intercept = TRUE,
method = "remL",
overwrite_previous = TRUE,
study_id = "nid_comb"
)
## Get predicted coefficients with all sources of uncertainty, predict for training data and then for all data needing crosswalks (these sets of predictions will be the same if there are no continuous covariates or multi-dimensional case-definitions)
# Select data that need and can be crosswalked and mark parent seq and seq
compgd_transform <- compgd_dt[definition!="reference", ]
## Mark crosswalk parentage for the Epi uploader: copy the original seq into
## crosswalk_parent_seq (where not already set) and blank out seq on every
## crosswalked row.
if ("crosswalk_parent_seq" %in% names(compgd_transform)) {
  compgd_transform[is.na(crosswalk_parent_seq), `:=` (crosswalk_parent_seq = seq, seq = NA)]
  # BUG FIX: `seq = NA` inside DT[i, j] is not a valid data.table update
  # (it is parsed as a named argument, not an assignment); use `:=` so seq
  # is actually cleared on rows that already carry a parent seq.
  compgd_transform[!is.na(crosswalk_parent_seq), seq := NA]
} else {
  compgd_transform[, `:=` (crosswalk_parent_seq = seq, seq = NA)]
}
# Predict crosswalk for training data
compgd_trainingprediction <- unique(predict_xw(compgd_fit, "logit_dif"), by = cv_alts)
# Predict crosswalk for raw data
compgd_predictnew <- unique(predict_xw(compgd_fit, "logit_dif", compgd_transform), by = cv_alts)
# Transform data that need and can be crosswalked
compgd_transform <- transform_altdt(compgd_transform, compgd_predictnew, "logit_dif")
# Bind reference data and crosswalked data; make scatter-plot
compgd_dt2 <- rbind(compgd_transform, compgd_dt[definition=="reference", ], fill=TRUE)
scatter_bydef(compgd_dt2, raw = FALSE, upper = 0.025)
# Here we would subset to rows that were group-reviewed out or that could not be crosswalked, but none exist for this bundle, so just making a copy for consistent naming in steps below
compgd_modeling <- copy(compgd_dt2)
## Prep and upload transformed data as a crosswalk version for this bundle
#Choose how many MAD above and below median to outlier, defaults to 2 if no numb_mad argument supplied, if numb_mad=0 will remove series with age-standardized mean of 0, but nothing else
compgd_outliered <- auto_outlier(compgd_modeling)
scatter_markout(compgd_outliered, upper = 0.025)
columns_keep <- unique(c(bundle_columns, "crosswalk_parent_seq"))
columns_drop <- c("cv_admin", "cv_marketscan_2000", "cv_marketscan_other")
columns_keep <- setdiff(columns_keep, columns_drop)
compgd_final <- compgd_outliered[, ..columns_keep]
#Write the data to an Excel file in order to upload it
upload_path <- paste0(j, "FILEPATH/3200_", date, ".xlsx")
write.xlsx(compgd_final, upload_path, col.names=TRUE, sheetName = "extraction")
#Then add a description and upload
description <- "Same approach as XWV 866, but re-pulled clinical data to get corrected SGP claims and fixed my own automated outlier fxn"
compgd_upload_xw <- save_crosswalk_version(bundle_version_id=7943, data_filepath=upload_path, description = description)
# Crosswalk version ID 3089
|
/gbd_2019/nonfatal_code/digest_gastritis/prepolddata_3200_complicated_gastritis.R
|
no_license
|
Nermin-Ghith/ihme-modeling
|
R
| false
| false
| 8,621
|
r
|
## Complicated gastritis/duodenitis, re-preparing GBD 2017 data with GBD 2019 data-preparation methods
rm(list=ls())
## Set up working environment
if (Sys.info()["sysname"] == "Linux") {
j <- "FILEPATH_J"
h <- "FILEPATH_H"
l <- "FILEPATH_L"
} else {
j <- "FILEPATH_J"
h <- "FILEPATH_H"
l <- "FILEPATH_L"
}
my.lib <- paste0(h, "R/")
central_fxn <- paste0(j, "FILEPATH_CENTRAL_FXNS")
out_path <- paste0(j, "FILEPATH_OUT")
date <- gsub("-", "_", Sys.Date())
pacman::p_load(data.table, ggplot2, openxlsx, readxl, readr, RMySQL, stringr, tidyr, plyr, dplyr, mvtnorm)
install.packages("msm", lib = my.lib)
library("msm", lib.loc = my.lib)
## Source central functions
source(paste0(central_fxn, "get_age_metadata.R"))
source(paste0(central_fxn, "get_location_metadata.R"))
source(paste0(central_fxn, "save_bundle_version.R"))
source(paste0(central_fxn, "get_bundle_version.R"))
source(paste0(central_fxn, "save_crosswalk_version.R"))
source(paste0(central_fxn, "get_bundle_data.R"))
source(paste0(central_fxn, "upload_bundle_data.R"))
## Source other functions
source(paste0(h, "code/getrawdata.R"))
source(paste0(h, "code/sexratio.R"))
source(paste0(h, "code/datascatters.R"))
source(paste0(h, "code/samplematching_wageaggregation.R"))
source(paste0(h, "code/prepmatchesforMRBRT.R"))
source(paste0(j, "FILEPATH/mr_brt_functions.R"))
source(paste0(h, "code/applycrosswalks.R"))
source(paste0(h, "code/outlierbyMAD.R"))
## Get metadata
loc_dt <- get_location_metadata(location_set_id = 22)
all_fine_ages <- as.data.table(get_age_metadata(age_group_set_id=12))
not_babies <- all_fine_ages[!age_group_id %in% c(2:4)]
not_babies[, age_end := age_group_years_end-1]
all_fine_babies <- as.data.table(get_age_metadata(age_group_set_id=18))
group_babies <- all_fine_babies[age_group_id %in% c(28)]
age_dt <- rbind(not_babies, group_babies, fill=TRUE)
age_dt[, age_start := age_group_years_start]
age_dt[age_group_id==28, age_end := 0.999]
## Get raw data and create covariates for marking different groups of clinical informatics data, subset to the measure being crosswalked
#Use argument bundle_version = 0 if you want to create a new bundle version
compgd_dt <- get_raw_data(3200, 'step2', bundle_version = 0)
#Bundle version ID: 7943
## Store bundle columns for later
bundle_columns <- names(compgd_dt)
## List of cvs that are useful tags for manipulating data, but not bias variables to crosswalk
cv_manip <- c("cv_marketscan_data")
## List of cvs that positively identify reference data
cv_ref <- c("cv_admin")
## Combined list of cvs to drop in match-finding (but keep in raw data)
cv_drop <- c(cv_manip, cv_ref)
## List of cvs that positively identify components of alternative case defs
cv_alts <- c("cv_marketscan_2000", "cv_marketscan_other")
## Fill out cases, sample size, standard error using Epi Uploader formulae; standardize the naming of case definitions; add IDs for higher-order geography
get_cases_sample_size(compgd_dt)
get_se(compgd_dt)
## Add variables for use in data-prep that will later need to be dropped to pass Uploader validations
compgd_dt <- get_definitions(compgd_dt)
compgd_dt <- merge(compgd_dt, loc_dt[ , c("location_id", "super_region_id", "region_id")], by = "location_id")
## Prep raw data for match-finding, crosswalking and transformation
compgd_findmatch <- copy(compgd_dt)
compgd_findmatch <- subnat_to_nat(compgd_findmatch)
compgd_findmatch <- calc_year(compgd_findmatch)
scatter_bydef(compgd_findmatch, upper = 0.025)
## Find matches
age_dts <- get_age_combos(compgd_findmatch)
compgd_findmatch <- age_dts[[1]]
age_match_dt <- age_dts[[2]]
pairs <- combn(compgd_findmatch[, unique(definition)], 2)
#combn function generates all combinations of the elements of vector x (in this case, a vector of the unique values found in the definition column of the data.table compgd_findmatch) taken m (in this case, 2) at a time
matches <- rbindlist(lapply(1:dim(pairs)[2], function(x) get_matches(n = x, pair_dt = compgd_findmatch)))
matches[ , nid_comb:=paste0(nid, nid2)]
## Write output of match-finding and ratio calculation for human review
write.xlsx(matches, paste0(out_path, "/3200_matches_", date, ".xlsx"), col.names=TRUE)
## Prep and perform meta-regression
#***comment out reading in the data table if you are running full pipeline uninterrupted, bring back in if running MR-BRT on previously prepped data
#ratios <- as.data.table(read_excel(paste0(out_path, "/3200_matches_", date, ".xlsx")))
## Calculate ratios and their standard errors, drop unneeded variables, calculate dummies, visualize ratios, take their logs and calc standard error of their logs
#Log-ratios are no longer the preferred input variable for MR-BRT modeling, but useful visualization
ratios <- calc_ratio(matches)
#This line is for dropping matches with infinite or zero values for the ratios
ratios <- drop_stuff(ratios)
ratios <- add_compdumminter(ratios)
histo_ratios(ratios, bins = 0.2)
lratios <- logtrans_ratios(ratios)
## Calculate logits, logit differences, and standard errors to go into meta-regression
logit_differences <- calc_logitdf(lratios)
## Take list of cvs for components of alternative case definitions, drop the alts that don't have enough direct or indirect matches to reference, and make into a list MR-BRT will understand
# cv_nomatch <- c(***)
# cv_alts <- setdiff(cv_alts, cv_nomatch)
## Build the MR-BRT covariate specification list: one "X"-type network
## covariate per alternative case definition, in the order given by cv_alts.
cov_list <- lapply(cv_alts, function(alt_cv) cov_info(alt_cv, "X", "network"))
## Ran various MR-BRT models to decide on best fit
#Final choice of model to carry forward
compgd_fit <- run_mr_brt(
output_dir = paste0(j, "FILEPATH"),
model_label = paste0("compgd_xwmodel_", date),
data = logit_differences,
mean_var = "diff_logit",
se_var = "se_diff_logit",
covs = cov_list,
remove_x_intercept = TRUE,
method = "remL",
overwrite_previous = TRUE,
study_id = "nid_comb"
)
## Get predicted coefficients with all sources of uncertainty, predict for training data and then for all data needing crosswalks (these sets of predictions will be the same if there are no continuous covariates or multi-dimensional case-definitions)
# Select data that need and can be crosswalked and mark parent seq and seq
compgd_transform <- compgd_dt[definition!="reference", ]
## Mark crosswalk parentage for the Epi uploader: copy the original seq into
## crosswalk_parent_seq (where not already set) and blank out seq on every
## crosswalked row.
if ("crosswalk_parent_seq" %in% names(compgd_transform)) {
  compgd_transform[is.na(crosswalk_parent_seq), `:=` (crosswalk_parent_seq = seq, seq = NA)]
  # BUG FIX: `seq = NA` inside DT[i, j] is not a valid data.table update
  # (it is parsed as a named argument, not an assignment); use `:=` so seq
  # is actually cleared on rows that already carry a parent seq.
  compgd_transform[!is.na(crosswalk_parent_seq), seq := NA]
} else {
  compgd_transform[, `:=` (crosswalk_parent_seq = seq, seq = NA)]
}
# Predict crosswalk for training data
compgd_trainingprediction <- unique(predict_xw(compgd_fit, "logit_dif"), by = cv_alts)
# Predict crosswalk for raw data
compgd_predictnew <- unique(predict_xw(compgd_fit, "logit_dif", compgd_transform), by = cv_alts)
# Transform data that need and can be crosswalked
compgd_transform <- transform_altdt(compgd_transform, compgd_predictnew, "logit_dif")
# Bind reference data and crosswalked data; make scatter-plot
compgd_dt2 <- rbind(compgd_transform, compgd_dt[definition=="reference", ], fill=TRUE)
scatter_bydef(compgd_dt2, raw = FALSE, upper = 0.025)
# Here we would subset to rows that were group-reviewed out or that could not be crosswalked, but none exist for this bundle, so just making a copy for consistent naming in steps below
compgd_modeling <- copy(compgd_dt2)
## Prep and upload transformed data as a crosswalk version for this bundle
#Choose how many MAD above and below median to outlier, defaults to 2 if no numb_mad argument supplied, if numb_mad=0 will remove series with age-standardized mean of 0, but nothing else
compgd_outliered <- auto_outlier(compgd_modeling)
scatter_markout(compgd_outliered, upper = 0.025)
columns_keep <- unique(c(bundle_columns, "crosswalk_parent_seq"))
columns_drop <- c("cv_admin", "cv_marketscan_2000", "cv_marketscan_other")
columns_keep <- setdiff(columns_keep, columns_drop)
compgd_final <- compgd_outliered[, ..columns_keep]
#Write the data to an Excel file in order to upload it
upload_path <- paste0(j, "FILEPATH/3200_", date, ".xlsx")
write.xlsx(compgd_final, upload_path, col.names=TRUE, sheetName = "extraction")
#Then add a description and upload
description <- "Same approach as XWV 866, but re-pulled clinical data to get corrected SGP claims and fixed my own automated outlier fxn"
compgd_upload_xw <- save_crosswalk_version(bundle_version_id=7943, data_filepath=upload_path, description = description)
# Crosswalk version ID 3089
|
#uri-r, count
#tailData <- as.matrix(read.csv("../deferredTLDtail.csv", sep=",", header=TRUE))
#eventNew <- as.matrix(read.csv("../newURIsByEvent.txt.bkp", sep=",", header=TRUE))
#eventNew <- as.matrix(read.csv("../newURIsByEventSort.txt.bkp", sep=",", header=TRUE))
#mimeSize <- as.matrix(read.csv("../massagedMimeSizes.csv", sep=",", header=TRUE))
#eventNew[,1]<-log10(as.integer(eventNew[,1]))
#imgs, js, text, json, css, font, html, other
# Counts of unarchived resources by MIME class, in this fixed order:
# imgs, js, text, json, css, font, html, other (see comment above).
unarchiveMimes <- c(12200, 5760, 526, 985, 220, 29, 5241, 706)
# Occurrence counts per embedded URI-R; assumes tailData was loaded from
# ../deferredTLDtail.csv (see commented read.csv above) -- TODO confirm.
counts<-as.numeric(tailData[,2])
uris<-(tailData[,1])
# Non-numeric counts become NA on coercion; treat them as zero occurrences.
counts[is.na(counts)]<-0
# Keep only resources seen more than 200 times (uris filtered first, while
# counts still aligns with it row-for-row).
uris<-uris[counts>200]
counts<-counts[counts>200]
# Top 300 only, scaled down by 5.5 -- presumably a per-run normalisation
# factor; TODO confirm where 5.5 comes from.
counts<-counts[1:300]/5.5
print("longTail.png")
# Long-tail bar chart of occurrence counts for the top 300 embedded resources.
png(filename="longTail.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
barplot((counts[1:300]), #main="URI-R Counts",
xlab="Top 300 Occurring Embedded Resources", ylab="Occurrences", ylim=c(0, 2000))
# Custom x-axis: bar widths make barplot's coordinates non-linear in index,
# so tick positions 0/115/240/360 are hand-placed to label 0/100/200/300.
axis(1, at=c(0, 115, 240, 360),labels=c(0, 100, 200, 300), las=2)
dev.off()
##Run Number, URI, Number of States, Depth, Breadth, New URIs, <Events>
#defData <- as.matrix(read.csv("../deferredRunStats.txt", sep=",", header=TRUE))
#nondefData <- as.matrix(read.csv("../nondeferredRunStats.txt", sep=",", header=TRUE))
#depthData<-rbind(cbind(3640, 3640, 3640), cbind(8188, 41080, 63491))
depthData<-rbind(cbind(4250, 4250, 4250), cbind(7692, 56957, 66320))
print("growth1.png")
png(filename="growth1.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
plot(0:(length(depthData[1,])-1), depthData[1, ], #main="URI-R Counts",
xlab="Depth", ylab="Total Frontier Size", type="b", pch=2, ylim=c(0, 70000))
points(0:(length(depthData[1,])-1), depthData[2, ], type="b", pch=4)
legend("topleft", inset=.05, #title="URI Set",
c("Deferred", "Nondeferred"), pch=c(4, 2), horiz=FALSE)
#legend('topleft', c("Deferred", "Nondeferred"),
# pch=c(4, 2), cex=1)
dev.off()
##total uris, total archived, new uris, new archived
####need to update the nondeferred "total archived"
#arcData<-rbind(cbind(3640, ??, 0, 0), cbind(63491, 55799, 53706, 49265))
arcData<-cbind(rbind(3640, 3000, 0, 0), rbind(63491, 55799, 53706, 49265))
## growth2.png: stacked bars of total vs. archived / new vs. new-archived
## URI-R counts for the nondeferred and deferred crawls (arcData built above).
print("growth2.png")
png(filename="growth2.png", type="cairo", height=500,
    width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
barplot(arcData,
        xlab="Depth", ylab="URI-Rs", col=c("#d01c8b","#f1b6da", "#4dac26", "#b8e186"))
# BUG FIX: legend() with `col` but no pch/lty/fill draws no colour keys at
# all; use `fill` so the swatches match the barplot segment colours.
legend("topleft", c("Total URIs", "Total archived", "New URIs", "New Archived"),
       fill=c("#d01c8b","#f1b6da", "#4dac26", "#b8e186"), cex=1)
dev.off()
##state 0, state 0 archived, state 1, state 1 archived, state 2, state 2 archived
####need to update the nondeferred "total archived"
arcData<-cbind(rbind(3640, 1469, 0, 0, 0, 0), rbind(63491, 55799, 53706, 49265, 10208, 9363))
arcData2<-cbind(
#rbind(3640, 1469, 63491, 55799),
#rbind(0, 0, 53706, 49265),
#rbind(0, 0, 10208, 9363)
#rbind(2171, 1469, 7692, 55799),
#rbind(0, 0, 4441, 49265),
#rbind(0, 0, 845, 9363)
rbind(2171, 1469, 760, 6223),
rbind(0, 0, 4441, 49265),
rbind(0, 0, 845, 9363)
)
print("growth2.png")
png(filename="growth2.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
barplot(arcData,
ylab="URI-Rs", ylim=c(0, 250000), col=c("#c51b7d",
"#e9a3c9",
"#fde0ef",
"#e6f5d0",
"#a1d76a",
"#4d9221"))
legend('topleft', c("Archived State 0", "Unarchived in State 0", "Archived State 1", "Unarchived in State 1", "Archived State 2", "Unarchived in State 2"),
col=c("#c51b7d",
"#e9a3c9",
"#fde0ef",
"#e6f5d0",
"#a1d76a",
"#4d9221"),
pch=16, cex=1)
axis(1, at=c(0.7, 1.9),labels=c("Nondeferred", "Deferred"), las=2)
dev.off()
frontierSize<-cbind(
rbind((0+0),(845+9363)),
rbind((0+0),(4441+49265)),
rbind((2171+1469),(760+6223))
)
print("growth2a_split1.png")
png(filename="growth2a_split1.png", type="cairo", height=250,
width=500, bg="white")
par(xpd=TRUE, mar=c(5.1, 5.1, 5.1, 5.1))
barplot(frontierSize,
xlab="Frontier Size (URI-Rs)", xlim=c(0, 70000), col=c("#c51b7d",
"#e9a3c9",
"#a1d76a",
"#4d9221"), horiz=TRUE)
legend('topright', c("Nondeferred", "Deferred"),
col=c("#c51b7d",
"#e9a3c9"),
pch=16, cex=1)
axis(2, at=c(0.7, 1.9, 3.1), labels=c("State 2", "State 1", "State 0"), las=2)
dev.off()
unarchivedSize<-cbind(
rbind((0+0),(9363)),
rbind((0+0),(49265)),
rbind((1469),(6223))
)
print("growth2a_split2.png")
png(filename="growth2a_split2.png", type="cairo", height=250,
width=500, bg="white")
par(xpd=TRUE, mar=c(5.1, 5.1, 5.1, 5.1))
barplot(unarchivedSize,
xlab="Number URI-Rs unarchived", xlim=c(0, 70000), col=c("#c51b7d",
"#e9a3c9",
"#a1d76a",
"#4d9221"), horiz=TRUE)
legend('topright', c("Nondeferred", "Deferred"),
col=c("#c51b7d",
"#e9a3c9"),
pch=16, cex=1)
axis(2, at=c(0.7, 1.9, 3.1), labels=c("State 2", "State 1", "State 0"), las=2)
dev.off()
print("growth2a.png")
png(filename="growth2a.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
barplot(arcData2,
ylab="URI-Rs", ylim=c(0, 70000), col=c("#c51b7d",
"#e9a3c9",
"#a1d76a",
"#4d9221"))
legend('topleft', c("Nondeferred Archived", "Nondeferred unarchived","Deferred Archived", "Deferred unarchived"),
col=c("#c51b7d",
"#e9a3c9",
"#a1d76a",
"#4d9221"),
pch=16, cex=1)
axis(1, at=c(0.7, 1.9, 3.1), labels=c("State 0", "State 1", "State 2"), las=2)
dev.off()
newDepthData<-c(0.12, 0.91, 0.96)
newDepthData2<-c(7692, 49265, 9363)
print("archivedByDepthPct.png")
png(filename="archivedByDepthPct.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
barplot(newDepthData,
ylab="Pct New URI-Rs Unarchived", ylim=c(0, 1))
axis(1, at=c(0.7, 1.9, 3.1), labels=c("State 0", "State 1", "State 2"), las=2)
dev.off()
print("archivedByDepthNum.png")
png(filename="archivedByDepthNum.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
#barplot(log(newDepthData2),
# ylab="Number New URI-Rs Unarchived (log10)", ylim=c(0, 15))
barplot((newDepthData2),
ylab="Number New URI-Rs Unarchived", ylim=c(0, 50000))
axis(1, at=c(0.7, 1.9, 3.1), labels=c("State 0", "State 1", "State 2"), las=2)
dev.off()
print("archivedByDepthPctHorz.png")
png(filename="archivedByDepthPctHorz.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
barplot(rev(newDepthData*100),
xlab="Percent New URI-Rs Unarchived", xlim=c(0, 100), horiz=TRUE)
axis(2, at=c(0.7, 1.9, 3.1), labels=rev(c("State 0", "State 1", "State 2")), las=2)
dev.off()
print("archivedByDepthNumHorz.png")
png(filename="archivedByDepthNumHorz.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
#barplot(log(newDepthData2),
# ylab="Number New URI-Rs Unarchived (log10)", ylim=c(0, 15))
barplot((newDepthData2),
xlab="Number New URI-Rs Unarchived", xlim=c(0, 50000), horiz=TRUE)
axis(2, at=c(0.7, 1.9, 3.1), labels=c("State 0", "State 1", "State 2"), las=2)
dev.off()
#imgs, js, text, json, css, font, html, other
#unarchiveMimes
print("unarchivedByMime.png")
png(filename="unarchivedByMime.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mgp=c(3, 1, 0), mar=c(5.1, 8.1, 5.1, 5.1))
#font, css, text, other, json, html, js, img
unarchiveMimes2<-rev(c(unarchiveMimes[6], unarchiveMimes[5],
unarchiveMimes[3], unarchiveMimes[8],
unarchiveMimes[4], unarchiveMimes[7],
unarchiveMimes[2], unarchiveMimes[1]
))
barplot(unarchiveMimes2,
ylab="", xlab="Number Unarchived", xlim=c(0, max(unarchiveMimes)), horiz=TRUE)
axis(2, at=c(0.7, 1.9, 3.1, 4.3, 5.5, 6.7, 7.9, 9.1), labels=c("Images", "JavaScript", "HTML", "JSON", "CSS", "Font", "Text", "Other"), las=2)
dev.off()
print("newByEvent.png")
#exit()
png(filename="newByEvent.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 7.1, 5.1, 5.1))
plot(9999999, 9999999,
ylab="Number of new URI-Rs (log10)",
#xlab="Event",
ylim=c(0, 5),
xaxt='n',
yaxt='n',
xlab="",
xlim=c(0,11))
#plot(9999999, 9999999,
# xlab="Number of new URI-Rs (log10)",
#xlab="Event",
# xlim=c(0, 5),
# yaxt='n',
# xaxt='n',
# ylab="",
# ylim=c(0,11))
# Scatter each row of eventNew (log10 new-URI count, column 1) at an x
# position per event type; rows are assumed pre-sorted so equal values in
# column 2 (the event name) are adjacent -- TODO confirm input is sorted.
# x = current event-type column on the plot; i = current row index.
x<-1
i<-1
# Running per-event total of new URI-Rs; completed totals collect in `sums`
# (only used by the commented-out lines() call below).
sum<-0
sums<-c()
while(i < length(eventNew[,1]))
{
if(i > 1)
{
# Same event as previous row: stay in the same x column.
if(eventNew[i,2] == eventNew[i-1, 2])
{
}
else
{
# Event boundary: bank the finished total, reset it, advance one column.
sums<-c(sums, sum)
sum<-0
x<-x+1
}
}
# Plot this row's (already log10-transformed) count in the current column.
points(x, eventNew[i,1], pch=1)
sum<-sum+as.integer(eventNew[i,1])
i<-i+1
}
#lines(1:length(sums), sums);
axis(1, at=c(1:11), labels=c("change", "keydown", "keypress", "submit", "unload", "blur", "mousedown", "mouseout", "other", "mouseover", "click"), las=2)
axis(2, at=c(1:5), labels=c("1", "10", "100", "1,000", "10,000"), las=2)
#axis(2, at=c(1:11), labels=c("blur", "change", "click", "keydown", "keypress", "mousedown", "mouseout", "mouseover", "submit", "unload", "other"), las=2)
#axis(1, at=c(1:5), labels=c("1", "10", "100", "1,000", "10,000"), las=2)
dev.off()
print("newByEventHorz.png")
#exit()
png(filename="newByEventHorz.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 7.1, 5.1, 5.1))
#plot(9999999, 9999999,
# ylab="Number of new URI-Rs (log10)",
#xlab="Event",
# ylim=c(0, 5),
# xaxt='n',
# yaxt='n',
# xlab="",
# xlim=c(0,11))
plot(9999999, 9999999,
xlab="Number of new URI-Rs (log10)",
#xlab="Event",
xlim=c(0, 5),
yaxt='n',
xaxt='n',
ylab="",
ylim=c(0,11))
eventNew[,1]<-rev(eventNew[,1])
eventNew[,2]<-rev(eventNew[,2])
x<-1
i<-1
sum<-0
sums<-c()
while(i < length(eventNew[,1]))
{
if(i > 1)
{
if(eventNew[i,2] == eventNew[i-1, 2])
{
}
else
{
sums<-c(sums, sum)
sum<-0
x<-x+1
}
}
#points(x, eventNew[i,1], pch=x)
points(eventNew[i,1], x, pch=1)
sum<-sum+as.integer(eventNew[i,1])
i<-i+1
}
#lines(1:length(sums), sums);
#axis(1, at=c(1:11), labels=c("blur", "change", "click", "keydown", "keypress", "mousedown", "mouseout", "mouseover", "submit", "unload", "other"), las=2)
#axis(2, at=c(1:5), labels=c("1", "10", "100", "1,000", "10,000"), las=2)
axis(2, at=c(1:11), labels=rev(c("change", "keydown", "keypress", "submit", "unload", "blur", "mousedown", "mouseout", "other", "mouseover", "click")), las=2)
axis(1, at=c(1:5), labels=c("1", "10", "100", "1,000", "10,000"), las=2)
dev.off()
##Run Number, URI, Number of States, Depth, Breadth, New URIs, <Events>
#defData <- as.matrix(read.csv("../deferredRunStats.txt", sep=",", header=TRUE))
#nondefData <- as.matrix(read.csv("../nondeferredRunStats.txt", sep=",", header=TRUE))
roiTime<-rbind(1035, 8452, 27990, 40258)
roiSize<-rbind(4250, 11942, 56957, 66320)
print("ROItime.png")
png(filename="ROItime.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
plot(c(0:3), roiTime, main="440 URI-R Crawl",
xlab="Crawl Depth", ylab="Crawl time (s)", type="b", pch=2, xaxt='n', ylim=c(0, 70000))
#abline(a=roiTime[1], 0)
axis(1, at=c(0:3), labels=c("Heritrix-only", "State 0", "State 1", "State 2"), las=2)
dev.off()
print("ROIsize.png")
png(filename="ROIsize.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
plot(c(0:3), roiSize, main="440 URI-R Crawl",
xlab="Crawl Depth", ylab="Frontier Size (URI-Rs)", type="b", pch=2, xaxt='n', ylim=c(0, 70000))
axis(1, at=c(0:3), labels=c("Heritrix-only", "State 0", "State 1", "State 2"), las=2)
dev.off()
print("ROIint.png")
png(filename="ROIint.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
plot(c(0:3), roiSize, main="440 URI-R Crawl",
xlab="Crawl Depth", ylab="Frontier Size (URI-Rs)", type="b", pch=2, xaxt='n', ylim=c(0, 70000))
lines(c(0:3), roiTime, type="b", pch=3)
axis(1, at=c(0:3), labels=c("Heritrix-only", "State 0", "State 1", "State 2"), las=2)
legend('topleft', c("Frontier Size (URI-Rs)", "Crawl Time (s)"),
pch=c(2,3), cex=1)
dev.off()
print("ROIboth.png")
png(filename="ROIboth.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
plot(roiTime, roiSize, main="440 URI-R Crawl",
xlab="Crawl Time (s)", ylab="Frontier Size (URI-Rs)", type="b", pch=2)
dev.off()
mimeSizesSort<-rev(sort(as.integer(mimeSize[,2])))
#mimeSizesSort<-log10(rev(sort(as.integer(mimeSize[,2]))))
print("mimeSizeHisto.png")
png(filename="mimeSizeHisto.png", type="cairo", height=250,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
barplot((mimeSizesSort[1:100]), #main="URI-R Counts",
yaxt='n',
xlab="Unarchived Images",
#ylab="Size",
ylim=c(0, 5000000))
axis(2, at=c(0, 2500000, 5000000), labels=c("0MB", "2.5MB", "5MB"), las=2)
dev.off()
massaged <- as.numeric(mimeSizesSort)
massaged[is.na(massaged)]<-0
massaged<-rev(sort(massaged))
i<-1
#while(i<(length(massaged)-1))
while(i<12000)
{
if(i<(length(massaged)-1))
{
massaged[i+1]=massaged[i+1]+massaged[i]
}
else
{
massaged<-c(massaged, 34684569)
}
i<-i+1
}
massaged[9999]<-34684569
massaged<-massaged/max(massaged)
print("cdfMimeSize.png")
png(filename="cdfMimeSize.png", type="cairo", height=400,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
plot(
#ecdf(rights),
1:(length(massaged)-1), massaged[1:(length(massaged)-1)],
main="", type="b",
xlim=c(0,12000),
col="red", xlab="Images", ylab="Proportion of bytes of images",
#xlim=c(),
#ylim=c(),
pch=19)
dev.off()
massagedNew<- as.numeric(mimeSizesSort)
massagedNew<-rev(massagedNew)
last<-0
i<-1
massagedX<-c(0)
massagedY<-c(0)
#while(i<(length(massaged)-1))
while(i<12000)
{
if(i > length(massagedNew))
{
massagedY[1]<-massagedY[1]+1
print(i)
}
else if(massagedNew[i] > last)
{
p<-sprintf("%i: %i", i, massagedNew[i])
print(p)
last<-massagedNew[i]
massagedY<-c(massagedY, (max(massagedY)+1))
massagedX<-c(massagedX, last)
}
else
{
massagedY[length(massagedY)]<-massagedY[length(massagedY)]+1
}
i<-i+1
}
massagedY<-massagedY/max(massagedY)
massagedX<-log10(massagedX*1024)
print("cdfMimeSize2.png")
png(filename="cdfMimeSize2.png", type="cairo", height=400,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
plot(
#ecdf(rights),
massagedX, massagedY[1:(length(massagedY))],
main="", type="l", lwd=4,
xlim=c(2.5,10),
col="red", xlab="Observed Image Sizes",
xaxt='n',
ylab="Proportion of Images",
#xlim=c(),
#ylim=c(),
pch=19)
axis(1, at=c(2.7, 4.2, 5.6, 7, 8.6, 9.8), labels=c("500B", "1KB", "500KB", "1MB", "2.5MB", "5MB"), las=1)
dev.off()
uriContribs <- as.matrix(read.csv("../newThingsByURI.txt", sep=",", header=TRUE))
## Normalised running total of new-frontier contributions per deferred URI-R,
## largest contributors first. Matches the original index loop exactly: the
## cumulative sum covers elements 1..n-1 and the final element is left as-is.
massaged <- as.numeric(uriContribs[,2])
massaged[is.na(massaged)] <- 0
massaged <- rev(sort(massaged))
n <- length(massaged)
if (n > 2) {
  massaged[seq_len(n - 1)] <- cumsum(massaged[seq_len(n - 1)])
}
massaged <- massaged / max(massaged)
print("cdfURIs.png")
png(filename="cdfURIs.png", type="cairo", height=400,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
plot(
#ecdf(rights),
1:(length(massaged)-1), massaged[1:(length(massaged)-1)],
main="", type="l", lwd=4,
col="red", xlab="Deferred URI-Rs", ylab="Proportion Contributed New Crawl Frontier",
#xlim=c(),
#ylim=c(),
pch=19)
dev.off()
|
/clientSideState_TechReport/imgs/processStats (1).R
|
no_license
|
jbrunelle/papers
|
R
| false
| false
| 16,207
|
r
|
#uri-r, count
#tailData <- as.matrix(read.csv("../deferredTLDtail.csv", sep=",", header=TRUE))
#eventNew <- as.matrix(read.csv("../newURIsByEvent.txt.bkp", sep=",", header=TRUE))
#eventNew <- as.matrix(read.csv("../newURIsByEventSort.txt.bkp", sep=",", header=TRUE))
#mimeSize <- as.matrix(read.csv("../massagedMimeSizes.csv", sep=",", header=TRUE))
#eventNew[,1]<-log10(as.integer(eventNew[,1]))
#imgs, js, text, json, css, font, html, other
unarchiveMimes <- c(12200, 5760, 526, 985, 220, 29, 5241, 706)
counts<-as.numeric(tailData[,2])
uris<-(tailData[,1])
counts[is.na(counts)]<-0
uris<-uris[counts>200]
counts<-counts[counts>200]
counts<-counts[1:300]/5.5
print("longTail.png")
png(filename="longTail.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
barplot((counts[1:300]), #main="URI-R Counts",
xlab="Top 300 Occurring Embedded Resources", ylab="Occurrences", ylim=c(0, 2000))
axis(1, at=c(0, 115, 240, 360),labels=c(0, 100, 200, 300), las=2)
dev.off()
##Run Number, URI, Number of States, Depth, Breadth, New URIs, <Events>
#defData <- as.matrix(read.csv("../deferredRunStats.txt", sep=",", header=TRUE))
#nondefData <- as.matrix(read.csv("../nondeferredRunStats.txt", sep=",", header=TRUE))
#depthData<-rbind(cbind(3640, 3640, 3640), cbind(8188, 41080, 63491))
depthData<-rbind(cbind(4250, 4250, 4250), cbind(7692, 56957, 66320))
print("growth1.png")
png(filename="growth1.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
plot(0:(length(depthData[1,])-1), depthData[1, ], #main="URI-R Counts",
xlab="Depth", ylab="Total Frontier Size", type="b", pch=2, ylim=c(0, 70000))
points(0:(length(depthData[1,])-1), depthData[2, ], type="b", pch=4)
legend("topleft", inset=.05, #title="URI Set",
c("Deferred", "Nondeferred"), pch=c(4, 2), horiz=FALSE)
#legend('topleft', c("Deferred", "Nondeferred"),
# pch=c(4, 2), cex=1)
dev.off()
##total uris, total archived, new uris, new archived
####need to update the nondeferred "total archived"
#arcData<-rbind(cbind(3640, ??, 0, 0), cbind(63491, 55799, 53706, 49265))
arcData<-cbind(rbind(3640, 3000, 0, 0), rbind(63491, 55799, 53706, 49265))
## growth2.png: stacked bars of total vs. archived / new vs. new-archived
## URI-R counts for the nondeferred and deferred crawls (arcData built above).
print("growth2.png")
png(filename="growth2.png", type="cairo", height=500,
    width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
barplot(arcData,
        xlab="Depth", ylab="URI-Rs", col=c("#d01c8b","#f1b6da", "#4dac26", "#b8e186"))
# BUG FIX: legend() with `col` but no pch/lty/fill draws no colour keys at
# all; use `fill` so the swatches match the barplot segment colours.
legend("topleft", c("Total URIs", "Total archived", "New URIs", "New Archived"),
       fill=c("#d01c8b","#f1b6da", "#4dac26", "#b8e186"), cex=1)
dev.off()
##state 0, state 0 archived, state 1, state 1 archived, state 2, state 2 archived
####need to update the nondeferred "total archived"
arcData<-cbind(rbind(3640, 1469, 0, 0, 0, 0), rbind(63491, 55799, 53706, 49265, 10208, 9363))
arcData2<-cbind(
#rbind(3640, 1469, 63491, 55799),
#rbind(0, 0, 53706, 49265),
#rbind(0, 0, 10208, 9363)
#rbind(2171, 1469, 7692, 55799),
#rbind(0, 0, 4441, 49265),
#rbind(0, 0, 845, 9363)
rbind(2171, 1469, 760, 6223),
rbind(0, 0, 4441, 49265),
rbind(0, 0, 845, 9363)
)
print("growth2.png")
png(filename="growth2.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
barplot(arcData,
ylab="URI-Rs", ylim=c(0, 250000), col=c("#c51b7d",
"#e9a3c9",
"#fde0ef",
"#e6f5d0",
"#a1d76a",
"#4d9221"))
legend('topleft', c("Archived State 0", "Unarchived in State 0", "Archived State 1", "Unarchived in State 1", "Archived State 2", "Unarchived in State 2"),
col=c("#c51b7d",
"#e9a3c9",
"#fde0ef",
"#e6f5d0",
"#a1d76a",
"#4d9221"),
pch=16, cex=1)
axis(1, at=c(0.7, 1.9),labels=c("Nondeferred", "Deferred"), las=2)
dev.off()
frontierSize<-cbind(
rbind((0+0),(845+9363)),
rbind((0+0),(4441+49265)),
rbind((2171+1469),(760+6223))
)
print("growth2a_split1.png")
png(filename="growth2a_split1.png", type="cairo", height=250,
width=500, bg="white")
par(xpd=TRUE, mar=c(5.1, 5.1, 5.1, 5.1))
barplot(frontierSize,
xlab="Frontier Size (URI-Rs)", xlim=c(0, 70000), col=c("#c51b7d",
"#e9a3c9",
"#a1d76a",
"#4d9221"), horiz=TRUE)
legend('topright', c("Nondeferred", "Deferred"),
col=c("#c51b7d",
"#e9a3c9"),
pch=16, cex=1)
axis(2, at=c(0.7, 1.9, 3.1), labels=c("State 2", "State 1", "State 0"), las=2)
dev.off()
unarchivedSize<-cbind(
rbind((0+0),(9363)),
rbind((0+0),(49265)),
rbind((1469),(6223))
)
print("growth2a_split2.png")
png(filename="growth2a_split2.png", type="cairo", height=250,
width=500, bg="white")
par(xpd=TRUE, mar=c(5.1, 5.1, 5.1, 5.1))
barplot(unarchivedSize,
xlab="Number URI-Rs unarchived", xlim=c(0, 70000), col=c("#c51b7d",
"#e9a3c9",
"#a1d76a",
"#4d9221"), horiz=TRUE)
legend('topright', c("Nondeferred", "Deferred"),
col=c("#c51b7d",
"#e9a3c9"),
pch=16, cex=1)
axis(2, at=c(0.7, 1.9, 3.1), labels=c("State 2", "State 1", "State 0"), las=2)
dev.off()
print("growth2a.png")
png(filename="growth2a.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
barplot(arcData2,
ylab="URI-Rs", ylim=c(0, 70000), col=c("#c51b7d",
"#e9a3c9",
"#a1d76a",
"#4d9221"))
legend('topleft', c("Nondeferred Archived", "Nondeferred unarchived","Deferred Archived", "Deferred unarchived"),
col=c("#c51b7d",
"#e9a3c9",
"#a1d76a",
"#4d9221"),
pch=16, cex=1)
axis(1, at=c(0.7, 1.9, 3.1), labels=c("State 0", "State 1", "State 2"), las=2)
dev.off()
newDepthData<-c(0.12, 0.91, 0.96)
newDepthData2<-c(7692, 49265, 9363)
print("archivedByDepthPct.png")
png(filename="archivedByDepthPct.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
barplot(newDepthData,
ylab="Pct New URI-Rs Unarchived", ylim=c(0, 1))
axis(1, at=c(0.7, 1.9, 3.1), labels=c("State 0", "State 1", "State 2"), las=2)
dev.off()
print("archivedByDepthNum.png")
png(filename="archivedByDepthNum.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
#barplot(log(newDepthData2),
# ylab="Number New URI-Rs Unarchived (log10)", ylim=c(0, 15))
barplot((newDepthData2),
ylab="Number New URI-Rs Unarchived", ylim=c(0, 50000))
axis(1, at=c(0.7, 1.9, 3.1), labels=c("State 0", "State 1", "State 2"), las=2)
dev.off()
print("archivedByDepthPctHorz.png")
png(filename="archivedByDepthPctHorz.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
barplot(rev(newDepthData*100),
xlab="Percent New URI-Rs Unarchived", xlim=c(0, 100), horiz=TRUE)
axis(2, at=c(0.7, 1.9, 3.1), labels=rev(c("State 0", "State 1", "State 2")), las=2)
dev.off()
print("archivedByDepthNumHorz.png")
png(filename="archivedByDepthNumHorz.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
#barplot(log(newDepthData2),
# ylab="Number New URI-Rs Unarchived (log10)", ylim=c(0, 15))
barplot((newDepthData2),
xlab="Number New URI-Rs Unarchived", xlim=c(0, 50000), horiz=TRUE)
axis(2, at=c(0.7, 1.9, 3.1), labels=c("State 0", "State 1", "State 2"), las=2)
dev.off()
#imgs, js, text, json, css, font, html, other
#unarchiveMimes
print("unarchivedByMime.png")
png(filename="unarchivedByMime.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mgp=c(3, 1, 0), mar=c(5.1, 8.1, 5.1, 5.1))
#font, css, text, other, json, html, js, img
unarchiveMimes2<-rev(c(unarchiveMimes[6], unarchiveMimes[5],
unarchiveMimes[3], unarchiveMimes[8],
unarchiveMimes[4], unarchiveMimes[7],
unarchiveMimes[2], unarchiveMimes[1]
))
barplot(unarchiveMimes2,
ylab="", xlab="Number Unarchived", xlim=c(0, max(unarchiveMimes)), horiz=TRUE)
axis(2, at=c(0.7, 1.9, 3.1, 4.3, 5.5, 6.7, 7.9, 9.1), labels=c("Images", "JavaScript", "HTML", "JSON", "CSS", "Font", "Text", "Other"), las=2)
dev.off()
print("newByEvent.png")
#exit()
png(filename="newByEvent.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 7.1, 5.1, 5.1))
plot(9999999, 9999999,
ylab="Number of new URI-Rs (log10)",
#xlab="Event",
ylim=c(0, 5),
xaxt='n',
yaxt='n',
xlab="",
xlim=c(0,11))
#plot(9999999, 9999999,
# xlab="Number of new URI-Rs (log10)",
#xlab="Event",
# xlim=c(0, 5),
# yaxt='n',
# xaxt='n',
# ylab="",
# ylim=c(0,11))
x<-1
i<-1
sum<-0
sums<-c()
while(i < length(eventNew[,1]))
{
if(i > 1)
{
if(eventNew[i,2] == eventNew[i-1, 2])
{
}
else
{
sums<-c(sums, sum)
sum<-0
x<-x+1
}
}
points(x, eventNew[i,1], pch=1)
sum<-sum+as.integer(eventNew[i,1])
i<-i+1
}
#lines(1:length(sums), sums);
axis(1, at=c(1:11), labels=c("change", "keydown", "keypress", "submit", "unload", "blur", "mousedown", "mouseout", "other", "mouseover", "click"), las=2)
axis(2, at=c(1:5), labels=c("1", "10", "100", "1,000", "10,000"), las=2)
#axis(2, at=c(1:11), labels=c("blur", "change", "click", "keydown", "keypress", "mousedown", "mouseout", "mouseover", "submit", "unload", "other"), las=2)
#axis(1, at=c(1:5), labels=c("1", "10", "100", "1,000", "10,000"), las=2)
dev.off()
print("newByEventHorz.png")
#exit()
png(filename="newByEventHorz.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 7.1, 5.1, 5.1))
#plot(9999999, 9999999,
# ylab="Number of new URI-Rs (log10)",
#xlab="Event",
# ylim=c(0, 5),
# xaxt='n',
# yaxt='n',
# xlab="",
# xlim=c(0,11))
plot(9999999, 9999999,
xlab="Number of new URI-Rs (log10)",
#xlab="Event",
xlim=c(0, 5),
yaxt='n',
xaxt='n',
ylab="",
ylim=c(0,11))
eventNew[,1]<-rev(eventNew[,1])
eventNew[,2]<-rev(eventNew[,2])
x<-1
i<-1
sum<-0
sums<-c()
while(i < length(eventNew[,1]))
{
if(i > 1)
{
if(eventNew[i,2] == eventNew[i-1, 2])
{
}
else
{
sums<-c(sums, sum)
sum<-0
x<-x+1
}
}
#points(x, eventNew[i,1], pch=x)
points(eventNew[i,1], x, pch=1)
sum<-sum+as.integer(eventNew[i,1])
i<-i+1
}
#lines(1:length(sums), sums);
#axis(1, at=c(1:11), labels=c("blur", "change", "click", "keydown", "keypress", "mousedown", "mouseout", "mouseover", "submit", "unload", "other"), las=2)
#axis(2, at=c(1:5), labels=c("1", "10", "100", "1,000", "10,000"), las=2)
axis(2, at=c(1:11), labels=rev(c("change", "keydown", "keypress", "submit", "unload", "blur", "mousedown", "mouseout", "other", "mouseover", "click")), las=2)
axis(1, at=c(1:5), labels=c("1", "10", "100", "1,000", "10,000"), las=2)
dev.off()
##Run Number, URI, Number of States, Depth, Breadth, New URIs, <Events>
#defData <- as.matrix(read.csv("../deferredRunStats.txt", sep=",", header=TRUE))
#nondefData <- as.matrix(read.csv("../nondeferredRunStats.txt", sep=",", header=TRUE))
roiTime<-rbind(1035, 8452, 27990, 40258)
roiSize<-rbind(4250, 11942, 56957, 66320)
print("ROItime.png")
png(filename="ROItime.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
plot(c(0:3), roiTime, main="440 URI-R Crawl",
xlab="Crawl Depth", ylab="Crawl time (s)", type="b", pch=2, xaxt='n', ylim=c(0, 70000))
#abline(a=roiTime[1], 0)
axis(1, at=c(0:3), labels=c("Heritrix-only", "State 0", "State 1", "State 2"), las=2)
dev.off()
print("ROIsize.png")
png(filename="ROIsize.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
plot(c(0:3), roiSize, main="440 URI-R Crawl",
xlab="Crawl Depth", ylab="Frontier Size (URI-Rs)", type="b", pch=2, xaxt='n', ylim=c(0, 70000))
axis(1, at=c(0:3), labels=c("Heritrix-only", "State 0", "State 1", "State 2"), las=2)
dev.off()
print("ROIint.png")
png(filename="ROIint.png", type="cairo", height=300,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
plot(c(0:3), roiSize, main="440 URI-R Crawl",
xlab="Crawl Depth", ylab="Frontier Size (URI-Rs)", type="b", pch=2, xaxt='n', ylim=c(0, 70000))
lines(c(0:3), roiTime, type="b", pch=3)
axis(1, at=c(0:3), labels=c("Heritrix-only", "State 0", "State 1", "State 2"), las=2)
legend('topleft', c("Frontier Size (URI-Rs)", "Crawl Time (s)"),
pch=c(2,3), cex=1)
dev.off()
print("ROIboth.png")
png(filename="ROIboth.png", type="cairo", height=500,
width=500, bg="white")
par(xpd=FALSE, mar=c(7.1, 5.1, 5.1, 5.1))
plot(roiTime, roiSize, main="440 URI-R Crawl",
xlab="Crawl Time (s)", ylab="Frontier Size (URI-Rs)", type="b", pch=2)
dev.off()
mimeSizesSort<-rev(sort(as.integer(mimeSize[,2])))
#mimeSizesSort<-log10(rev(sort(as.integer(mimeSize[,2]))))
print("mimeSizeHisto.png")
png(filename="mimeSizeHisto.png", type="cairo", height=250,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
barplot((mimeSizesSort[1:100]), #main="URI-R Counts",
yaxt='n',
xlab="Unarchived Images",
#ylab="Size",
ylim=c(0, 5000000))
axis(2, at=c(0, 2500000, 5000000), labels=c("0MB", "2.5MB", "5MB"), las=2)
dev.off()
massaged <- as.numeric(mimeSizesSort)
massaged[is.na(massaged)]<-0
massaged<-rev(sort(massaged))
i<-1
#while(i<(length(massaged)-1))
while(i<12000)
{
if(i<(length(massaged)-1))
{
massaged[i+1]=massaged[i+1]+massaged[i]
}
else
{
massaged<-c(massaged, 34684569)
}
i<-i+1
}
massaged[9999]<-34684569
massaged<-massaged/max(massaged)
print("cdfMimeSize.png")
png(filename="cdfMimeSize.png", type="cairo", height=400,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
plot(
#ecdf(rights),
1:(length(massaged)-1), massaged[1:(length(massaged)-1)],
main="", type="b",
xlim=c(0,12000),
col="red", xlab="Images", ylab="Proportion of bytes of images",
#xlim=c(),
#ylim=c(),
pch=19)
dev.off()
massagedNew<- as.numeric(mimeSizesSort)
massagedNew<-rev(massagedNew)
last<-0
i<-1
massagedX<-c(0)
massagedY<-c(0)
#while(i<(length(massaged)-1))
while(i<12000)
{
if(i > length(massagedNew))
{
massagedY[1]<-massagedY[1]+1
print(i)
}
else if(massagedNew[i] > last)
{
p<-sprintf("%i: %i", i, massagedNew[i])
print(p)
last<-massagedNew[i]
massagedY<-c(massagedY, (max(massagedY)+1))
massagedX<-c(massagedX, last)
}
else
{
massagedY[length(massagedY)]<-massagedY[length(massagedY)]+1
}
i<-i+1
}
massagedY<-massagedY/max(massagedY)
massagedX<-log10(massagedX*1024)
print("cdfMimeSize2.png")
png(filename="cdfMimeSize2.png", type="cairo", height=400,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
plot(
#ecdf(rights),
massagedX, massagedY[1:(length(massagedY))],
main="", type="l", lwd=4,
xlim=c(2.5,10),
col="red", xlab="Observed Image Sizes",
xaxt='n',
ylab="Proportion of Images",
#xlim=c(),
#ylim=c(),
pch=19)
axis(1, at=c(2.7, 4.2, 5.6, 7, 8.6, 9.8), labels=c("500B", "1KB", "500KB", "1MB", "2.5MB", "5MB"), las=1)
dev.off()
#CDF of new frontier URI-Rs contributed per deferred URI-R...
#NOTE(review): path is relative to the working directory -- confirm layout
uriContribs <- as.matrix(read.csv("../newThingsByURI.txt", sep=",", header=TRUE))
#second column holds per-URI contribution counts; non-numeric entries -> 0
massaged <- as.numeric(uriContribs[,2])
massaged[is.na(massaged)]<-0
massaged<-rev(sort(massaged))   #descending order, largest contributors first
i<-1
#running (cumulative) sum in place; the loop stops at length-1, so the
#final element is never accumulated -- the plot below compensates by
#drawing only elements 1:(length-1)
while(i<(length(massaged)-1))
{
massaged[i+1]=massaged[i+1]+massaged[i]
i<-i+1
}
massaged<-massaged/max(massaged)   #normalize cumulative sum to [0,1]
print("cdfURIs.png")
png(filename="cdfURIs.png", type="cairo", height=400,
width=500, bg="white")
par(xpd=FALSE, mar=c(5.1, 5.1, 5.1, 5.1))
plot(
#ecdf(rights),
1:(length(massaged)-1), massaged[1:(length(massaged)-1)],
main="", type="l", lwd=4,
col="red", xlab="Deferred URI-Rs", ylab="Proportion Contributed New Crawl Frontier",
#xlim=c(),
#ylim=c(),
pch=19)
dev.off()
|
#---------------------------------------------------------------------------
#
# Hidden global environment for class 'Stem' stuff.
#
# Note that this now holds constants/parameters, etc. for other classes
# within sampSurf as well. JHG 16-Dec-2010.
#
# Note that this environment and its bindings are locked so that no one
# can change it when it is attached. If it is not locked, then it is quite
# simple to mess with it by changing any of the quantities inside. However,
# locking can not be done at the end of this file, it must be done inside
# the .onLoad() function as the package is loaded. 23-Mar-2011, JHG.
#
#Author... Date: 6-Aug-2010
# Jeffrey H. Gove
# USDA Forest Service
# Northern Research Station
# 271 Mast Road
# Durham, NH 03824
# jhgove@unh.edu
# phone: 603-868-7667 fax: 603-868-7604
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#
# Specifically for 'Stem' class and subclass objects and pertinent methods.
#
# Creates a hidden environment for any global constants that might be
# shared among different routines. Just run this to re-create. Add any
# new assignments below.
#
#---------------------------------------------------------------------------
#
# create hidden environment to store things in within the package...
#
.StemEnv = new.env()
#
# units of measure types, conversions and constants...
#
.StemEnv$msrUnits = list(English='English', metric='metric')
.StemEnv$cm2m = 1/100
.StemEnv$m2cm = 100
.StemEnv$in2ft = 1/12
.StemEnv$ft2in = 12
.StemEnv$dbhHgt = c(English = 4.5, metric = 1.3716) #in English and metric
.StemEnv$smpHectare = 10000
.StemEnv$sfpAcre = 43560
#used in, e.g., mirage, walkthrough...
.StemEnv$cardinal = c('north', 'south', 'east', 'west')
.StemEnv$Cardinal = c('North', 'South', 'East', 'West')
#the following is ba in ft^2 or m^2 conversion, NOT the BAF for prism cruising!...
.StemEnv$baFactor = c( English = pi/4, metric = pi/4) #dbh in feet or meters
#.StemEnv$baFactor = c( English = pi/(4*144), metric = pi/(4*10000) ) #dbh in inches or cm
.StemEnv$angleGaugeMaxDegrees = 6.5 #maximum angle for angleGauge objects in ArealSampling Class
#horizontal line sampling (hls) constants...
.StemEnv$HLSSegment = c(English = 66, metric = 20) #hls segment base length in ft or m
#
# per unit area names for the list object slot in the InclusionZone class or subclasses;
# I have perhaps made this too difficult, but one can assign the slots based on the names
# of the list, while the values could someday change; e.g.
# eval(parse(text=paste('list(',.StemEnv$puaEstimates$volume,'=3)')))
# will do it...
#
.StemEnv$puaEstimates = list(volume = 'volume', #cubic volume in correct units
#bfVolume = 'bfVolume', #board foot volume--for trees!
Density = 'Density', #number of logs or trees
Length = 'Length', #total length of logs
surfaceArea = 'surfaceArea', #surface area
coverageArea = 'coverageArea', #projected coverage area--down logs
biomass = 'biomass', #could be green or dry
carbon = 'carbon', #carbon content
basalArea = 'basalArea', #basal area for standing trees
variance = 'variance' #required for MC methods
)
#not pua or estimates, other sampling related attributes: pp stands for per point...
.StemEnv$ppEstimates = list(depth = 'depth') #sample or overlap depth per point
#
# these are the valid estimates from the above list for each main subclass of Stem object...
#
.StemEnv$validEstimates = list(downLogs = c('volume','Density','Length','surfaceArea','coverageArea',
'biomass','carbon'),
standingTrees = c('volume','Density','surfaceArea','basalArea',
'biomass','carbon')
)
#.StemEnv$puaNames = list(English =
# list(volume = 'volume in cubic feet per acre',
# bfVolume = 'board foot volume per acre',
# logDensity = 'number of logs per acre'
# ),
# metric =
# list(volume = 'volume in cubic meters per hectare',
# bfVolume = NULL,
# logDensity = 'number of logs per hectare'
# )
# ) #puaNames
#
# stuff for PDS...
#
.StemEnv$pdsTypes = c('volume', 'surfaceArea', 'coverageArea') #main PPS variable
#
# some plausible species codes/names--note that they can be any character string...
#
.StemEnv$species = c('wp','rm','sm','hemlock','Picea glauca','shagbark hickory','BW')
#
# possible log lie angles & other angular constants...
#
.StemEnv$logAngles = c(0, 2*pi)
.StemEnv$deg2rad = pi/180
.StemEnv$rad2deg = 1/.StemEnv$deg2rad
#
# some useful defaults for plotting consistently in different classes...
#
.StemEnv$alphaTrans = 0.5
#log colors, etc.
.StemEnv$logBorderColor = 'brown4' #perimeter color of downLog objects
.StemEnv$logColor = transparentColorBase('sandybrown', .StemEnv$alphaTrans) #internal shade for down logs
.StemEnv$logAttributeColor = transparentColorBase('tan3', #needle & center color
ifelse(1.5*.StemEnv$alphaTrans>1, 1, 1.5*.StemEnv$alphaTrans))
#standing tree colors, etc. (could be for dbh circle or standing tree as needed)...
.StemEnv$treeBorderColor = 'brown4' #perimeter color for dbh circle
.StemEnv$treeColor = transparentColorBase('sandybrown', .StemEnv$alphaTrans) #internal shade for dbh circle
.StemEnv$treeAttributeColor = transparentColorBase('tan3', #center color
ifelse(1.5*.StemEnv$alphaTrans>1, 1, 1.5*.StemEnv$alphaTrans))
#inclusion zones or plot-related...
.StemEnv$izBorderColor = transparentColorBase('slategray', #plot perimeter color
ifelse(1.5*.StemEnv$alphaTrans>1, 1, 1.5*.StemEnv$alphaTrans))
.StemEnv$izColor = transparentColorBase('gray95', .StemEnv$alphaTrans) #interior color for sample plots
.StemEnv$izCenterColor = transparentColorBase('slategray', .StemEnv$alphaTrans) #center point color
#zero=lightest
.StemEnv$blue.colors = colorRampPalette(c('lightsteelblue1','steelblue','steelblue4'))
.StemEnv$gray.colors = colorRampPalette(c('grey90','grey50'))
#blue.colors = colorRampPalette(c('lightsteelblue1','steelblue','steelblue4'))
#zero=darkest...
#.StemEnv$blue.colors = colorRampPalette(c('steelblue4','steelblue','lightsteelblue'))
#blue.colors = colorRampPalette(c('steelblue4','steelblue','lightsteelblue'))
.StemEnv$gridLineColor = transparentColorBase('slategray',.StemEnv$alphaTrans)
.StemEnv$gridCenterColor = transparentColorBase('firebrick4',.StemEnv$alphaTrans)
#
# critical height and importance variants of CHS...
#
.StemEnv$referenceCHSIZ = c('butt', 'dbh') #reference height for critical height inclusion zone
#
# Monte Carlo Sampling (importance, etc.)...
#
.StemEnv$isHeightColor = transparentColorBase('tan4', #height locations
ifelse(1.5*.StemEnv$alphaTrans>1, 1, 1.5*.StemEnv$alphaTrans))
#
# sampleLogs & sampleTrees stuff...
#
.StemEnv$sampleLogsNames = c('species', 'logLen', 'buttDiam', 'topDiam', 'solidType',
'x', 'y', 'logAngle', 'logAngle.D')
.StemEnv$sampleTreesNames = c('species', 'height', 'dbh', 'topDiam', 'solidType',
'x', 'y')
#
# taper/volume stuff...
#
.StemEnv$solidTypes = c(1,10) #range for valid solidType in log taper/volume
#================================================================================
#
# default taper function...
#
# Note that the taper function must have diameters in the same units as length,
# so if length is in meters, diameters must be as well...
#
# Arguments...
# botDiam = the diameter at the large end in height/length units
# topDiam = the diameter at the small end in height/length units
# logLen = the length of the log
# nSegs = the number of segments desired
# solidType = between 1 and 10 is legal
# hgt = NULL to calculate sections here; otherwise a vector of hgt/length
# section information
# isLog = TRUE: a down log, so use "length" in taper; FALSE: a standing tree
# so use "hgt" in taper
#
# Note: little error checking has been added yet!!!!
#
wbTaper = function(botDiam, topDiam, logLen, nSegs=20, solidType, hgt=NULL, isLog=TRUE) {
#
#   Default taper function: diameters at points along the stem from
#   d(h) = topDiam + (botDiam - topDiam)*((logLen - h)/logLen)^(2/solidType).
#   Diameters must be in the same units as length.
#
#   Arguments...
#     botDiam = the diameter at the large end in height/length units
#     topDiam = the diameter at the small end in height/length units
#     logLen = the length of the log
#     nSegs = the number of segments desired (nSegs+1 diameters returned)
#     solidType = solid-of-revolution parameter; must lie in .StemEnv$solidTypes
#     hgt = NULL to compute evenly spaced sections here; otherwise a vector
#           of height/length points at which to evaluate the taper
#     isLog = TRUE: down log ("length" column); FALSE: standing tree ("height")
#
#   Returns a data frame with columns 'diameter' and 'length' or 'height'.
#
    if(nSegs < 1)
      stop('Must have positive number of log segments for taper!')
    nSegs = nSegs + 1               #becomes the number of diameters required for taper
#consistency fix: reference .StemEnv$solidTypes in the message, as in the
#test above and in wbVolume, rather than relying on the bare name resolving
#through the function's (re-bound) environment...
    if(is.null(solidType) || solidType < .StemEnv$solidTypes[1] || solidType > .StemEnv$solidTypes[2])
      stop('solidType=',solidType,' out of range, must be in: [',
           .StemEnv$solidTypes[1],',',.StemEnv$solidTypes[2],']')
    r = solidType
    if(is.null(hgt))
      hgt = seq(0, logLen, length.out=nSegs)
    diameter = topDiam + (botDiam - topDiam) * ((logLen - hgt)/logLen)^(2/r)
    taper = data.frame(diameter=diameter, hgt=hgt)
    if(isLog)
      colnames(taper) = c('diameter','length')
    else
      colnames(taper) = c('diameter','height')
    return(taper)
} #wbTaper
assign('wbTaper', wbTaper, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$wbTaper) = .StemEnv #assign its environment
rm(wbTaper) #and remove from .GlobalEnv
#================================================================================
#
# default volume function based on default taper function...
#
# k is the conversion factor that takes diameter to radius and puts it into the
# same units as length. But diameters should be in length units for downLogs,
# so k just represents taking squared diameter to squared radius...
#
# To get actual bolt volume of a segment somewhere on the stem, call this twice,
# first with the shorter length, then the longer length (both defining the bolt)
# and get the volume by subtraction--see segmentVolumes() and chainsawSliver
# for examples--but now one should simply use segmentVolumes 25-Apr-2013.
#
wbVolume = function(botDiam, topDiam, logLen, solidType, boltLen=NULL) {
#
#   Closed-form volume under the default taper equation (see wbTaper):
#   integrates pi/4 * d(h)^2 from the butt (h = 0) up to h = boltLen,
#   or the whole stem when boltLen is NULL.  Diameters are assumed to be
#   in length units, so the diameter-to-radius factor is simply 1/4.
#
#   Arguments...
#     botDiam = large-end diameter in length units
#     topDiam = small-end diameter in length units
#     logLen = total log length
#     solidType = taper solid parameter r; must lie in .StemEnv$solidTypes
#     boltLen = upper length for an intermediate volume; NULL == logLen
#
#   Returns the cubic volume as a scalar.
#
    if(is.null(solidType) || solidType < .StemEnv$solidTypes[1] || solidType > .StemEnv$solidTypes[2])
      stop('solidType=',solidType,' out of range, must be in: (',
           .StemEnv$solidTypes[1],',',.StemEnv$solidTypes[2],')')
    r = solidType
    k = 1/4                        #diameter to radius; diam to length units conversion==1
    h = if(is.null(boltLen)) logLen else boltLen
    dropOff = botDiam - topDiam
    tipFrac = 1 - h/logLen
#
#   the three expansion terms of the squared-taper integral...
#
    term1 = topDiam^2 * h
    term2 = logLen * dropOff^2 * r/(4+r) * (1 - tipFrac^((4+r)/r))
    term3 = 2 * logLen * topDiam * dropOff * r/(2+r) * (1 - tipFrac^((2+r)/r))
    logVol = pi*k*(term1 + term2 + term3)
    return(logVol)
} #wbVolume
assign('wbVolume', wbVolume, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$wbVolume) = .StemEnv #assign its environment
rm(wbVolume) #and remove from .GlobalEnv
#================================================================================
#
# Smalian's volume function for passed taper...
#
# k is the conversion factor that takes diameter to radius and puts it into the
# same units as length. But diameters should be in length units for downLogs,
# so k just represents taking squared diameter to squared radius...
#
SmalianVolume = function(taper, isLog=TRUE) {
#
#   Segment-wise stem volume from a taper frame using Smalian's formula
#   (average of end cross-sectional areas times length), substituting a
#   cone for the tip segment when its top diameter is zero.
#
#   BUG FIX: the cone branch formerly used csArea[i+1] -- the top area,
#   which is zero precisely when that branch fires -- yielding zero tip
#   volume; the cone must use the segment's BASE area csArea[i].
#
#   Arguments...
#     taper = data frame with 'diameter' and 'length' (logs) or
#             'height' (trees) columns
#     isLog = TRUE: down log taper; FALSE: standing tree taper
#
#   Returns a list with...
#     boltVol = one-column matrix of individual segment volumes
#     logVol = total stem volume (column sum of boltVol)
#
    k = 1/4                        #diameter to radius; diam to length units conversion==1
    nSegs = nrow(taper) - 1
    if(nSegs < 1)
      stop("Must have positive number of log segments for Smalian's!")
    if(isLog)
      hgtName = 'length'
    else
      hgtName = 'height'
    vol = matrix(NA, nrow=nSegs, ncol=1)
    diam = taper[,'diameter']
    csArea = diam^2
    hgt = taper[,hgtName]
    for(i in seq_len(nSegs)) {
      sectLen = hgt[i+1] - hgt[i]
      if(isTRUE(all.equal(diam[i+1], 0.0)))
        vol[i,1] = pi*k*csArea[i]*sectLen/3                   #cone for tip: base area
      else
        vol[i,1] = pi*k*(csArea[i] + csArea[i+1])*sectLen/2   #Smalian's
    }
    sumVol = colSums(vol)
    return(list(boltVol = vol, logVol = sumVol))
} #SmalianVolume
assign('SmalianVolume', SmalianVolume, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$SmalianVolume) = .StemEnv #assign its environment
rm(SmalianVolume) #and remove from .GlobalEnv
#================================================================================
#
# spline volume function based on taper points...
#
# taper = the taper data frame for the stem object
# lenBot = length at the beginning of the section (default whole stem)
# lenTop = length at the top of the section (default whole stem)
# isLog = TRUE: a "downLog" object; FALSE: a "standingTree" object
# units = units for the stem (doesn't really matter as csa conversion is the
# same for both--see .StemEnv$baFactor above)
#
# Please note: Always pass the entire log's taper data frame as the first argument,
# even if you only want some intermediate bolt volume, as
# the spline function is defined on the entirety of what is in
# taper, and will not reflect the entire stem if only that portion
# is passed that contains the bolt, for example.
#
# Also: The reason we do not simply pass a "Stem" subclass object instead of
# taper, units, etc. here is that we want this routine to possibly be of
# use in the constructor of a "Stem" object. We could, of course, make
# this a generic with "data.frame" and "Stem" signatures--see commented-out
# section below...
#
# Additionally, lenBot is the length to the bottom of the bolt to be integrate,
# and lenTop is to the top of the bolt, both are relative to the butt of the
# stem, which is always zero in a "Stem" subclass object.
#
splineVolume = function(taper, lenBot, lenTop, isLog=TRUE, units='metric') {
#
#   Cubic-spline sectional volume: splines the stem's cross-sectional
#   area over length (logs) or height (trees) and integrates it on
#   [lenBot, lenTop]; both lengths are measured from the stem butt.
#
#   NOTE: always pass the ENTIRE stem's taper frame, even when only an
#   intermediate bolt volume is wanted--the spline is fitted to whatever
#   is passed, and a partial frame changes the fit.
#
#   Arguments...
#     taper = the taper data frame for the stem object
#     lenBot = length to the bottom of the section
#     lenTop = length to the top of the section
#     isLog = TRUE: "downLog" taper (length column); FALSE: "standingTree"
#             taper (height column)
#     units = .StemEnv$msrUnits choice; the csa conversion is the same
#             (pi/4) in both systems (see .StemEnv$baFactor)
#
#   Returns the section volume as a scalar.
#
    if(isLog)
      hgt = taper$length
    else
      hgt = taper$height
    if(lenBot > lenTop || lenBot < 0 || lenTop < 0 || lenTop > max(hgt))
      stop('Nonsensical lengths in splineVolume!')
#
#   cross-sectional area factor: diameter^2 -> csa in matching units...
#
    csaFactor = ifelse(units==.StemEnv$msrUnits$English, .StemEnv$baFactor['English'],
                       .StemEnv$baFactor['metric'])
    csa.spline = splinefun(hgt, csaFactor*taper$diameter^2)   #spline the cross-sectional area
    vol = integrate(csa.spline, lenBot, lenTop)$value
    return(vol)
} #splineVolume
assign('splineVolume', splineVolume, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$splineVolume) = .StemEnv #assign its environment
rm(splineVolume) #and remove from .GlobalEnv
#================================================================================
#
# default surface area function based on default taper function...
#
# botDiam = buttDiam for the log
# topDiam = topTiam for the log
# logLen = logLen for the log
# solidType = solidType for the log
# lenBot = length at the beginning of the section (default whole log)
# lenTop = length at the top of the section (default whole log)
#
wbSurfaceArea = function(botDiam, topDiam, logLen, solidType, lenBot=0, lenTop=logLen) {
#
#   Numerically integrates the lateral surface area of the default taper
#   (.StemEnv$wbTaper) as a surface of revolution:
#   SA = int pi*d(h)*sqrt(1 + (d'(h)/2)^2) dh over [lenBot, lenTop],
#   where d(h) is diameter, so the radius derivative is d'(h)/2.
#
#   Arguments...
#     botDiam = large-end (butt) diameter, in length units
#     topDiam = small-end (top) diameter, in length units
#     logLen = total log length
#     solidType = taper solid parameter r (see .StemEnv$solidTypes)
#     lenBot = length at the beginning of the section (default whole log)
#     lenTop = length at the top of the section (default whole log)
#
#   Returns the section's surface area as a scalar.
#
if(lenBot > lenTop || lenBot < 0 || lenTop < 0)
stop('Nonsensical lengths in wbSurfaceArea!')
#integrand for integrate(): taper diameter times the arc-length
#correction; deriv is the closed-form derivative of the taper equation
#with respect to height...
sa.taper = function(hgt, botDiam, topDiam, logLen, solidType) {
diam = .StemEnv$wbTaper(botDiam, topDiam, logLen, 1, solidType, hgt)$diameter
deriv = -2*(logLen-hgt)^(2/solidType-1) * (botDiam-topDiam)/(solidType*logLen^(2/solidType))
tsa = pi*diam* sqrt(1 + deriv^2/4)
return(tsa)
} #sa.taper
sa = integrate(sa.taper, lenBot, lenTop, botDiam=botDiam, topDiam=topDiam, logLen=logLen,
solidType=solidType)$value
return(sa)
} #wbSurfaceArea
assign('wbSurfaceArea', wbSurfaceArea, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$wbSurfaceArea) = .StemEnv #assign its environment
rm(wbSurfaceArea) #and remove from .GlobalEnv
#================================================================================
#
# spline surface area function based on taper points...
#
# taper = the taper data frame for the log
# lenBot = length at the beginning of the section (default whole log)
# lenTop = length at the top of the section (default whole log)
#
# Please note: Always pass the entire log's taper data frame as the first argument,
# even if you only want some intermediate bolt surface area, as
# the spline function is defined on the entirety of what is in
# taper, and will not reflect the entire log if only that portion
# is passed that contains the bolt, for example.
#
# Additionally, lenBot is the length to the bottom of the bolt to be integrate,
# and lenTop is to the top of the bolt, both are relative to the butt of the
# log, which is always zero in a downLog object.
#
splineSurfaceArea = function(taper, lenBot, lenTop, isLog=TRUE) {
#
#   Spline-based lateral surface area: fits a cubic spline to the stem's
#   diameter-vs-length profile and integrates the surface-of-revolution
#   integrand pi*d(h)*sqrt(1 + (d'(h)/2)^2) on [lenBot, lenTop]; both
#   lengths are measured from the stem butt.
#
#   NOTE: always pass the ENTIRE stem's taper frame, even for a bolt--the
#   spline is fitted to whatever is passed.
#
#   Arguments...
#     taper = the taper data frame for the stem
#     lenBot = length at the beginning of the section
#     lenTop = length at the top of the section
#     isLog = TRUE: down log ("length" column); FALSE: tree ("height")
#
#   Returns the section's surface area as a scalar.
#
    if(lenBot > lenTop || lenBot < 0 || lenTop < 0)
      stop('Nonsensical lengths in splineSurfaceArea!')
    if(isLog)
      hgt = taper$length
    else
      hgt = taper$height
    profile = splinefun(hgt, taper$diameter)
    saIntegrand = function(len) {            #"len" is the length dummy argument
      d = profile(len)
      dPrime = profile(len, 1)               #first derivative of the spline
      pi * d * sqrt(1 + dPrime^2/4)
    } #saIntegrand
    sa = integrate(saIntegrand, lenBot, lenTop)$value
    return(sa)
} #splineSurfaceArea
assign('splineSurfaceArea', splineSurfaceArea, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$splineSurfaceArea) = .StemEnv #assign its environment
rm(splineSurfaceArea) #and remove from .GlobalEnv
#================================================================================
#
# default coverage area function based on default taper function...
#
# botDiam = buttDiam for the log
# topDiam = topTiam for the log
# logLen = logLen for the log
# solidType = solidType for the log
# lenBot = length at the beginning of the section (default whole log)
# lenTop = length at the top of the section (default whole log)
#
wbCoverageArea = function(botDiam, topDiam, logLen, solidType, lenBot=0, lenTop=logLen) {
#
#   Closed-form projected (coverage) area under the default taper curve,
#   i.e., the integral of diameter d(h) over [lenBot, lenTop]; both
#   lengths are measured from the log butt.
#
#   Arguments...
#     botDiam = buttDiam for the log
#     topDiam = topDiam for the log
#     logLen = logLen for the log
#     solidType = solidType for the log (r in the taper equation)
#     lenBot = length at the beginning of the section (default whole log)
#     lenTop = length at the top of the section (default whole log)
#
#   Returns the coverage area as a scalar.
#
    if(lenBot > lenTop || lenBot < 0 || lenTop < 0)
      stop('Nonsensical lengths in wbCoverageArea!')
#
#   whole-log shortcut; the original test compared logLen against
#   (lenBot - lenTop), which is never positive, so the branch could not
#   fire; use a tolerant comparison against the full-log section (0,
#   logLen) instead--the general branch below yields the identical value
#   for the full log, so results are unchanged...
#
    if(isTRUE(all.equal(c(lenBot, lenTop), c(0, logLen))))
      ca = (botDiam*solidType + 2*topDiam)*logLen/(solidType+2)
    else {
      r = solidType
      a = lenBot
      b = lenTop
      Du = topDiam
      Dl = botDiam
      L = logLen
      p1 = ((b - a)*Du*r + (2*b - 2*a)*Du)*L^(2/r)
      p2 = (L - b)^(2/r) * ((Du - Dl)*r*L + (b*Dl - b*Du)*r)
      p3 = (L - a)^(2/r) * ((Dl - Du)*r*L + (a*Du - a*Dl)*r)
      ca = (p1 + p2 + p3)/((r+2)*L^(2/r))
    }
    return(ca)
} #wbCoverageArea
assign('wbCoverageArea', wbCoverageArea, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$wbCoverageArea) = .StemEnv #assign its environment
rm(wbCoverageArea) #and remove from .GlobalEnv
#================================================================================
#
# spline coverage area function based on taper points...
#
# taper = the taper data frame for the log
# lenBot = length at the beginning of the section (default whole log)
# lenTop = length at the top of the section (default whole log)
#
# Please note: Always pass the entire log's taper data frame as the first argument,
# even if you only want some intermediate bolt coverage area, as
# the spline function is defined on the entirety of what is in
# taper, and will not reflect the entire log if only that portion
# is passed that contains the bolt, for example.
#
# Additionally, lenBot is the length to the bottom of the bolt to be integrate,
# and lenTop is to the top of the bolt, both are relative to the butt of the
# log, which is always zero in a downLog object.
#
splineCoverageArea = function(taper, lenBot, lenTop) {
#
#   Spline-based projected (coverage) area: fits a cubic spline to the
#   log's diameter-vs-length taper and integrates it on [lenBot, lenTop];
#   both lengths are measured from the log butt.
#
#   NOTE: always pass the ENTIRE log's taper frame, even for a bolt--the
#   spline is fitted to whatever is passed.
#
#   Arguments...
#     taper = the taper data frame for the log
#     lenBot = length at the beginning of the section
#     lenTop = length at the top of the section
#
#   Returns the coverage area as a scalar.
#
    if(lenBot > lenTop || lenBot < 0 || lenTop < 0)
      stop('Nonsensical lengths in splineCoverageArea!')
    diam.spline = splinefun(taper$length, taper$diameter)
    ca = integrate(diam.spline, lenBot, lenTop)$value   #integrates diameter over the section
    return(ca)
} #splineCoverageArea
assign('splineCoverageArea', splineCoverageArea, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$splineCoverageArea) = .StemEnv #assign its environment
rm(splineCoverageArea) #and remove from .GlobalEnv
#================================================================================
#
# converts degrees to radians on [0,2pi]...
#
# or use simply angle%%360 ??...
#
.deg2Rad = function(angle) {
#
#   Converts degrees to radians on [0, 2*pi].  Angles outside that range
#   are wrapped back into it; note 360 itself maps to 2*pi (not 0),
#   preserving the original behavior for non-negative input.  Negative
#   angles are now also wrapped (e.g., -90 -> 3*pi/2), honoring the
#   stated [0, 2*pi] contract.
#
    twoPi = 2.0*pi
    if(angle > 360) {
      fact = floor(angle/360)
      angle = angle - fact*360
    }
    else if(angle < 0)                      #wrap negatives into [0, 360)
      angle = angle - floor(angle/360)*360
    return(angle*twoPi/360)
}  #.deg2Rad
assign('deg2Rad', .deg2Rad, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$deg2Rad) = .StemEnv #assign its environment
rm(.deg2Rad) #and remove from .GlobalEnv
#================================================================================
#
# converts radians to degrees on [0,360]...
#
.rad2Deg = function(angle) {
#
#   Converts radians to degrees on [0, 360].  Angles outside that range
#   are wrapped back into it; note 2*pi itself maps to 360 (not 0),
#   preserving the original behavior for non-negative input.  Negative
#   angles are now also wrapped (e.g., -pi/2 -> 270), honoring the
#   stated [0, 360] contract.
#
    twoPi = 2.0*pi
    if(angle > twoPi) {
      fact = floor(angle/twoPi)
      angle = angle - fact*twoPi
    }
    else if(angle < 0)                      #wrap negatives into [0, 2*pi)
      angle = angle - floor(angle/twoPi)*twoPi
    return(angle*360/twoPi)
}  #.rad2Deg
} #.rad2Deg
assign('rad2Deg', .rad2Deg, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$rad2Deg) = .StemEnv #assign its environment
rm(.rad2Deg) #and remove from .GlobalEnv
#================================================================================
#
.underLine = function(lineLength = 0,      #length of line
                      lineChar = '-',      #character for line
                      prologue = '\n',     #some character (vector) to be output first
                      postfix = '\n'       #character (vector) to be output last
                     )
{
#------------------------------------------------------------------------------
#   cat()'s a simple horizontal rule for prettier printed output; nothing
#   is emitted unless lineLength lies strictly between 0 and 200; always
#   returns NULL invisibly...
#------------------------------------------------------------------------------
    drawIt = lineLength > 0 && lineLength < 200
    if(drawIt)
      cat(prologue, rep(lineChar, lineLength), postfix, sep='')
    invisible()
} #.underLine
assign('underLine', .underLine, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$underLine) = .StemEnv #assign its environment
rm(.underLine) #and remove from .GlobalEnv
#================================================================================
#
# just generates a random ID in character string that can be combined with
# any prefix for spatial IDs in the package...
#
randomID = function(lenDigits=4,    #number of digits to use
                    lenAlpha=4,     #number of alpha characters
                    ...)
{
#
#   Builds a random alphanumeric ID string by shuffling lenDigits random
#   digits together with lenAlpha random lowercase letters; useful as a
#   suffix to combine with any prefix for spatial IDs in the package.
#   Note sample() draws without replacement, so lenDigits <= 10 and
#   lenAlpha <= 26.
#
    if(lenDigits<1 || lenAlpha<1)
      stop('random IDs must have at least one digit and one letter!')
    digits = sample(0:9, lenDigits)         #distinct digits
    chars = sample(letters, lenAlpha)       #distinct lowercase letters
    pool = c(digits, chars)                 #coerced to character on c()
    nn = length(pool)
    shuffle = sample(1:nn, nn)              #random permutation of the pool
    ranid = paste(pool[shuffle], collapse='')
    return(ranid)
} #randomID
assign('randomID', randomID, envir=.StemEnv) #move to .StemEnv
environment(.StemEnv$randomID) = .StemEnv #assign its environment
rm(randomID) #and remove from .GlobalEnv
|
/R/defStemEnv.R
|
no_license
|
cran/sampSurf
|
R
| false
| false
| 26,043
|
r
|
#---------------------------------------------------------------------------
#
# Hidden global environment for class 'Stem' stuff.
#
# Note that this now holds constants/parameters, etc. for other classes
# within sampSurf as well. JHG 16-Dec-2010.
#
# Note that this environment and its bindings are locked so that no one
# can change it when it is attached. If it is not locked, then it is quite
# simple to mess with it by changing any of the quantities inside. However,
# locking can not be done at the end of this file, it must be done inside
# the .onLoad() function as the package is loaded. 23-Mar-2011, JHG.
#
#Author... Date: 6-Aug-2010
# Jeffrey H. Gove
# USDA Forest Service
# Northern Research Station
# 271 Mast Road
# Durham, NH 03824
# jhgove@unh.edu
# phone: 603-868-7667 fax: 603-868-7604
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#
# Specifically for 'Stem' class and subclass objects and pertinent methods.
#
# Creates a hidden environment for any global constants that might be
# shared among different routines. Just run this to re-create. Add any
# new assignments below.
#
#---------------------------------------------------------------------------
#
# create hidden environment to store things in within the package...
#
.StemEnv = new.env()   #hidden package environment; locked later in .onLoad()
#
#   units of measure types, conversions and constants...
#
.StemEnv$msrUnits = list(English='English', metric='metric')   #the two legal unit systems
.StemEnv$cm2m = 1/100    #centimeters to meters
.StemEnv$m2cm = 100      #meters to centimeters
.StemEnv$in2ft = 1/12    #inches to feet
.StemEnv$ft2in = 12      #feet to inches
.StemEnv$dbhHgt = c(English = 4.5, metric = 1.3716) #breast height: 4.5 ft English, 1.3716 m metric
.StemEnv$smpHectare = 10000   #square meters per hectare
.StemEnv$sfpAcre = 43560      #square feet per acre
#used in, e.g., mirage, walkthrough...
.StemEnv$cardinal = c('north', 'south', 'east', 'west')
.StemEnv$Cardinal = c('North', 'South', 'East', 'West')
#the following is ba in ft^2 or m^2 conversion, NOT the BAF for prism cruising!...
.StemEnv$baFactor = c( English = pi/4, metric = pi/4) #dbh in feet or meters
#.StemEnv$baFactor = c( English = pi/(4*144), metric = pi/(4*10000) ) #dbh in inches or cm
.StemEnv$angleGaugeMaxDegrees = 6.5 #maximum angle for angleGauge objects in ArealSampling Class
#horizontal line sampling (hls) constants...
.StemEnv$HLSSegment = c(English = 66, metric = 20) #hls segment base length in ft or m
#
# per unit area names for the list object slot in the InclusionZone class or subclasses;
# I have perhaps made this too difficult, but one can assign the slots based on the names
# of the list, while the values could someday change; e.g.
# eval(parse(text=paste('list(',.StemEnv$puaEstimates$volume,'=3)')))
# will do it...
#
.StemEnv$puaEstimates = list(volume = 'volume', #cubic volume in correct units
#bfVolume = 'bfVolume', #board foot volume--for trees!
Density = 'Density', #number of logs or trees
Length = 'Length', #total length of logs
surfaceArea = 'surfaceArea', #surface area
coverageArea = 'coverageArea', #projected coverage area--down logs
biomass = 'biomass', #could be green or dry
carbon = 'carbon', #carbon content
basalArea = 'basalArea', #basal area for standing trees
variance = 'variance' #required for MC methods
)
#not pua or estimates, other sampling related attributes: pp stands for per point...
.StemEnv$ppEstimates = list(depth = 'depth') #sample or overlap depth per point
#
# these are the valid estimates from the above list for each main subclass of Stem object...
#
.StemEnv$validEstimates = list(downLogs = c('volume','Density','Length','surfaceArea','coverageArea',
'biomass','carbon'),
standingTrees = c('volume','Density','surfaceArea','basalArea',
'biomass','carbon')
)
#.StemEnv$puaNames = list(English =
# list(volume = 'volume in cubic feet per acre',
# bfVolume = 'board foot volume per acre',
# logDensity = 'number of logs per acre'
# ),
# metric =
# list(volume = 'volume in cubic meters per hectare',
# bfVolume = NULL,
# logDensity = 'number of logs per hectare'
# )
# ) #puaNames
#
# stuff for PDS...
#
.StemEnv$pdsTypes = c('volume', 'surfaceArea', 'coverageArea') #main PPS variable
#
# some plausible species codes/names--note that they can be any character string...
#
.StemEnv$species = c('wp','rm','sm','hemlock','Picea glauca','shagbark hickory','BW')
#
# possible log lie angles & other angular constants...
#
.StemEnv$logAngles = c(0, 2*pi)
.StemEnv$deg2rad = pi/180
.StemEnv$rad2deg = 1/.StemEnv$deg2rad
#
# some useful defaults for plotting consistently in different classes...
#
.StemEnv$alphaTrans = 0.5
#log colors, etc.
.StemEnv$logBorderColor = 'brown4' #perimeter color of downLog objects
.StemEnv$logColor = transparentColorBase('sandybrown', .StemEnv$alphaTrans) #internal shade for down logs
.StemEnv$logAttributeColor = transparentColorBase('tan3', #needle & center color
ifelse(1.5*.StemEnv$alphaTrans>1, 1, 1.5*.StemEnv$alphaTrans))
#standing tree colors, etc. (could be for dbh circle or standing tree as needed)...
.StemEnv$treeBorderColor = 'brown4' #perimeter color for dbh circle
.StemEnv$treeColor = transparentColorBase('sandybrown', .StemEnv$alphaTrans) #internal shade for dbh circle
.StemEnv$treeAttributeColor = transparentColorBase('tan3', #center color
ifelse(1.5*.StemEnv$alphaTrans>1, 1, 1.5*.StemEnv$alphaTrans))
#inclusion zones or plot-related...
.StemEnv$izBorderColor = transparentColorBase('slategray', #plot perimeter color
ifelse(1.5*.StemEnv$alphaTrans>1, 1, 1.5*.StemEnv$alphaTrans))
.StemEnv$izColor = transparentColorBase('gray95', .StemEnv$alphaTrans) #interior color for sample plots
.StemEnv$izCenterColor = transparentColorBase('slategray', .StemEnv$alphaTrans) #center point color
#zero=lightest
.StemEnv$blue.colors = colorRampPalette(c('lightsteelblue1','steelblue','steelblue4'))
.StemEnv$gray.colors = colorRampPalette(c('grey90','grey50'))
#blue.colors = colorRampPalette(c('lightsteelblue1','steelblue','steelblue4'))
#zero=darkest...
#.StemEnv$blue.colors = colorRampPalette(c('steelblue4','steelblue','lightsteelblue'))
#blue.colors = colorRampPalette(c('steelblue4','steelblue','lightsteelblue'))
.StemEnv$gridLineColor = transparentColorBase('slategray',.StemEnv$alphaTrans)
.StemEnv$gridCenterColor = transparentColorBase('firebrick4',.StemEnv$alphaTrans)
#
# critical height and importance variants of CHS...
#
.StemEnv$referenceCHSIZ = c('butt', 'dbh') #reference height for critical height inclusion zone
#
# Monte Carlo Sampling (importance, etc.)...
#
.StemEnv$isHeightColor = transparentColorBase('tan4', #height locations
ifelse(1.5*.StemEnv$alphaTrans>1, 1, 1.5*.StemEnv$alphaTrans))
#
# sampleLogs & sampleTrees stuff...
#
.StemEnv$sampleLogsNames = c('species', 'logLen', 'buttDiam', 'topDiam', 'solidType',
'x', 'y', 'logAngle', 'logAngle.D')
.StemEnv$sampleTreesNames = c('species', 'height', 'dbh', 'topDiam', 'solidType',
'x', 'y')
#
# taper/volume stuff...
#
.StemEnv$solidTypes = c(1,10) #range for valid solidType in log taper/volume
#================================================================================
#
# default taper function...
#
# Note that the taper function must have diameters in the same units as length,
# so if length is in meters, diameters must be as well...
#
# Arguments...
# botDiam = the diameter at the large end in height/length units
# topDiam = the diameter at the small end in height/length units
# logLen = the length of the log
# nSegs = the number of segments desired
# solidType = between 1 and 10 is legal
# hgt = NULL to calculate sections here; otherwise a vector of hgt/length
# section information
# isLog = TRUE: a down log, so use "length" in taper; FALSE: a standing tree
# so use "hgt" in taper
#
# Note: little error checking has been added yet!!!!
#
wbTaper = function(botDiam, topDiam, logLen, nSegs=20, solidType, hgt=NULL, isLog=TRUE) {
#------------------------------------------------------------------------------
#   Default taper function: returns a data frame of diameters at nSegs+1
#   evenly spaced heights/lengths (or at the heights supplied in hgt).
#   Diameters must be in the same units as length.
#
#   Arguments...
#     botDiam = large-end diameter (length units)
#     topDiam = small-end diameter (length units)
#     logLen = total log/tree length
#     nSegs = number of segments desired
#     solidType = solid-of-revolution parameter; must lie in .StemEnv$solidTypes
#     hgt = NULL to calculate section heights here; otherwise a vector of them
#     isLog = TRUE: down log ("length" column); FALSE: standing tree ("height")
#------------------------------------------------------------------------------
    if(nSegs < 1)
      stop('Must have positive number of log segments for taper!')
    nSegs = nSegs + 1                        #becomes the number of diameters required for taper
    if(is.null(solidType) || solidType < .StemEnv$solidTypes[1] || solidType > .StemEnv$solidTypes[2])
      stop('solidType=',solidType,' out of range, must be in: [',
           .StemEnv$solidTypes[1],',',.StemEnv$solidTypes[2],']')   #was unqualified solidTypes--undefined here
    r = solidType
    if(is.null(hgt))
      hgt = seq(0, logLen, length.out=nSegs)
    diameter = topDiam + (botDiam - topDiam) * ((logLen - hgt)/logLen)^(2/r)
    taper = data.frame(diameter=diameter, hgt=hgt)
    if(isLog)
      colnames(taper) = c('diameter','length')
    else
      colnames(taper) = c('diameter','height')
    return(taper)
}   #wbTaper
assign('wbTaper', wbTaper, envir=.StemEnv)     #register in the hidden package environment
environment(.StemEnv$wbTaper) = .StemEnv       #rebind its enclosure so it can see other .StemEnv objects
rm(wbTaper)                                    #drop the temporary .GlobalEnv copy
#================================================================================
#
# default volume function based on default taper function...
#
# k is the conversion factor that takes diameter to radius and puts it into the
# same units as length. But diameters should be in length units for downLogs,
# so k just represents taking squared diameter to squared radius...
#
# To get actual bolt volume of a segment somewhere on the stem, call this twice,
# first with the shorter length, then the longer length (both defining the bolt)
# and get the volume by subtraction--see segmentVolumes() and chainsawSliver
# for examples--but now one should simply use segmentVolumes 25-Apr-2013.
#
wbVolume = function(botDiam, topDiam, logLen, solidType, boltLen=NULL) {
#------------------------------------------------------------------------------
#   Closed-form volume under the default (wbTaper) taper equation.
#
#   k converts squared diameter to squared radius; diameters are already in
#   length units for down logs, so no unit conversion is folded into k.
#
#   boltLen = NULL gives the total stem volume; otherwise the volume from the
#   butt up to boltLen--call twice and subtract for an intermediate bolt
#   (but now one should simply use segmentVolumes, 25-Apr-2013).
#------------------------------------------------------------------------------
    if(is.null(solidType) || solidType < .StemEnv$solidTypes[1] || solidType > .StemEnv$solidTypes[2])
      stop('solidType=',solidType,' out of range, must be in: [',
           .StemEnv$solidTypes[1],',',.StemEnv$solidTypes[2],']')
    r = solidType
    k = 1/4                      #diameter to radius; diam to length units conversion==1
    if(is.null(boltLen))
      h = logLen                 #default is for total volume
    else {
      if(boltLen < 0 || boltLen > logLen)   #guard: fractional powers of a negative base give NaN below
        stop('boltLen=',boltLen,' out of range, must be in: [0,',logLen,']')
      h = boltLen                #some intermediate volume
    }
    logVol = pi*k*topDiam^2*h +
             pi*k*logLen*(botDiam - topDiam)^2*r/(4+r)*(1-(1-h/logLen)^((4+r)/r)) +
             2*pi*k*logLen*topDiam*(botDiam - topDiam)*r/(2+r)*(1-(1-h/logLen)^((2+r)/r))
    return(logVol)
}   #wbVolume
assign('wbVolume', wbVolume, envir=.StemEnv)   #register in the hidden package environment
environment(.StemEnv$wbVolume) = .StemEnv      #rebind its enclosure so it can see other .StemEnv objects
rm(wbVolume)                                   #drop the temporary .GlobalEnv copy
#================================================================================
#
# Smalian's volume function for passed taper...
#
# k is the conversion factor that takes diameter to radius and puts it into the
# same units as length. But diameters should be in length units for downLogs,
# so k just represents taking squared diameter to squared radius...
#
SmalianVolume = function(taper, isLog=TRUE) {
#------------------------------------------------------------------------------
#   Smalian's volume from a taper data frame: each section's volume is the
#   mean of its two end cross-sectional areas times its length; a section
#   whose top diameter is zero (the tip) is treated as a cone.
#
#   k converts squared diameter to squared radius; diameters are already in
#   length units, so no other conversion is needed.
#
#   Returns a list with boltVol (one-column matrix, one row per section)
#   and logVol (the column total).
#
#   Fix: the tip-cone branch previously used csArea[i+1], which is zero by
#   the branch condition, so tip sections always contributed zero volume;
#   a cone's volume uses its *base* (lower) cross-sectional area csArea[i].
#------------------------------------------------------------------------------
    k = 1/4                      #diameter to radius; diam to length units conversion==1
    nSegs = nrow(taper) - 1
    if(nSegs < 1)
      stop("Must have positive number of log segments for Smalian's!")
    if(isLog)
      hgtName = 'length'
    else
      hgtName = 'height'
    vol = matrix(NA, nrow=nSegs, ncol=1)
    diam = taper[,'diameter']
    csArea = diam^2              #squared diameters; pi*k applied below
    hgt = taper[,hgtName]        #renamed from "length" to avoid shadowing base::length
    for(i in seq_len(nSegs)) {
      sectLen = hgt[i+1] - hgt[i]
      if(isTRUE(all.equal(diam[i+1], 0.0)))
        vol[i,1] = pi*k*csArea[i]*sectLen/3                   #cone for tip: base csa, not the zero tip
      else
        vol[i,1] = pi*k*(csArea[i] + csArea[i+1])*sectLen/2   #Smalian's
    }
    sumVol = colSums(vol)
    return(list(boltVol = vol, logVol = sumVol))
}   #SmalianVolume
assign('SmalianVolume', SmalianVolume, envir=.StemEnv)  #register the closure in the hidden package environment
environment(.StemEnv$SmalianVolume) = .StemEnv #rebind its enclosure so it can see other .StemEnv objects
rm(SmalianVolume) #drop the temporary .GlobalEnv copy
#================================================================================
#
# spline volume function based on taper points...
#
# taper = the taper data frame for the stem object
# lenBot = length at the beginning of the section (default whole stem)
# lenTop = length at the top of the section (default whole stem)
# isLog = TRUE: a "downLog" object; FALSE: a "standingTree" object
# units = units for the stem (doesn't really matter as csa conversion is the
# same for both--see .StemEnv$baFactor above)
#
# Please note: Always pass the entire log's taper data frame as the first argument,
# even if you only want some intermediate bolt volume, as
# the spline function is defined on the entirety of what is in
# taper, and will not reflect the entire stem if only that portion
# is passed that contains the bolt, for example.
#
# Also: The reason we do not simply pass a "Stem" subclass object instead of
# taper, units, etc. here is that we want this routine to possibly be of
# use in the constructor of a "Stem" object. We could, of course, make
# this a generic with "data.frame" and "Stem" signatures--see commented-out
# section below...
#
# Additionally, lenBot is the length to the bottom of the bolt to be integrate,
# and lenTop is to the top of the bolt, both are relative to the butt of the
# stem, which is always zero in a "Stem" subclass object.
#
splineVolume = function(taper, lenBot, lenTop, isLog=TRUE, units='metric') {
#------------------------------------------------------------------------------
#   Cubic-spline volume from a taper data frame: splines the cross-sectional
#   area over height/length and integrates it over [lenBot, lenTop].
#
#   Always pass the *entire* stem's taper, even for an intermediate bolt, so
#   that the spline is fit to the whole profile; lenBot and lenTop are both
#   measured from the butt (height/length zero in a "Stem" subclass object).
#
#   units only selects the diameter^2 --> cross-sectional-area factor, which
#   is the same for both systems (see .StemEnv$baFactor).
#------------------------------------------------------------------------------
    hgt = if(isLog) taper$length else taper$height
    badSection = lenBot > lenTop || lenBot < 0 || lenTop < 0 || lenTop > max(hgt)
    if(badSection)
      stop('Nonsensical lengths in splineVolume!')
    #cross-sectional area (ba) conversion factor...
    csaFactor = ifelse(units==.StemEnv$msrUnits$English, .StemEnv$baFactor['English'],
                       .StemEnv$baFactor['metric'])
    csa.spline = splinefun(hgt, csaFactor*taper$diameter^2)   #spline the cross-sectional area
    vol = integrate(csa.spline, lenBot, lenTop)$value         #integrate csa over the section
    return(vol)
}   #splineVolume
assign('splineVolume', splineVolume, envir=.StemEnv)   #register in the hidden package environment
environment(.StemEnv$splineVolume) = .StemEnv          #rebind its enclosure so it can see other .StemEnv objects
rm(splineVolume)                                       #drop the temporary .GlobalEnv copy
#================================================================================
#
# default surface area function based on default taper function...
#
# botDiam = buttDiam for the log
# topDiam = the small-end (top) diameter of the log
# logLen = logLen for the log
# solidType = solidType for the log
# lenBot = length at the beginning of the section (default whole log)
# lenTop = length at the top of the section (default whole log)
#
wbSurfaceArea = function(botDiam, topDiam, logLen, solidType, lenBot=0, lenTop=logLen) {
    #surface area under the default taper: integrate the surface-of-revolution
    #  integrand pi*d(h)*sqrt(1 + (d'(h)/2)^2) over [lenBot, lenTop];
    #lenBot/lenTop default to the whole log and are measured from the butt
    if(lenBot > lenTop || lenBot < 0 || lenTop < 0)
      stop('Nonsensical lengths in wbSurfaceArea!')
    #integrand: diameter from the default taper plus its analytic derivative...
    sa.taper = function(hgt, botDiam, topDiam, logLen, solidType) {
      diam = .StemEnv$wbTaper(botDiam, topDiam, logLen, 1, solidType, hgt)$diameter
      #closed-form d'(h) of wbTaper; negative since diameter shrinks with height...
      deriv = -2*(logLen-hgt)^(2/solidType-1) * (botDiam-topDiam)/(solidType*logLen^(2/solidType))
      tsa = pi*diam* sqrt(1 + deriv^2/4)   #deriv/2 is the radius (not diameter) slope
      return(tsa)
    } #sa.taper
    sa = integrate(sa.taper, lenBot, lenTop, botDiam=botDiam, topDiam=topDiam, logLen=logLen,
                   solidType=solidType)$value
    return(sa)
} #wbSurfaceArea
assign('wbSurfaceArea', wbSurfaceArea, envir=.StemEnv)   #register the closure in the hidden package environment
environment(.StemEnv$wbSurfaceArea) = .StemEnv #rebind its enclosure so it can see other .StemEnv objects
rm(wbSurfaceArea) #drop the temporary .GlobalEnv copy
#================================================================================
#
# spline surface area function based on taper points...
#
# taper = the taper data frame for the log
# lenBot = length at the beginning of the section (default whole log)
# lenTop = length at the top of the section (default whole log)
#
# Please note: Always pass the entire log's taper data frame as the first argument,
# even if you only want some intermediate bolt surface area, as
# the spline function is defined on the entirety of what is in
# taper, and will not reflect the entire log if only that portion
# is passed that contains the bolt, for example.
#
# Additionally, lenBot is the length to the bottom of the bolt to be integrate,
# and lenTop is to the top of the bolt, both are relative to the butt of the
# log, which is always zero in a downLog object.
#
splineSurfaceArea = function(taper, lenBot, lenTop, isLog=TRUE) {
#------------------------------------------------------------------------------
#   Spline-based surface area: fit a cubic spline to the diameter profile and
#   integrate the surface-of-revolution integrand pi*d*sqrt(1 + (d'/2)^2)
#   over [lenBot, lenTop].
#
#   Always pass the *full* stem's taper so the spline reflects the entire
#   profile; lenBot and lenTop are both measured from the butt.
#------------------------------------------------------------------------------
    if(lenBot > lenTop || lenBot < 0 || lenTop < 0)
      stop('Nonsensical lengths in splineSurfaceArea!')
    hgt = if(isLog) taper$length else taper$height
    profile = splinefun(hgt, taper$diameter)   #diameter as a function of height/length
    integrand = function(h) {                  #h is the dummy height argument
      d = profile(h)                           #spline diameter...
      dPrime = profile(h, 1)                   #...and its first derivative
      pi*d * sqrt(1 + dPrime^2/4)              #dPrime/2 is the radius slope
    } #integrand
    return(integrate(integrand, lenBot, lenTop)$value)
} #splineSurfaceArea
assign('splineSurfaceArea', splineSurfaceArea, envir=.StemEnv)  #register the closure in the hidden package environment
environment(.StemEnv$splineSurfaceArea) = .StemEnv #rebind its enclosure so it can see other .StemEnv objects
rm(splineSurfaceArea) #drop the temporary .GlobalEnv copy
#================================================================================
#
# default coverage area function based on default taper function...
#
# botDiam = buttDiam for the log
# topDiam = topTiam for the log
# logLen = logLen for the log
# solidType = solidType for the log
# lenBot = length at the beginning of the section (default whole log)
# lenTop = length at the top of the section (default whole log)
#
wbCoverageArea = function(botDiam, topDiam, logLen, solidType, lenBot=0, lenTop=logLen) {
#------------------------------------------------------------------------------
#   Projected coverage area under the default taper: the integral of diameter
#   over [lenBot, lenTop] (defaults give the whole log).
#
#   Fixes: (1) the full-log shortcut previously tested
#   identical(logLen, lenBot - lenTop), whose right side is <= 0 for a valid
#   section, so the shortcut was dead code--the operands were reversed;
#   (2) lenTop > logLen is now rejected, since the general branch would take
#   a fractional power of a negative base (NaN) in that case.
#------------------------------------------------------------------------------
    if(lenBot > lenTop || lenBot < 0 || lenTop < 0 || lenTop > logLen)
      stop('Nonsensical lengths in wbCoverageArea!')
    if(identical(logLen, (lenTop - lenBot)))             #full log: [0, logLen]
      ca = (botDiam*solidType + 2*topDiam)*logLen/(solidType+2)
    else {   #intermediate section: antiderivative of the taper evaluated at the limits...
      r = solidType
      a = lenBot
      b = lenTop
      Du = topDiam
      Dl = botDiam
      L = logLen
      p1 = ((b - a)*Du*r + (2*b - 2*a)*Du)*L^(2/r)
      p2 = (L - b)^(2/r) * ((Du - Dl)*r*L + (b*Dl - b*Du)*r)
      p3 = (L - a)^(2/r) * ((Dl - Du)*r*L + (a*Du - a*Dl)*r)
      ca = (p1 + p2 + p3)/((r+2)*L^(2/r))
    }
    return(ca)
} #wbCoverageArea
assign('wbCoverageArea', wbCoverageArea, envir=.StemEnv)  #register the closure in the hidden package environment
environment(.StemEnv$wbCoverageArea) = .StemEnv #rebind its enclosure so it can see other .StemEnv objects
rm(wbCoverageArea) #drop the temporary .GlobalEnv copy
#================================================================================
#
# spline coverage area function based on taper points...
#
# taper = the taper data frame for the log
# lenBot = length at the beginning of the section (default whole log)
# lenTop = length at the top of the section (default whole log)
#
# Please note: Always pass the entire log's taper data frame as the first argument,
# even if you only want some intermediate bolt coverage area, as
# the spline function is defined on the entirety of what is in
# taper, and will not reflect the entire log if only that portion
# is passed that contains the bolt, for example.
#
# Additionally, lenBot is the length to the bottom of the bolt to be integrate,
# and lenTop is to the top of the bolt, both are relative to the butt of the
# log, which is always zero in a downLog object.
#
splineCoverageArea = function(taper, lenBot, lenTop) {
#------------------------------------------------------------------------------
#   Spline-based projected coverage area: integrates a cubic spline of the
#   diameter profile over [lenBot, lenTop].
#
#   Always pass the *full* log's taper so the spline is fit to the entire
#   profile; lenBot and lenTop are both measured from the butt.
#------------------------------------------------------------------------------
    if(lenBot > lenTop || lenBot < 0 || lenTop < 0)
      stop('Nonsensical lengths in splineCoverageArea!')
    profile = splinefun(taper$length, taper$diameter)   #diameter as a function of length
    return(integrate(profile, lenBot, lenTop)$value)    #area under the diameter curve
} #splineCoverageArea
assign('splineCoverageArea', splineCoverageArea, envir=.StemEnv)  #register the closure in the hidden package environment
environment(.StemEnv$splineCoverageArea) = .StemEnv #rebind its enclosure so it can see other .StemEnv objects
rm(splineCoverageArea) #drop the temporary .GlobalEnv copy
#================================================================================
#
# converts degrees to radians on [0,2pi]...
#
# or use simply angle%%360 ??...
#
.deg2Rad = function(angle) {
    #-------------------------------------------------------------------
    #   convert degrees to radians; angles greater than 360 are first
    #   wrapped back into (0, 360]--angles <= 360 (including negatives)
    #   pass through unwrapped
    #-------------------------------------------------------------------
    twoPi = 2.0*pi
    if(angle > 360)
        angle = angle - floor(angle/360)*360   #strip whole revolutions
    return(angle*twoPi/360)
} #.deg2Rad
assign('deg2Rad', .deg2Rad, envir=.StemEnv)   #register as deg2Rad (no leading dot) in the hidden package environment
environment(.StemEnv$deg2Rad) = .StemEnv      #rebind its enclosure so it can see other .StemEnv objects
rm(.deg2Rad)                                  #drop the temporary .GlobalEnv copy
#================================================================================
#
# converts radians to degrees on [0,360]...
#
.rad2Deg = function(angle) {
    #-------------------------------------------------------------------
    #   convert radians to degrees; angles greater than 2*pi are first
    #   wrapped back into (0, 2*pi]--angles <= 2*pi (including negatives)
    #   pass through unwrapped
    #-------------------------------------------------------------------
    twoPi = 2.0*pi
    if(angle > twoPi)
        angle = angle - floor(angle/twoPi)*twoPi   #strip whole revolutions
    return(angle*360/twoPi)
} #.rad2Deg
assign('rad2Deg', .rad2Deg, envir=.StemEnv)   #register as rad2Deg (no leading dot) in the hidden package environment
environment(.StemEnv$rad2Deg) = .StemEnv      #rebind its enclosure so it can see other .StemEnv objects
rm(.rad2Deg)                                  #drop the temporary .GlobalEnv copy
#================================================================================
#
.underLine = function(lineLength = 0,    #length of line
                      lineChar = '-',    #character (vector) for the line
                      prologue = '\n',   #character (vector) cat()'ed first
                      postfix = '\n'     #character (vector) cat()'ed last
                     )
{
#------------------------------------------------------------------------------
#   cat()'s a simple underline for nicer printed output; nothing is printed
#   unless 0 < lineLength < 200, and the function always returns NULL
#   invisibly.
#------------------------------------------------------------------------------
    legalLength = lineLength > 0 && lineLength < 200
    if(legalLength)
      cat(prologue, rep(lineChar, lineLength), postfix, sep='')
    invisible()
} #.underLine
assign('underLine', .underLine, envir=.StemEnv)   #register as underLine (no leading dot) in the hidden package environment
environment(.StemEnv$underLine) = .StemEnv        #rebind its enclosure so it can see other .StemEnv objects
rm(.underLine)                                    #drop the temporary .GlobalEnv copy
#================================================================================
#
# just generates a random ID in character string that can be combined with
# any prefix for spatial IDs in the package...
#
randomID = function(lenDigits=4,    #number of digits to use
                    lenAlpha=4,     #number of alpha characters
                    ...)
{
#------------------------------------------------------------------------------
#   generates a random character string ID made of lenDigits digits and
#   lenAlpha lower-case letters, shuffled together; combine with any prefix
#   for spatial IDs in the package.
#
#   sampling is without replacement when the request fits within the source
#   alphabet (the historical behavior) and with replacement otherwise, so
#   IDs with more than 10 digits or 26 letters are now legal rather than
#   raising "cannot take a sample larger than the population"...
#------------------------------------------------------------------------------
    if(lenDigits<1 || lenAlpha<1)
      stop('random IDs must have at least one digit and one letter!')
    dig = sample(0:9, lenDigits, replace = lenDigits > 10)
    alpha = sample(letters, lenAlpha, replace = lenAlpha > 26)
    comb = c(dig, alpha)                 #coerces everything to character
    nn = length(comb)
    idx = sample(1:nn, nn)               #random interleave of digits & letters
    ranid = paste(comb[idx], collapse='')
    return(ranid)
}   #randomID
assign('randomID', randomID, envir=.StemEnv)   #register the closure in the hidden package environment
environment(.StemEnv$randomID) = .StemEnv      #rebind its enclosure so it can see other .StemEnv objects
rm(randomID)                                   #drop the temporary .GlobalEnv copy
|
#############################################
### esta funcion calcula rasters de media ###
### y desvio estandar para una coleccion ###
### de n rasters ndvi ###
### input: carpeta con rasters ndvi ###
### output: raster media y raster sd ###
#############################################
GO_E5_NDVI_MEAN_SD=function(ndvi_dir="NDVI/Trim/",ndvi_mean_out="NDVI/Mean/NDVI_mean.tif",ndvi_sd_out="NDVI/Mean/NDVI_sd.tif"){
## Computes per-pixel mean and standard-deviation rasters for a collection
## of single-band NDVI rasters.
##   ndvi_dir      = folder holding the input NDVI GeoTIFFs
##   ndvi_mean_out = output path (relative to getwd()) for the mean raster
##   ndvi_sd_out   = output path (relative to getwd()) for the SD raster
## Returns a RasterStack holding the mean and SD layers.
library(raster)
library(sp)
library(rgdal)
## list the NDVI rasters found in ndvi_dir
ndvi_files <- list.files(ndvi_dir,pattern=".tif$",ignore.case=TRUE,full.names=TRUE)
if(length(ndvi_files)==0)
  stop("no .tif rasters found in ",ndvi_dir)
## stack the single-band NDVI rasters
ndvi_stack=raster::stack(ndvi_files)
## per-pixel mean across layers, ignoring NA cells
ndvi_mean=raster::calc(ndvi_stack,fun=mean,na.rm=TRUE)
## per-pixel SD -- na.rm=TRUE keeps NA handling consistent with the mean
## (previously fun=sd returned NA wherever any single layer had an NA cell)
ndvi_sd=raster::calc(ndvi_stack,fun=function(x){sd(x,na.rm=TRUE)})
## stack mean and sd together for the return value
ndvi_mean_stack=stack(ndvi_mean,ndvi_sd)
## also write the mean and SD out individually for checking
writeRaster(ndvi_mean,filename=file.path(getwd(),ndvi_mean_out), bylayer=TRUE,format="GTiff",overwrite=TRUE)
writeRaster(ndvi_sd,filename=file.path(getwd(),ndvi_sd_out), bylayer=TRUE,format="GTiff",overwrite=TRUE)
## the function returns the raster stack
return(ndvi_mean_stack)
}
############### end of function #########
|
/GO_E05_NDVI_MEAN_SD.r
|
no_license
|
mlcastellan/GO
|
R
| false
| false
| 1,487
|
r
|
#############################################
### esta funcion calcula rasters de media ###
### y desvio estandar para una coleccion ###
### de n rasters ndvi ###
### input: carpeta con rasters ndvi ###
### output: raster media y raster sd ###
#############################################
GO_E5_NDVI_MEAN_SD=function(ndvi_dir="NDVI/Trim/",ndvi_mean_out="NDVI/Mean/NDVI_mean.tif",ndvi_sd_out="NDVI/Mean/NDVI_sd.tif"){
## Computes per-pixel mean and standard-deviation rasters for a collection
## of single-band NDVI rasters.
##   ndvi_dir      = folder holding the input NDVI GeoTIFFs
##   ndvi_mean_out = output path (relative to getwd()) for the mean raster
##   ndvi_sd_out   = output path (relative to getwd()) for the SD raster
## Returns a RasterStack holding the mean and SD layers.
library(raster)
library(sp)
library(rgdal)
## list the NDVI rasters found in ndvi_dir
ndvi_files <- list.files(ndvi_dir,pattern=".tif$",ignore.case=TRUE,full.names=TRUE)
if(length(ndvi_files)==0)
  stop("no .tif rasters found in ",ndvi_dir)
## stack the single-band NDVI rasters
ndvi_stack=raster::stack(ndvi_files)
## per-pixel mean across layers, ignoring NA cells
ndvi_mean=raster::calc(ndvi_stack,fun=mean,na.rm=TRUE)
## per-pixel SD -- na.rm=TRUE keeps NA handling consistent with the mean
## (previously fun=sd returned NA wherever any single layer had an NA cell)
ndvi_sd=raster::calc(ndvi_stack,fun=function(x){sd(x,na.rm=TRUE)})
## stack mean and sd together for the return value
ndvi_mean_stack=stack(ndvi_mean,ndvi_sd)
## also write the mean and SD out individually for checking
writeRaster(ndvi_mean,filename=file.path(getwd(),ndvi_mean_out), bylayer=TRUE,format="GTiff",overwrite=TRUE)
writeRaster(ndvi_sd,filename=file.path(getwd(),ndvi_sd_out), bylayer=TRUE,format="GTiff",overwrite=TRUE)
## the function returns the raster stack
return(ndvi_mean_stack)
}
############### end of function #########
|
#' Adds Controls for Each Pesticide You are Using
#'
#' For each pesticide/genotype combination in the raw data, the matching
#' solvent-control observations are appended so every pesticide subset
#' carries its own 0-dose controls. Requires \code{dplyr} (for
#' \code{select}) and \code{data.table} (for \code{rbindlist}).
#'
#' @param raw.data Raw toxicology in standard Batterham Lab Format
#' @param key Do you want to specify your own insecticide:solvent key? If so put it in here. Default is NULL
#' @param new.key Do you want to specify your own insecticide solvent key? Defaults to FALSE
#' @param solvent.overide You can manually set all of the solvents to "h2o", "dmso" or "ethanol" if you please. Defaults to NA. (The argument name keeps its historical spelling for backward compatibility.)
#' @return returns data frame which has added the 0 dose for all of your insecticides
#' @export
dmc.control.add <- function(raw.data,key=NULL,new.key=F,solvent.overide=NA){
  ## Use the caller-supplied key when requested; otherwise fall back to the
  ## built-in insecticide -> solvent lookup table.
  if(!isTRUE(new.key)){
    key <- list(c(insecticide="imidacloprid",solvent="h2o"),
                c(insecticide="spinosad",solvent="h2o"),
                c(insecticide="nicotine",solvent="h2o"),
                c(insecticide="malathion",solvent="h2o"),
                c(insecticide="imi-5-oh",solvent="dmso"),
                c(insecticide="imi-olefin",solvent="dmso"),
                c(insecticide="clothianidin",solvent="dmso"),
                c(insecticide="acetamiprid",solvent="dmso"),
                c(insecticide="lufenuron",solvent="dmso"),
                c(insecticide="sulfoxaflor",solvent="dmso"),
                c(insecticide="permethrin",solvent="dmso"),
                c(insecticide="pyripole",solvent="ethanol"),
                c(insecticide="nitenpyram",solvent="h2o"),
                c(insecticide="chlorantraniliprole",solvent="h2o"),
                c(insecticide="ivermectin",solvent="dmso"),
                c(insecticide="abamectin",solvent="dmso"),
                c(insecticide="mix",solvent="dmso"),
                c(insecticide="ethanol",solvent="ethanol"),
                c(insecticide="dmso",solvent="dmso"),
                c(insecticide="h2o",solvent="h2o"))
  }
  names(key) <- sapply(key,function(x) x[1])
  ### Normalize the data frame: lower-case names/values, strip spaces
  colnames(raw.data) <- tolower(colnames(raw.data))
  raw.data$genotype <- tolower(raw.data$genotype)
  raw.data$pesticide <- tolower(raw.data$pesticide)
  raw.data$pesticide <- gsub(" ","",raw.data$pesticide)
  ## Add control doses for each pesticide and genotype
  index <- unique(select(raw.data,pesticide,genotype))
  clean.data.list <- vector('list',nrow(index))
  for(i in seq_len(nrow(index))){
    p <- index[i,1]
    sub.data <- subset(raw.data,pesticide==p)
    ## fail loudly for pesticides missing from the key (previously a
    ## cryptic zero-length-subscript error)
    if(!p %in% names(key))
      stop("pesticide '",p,"' is not in the insecticide:solvent key",call.=FALSE)
    control.chemical <- key[[p]][2]
    ## controls are the solvent rows (or the overridden solvent, if given)
    if(is.na(solvent.overide)){
      control.data <- subset(raw.data,pesticide==control.chemical)
    }else{
      control.data <- subset(raw.data,pesticide==solvent.overide)
    }
    ## relabel controls as the focal pesticide so they bind/analyze together
    control.data$pesticide <- sub.data$pesticide[1]
    clean.data.list[[p]] <- rbind(sub.data,control.data)
    ## a solvent tested against itself needs no duplicated control rows
    if(p==control.chemical) clean.data.list[[p]] <- sub.data
  }
  return(rbindlist(clean.data.list,use.names=TRUE))
}
|
/R/dmc.control.add.R
|
no_license
|
shanedenecke/insect.toxicology
|
R
| false
| false
| 3,362
|
r
|
#' Adds Controls for Each Pesticide You are Using
#'
#' For each pesticide/genotype combination in the raw data, the matching
#' solvent-control observations are appended so every pesticide subset
#' carries its own 0-dose controls. Requires \code{dplyr} (for
#' \code{select}) and \code{data.table} (for \code{rbindlist}).
#'
#' @param raw.data Raw toxicology in standard Batterham Lab Format
#' @param key Do you want to specify your own insecticide:solvent key? If so put it in here. Default is NULL
#' @param new.key Do you want to specify your own insecticide solvent key? Defaults to FALSE
#' @param solvent.overide You can manually set all of the solvents to "h2o", "dmso" or "ethanol" if you please. Defaults to NA. (The argument name keeps its historical spelling for backward compatibility.)
#' @return returns data frame which has added the 0 dose for all of your insecticides
#' @export
dmc.control.add <- function(raw.data,key=NULL,new.key=F,solvent.overide=NA){
  ## Use the caller-supplied key when requested; otherwise fall back to the
  ## built-in insecticide -> solvent lookup table.
  if(!isTRUE(new.key)){
    key <- list(c(insecticide="imidacloprid",solvent="h2o"),
                c(insecticide="spinosad",solvent="h2o"),
                c(insecticide="nicotine",solvent="h2o"),
                c(insecticide="malathion",solvent="h2o"),
                c(insecticide="imi-5-oh",solvent="dmso"),
                c(insecticide="imi-olefin",solvent="dmso"),
                c(insecticide="clothianidin",solvent="dmso"),
                c(insecticide="acetamiprid",solvent="dmso"),
                c(insecticide="lufenuron",solvent="dmso"),
                c(insecticide="sulfoxaflor",solvent="dmso"),
                c(insecticide="permethrin",solvent="dmso"),
                c(insecticide="pyripole",solvent="ethanol"),
                c(insecticide="nitenpyram",solvent="h2o"),
                c(insecticide="chlorantraniliprole",solvent="h2o"),
                c(insecticide="ivermectin",solvent="dmso"),
                c(insecticide="abamectin",solvent="dmso"),
                c(insecticide="mix",solvent="dmso"),
                c(insecticide="ethanol",solvent="ethanol"),
                c(insecticide="dmso",solvent="dmso"),
                c(insecticide="h2o",solvent="h2o"))
  }
  names(key) <- sapply(key,function(x) x[1])
  ### Normalize the data frame: lower-case names/values, strip spaces
  colnames(raw.data) <- tolower(colnames(raw.data))
  raw.data$genotype <- tolower(raw.data$genotype)
  raw.data$pesticide <- tolower(raw.data$pesticide)
  raw.data$pesticide <- gsub(" ","",raw.data$pesticide)
  ## Add control doses for each pesticide and genotype
  index <- unique(select(raw.data,pesticide,genotype))
  clean.data.list <- vector('list',nrow(index))
  for(i in seq_len(nrow(index))){
    p <- index[i,1]
    sub.data <- subset(raw.data,pesticide==p)
    ## fail loudly for pesticides missing from the key (previously a
    ## cryptic zero-length-subscript error)
    if(!p %in% names(key))
      stop("pesticide '",p,"' is not in the insecticide:solvent key",call.=FALSE)
    control.chemical <- key[[p]][2]
    ## controls are the solvent rows (or the overridden solvent, if given)
    if(is.na(solvent.overide)){
      control.data <- subset(raw.data,pesticide==control.chemical)
    }else{
      control.data <- subset(raw.data,pesticide==solvent.overide)
    }
    ## relabel controls as the focal pesticide so they bind/analyze together
    control.data$pesticide <- sub.data$pesticide[1]
    clean.data.list[[p]] <- rbind(sub.data,control.data)
    ## a solvent tested against itself needs no duplicated control rows
    if(p==control.chemical) clean.data.list[[p]] <- sub.data
  }
  return(rbindlist(clean.data.list,use.names=TRUE))
}
|
\name{rland.graph}
\alias{rland.graph}
\title{
Creates random landscape graph
}
\description{
One of the key functions of the package, which allows the creation of random landscapes (represented as graphs) with two categories: habitat patch and non-habitat matrix. The landscapes can be different depending on the parameters chosen.
}
\usage{
rland.graph(mapsize, dist_m, areaM, areaSD, Npatch, disp, plotG)
}
\arguments{
\item{mapsize}{Landscape mosaic side length, in meters.}
\item{dist_m}{Minimum distance between patches (centroid).}
\item{areaM}{Mean area (in hectares).}
\item{areaSD}{SD of the area of patches, in order to give variability to the patches area.}
\item{Npatch}{Number of patches (might be impaired by the dist_m, see the "Note" section).}
\item{disp}{Species mean dispersal ability, in meters.}
\item{plotG}{TRUE/FALSE, to show graphic output.}
}
\details{
The dispersal distance, as given by the parameter 'disp', is used for the computation of some of the connectivity metrics (function \code{\link{metrics.graph}}) and for the graphic representation of the landscapes (in both cases defining the groups of patches, or components). For the simulation of the metapopulational dynamics, the dispersal distance is given through the 'alpha' parameter (the inverse of the mean dispersal ability) in the parameter data frame created by \code{\link{create.parameter.df}}. This has an important consequence: no thresholding (considering the dispersal ability) is assumed when simulating the metapopulational dynamics.
}
\value{
Returns a list, with the following elements:
\itemize{
\item{mapsize}{Side of the landscape in meters.}
\item{minimum.distance}{Minimum distance between patches centroids, in meters.}
\item{mean.area}{Mean patch area in hectares.}
\item{SD.area}{Standard deviation of patches area.}
\item{number.patches}{Total number of patches.}
\item{dispersal}{Species mean dispersal ability, in meters.}
\item{nodes.characteristics}{Data frame with patch (node) information (coordinates, area, radius, cluster, distance to nearest neighbour and ID).
}
An additional field, colour, has only graphical purposes.}
}
\author{
Frederico Mestre and Fernando Canovas
}
\note{
If the minimum distance between patch centroids and the number of
patches are both too high, then the number of patches generated may be
lower than that specified by the user.
}
\seealso{
\code{\link{span.graph}}, \code{\link{species.graph}}
}
\examples{
#Example to create a random landscape graph with 60 patches with a mean area
#of 0.05 hectares.
#The landscape mosaic is a square with 1000 meters side.
#The species mean dispersal ability is 120 meters (in order to connect the patches).
#A plot with the landscape graph is displayed graphically.
rl1 <- rland.graph(mapsize=1000, dist_m=80, areaM=0.05, areaSD=0.02, Npatch=60,
disp=120, plotG=TRUE)
}
|
/man/rland.graph.Rd
|
no_license
|
cran/MetaLandSim
|
R
| false
| false
| 2,881
|
rd
|
\name{rland.graph}
\alias{rland.graph}
\title{
Creates random landscape graph
}
\description{
One of the key functions of the package, which allows the creation of random landscapes (represented as graphs) with two categories: habitat patch and non-habitat matrix. The landscapes can be different depending on the parameters chosen.
}
\usage{
rland.graph(mapsize, dist_m, areaM, areaSD, Npatch, disp, plotG)
}
\arguments{
\item{mapsize}{Landscape mosaic side length, in meters.}
\item{dist_m}{Minimum distance between patches (centroid).}
\item{areaM}{Mean area (in hectares).}
\item{areaSD}{SD of the area of patches, in order to give variability to the patches area.}
\item{Npatch}{Number of patches (might be impaired by the dist_m, see the "Note" section).}
\item{disp}{Species mean dispersal ability, in meters.}
\item{plotG}{TRUE/FALSE, to show graphic output.}
}
\details{
The dispersal distance, as given by the parameter 'disp', is used for the computation of some of the connectivity metrics (function \code{\link{metrics.graph}}) and for the graphic representation of the landscapes (in both cases defining the groups of patches, or components). For the simulation of the metapopulational dynamics, the dispersal distance is given through the 'alpha' parameter (the inverse of the mean dispersal ability) in the parameter data frame created by \code{\link{create.parameter.df}}. This has an important consequence: no thresholding (considering the dispersal ability) is assumed when simulating the metapopulational dynamics.
}
\value{
Returns a list, with the following elements:
\itemize{
\item{mapsize}{Side of the landscape in meters.}
\item{minimum.distance}{Minimum distance between patches centroids, in meters.}
\item{mean.area}{Mean patch area in hectares.}
\item{SD.area}{Standard deviation of patches area.}
\item{number.patches}{Total number of patches.}
\item{dispersal}{Species mean dispersal ability, in meters.}
\item{nodes.characteristics}{Data frame with patch (node) information (coordinates, area, radius, cluster, distance to nearest neighbour and ID).
}
An additional field, colour, is used for graphical purposes only.}
}
\author{
Frederico Mestre and Fernando Canovas
}
\note{
If the minimum distance between patch centroids and the number of
patches are both too high, then the number of patches generated is
lower than that defined by the user.
}
\seealso{
\code{\link{span.graph}}, \code{\link{species.graph}}
}
\examples{
#Example to create a random landscape graph with 60 patches with a mean area
#of 0.05 hectares.
#The landscape mosaic is a square with 1000 meters side.
#The species mean dispersal ability is 120 meters (in order to connect the patches).
#A plot with the landscape graph is displayed graphically.
rl1 <- rland.graph(mapsize=1000, dist_m=80, areaM=0.05, areaSD=0.02, Npatch=60,
disp=120, plotG=TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gapfill.R
\name{Array2Matrix}
\alias{Array2Matrix}
\title{Convert an Array with 4 Dimensions into a Matrix}
\usage{
Array2Matrix(a)
}
\arguments{
\item{a}{Array with 4 dimensions.}
}
\value{
A matrix. If \code{a} has the attribute \code{mp}, the transformed attribute is returned as well.
See \code{\link{ArrayAround}} for more information about \code{mp}.
}
\description{
Converts the array, \code{a}, with 4 dimensions, \code{c(d1, d2, d3, d4)},
into a matrix with \code{d1*d2} rows and \code{d3*d4} columns.
}
\examples{
a <- array(data = 1:16, dim = c(2, 2, 2, 2))
Array2Matrix(a = a)
attr(a, "mp") <- c(1, 2, 2, 1)
Array2Matrix(a = a)
Array2Matrix(ArrayAround(data = a, mp = c(1, 1, 1, 1),
size = c(1, 1, 2, 2)))
}
\seealso{
\link{Index}, \code{\link{ArrayAround}}.
}
\author{
Florian Gerber, \email{florian.gerber@math.uzh.ch}.
}
|
/gapfill/man/Array2Matrix.Rd
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| true
| 941
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gapfill.R
\name{Array2Matrix}
\alias{Array2Matrix}
\title{Convert an Array with 4 Dimensions into a Matrix}
\usage{
Array2Matrix(a)
}
\arguments{
\item{a}{Array with 4 dimensions.}
}
\value{
A matrix. If \code{a} has the attribute \code{mp}, the transformed attribute is returned as well.
See \code{\link{ArrayAround}} for more information about \code{mp}.
}
\description{
Converts the array, \code{a}, with 4 dimensions, \code{c(d1, d2, d3, d4)},
into a matrix with \code{d1*d2} rows and \code{d3*d4} columns.
}
\examples{
a <- array(data = 1:16, dim = c(2, 2, 2, 2))
Array2Matrix(a = a)
attr(a, "mp") <- c(1, 2, 2, 1)
Array2Matrix(a = a)
Array2Matrix(ArrayAround(data = a, mp = c(1, 1, 1, 1),
size = c(1, 1, 2, 2)))
}
\seealso{
\link{Index}, \code{\link{ArrayAround}}.
}
\author{
Florian Gerber, \email{florian.gerber@math.uzh.ch}.
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(formattable)
#Define Simple interest
# Compute the maturity amount under simple interest: A = P * (1 + r*t),
# where p is the principal, r the annual rate in percent and t the tenure
# in years. Result is rounded to 2 decimal places (currency).
# FIX: the original computed p*(1+r/t), which *decreases* the amount as the
# tenure grows; the simple-interest formula multiplies the rate by t.
calcSimpleInt <- function(p, r, t = 1) {
  r <- r / 100
  round(p * (1 + r * t), 2)
}
#Define Compound interest
# Compute the maturity amount under compound interest:
# A = P * (1 + r/n)^(n*t), where n is the number of compounding periods per
# year implied by the frequency label, r the annual rate in percent and t
# the tenure in years. Rounded to 2 decimal places (currency).
calcCmpdInt <- function(p, r, t = 1, n = "Yearly") {
  periodsPerYear <- c("Monthly" = 12, "Quarterly" = 4, "Half-Yearly" = 2, "Yearly" = 1)
  rate <- r / 100
  freq <- periodsPerYear[n]
  round(p * (1 + rate / freq)^(freq * t), 2)
}
# Server logic for the interest calculator: computes the maturity amount from
# the user's inputs and renders (1) a pie chart splitting principal vs.
# interest and (2) a currency-formatted summary table. It also shows/hides
# the compounding-frequency selector depending on the interest type chosen.
shinyServer(function(input, output, session) {
  # Pie chart: visual split between the principal and the accrued interest.
  output$distPlot <- renderPlot({
    totAmt <- 0;
    if(input$intType=="Simple")
      totAmt<-calcSimpleInt(input$principal, input$int, input$tenure)
    else
      # NOTE(review): input$compounding only exists after the observeEvent
      # below has inserted the selector -- confirm it is always set here.
      totAmt<-calcCmpdInt(input$principal, input$int, input$tenure, input$compounding)
    # draw the pie chart
    pie(x=c(input$principal,totAmt-input$principal), labels = c("Principal","Interest"), main = "Interest Calculations - Overall amount and interest", col = c("red","blue"))
  })
  # Summary table: principal, interest earned, and total, each formatted as a
  # currency string using the user-selected currency symbol.
  output$intTable <- renderTable({
    totAmt <- 0
    if(input$intType=="Simple")
      totAmt<-calcSimpleInt(input$principal, input$int, input$tenure)
    else
      totAmt<-calcCmpdInt(input$principal, input$int, input$tenure, input$compounding)
    # formattable::currency() formats the amounts with the chosen symbol.
    prVal = as.character(currency(input$principal, symbol = input$currFormat))
    intVal = as.character(currency(totAmt - input$principal, symbol = input$currFormat))
    totalVal = as.character(currency(totAmt, symbol = input$currFormat))
    opDF = data.frame(Principal = prVal,
                      Interest = intVal,
                      Total = totalVal)
    opDF
  }
  )
  # for dynamically adding compound interest
  # When "Compound" is selected, insert the compounding-frequency dropdown
  # right after the interest-type input; when switching back, remove it.
  observeEvent(input$intType, {
    if(input$intType=="Compound"){
      insertUI(
        selector = "#intType",
        where = "afterEnd",
        ui = selectInput("compounding", "Compounding Frequency", c("Monthly","Quarterly","Half-Yearly","Yearly"),
                         "Yearly", FALSE, FALSE, width='100%',
                         1)
      )
    }else{
      updateSelectInput(session, "compounding", "")
      #This update Select is put for workaround for removing the label
      removeUI(
        selector = "#compounding"
      )
    }
  }
  )
})
|
/server.R
|
no_license
|
sanjaynvs/datasciencecoursera
|
R
| false
| false
| 2,508
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(formattable)
#Define Simple interest
# Compute the maturity amount under simple interest: A = P * (1 + r*t),
# where p is the principal, r the annual rate in percent and t the tenure
# in years. Result is rounded to 2 decimal places (currency).
# FIX: the original computed p*(1+r/t), which *decreases* the amount as the
# tenure grows; the simple-interest formula multiplies the rate by t.
calcSimpleInt <- function(p, r, t = 1) {
  r <- r / 100
  round(p * (1 + r * t), 2)
}
#Define Compound interest
# Compute the maturity amount under compound interest:
# A = P * (1 + r/n)^(n*t), where n is the number of compounding periods per
# year implied by the frequency label, r the annual rate in percent and t
# the tenure in years. Rounded to 2 decimal places (currency).
calcCmpdInt <- function(p, r, t = 1, n = "Yearly") {
  periodsPerYear <- c("Monthly" = 12, "Quarterly" = 4, "Half-Yearly" = 2, "Yearly" = 1)
  rate <- r / 100
  freq <- periodsPerYear[n]
  round(p * (1 + rate / freq)^(freq * t), 2)
}
# Server logic for the interest calculator: computes the maturity amount from
# the user's inputs and renders (1) a pie chart splitting principal vs.
# interest and (2) a currency-formatted summary table. It also shows/hides
# the compounding-frequency selector depending on the interest type chosen.
shinyServer(function(input, output, session) {
  # Pie chart: visual split between the principal and the accrued interest.
  output$distPlot <- renderPlot({
    totAmt <- 0;
    if(input$intType=="Simple")
      totAmt<-calcSimpleInt(input$principal, input$int, input$tenure)
    else
      # NOTE(review): input$compounding only exists after the observeEvent
      # below has inserted the selector -- confirm it is always set here.
      totAmt<-calcCmpdInt(input$principal, input$int, input$tenure, input$compounding)
    # draw the pie chart
    pie(x=c(input$principal,totAmt-input$principal), labels = c("Principal","Interest"), main = "Interest Calculations - Overall amount and interest", col = c("red","blue"))
  })
  # Summary table: principal, interest earned, and total, each formatted as a
  # currency string using the user-selected currency symbol.
  output$intTable <- renderTable({
    totAmt <- 0
    if(input$intType=="Simple")
      totAmt<-calcSimpleInt(input$principal, input$int, input$tenure)
    else
      totAmt<-calcCmpdInt(input$principal, input$int, input$tenure, input$compounding)
    # formattable::currency() formats the amounts with the chosen symbol.
    prVal = as.character(currency(input$principal, symbol = input$currFormat))
    intVal = as.character(currency(totAmt - input$principal, symbol = input$currFormat))
    totalVal = as.character(currency(totAmt, symbol = input$currFormat))
    opDF = data.frame(Principal = prVal,
                      Interest = intVal,
                      Total = totalVal)
    opDF
  }
  )
  # for dynamically adding compound interest
  # When "Compound" is selected, insert the compounding-frequency dropdown
  # right after the interest-type input; when switching back, remove it.
  observeEvent(input$intType, {
    if(input$intType=="Compound"){
      insertUI(
        selector = "#intType",
        where = "afterEnd",
        ui = selectInput("compounding", "Compounding Frequency", c("Monthly","Quarterly","Half-Yearly","Yearly"),
                         "Yearly", FALSE, FALSE, width='100%',
                         1)
      )
    }else{
      updateSelectInput(session, "compounding", "")
      #This update Select is put for workaround for removing the label
      removeUI(
        selector = "#compounding"
      )
    }
  }
  )
})
|
# Verify pairwise sequence alignments (PSAs) produced by one alignment method
# against gold-standard alignments, then derive phylogenetic trees/networks.
#
# Command-line arguments (in order):
#   1. method     - alignment method name ("ld", "ld2", or a PMI-based method)
#   2. input_dir  - directory containing "input.csv" and "gold.csv"
#   3. output_dir - base directory for all results
#   4. cv_sep     - separator passed through to the alignment routine
#   5. ext_name   - optional suffix for output file/directory names
source("lib/load_exec_align.R")
source("lib/load_data_processing.R")
source("lib/load_verif_lib.R")
source("lib/load_phylo.R")
source("parallel_config.R")

# Read all CLI arguments once instead of calling commandArgs() repeatedly.
args <- commandArgs(trailingOnly = TRUE)
method <- args[1]
input_dir <- args[2]
output_dir <- args[3]
cv_sep <- args[4]
ext_name <- args[5]

# A missing fifth argument comes through as NA; NULL makes paste0() drop it.
if (is.na(ext_name)) {
  ext_name <- NULL
} else {
  ext_name <- paste0("_", ext_name)
}

# Each method writes into its own subdirectory, e.g. "<output_dir>/psa_ld/".
output_dir <- paste0(output_dir, "/", "psa_", method, ext_name, "/")
if (!dir.exists(output_dir)) {
  dir.create(output_dir)
}

acc_file <- paste0(output_dir, "/", "acc_psa_", method, ext_name, ".txt")

# Input word lists: the data to align and the gold-standard reference.
word_list <- make_word_list(file.path(input_dir, "input.csv"))
word_list_gold <- make_word_list(file.path(input_dir, "gold.csv"))

# Run the alignment and build the gold-standard PSAs for comparison.
psa_rlt <- execute_psa(method, word_list, output_dir, cv_sep)
psa_list_gold <- lapply(word_list_gold, make_gold_psa)

if (method == "ld" || method == "ld2") {
  # Edit-distance methods return only the scoring matrix and the alignments.
  s <- psa_rlt$s
  psa_list <- psa_rlt$psa_list
} else {
  # PMI-based methods additionally return a PMI matrix, which is persisted
  # together with the scoring matrix for later reuse.
  pmi_list <- psa_rlt$pmi_list
  s <- psa_rlt$s
  psa_list <- psa_rlt$psa_list
  # Save the matrix of the PMIs and the scoring matrix.
  save(pmi_list, file = paste0(output_dir, "/", "list_psa_", method, ".RData"))
  save(s, file = paste0(output_dir, "/", "score_psa_", method, ".RData"))
}

output_dir_aln <- paste0(output_dir, "/alignment/")

# Calculate the PSAs accuracy.
verify_psa(psa_list, psa_list_gold, acc_file, output_dir_aln)

# Output the PSAs (computed vs. gold) and persist them as .RData.
output_psa(psa_list, output_dir = output_dir_aln, ext = ".csv")
output_psa(psa_list_gold, output_dir = output_dir_aln, ext = "_lg.csv")
save(psa_list, file = paste0(output_dir_aln, "psa_", method, ".RData"))
save(psa_list_gold, file = paste0(output_dir_aln, "psa_", method, "_lg.RData"))

# Plot the phylogenetic trees and the networks.
phylo_each_word(psa_list, output_dir, method, s)
# Calculate the regional distance matrix.
phylo_all_word(word_list, method, s, output_dir)
|
/R/verification/verification_psa.R
|
no_license
|
e155721/src
|
R
| false
| false
| 2,097
|
r
|
# Verify pairwise sequence alignments (PSAs) produced by one alignment method
# against gold-standard alignments, then derive phylogenetic trees/networks.
#
# Command-line arguments (in order):
#   1. method     - alignment method name ("ld", "ld2", or a PMI-based method)
#   2. input_dir  - directory containing "input.csv" and "gold.csv"
#   3. output_dir - base directory for all results
#   4. cv_sep     - separator passed through to the alignment routine
#   5. ext_name   - optional suffix for output file/directory names
source("lib/load_exec_align.R")
source("lib/load_data_processing.R")
source("lib/load_verif_lib.R")
source("lib/load_phylo.R")
source("parallel_config.R")

# Read all CLI arguments once instead of calling commandArgs() repeatedly.
args <- commandArgs(trailingOnly = TRUE)
method <- args[1]
input_dir <- args[2]
output_dir <- args[3]
cv_sep <- args[4]
ext_name <- args[5]

# A missing fifth argument comes through as NA; NULL makes paste0() drop it.
if (is.na(ext_name)) {
  ext_name <- NULL
} else {
  ext_name <- paste0("_", ext_name)
}

# Each method writes into its own subdirectory, e.g. "<output_dir>/psa_ld/".
output_dir <- paste0(output_dir, "/", "psa_", method, ext_name, "/")
if (!dir.exists(output_dir)) {
  dir.create(output_dir)
}

acc_file <- paste0(output_dir, "/", "acc_psa_", method, ext_name, ".txt")

# Input word lists: the data to align and the gold-standard reference.
word_list <- make_word_list(file.path(input_dir, "input.csv"))
word_list_gold <- make_word_list(file.path(input_dir, "gold.csv"))

# Run the alignment and build the gold-standard PSAs for comparison.
psa_rlt <- execute_psa(method, word_list, output_dir, cv_sep)
psa_list_gold <- lapply(word_list_gold, make_gold_psa)

if (method == "ld" || method == "ld2") {
  # Edit-distance methods return only the scoring matrix and the alignments.
  s <- psa_rlt$s
  psa_list <- psa_rlt$psa_list
} else {
  # PMI-based methods additionally return a PMI matrix, which is persisted
  # together with the scoring matrix for later reuse.
  pmi_list <- psa_rlt$pmi_list
  s <- psa_rlt$s
  psa_list <- psa_rlt$psa_list
  # Save the matrix of the PMIs and the scoring matrix.
  save(pmi_list, file = paste0(output_dir, "/", "list_psa_", method, ".RData"))
  save(s, file = paste0(output_dir, "/", "score_psa_", method, ".RData"))
}

output_dir_aln <- paste0(output_dir, "/alignment/")

# Calculate the PSAs accuracy.
verify_psa(psa_list, psa_list_gold, acc_file, output_dir_aln)

# Output the PSAs (computed vs. gold) and persist them as .RData.
output_psa(psa_list, output_dir = output_dir_aln, ext = ".csv")
output_psa(psa_list_gold, output_dir = output_dir_aln, ext = "_lg.csv")
save(psa_list, file = paste0(output_dir_aln, "psa_", method, ".RData"))
save(psa_list_gold, file = paste0(output_dir_aln, "psa_", method, "_lg.RData"))

# Plot the phylogenetic trees and the networks.
phylo_each_word(psa_list, output_dir, method, s)
# Calculate the regional distance matrix.
phylo_all_word(word_list, method, s, output_dir)
|
#' Any Transaction From One Committee To Another
#'
#' \code{read_all_transactions} downloads the FEC 2016 committee-to-committee
#' bulk transaction file and returns it as a parsed dataframe.
#'
#' @param n_max Integer specifying the max amount of entries in the dataset. Defaults to the possible maximum.
#' @param verbose A progress bar is shown if R is running interactively. Defaults to `interactive()`.
#' @return The entire dataframe. More information about variables is at `?transactions`.
#' @examples
#' \donttest{read_all_transactions()}
#' \donttest{read_all_transactions(n_max = 250)}
#' @import dplyr
#' @import readr
#' @export
read_all_transactions <- function(n_max = Inf, verbose = interactive()) {
  # Download and unzip the FEC bulk file into a temp dir. When not verbose,
  # suppress usethis's console output (capture.output + invisible); the
  # assignment to `dir` still takes effect inside the captured expression.
  if (!verbose) {
    invisible(utils::capture.output(
      dir <- usethis::use_zip(
        "https://www.fec.gov/files/bulk-downloads/2016/oth16.zip",
        destdir = tempdir(), cleanup = TRUE
      )
    ))
  } else {
    dir <- usethis::use_zip(
      "https://www.fec.gov/files/bulk-downloads/2016/oth16.zip",
      destdir = tempdir(), cleanup = TRUE
    )
  }
  transactions_path <- fs::path(dir, "itoth.txt")
  # Column names come from the FEC-published header file, lowercased to
  # match the package's naming convention.
  transactions_names <- read_csv("https://www.fec.gov/files/bulk-downloads/data_dictionaries/oth_header_file.csv") %>%
    names() %>%
    tolower()
  # Pipe-delimited bulk file; force ID/memo columns to character so that
  # leading zeros and mixed content are not mangled by type guessing.
  transactions_all <- vroom::vroom(
    transactions_path,
    n_max = n_max,
    col_names = transactions_names,
    col_types = cols(
      zip_code = col_character(),
      other_id = col_character(),
      memo_cd = col_character(),
      memo_text = col_character(),
      sub_id = col_character()
    ),
    delim = "|"
  )
  # Drop internal bookkeeping columns and parse the transaction date
  # (MMDDYYYY in the raw file) into a Date.
  transactions <- transactions_all %>%
    select(-c(image_num, sub_id, memo_text, memo_cd, file_num)) %>%
    mutate(
      transaction_dt = lubridate::mdy(transaction_dt)
    )
  return(transactions)
}
|
/R/fn_transactions.R
|
no_license
|
baumer-lab/fec16
|
R
| false
| false
| 1,758
|
r
|
#' Any Transaction From One Committee To Another
#'
#' \code{read_all_transactions} downloads the FEC 2016 committee-to-committee
#' bulk transaction file and returns it as a parsed dataframe.
#'
#' @param n_max Integer specifying the max amount of entries in the dataset. Defaults to the possible maximum.
#' @param verbose A progress bar is shown if R is running interactively. Defaults to `interactive()`.
#' @return The entire dataframe. More information about variables is at `?transactions`.
#' @examples
#' \donttest{read_all_transactions()}
#' \donttest{read_all_transactions(n_max = 250)}
#' @import dplyr
#' @import readr
#' @export
read_all_transactions <- function(n_max = Inf, verbose = interactive()) {
  # Download and unzip the FEC bulk file into a temp dir. When not verbose,
  # suppress usethis's console output (capture.output + invisible); the
  # assignment to `dir` still takes effect inside the captured expression.
  if (!verbose) {
    invisible(utils::capture.output(
      dir <- usethis::use_zip(
        "https://www.fec.gov/files/bulk-downloads/2016/oth16.zip",
        destdir = tempdir(), cleanup = TRUE
      )
    ))
  } else {
    dir <- usethis::use_zip(
      "https://www.fec.gov/files/bulk-downloads/2016/oth16.zip",
      destdir = tempdir(), cleanup = TRUE
    )
  }
  transactions_path <- fs::path(dir, "itoth.txt")
  # Column names come from the FEC-published header file, lowercased to
  # match the package's naming convention.
  transactions_names <- read_csv("https://www.fec.gov/files/bulk-downloads/data_dictionaries/oth_header_file.csv") %>%
    names() %>%
    tolower()
  # Pipe-delimited bulk file; force ID/memo columns to character so that
  # leading zeros and mixed content are not mangled by type guessing.
  transactions_all <- vroom::vroom(
    transactions_path,
    n_max = n_max,
    col_names = transactions_names,
    col_types = cols(
      zip_code = col_character(),
      other_id = col_character(),
      memo_cd = col_character(),
      memo_text = col_character(),
      sub_id = col_character()
    ),
    delim = "|"
  )
  # Drop internal bookkeeping columns and parse the transaction date
  # (MMDDYYYY in the raw file) into a Date.
  transactions <- transactions_all %>%
    select(-c(image_num, sub_id, memo_text, memo_cd, file_num)) %>%
    mutate(
      transaction_dt = lubridate::mdy(transaction_dt)
    )
  return(transactions)
}
|
\name{I2edge}
\alias{I2edge}
\title{Create edge data frame from gene - geneset indicator matrix}
\description{Used for input to \code{bp} function, \code{edge} argument}
\usage{
I2edge(I)
}
\arguments{
\item{I}{indicator matrix, rows are genes and columns are genesets}
}
\value{
data frame of edges:
\item{column 1}{gene sets}
\item{column 2}{genes}
}
\author{
Aimee Teo Broman
}
\seealso{
\code{gsEdge}
}
\examples{
data(t2d)
## eout <- I2edge(t2d$I)
}
|
/man/I2edge.Rd
|
no_license
|
tienv/Rolemodel
|
R
| false
| false
| 462
|
rd
|
\name{I2edge}
\alias{I2edge}
\title{Create edge data frame from gene - geneset indicator matrix}
\description{Used for input to \code{bp} function, \code{edge} argument}
\usage{
I2edge(I)
}
\arguments{
\item{I}{indicator matrix, rows are genes and columns are genesets}
}
\value{
data frame of edges:
\item{column 1}{gene sets}
\item{column 2}{genes}
}
\author{
Aimee Teo Broman
}
\seealso{
\code{gsEdge}
}
\examples{
data(t2d)
## eout <- I2edge(t2d$I)
}
|
#
# glob-def-usage.R, 7 Feb 20
# Data from:
# Understanding Source Code Evolution Using Abstract Syntax Tree Matching
# Iulian Neamtiu and Jeffrey S. Foster and Michael Hicks
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG variables_C types_C functions_C LOC_global-variable variable_globals
source("ESEUR_config.r")
library("plyr")
# One row per program release: LOC, global-variable count, release date, etc.
gd=read.csv(paste0(ESEUR_dir, "sourcecode/glob-def-usage.csv.xz"), as.is=TRUE)
gd$Release.Date=as.Date(gd$Release.Date, format="%d/%m/%y")
# plot(gd$Release.Date, gd$Global.Variables)
# Empty plot frame with a log x-axis; per-program points are added below.
plot(gd$LOC/1e3, gd$Global.Variables, type="n", log="x",
	xlab="KLOC", ylab="Global variables\n")
u_prog=unique(gd$Program)
pal_col=rainbow(length(u_prog))
# Assign each program its own colour so its releases are visually grouped.
gd$col_str=mapvalues(gd$Program, u_prog, pal_col)
# Overlay one point cloud per program in that program's colour.
d_ply(gd, .(Program), function(df) points(df$LOC/1e3, df$Global.Variables, col=df$col_str[1]))
legend(x="topleft", legend=u_prog, bty="n", fill=pal_col, cex=1.2)
|
/sourcecode/glob-def-usage.R
|
no_license
|
shanechin/ESEUR-code-data
|
R
| false
| false
| 970
|
r
|
#
# glob-def-usage.R, 7 Feb 20
# Data from:
# Understanding Source Code Evolution Using Abstract Syntax Tree Matching
# Iulian Neamtiu and Jeffrey S. Foster and Michael Hicks
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG variables_C types_C functions_C LOC_global-variable variable_globals
source("ESEUR_config.r")
library("plyr")
# One row per program release: LOC, global-variable count, release date, etc.
gd=read.csv(paste0(ESEUR_dir, "sourcecode/glob-def-usage.csv.xz"), as.is=TRUE)
gd$Release.Date=as.Date(gd$Release.Date, format="%d/%m/%y")
# plot(gd$Release.Date, gd$Global.Variables)
# Empty plot frame with a log x-axis; per-program points are added below.
plot(gd$LOC/1e3, gd$Global.Variables, type="n", log="x",
	xlab="KLOC", ylab="Global variables\n")
u_prog=unique(gd$Program)
pal_col=rainbow(length(u_prog))
# Assign each program its own colour so its releases are visually grouped.
gd$col_str=mapvalues(gd$Program, u_prog, pal_col)
# Overlay one point cloud per program in that program's colour.
d_ply(gd, .(Program), function(df) points(df$LOC/1e3, df$Global.Variables, col=df$col_str[1]))
legend(x="topleft", legend=u_prog, bty="n", fill=pal_col, cex=1.2)
|
\name{loadNetwork}
\Rdversion{1.1}
\alias{loadNetwork}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Load a Boolean network from a file
}
\description{
Loads a Boolean network or probabilistic Boolean network from a file and converts it to an internal transition table representation.
}
\usage{
loadNetwork(file,
bodySeparator = ",",
lowercaseGenes = FALSE,
symbolic = FALSE)
}
\arguments{
\item{file}{
The name of the file to be read
}
\item{bodySeparator}{
An optional separation character to divide the target factors and the formulas. Default is ",".
}
\item{lowercaseGenes}{
If set to \code{TRUE}, all gene names are converted to lower case, i.e. the gene names are case-insensitive. This corresponds to the behaviour of \pkg{BoolNet} versions prior to 1.5. Defaults to \code{FALSE}.
}
\item{symbolic}{
If set to \code{TRUE}, a symbolic representation of class \code{SymbolicBooleanNetwork} is returned. This is not available for asynchronous or probabilistic Boolean networks, but is required for the simulation of networks with extended temporal predicates and time delays (see \code{\link{simulateSymbolicModel}}). If such predicates are detected, the switch is activated by default.
}
}
\details{
Depending on whether the network is loaded in truth table representation or not, the supported network file formats differ slightly.
For the truth table representation (\code{symbolic=FALSE}), the language basically consists of expressions based on the Boolean operators AND (&), or (|), and NOT (!). In addition, some convenience operators are included (see EBNF and operator description below).
The first line contains a header. In case of a Boolean network with only one function per gene, the header is "targets, functions"; in a probabilistic network, there is an optional third column "probabilities". All subsequent lines contain Boolean rules or comment lines that are omitted by the parser.
A rule consists of a target gene, a separator, a Boolean expression to calculate a transition step for the target gene, and an optional probability for the rule (for probabilistic Boolean networks only -- see below).
The EBNF description of the network file format is as follows:
\preformatted{
Network = Header Newline {Rule Newline | Comment Newline};
Header = "targets" Separator "factors";
Rule = GeneName Separator BooleanExpression [Separator Probability];
Comment = "#" String;
BooleanExpression = GeneName
| "!" BooleanExpression
| "(" BooleanExpression ")"
| BooleanExpression " & " BooleanExpression
| BooleanExpression " | " BooleanExpression;
| "all(" BooleanExpression {"," BooleanExpression} ")"
| "any(" BooleanExpression {"," BooleanExpression} ")"
| "maj(" BooleanExpression {"," BooleanExpression} ")"
| "sumgt(" BooleanExpression {"," BooleanExpression} "," Integer ")"
| "sumlt(" BooleanExpression {"," BooleanExpression} "," Integer ")";
GeneName = ? A gene name from the list of involved genes ?;
Separator = ",";
Integer = ? An integer value?;
Probability = ? A floating-point number ?;
String = ? Any sequence of characters (except a line break) ?;
Newline = ? A line break character ?;
}
The extended format for Boolean networks with temporal elements that can be loaded if \code{symbolic=TRUE} additionally allows for a specification of time steps. Furthermore, the operators can be extended with iterators that evaluate their arguments over multiple time steps.
\preformatted{
Network = Header Newline
{Function Newline | Comment Newline};
Header = "targets" Separator "factors";
Function = GeneName Separator BooleanExpression;
Comment = "#" String;
BooleanExpression = GeneName | GeneName TemporalSpecification | BooleanOperator | TemporalOperator
BooleanOperator = BooleanExpression
| "!" BooleanExpression
| "(" BooleanExpression ")"
| BooleanExpression " & " BooleanExpression
| BooleanExpression " | " BooleanExpression;
TemporalOperator = "all" [TemporalIteratorDef]
"(" BooleanExpression {"," BooleanExpression} ")"
| "any" [TemporalIteratorDef]
"(" BooleanExpression {"," BooleanExpression} ")"
| "maj" [TemporalIteratorDef]
"(" BooleanExpression {"," BooleanExpression} ")"
| "sumgt" [TemporalIteratorDef]
"(" BooleanExpression {"," BooleanExpression} "," Integer ")"
| "sumlt" [TemporalIteratorDef]
"(" BooleanExpression {"," BooleanExpression} "," Integer ")"
| "timeis" "(" Integer ")"
| "timegt" "(" Integer ")"
| "timelt" "(" Integer ")";
TemporalIteratorDef = "[" TemporalIterator "=" Integer ".." Integer "]";
TemporalSpecification = "[" TemporalOperand {"+" TemporalOperand | "-" TemporalOperand} "]";
TemporalOperand = TemporalIterator | Integer
TemporalIterator = ? An alphanumeric string ?;
GeneName = ? A gene name from the list of involved genes ?;
Separator = ",";
Integer = ? An integer value?;
String = ? Any sequence of characters (except a line break) ?;
Newline = ? A line break character ?;
}
The meaning of the operators is as follows:
\describe{
\item{\code{all}}{Equivalent to a conjunction of all arguments. For symbolic networks, the operator can have a time range, in which case the arguments are evaluated for each time point specified in the iterator.}
\item{\code{any}}{Equivalent to a disjunction of all arguments. For symbolic networks, the operator can have a time range, in which case the arguments are evaluated for each time point specified in the iterator.}
\item{\code{maj}}{Evaluates to true if the majority of the arguments evaluate to true. For symbolic networks, the operator can have a time range, in which case the arguments are evaluated for each time point specified in the iterator.}
\item{\code{sumgt}}{Evaluates to true if the number of arguments (except the last) that evaluate to true is greater than the number specified in the last argument. For symbolic networks, the operator can have a time range, in which case the arguments are evaluated for each time point specified in the iterator.}
\item{\code{sumlt}}{Evaluates to true if the number of arguments (except the last) that evaluate to true is less than the number specified in the last argument. For symbolic networks, the operator can have a time range, in which case the arguments are evaluated for each time point specified in the iterator.}
\item{\code{timeis}}{Evaluates to true if the current absolute time step (i.e. number of state transitions performed from the current start state) is the same as the argument.}
\item{\code{timelt}}{Evaluates to true if the current absolute time step (i.e. number of state transitions performed from the current start state) is less than the argument.}
\item{\code{timegt}}{Evaluates to true if the current absolute time step (i.e. number of state transitions performed from the current start state) is greater than the argument.}
}
If \code{symbolic=FALSE} and there is exactly one rule for each gene, a Boolean network of class \code{BooleanNetwork} is created. In these networks, constant genes are automatically fixed (e.g. knocked-out or over-expressed). This means that they are always set to the constant value, and states with the complementary value are not considered in transition tables etc. If you would like to change this behaviour, use \code{\link{fixGenes}} to reset the fixing.
If \code{symbolic=FALSE} and two or more rules exist for the same gene, the function returns a probabilistic network of class \code{ProbabilisticBooleanNetwork}. In this case, alternative rules may be annotated with probabilities, which must sum up to 1 for all rules that belong to the same gene. If no probabilities are supplied, uniform distribution is assumed.
If \code{symbolic=TRUE}, a symbolic representation of a (possibly temporal) Boolean network of class \code{SymbolicBooleanNetwork} is created.
}
\value{
If \code{symbolic=FALSE} and only one function per gene is specified, a structure of class \code{BooleanNetwork} representing the network is returned. It has the following components:
\item{genes}{A vector of gene names involved in the network. This list determines the indices of genes in inputs of functions or in state bit vectors.}
\item{interactions}{A list with \code{length(genes)} elements, where the i-th element describes the transition function for the i-th gene. Each element has the following sub-components:
\describe{
\item{input}{A vector of indices of the genes that serve as the input of the Boolean transition function. If the function has no input (i.e. the gene is constant), the vector consists of a zero element.}
\item{func}{The transition function in truth table representation. This vector has \if{latex}{\cr}\code{2^length(input)} entries, one for each combination of input variables. If the gene is constant, the function is 1 or 0.}
\item{expression}{A string representation of the Boolean expression from which the truth table was generated}
}}
\item{fixed}{A vector specifying which genes are knocked-out or over-expressed. For each gene, there is one element which is set to 0 if the gene is knocked-out, to 1 if the gene is over-expressed, and to -1 if the gene is not fixed at all, i. e. can change its value according to the supplied transition function. Constant genes are automatically set to fixed values.}
If \code{symbolic=FALSE} and there is at least one gene with two or more alternative transition functions, a structure of class \code{ProbabilisticBooleanNetwork} is returned. This structure is similar to \code{BooleanNetwork}, but allows for storing more than one function in an interaction. It consists of the following components:
\item{genes}{A vector of gene names involved in the network. This list determines the indices of genes in inputs of functions or in state bit vectors.}
\item{interactions}{A list with \code{length(genes)} elements, where the i-th element describes the alternative transition functions for the i-th gene. Each element is a list of transition functions. In this second-level list, each element has the the following sub-components:
\describe{
\item{input}{A vector of indices of the genes that serve as the input of the Boolean transition function. If the function has no input (i.e. the gene is constant), the vector consists of a zero element.}
\item{func}{The transition function in truth table representation. This vector has \if{latex}{\cr}\code{2^length(input)} entries, one for each combination of input variables. If the gene is constant, the function is -1.}
\item{expression}{A string representation of the underlying Boolean expression}
\item{probability}{The probability that the corresponding transition function is chosen}
}}
\item{fixed}{A vector specifying which genes are knocked-out or over-expressed. For each gene, there is one element which is set to 0 if the gene is knocked-out, to 1 if the gene is over-expressed, and to -1 if the gene is not fixed at all, i. e. can change its value according to the supplied transition function. You can knock-out and over-express genes using \code{\link{fixGenes}}.}
If \code{symbolic=TRUE}, a structure of class \code{SymbolicBooleanNetwork} that represents the network as expression trees is returned. It has the following components:
\item{genes}{A vector of gene names involved in the network. This list determines the indices of genes in inputs of functions or in state bit vectors.}
\item{interactions}{A list with \code{length(genes)} elements, where the i-th element describes the transition function for the i-th gene in a symbolic representation. Each such element is a list that represents a recursive expression tree, possibly consisting of sub-elements (operands) that are expression trees themselves. Each element in an expression tree can be a Boolean/temporal operator, a literal ("atom") or a numeric constant.}
\item{internalStructs}{A pointer referencing an internal representation of the expression trees as raw C objects. This is used for simulations and must be set to NULL if \code{interactions} are changed to force a refreshment.
}
\item{timeDelays}{An integer vector storing the temporal memory sizes required for each of the genes in the network. That is, the vector stores the minimum number of predecessor states of each gene that need to be saved to determine the successor state of the network.}
\item{fixed}{A vector specifying which genes are knocked-out or over-expressed. For each gene, there is one element which is set to 0 if the gene is knocked-out, to 1 if the gene is over-expressed, and to -1 if the gene is not fixed at all, i. e. can change its value according to the supplied transition function. Constant genes are automatically set to fixed values.}
}
\seealso{
\code{\link{getAttractors}}, \code{\link{simulateSymbolicModel}}, \code{\link{markovSimulation}}, \code{\link{stateTransition}}, \code{\link{fixGenes}}, \code{\link{loadSBML}}, \code{\link{loadBioTapestry}}
}
\examples{
# write example network to file
sink("testNet.bn")
cat("targets, factors\n")
cat("Gene1, !Gene2 | !Gene3\n")
cat("Gene2, Gene3 & Gene4\n")
cat("Gene3, Gene2 & !Gene1\n")
cat("Gene4, 1\n")
sink()
# read file
net <- loadNetwork("testNet.bn")
print(net)
}
\keyword{Boolean network
probabilistic Boolean network
PBN
file
logic
parse}
|
/man/loadNetwork.Rd
|
no_license
|
JacobVisscher/BoolNet
|
R
| false
| false
| 14,188
|
rd
|
\name{loadNetwork}
\Rdversion{1.1}
\alias{loadNetwork}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Load a Boolean network from a file
}
\description{
Loads a Boolean network or probabilistic Boolean network from a file and converts it to an internal transition table representation.
}
\usage{
loadNetwork(file,
bodySeparator = ",",
lowercaseGenes = FALSE,
symbolic = FALSE)
}
\arguments{
\item{file}{
The name of the file to be read
}
\item{bodySeparator}{
An optional separation character to divide the target factors and the formulas. Default is ",".
}
\item{lowercaseGenes}{
If set to \code{TRUE}, all gene names are converted to lower case, i.e. the gene names are case-insensitive. This corresponds to the behaviour of \pkg{BoolNet} versions prior to 1.5. Defaults to \code{FALSE}.
}
\item{symbolic}{
If set to \code{TRUE}, a symbolic representation of class \code{SymbolicBooleanNetwork} is returned. This is not available for asynchronous or probabilistic Boolean networks, but is required for the simulation of networks with extended temporal predicates and time delays (see \code{\link{simulateSymbolicModel}}). If such predicates are detected, the switch is activated by default.
}
}
\details{
Depending on whether the network is loaded in truth table representation or not, the supported network file formats differ slightly.
For the truth table representation (\code{symbolic=FALSE}), the language basically consists of expressions based on the Boolean operators AND (&), OR (|), and NOT (!). In addition, some convenience operators are included (see EBNF and operator description below).
The first line contains a header. In case of a Boolean network with only one function per gene, the header is "targets, factors"; in a probabilistic network, there is an optional third column "probabilities". All subsequent lines contain Boolean rules or comment lines that are omitted by the parser.
A rule consists of a target gene, a separator, a Boolean expression to calculate a transition step for the target gene, and an optional probability for the rule (for probabilistic Boolean networks only -- see below).
The EBNF description of the network file format is as follows:
\preformatted{
Network = Header Newline {Rule Newline | Comment Newline};
Header = "targets" Separator "factors";
Rule = GeneName Separator BooleanExpression [Separator Probability];
Comment = "#" String;
BooleanExpression = GeneName
| "!" BooleanExpression
| "(" BooleanExpression ")"
| BooleanExpression " & " BooleanExpression
                    | BooleanExpression " | " BooleanExpression
| "all(" BooleanExpression {"," BooleanExpression} ")"
| "any(" BooleanExpression {"," BooleanExpression} ")"
| "maj(" BooleanExpression {"," BooleanExpression} ")"
| "sumgt(" BooleanExpression {"," BooleanExpression} "," Integer ")"
| "sumlt(" BooleanExpression {"," BooleanExpression} "," Integer ")";
GeneName = ? A gene name from the list of involved genes ?;
Separator = ",";
Integer = ? An integer value ?;
Probability = ? A floating-point number ?;
String = ? Any sequence of characters (except a line break) ?;
Newline = ? A line break character ?;
}
The extended format for Boolean networks with temporal elements that can be loaded if \code{symbolic=TRUE} additionally allows for a specification of time steps. Furthermore, the operators can be extended with iterators that evaluate their arguments over multiple time steps.
\preformatted{
Network = Header Newline
{Function Newline | Comment Newline};
Header = "targets" Separator "factors";
Function = GeneName Separator BooleanExpression;
Comment = "#" String;
BooleanExpression = GeneName | GeneName TemporalSpecification | BooleanOperator | TemporalOperator
BooleanOperator = BooleanExpression
| "!" BooleanExpression
| "(" BooleanExpression ")"
| BooleanExpression " & " BooleanExpression
| BooleanExpression " | " BooleanExpression;
TemporalOperator = "all" [TemporalIteratorDef]
"(" BooleanExpression {"," BooleanExpression} ")"
| "any" [TemporalIteratorDef]
"(" BooleanExpression {"," BooleanExpression} ")"
| "maj" [TemporalIteratorDef]
"(" BooleanExpression {"," BooleanExpression} ")"
| "sumgt" [TemporalIteratorDef]
"(" BooleanExpression {"," BooleanExpression} "," Integer ")"
| "sumlt" [TemporalIteratorDef]
"(" BooleanExpression {"," BooleanExpression} "," Integer ")"
| "timeis" "(" Integer ")"
| "timegt" "(" Integer ")"
| "timelt" "(" Integer ")";
TemporalIteratorDef = "[" TemporalIterator "=" Integer ".." Integer "]";
TemporalSpecification = "[" TemporalOperand {"+" TemporalOperand | "-" TemporalOperand} "]";
TemporalOperand = TemporalIterator | Integer
TemporalIterator = ? An alphanumeric string ?;
GeneName = ? A gene name from the list of involved genes ?;
Separator = ",";
Integer = ? An integer value ?;
String = ? Any sequence of characters (except a line break) ?;
Newline = ? A line break character ?;
}
The meaning of the operators is as follows:
\describe{
\item{\code{all}}{Equivalent to a conjunction of all arguments. For symbolic networks, the operator can have a time range, in which case the arguments are evaluated for each time point specified in the iterator.}
\item{\code{any}}{Equivalent to a disjunction of all arguments. For symbolic networks, the operator can have a time range, in which case the arguments are evaluated for each time point specified in the iterator.}
\item{\code{maj}}{Evaluates to true if the majority of the arguments evaluate to true. For symbolic networks, the operator can have a time range, in which case the arguments are evaluated for each time point specified in the iterator.}
\item{\code{sumgt}}{Evaluates to true if the number of arguments (except the last) that evaluate to true is greater than the number specified in the last argument. For symbolic networks, the operator can have a time range, in which case the arguments are evaluated for each time point specified in the iterator.}
\item{\code{sumlt}}{Evaluates to true if the number of arguments (except the last) that evaluate to true is less than the number specified in the last argument. For symbolic networks, the operator can have a time range, in which case the arguments are evaluated for each time point specified in the iterator.}
\item{\code{timeis}}{Evaluates to true if the current absolute time step (i.e. number of state transitions performed from the current start state) is the same as the argument.}
\item{\code{timelt}}{Evaluates to true if the current absolute time step (i.e. number of state transitions performed from the current start state) is less than the argument.}
\item{\code{timegt}}{Evaluates to true if the current absolute time step (i.e. number of state transitions performed from the current start state) is greater than the argument.}
}
If \code{symbolic=FALSE} and there is exactly one rule for each gene, a Boolean network of class \code{BooleanNetwork} is created. In these networks, constant genes are automatically fixed (e.g. knocked-out or over-expressed). This means that they are always set to the constant value, and states with the complementary value are not considered in transition tables etc. If you would like to change this behaviour, use \code{\link{fixGenes}} to reset the fixing.
If \code{symbolic=FALSE} and two or more rules exist for the same gene, the function returns a probabilistic network of class \code{ProbabilisticBooleanNetwork}. In this case, alternative rules may be annotated with probabilities, which must sum up to 1 for all rules that belong to the same gene. If no probabilities are supplied, uniform distribution is assumed.
If \code{symbolic=TRUE}, a symbolic representation of a (possibly temporal) Boolean network of class \code{SymbolicBooleanNetwork} is created.
}
\value{
If \code{symbolic=FALSE} and only one function per gene is specified, a structure of class \code{BooleanNetwork} representing the network is returned. It has the following components:
\item{genes}{A vector of gene names involved in the network. This list determines the indices of genes in inputs of functions or in state bit vectors.}
\item{interactions}{A list with \code{length(genes)} elements, where the i-th element describes the transition function for the i-th gene. Each element has the following sub-components:
\describe{
\item{input}{A vector of indices of the genes that serve as the input of the Boolean transition function. If the function has no input (i.e. the gene is constant), the vector consists of a zero element.}
\item{func}{The transition function in truth table representation. This vector has \if{latex}{\cr}\code{2^length(input)} entries, one for each combination of input variables. If the gene is constant, the function is 1 or 0.}
\item{expression}{A string representation of the Boolean expression from which the truth table was generated}
}}
\item{fixed}{A vector specifying which genes are knocked-out or over-expressed. For each gene, there is one element which is set to 0 if the gene is knocked-out, to 1 if the gene is over-expressed, and to -1 if the gene is not fixed at all, i. e. can change its value according to the supplied transition function. Constant genes are automatically set to fixed values.}
If \code{symbolic=FALSE} and there is at least one gene with two or more alternative transition functions, a structure of class \code{ProbabilisticBooleanNetwork} is returned. This structure is similar to \code{BooleanNetwork}, but allows for storing more than one function in an interaction. It consists of the following components:
\item{genes}{A vector of gene names involved in the network. This list determines the indices of genes in inputs of functions or in state bit vectors.}
\item{interactions}{A list with \code{length(genes)} elements, where the i-th element describes the alternative transition functions for the i-th gene. Each element is a list of transition functions. In this second-level list, each element has the following sub-components:
\describe{
\item{input}{A vector of indices of the genes that serve as the input of the Boolean transition function. If the function has no input (i.e. the gene is constant), the vector consists of a zero element.}
\item{func}{The transition function in truth table representation. This vector has \if{latex}{\cr}\code{2^length(input)} entries, one for each combination of input variables. If the gene is constant, the function is -1.}
\item{expression}{A string representation of the underlying Boolean expression}
\item{probability}{The probability that the corresponding transition function is chosen}
}}
\item{fixed}{A vector specifying which genes are knocked-out or over-expressed. For each gene, there is one element which is set to 0 if the gene is knocked-out, to 1 if the gene is over-expressed, and to -1 if the gene is not fixed at all, i. e. can change its value according to the supplied transition function. You can knock-out and over-express genes using \code{\link{fixGenes}}.}
If \code{symbolic=TRUE}, a structure of class \code{SymbolicBooleanNetwork} that represents the network as expression trees is returned. It has the following components:
\item{genes}{A vector of gene names involved in the network. This list determines the indices of genes in inputs of functions or in state bit vectors.}
\item{interactions}{A list with \code{length(genes)} elements, where the i-th element describes the transition function for the i-th gene in a symbolic representation. Each such element is a list that represents a recursive expression tree, possibly consisting of sub-elements (operands) that are expression trees themselves. Each element in an expression tree can be a Boolean/temporal operator, a literal ("atom") or a numeric constant.}
\item{internalStructs}{A pointer referencing an internal representation of the expression trees as raw C objects. This is used for simulations and must be set to NULL if \code{interactions} are changed to force a refreshment.
}
\item{timeDelays}{An integer vector storing the temporal memory sizes required for each of the genes in the network. That is, the vector stores the minimum number of predecessor states of each gene that need to be saved to determine the successor state of the network.}
\item{fixed}{A vector specifying which genes are knocked-out or over-expressed. For each gene, there is one element which is set to 0 if the gene is knocked-out, to 1 if the gene is over-expressed, and to -1 if the gene is not fixed at all, i. e. can change its value according to the supplied transition function. Constant genes are automatically set to fixed values.}
}
\seealso{
\code{\link{getAttractors}}, \code{\link{simulateSymbolicModel}}, \code{\link{markovSimulation}}, \code{\link{stateTransition}}, \code{\link{fixGenes}}, \code{\link{loadSBML}}, \code{\link{loadBioTapestry}}
}
\examples{
# write example network to file
sink("testNet.bn")
cat("targets, factors\n")
cat("Gene1, !Gene2 | !Gene3\n")
cat("Gene2, Gene3 & Gene4\n")
cat("Gene3, Gene2 & !Gene1\n")
cat("Gene4, 1\n")
sink()
# read file
net <- loadNetwork("testNet.bn")
print(net)
}
\keyword{Boolean network
probabilistic Boolean network
PBN
file
logic
parse}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.