content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
testlist <- list(x = c(-692781312L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) | /diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609961625-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 479 | r | testlist <- list(x = c(-692781312L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) |
# Scratch calculations comparing group means between two sets of benchmark
# measurements (values appear to come from FHPC timing logs -- units are not
# recorded here; TODO confirm). Each section computes means x and y, echoes
# them, and echoes their difference.

# Section 1: large raw counts.
x <- mean(c(372889698, 143797361, 671828181, 373809554))
y <- mean(c(602487792, 16412868, 15030736, 15901403))
x
y
x - y

# Section 2.
x <- mean(c(9459, 11492, 49514, 9484))
y <- mean(c(10356, 10256, 8417, 12081))
x
y
x - y

# Section 3: small fractional values.
x <- mean(c(0.014486573, 0.007287606, 0.024476034, 0.014344770))
y <- mean(c(0.021872540, 0.002893806, 0.002676134, 0.002677902))
x
y
x - y

# BUG FIX: the next line was bare text ("array_sum and touch_by_all"),
# which is a syntax error in R; it was clearly meant as a section label,
# so it is now a comment.
# array_sum and touch_by_all
x <- mean(c(1488389352, 10813538, 442001922))
y <- mean(c(16341115, 17137728, 120088591))
x
y
x - y

# NOTE(review): the original echoes no x/y/x-y for the remaining sections and
# reassigns both variables back to back; kept as-is to preserve behavior.
x <- mean(c(10067, 8828, 11419))
y <- mean(c(9805, 12430, 7846))
y <- mean(c(0.002820471, 0.002615039, 0.006201801))
x <- mean(c(0.057200670, 0.002552240, 0.016680796))
| /FHPC_ASSIGNMENT_3/FHPC_ASSIGNMENT_2/graphs/R files/measures_for_activity.R | no_license | nicdom23/FHPC_2019-2020 | R | false | false | 678 | r | x<-mean(c(372889698, 143797361,671828181,373809554))
y<-mean(c(602487792,16412868,15030736,15901403) )
x
y
x-y
x<- mean(c(9459, 11492,49514,9484))
y<-mean(c(10356,10256, 8417,12081))
x
y
x-y
x<-mean(c(0.014486573,0.007287606,0.024476034, 0.014344770))
y<- mean(c(0.021872540,0.002893806,0.002676134,0.002677902))
x
y
x-y
array_sum and touch_by_all
x<-mean(c(1488389352,10813538,442001922))
y<-mean(c( 16341115,17137728,120088591))
x
y
x-y
x<-mean(c(10067,8828, 11419))
y<-mean(c(9805 ,12430,7846 ))
y<-mean(c(0.002820471, 0.002615039,0.006201801 ))
x<-mean(c(0.057200670,0.002552240, 0.016680796))
|
#####################################################
#####################################################
##
## Roll dice demo
##
##
#####################################################
#####################################################
#install.packages("TeachingDemos")
# Note if you choose > 10 as the number of dice the app fails as it is not able to handle Null or N/A which are the default lables for the colors
# Roll `numDice` dice via TeachingDemos::dice() and return the rolls as a
# data frame. Installs TeachingDemos on first use if it is missing.
# numDice must be <= 10: dice() fails beyond that because its default color
# labels come back NULL/NA (see note above); larger requests return an
# apology string instead.
rollEm <- function(numDice = 2) {
  if (numDice < 11) {
    list.of.packages <- c("TeachingDemos")
    new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[, "Package"])]
    if (length(new.packages)) install.packages(new.packages)
    library(TeachingDemos)
    r <- dice(ndice = numDice)
    return(as.data.frame(r))
  } else {
    # Typo fix in the user-facing message: "figued" -> "figured".
    "Oops we have not quite figured this out... number of dice need to be less than or equal to 10!"
  }
}
# BUG FIX: the original called rollEm(x) with `x` undefined in this script,
# which errors at run time; use the function's default (2 dice) instead.
result <- rollEm()
| /samples/features/sql-big-data-cluster/app-deploy/RollDice/roll-dice.R | permissive | ik-dotnet/sql-server-samples | R | false | false | 893 | r | #####################################################
#####################################################
##
## Roll dice demo
##
##
#####################################################
#####################################################
#install.packages("TeachingDemos")
# Note if you choose > 10 as the number of dice the app fails as it is not able to handle Null or N/A which are the default lables for the colors
rollEm <- function(numDice = 2)
{
if (numDice < 11)
{
list.of.packages <- c("TeachingDemos")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
library(TeachingDemos)
r<-dice(ndice = numDice)
return(as.data.frame(r))
}
else {"Oops we have not quite figued this out... number of dice need to be less than or equal to 10!"}
}
result<-rollEm(x)
|
## --- Load, clean, and merge SNAP event and household data ------------------
# NOTE(review): rm(list=ls()) and setwd(getwd()) are effectively no-ops /
# script-hygiene anti-patterns; kept as-is to avoid changing behavior.
rm(list=ls())
wd<-getwd()
setwd(wd)
# Purchase-event data; drop rows with any missing value.
event<-read.csv("event.csv")
event<-na.omit(event)
# Keep only events paid with SNAP benefits (ebt_snap == 1).
newdata <- event[which(event$ebt_snap=='1'),]
# Sum columns 2:4 per household -- presumably the spend/amount columns;
# TODO confirm against the event.csv schema.
event1<-aggregate(newdata[, 2:4], list(newdata$hhnum), sum)
names(event1)[1]<-"hhnum"
# Household data set
hh<-read.csv("household.csv")
hh<-na.omit(hh)
# Households currently on SNAP that also self-report participation.
hh1 <- hh[ which(hh$snapnowhh=='1' & hh$snapnowreport ==1),]
#joining two data set by hhnum (merge() joins on the shared "hhnum" column)
#finaldata<-hh1[hh1$hhnum%in%event1$Group.1,]
#event1[!event1$Group.1%in%hh1$hhnum,]
final.data<-merge(hh1, event1)
# remove duplicate household if any..
final.data<-final.data[!duplicated(final.data$hhnum), ]
# Recode numeric codes to readable labels. NOTE(review): the first assignment
# coerces each column to character, so the later comparisons (e.g. ==2) rely
# on R's implicit "2"==2 coercion -- it works, but is fragile.
#recoding region variable
final.data$region[final.data$region==1]="Northeast"
final.data$region[final.data$region==2]="Midwest"
final.data$region[final.data$region==3]="South"
final.data$region[final.data$region==4]="West"
#recoding rural variable (indicator: 1 = rural, 0 = urban)
final.data$rural[final.data$rural==1]="Rural"
final.data$rural[final.data$rural==0]="Urban"
# recoding adltfscat (adult food security category: 1 = High ... 4 = very low)
final.data$adltfscat[final.data$adltfscat==1]="High"
final.data$adltfscat[final.data$adltfscat==2]="Marginal"
final.data$adltfscat[final.data$adltfscat==3]="Low"
final.data$adltfscat[final.data$adltfscat==4]="very low"
summary(final.data)
# Weekly food-at-home spending categorized against the Official USDA Food
# Plans: the Thrifty Food Plan benchmark for a family of 4 (the average
# household size in these data) is $129.5/week. total.paid flags households
# exceeding that benchmark ("1") versus not ("0").
final.data$total.paid <- ifelse(final.data$totalpaid > 129, "1", "0")
# Convert coded variables to factors (unordered unless noted).
final.data$total.paid <- as.factor(final.data$total.paid)
final.data$region <- as.factor(final.data$region)
final.data$hhsize <- as.factor(final.data$hhsize)
final.data$rural <- as.factor(final.data$rural)
final.data$targetgroup <- as.factor(final.data$targetgroup)
final.data$selfemployhh <- as.factor(final.data$selfemployhh)
final.data$housingown <- as.factor(final.data$housingown)
final.data$liqassets <- as.factor(final.data$liqassets)
final.data$anyvehicle <- as.factor(final.data$anyvehicle)
# Still numeric codes here, so as.ordered() sorts them numerically; assumes
# the codes are already ordinal -- TODO confirm the codebook.
final.data$foodsufficient <- as.ordered(final.data$foodsufficient)
final.data$grocerylistfreq <- as.factor(final.data$grocerylistfreq)
final.data$anyvegetarian <- as.factor(final.data$anyvegetarian)
final.data$nutritioneduc <- as.factor(final.data$nutritioneduc)
final.data$eathealthyhh <- as.factor(final.data$eathealthyhh)
# BUG FIX: as.ordered() on the recoded *character* column sorts its levels
# alphabetically ("High" < "Low" < "Marginal" < "very low"), scrambling the
# food-security scale. Spell out the intended order explicitly instead.
final.data$adltfscat <- factor(final.data$adltfscat,
                               levels = c("very low", "Low", "Marginal", "High"),
                               ordered = TRUE)
final.data$dietstatuspr <- as.factor(final.data$dietstatuspr)
# structure of data
str(final.data)
#Exploratory data analysis
#install.packages("DataExplorer")
library(DataExplorer)
# Run a quick EDA battery over a data frame and show every result.
# NOTE(review): df_status(), freq() and profiling_num() are 'funModeling'
# functions, not DataExplorer -- confirm funModeling is attached elsewhere.
basic_eda <- function(data)
{
  # Inside a function, auto-printing is suppressed, so the original version
  # silently discarded every result except the last one; print() each
  # explicitly so the whole battery is actually displayed.
  print(head(data))
  print(df_status(data))
  print(freq(data))
  print(profiling_num(data))
  plot_num(data)
  # Last expression: its value is also returned (invisibly, via print()).
  print(describe(data))
}
basic_eda(final.data)
library(ggplot2)
# NOTE(review): fun.y= is deprecated since ggplot2 3.3.0 in favor of fun=;
# kept here so the script still runs on the older ggplot2 it was written for.
# Mean weekly total paid by primary SNAP store type.
g2 <- ggplot(final.data) +
  geom_bar(aes(primstoresnaptype, totalpaid, fill = primstoresnaptype), stat = "summary", fun.y = "mean")
# BUG FIX: labs() has no xlab=/ylab= arguments (those labels were silently
# ignored); the axis labels belong to x= and y=, as the plots below do.
g2 + labs(title = "Total expenditure by store types", x = "Store type", y = "Weekly food expenditure $")
# Mean weekly SNAP (EBT) amount by census region.
g3 <- ggplot(data = final.data) +
  geom_bar(aes(region, ebt_snapamt, fill = region), stat = "summary", fun.y = "mean")
g3 + labs(x = "Region ", y = "Weekly food expenditure", title = "Food expenditure by regions")
# Mean weekly total paid: rural vs urban households.
g4 <- ggplot(data = final.data) +
  geom_bar(aes(rural, totalpaid, fill = rural), stat = "summary", fun.y = "mean")
g4 + labs(x = "Rural ", y = "Weekly food expenditure", title = "Food expenditure by rural and urban region")
# Spending across food-security levels, split by rural/urban.
g5 <- ggplot(final.data, aes(x = adltfscat, y = totalpaid, group = rural)) +
  geom_line(aes(color = rural)) +
  geom_point(aes(color = rural))
g5 + labs(x = "Food security ", y = "Weekly food expenditure", title = "Food expenditure with food security levels")
# Mean weekly total paid by household size.
g6 <- ggplot(data = final.data) +
  geom_bar(aes(hhsize, totalpaid, fill = hhsize), stat = "summary", fun.y = "mean")
g6 + labs(x = "HH size ", y = "Weekly food expenditure", title = "Food expenditure by HH size")
## --- Predictive modelling: classify households meeting the $129/week
## food-spending benchmark --------------------------------------------------
#predicting model using machine learning
library(tidyverse)
library(caret)
library(randomForest)
require(e1071)
library(DataExplorer)
set.seed(1337)
# trainControl: 10-fold cross-validation for caret::train below.
train_control<-trainControl(method = "cv", number=10)
# create an index to partition data (stratified on the outcome by caret)
index <- createDataPartition(final.data$total.paid, p=0.75, list=FALSE)
# splitting data into training (75%) and testing (25%) groups
trainSet <- final.data[ index,]
testSet <- final.data[-index,]
#Feature selection using rfe (recursive feature elimination) in caret
#control <- rfeControl(functions = rfFuncs,method = "repeatedcv",repeats = 3,verbose = FALSE)
outcomeName<-'total.paid'
control <- rfeControl(functions = rfFuncs,
                      method = "repeatedcv",
                      repeats = 3,
                      verbose = FALSE)
# Candidate predictors = every column except the outcome itself.
predictors<-names(trainSet)[!names(trainSet) %in% outcomeName]
spend_Pred_Profile <- rfe(trainSet[,predictors], trainSet[,outcomeName],
                         rfeControl = control)
spend_Pred_Profile
# Total potential predictors
#predictors<-c("hhsize", "region", "rural", "itemstot", "anyvegetarian","inchhavg_r", "liqassets", "selfemployhh", "anyvehicle", "largeexp","adltfscat", "foodsufficient", "dietstatuspr", "grocerylistfreq", "primstoresnaptype", "primstoredist_d", "nutritioneduc")
# Using several combinations of explanatory variables, the following subset
# was hand-picked for the final model.
predictors<-c("hhsize", "itemstot", "inchhavg_r", "grocerylistfreq", "primstoredist_d")
names(getModelInfo())
#random forest fit with 10-fold CV; rows with NA in predictors are dropped
model_rf<-train(total.paid~hhsize+itemstot +inchhavg_r+grocerylistfreq+primstoredist_d,method="rf", data=trainSet, trControl=train_control, na.action = na.omit)
model_rf
save(model_rf, file="RandomF.rda")
# NOTE(review): model_rf is immediately refit below with caret's default
# resampling (bootstrap), overwriting the CV fit saved above -- confirm this
# is intentional.
model_rf<-train(trainSet[,predictors],trainSet[,outcomeName],method='rf')
save(model_rf, file="RandomForest.rda")
print(model_rf)
confusionMatrix(model_rf)
confusionMatrix(model_rf)
#Creating grid
#Checking variable importance for GLM
varImp(object=model_rf)
#rf variable importance
plot(model_rf)
plot(varImp(object=model_rf),main="Random forest - Variable Importance")
#Predictions
predictions_rf<-predict.train(object=model_rf,testSet[,predictors],type="raw")
table(predictions_rf)
# Confusion matrix
confusionMatrix(predictions_rf,testSet[,outcomeName])
#Using gbm method
model_gbm<-train(trainSet[,predictors],trainSet[,outcomeName],method='gbm')
print(model_gbm)
#Checking variable importance for GBM
#Variable Importance
varImp(object=model_gbm)
plot(varImp(object=model_gbm),main="GBM - Variable Importance")
#Prediction with GBM
predictions_gbm<-predict.train(object=model_gbm,testSet[,predictors],type="raw")
table(predictions_gbm)
confusionMatrix(predictions_gbm,testSet[,outcomeName])
# Now Using nnet method
model_nnet<-train(trainSet[,predictors],trainSet[,outcomeName],method='nnet')
print(model_nnet)
plot(model_nnet)
# prediction with nnet
predictions_nnet<-predict.train(object=model_nnet,testSet[,predictors],type="raw")
table(predictions_nnet)
#Confusion Matrix and Statistics
confusionMatrix(predictions_nnet,testSet[,outcomeName])
confusionMatrix(predictions_gbm,testSet[,outcomeName])
confusionMatrix(predictions_rf,testSet[,outcomeName])
table(final.data$total.paid)
| /final.R | no_license | dhakalck/INFO-800-PROJECT | R | false | false | 7,265 | r | rm(list=ls())
wd<-getwd()
setwd(wd)
event<-read.csv("event.csv")
event<-na.omit(event)
newdata <- event[which(event$ebt_snap=='1'),]
event1<-aggregate(newdata[, 2:4], list(newdata$hhnum), sum)
names(event1)[1]<-"hhnum"
# Household data set
hh<-read.csv("household.csv")
hh<-na.omit(hh)
hh1 <- hh[ which(hh$snapnowhh=='1' & hh$snapnowreport ==1),]
#joining two data set by hhnum
#finaldata<-hh1[hh1$hhnum%in%event1$Group.1,]
#event1[!event1$Group.1%in%hh1$hhnum,]
final.data<-merge(hh1, event1)
# remove duplicate household if any..
final.data<-final.data[!duplicated(final.data$hhnum), ]
#recoding region variable
final.data$region[final.data$region==1]="Northeast"
final.data$region[final.data$region==2]="Midwest"
final.data$region[final.data$region==3]="South"
final.data$region[final.data$region==4]="West"
#recoding rural variable
final.data$rural[final.data$rural==1]="Rural"
final.data$rural[final.data$rural==0]="Urban"
# recoding adjtfscat variable
final.data$adltfscat[final.data$adltfscat==1]="High"
final.data$adltfscat[final.data$adltfscat==2]="Marginal"
final.data$adltfscat[final.data$adltfscat==3]="Low"
final.data$adltfscat[final.data$adltfscat==4]="very low"
summary(final.data)
# I categorized total weekly food expenditure on food at home based on Official USDA Food Plans,
# based on weekly food expenditure for a family of 4 (which is average household size in our data) is $129.5.
# I am interested to examine the proportion of hosehold that have met the food expenditure requirement on the basis of Thrifty Food Plan and Dietary Guidelines of america.
final.data$total.paid <- ifelse(final.data$totalpaid > 129,
c("1"), c("0"))
# converting numeric variable into factor
final.data$total.paid<-as.factor(final.data$total.paid)
final.data$region<-as.factor(final.data$region)
final.data$hhsize<-as.factor(final.data$hhsize)
final.data$rural <-as.factor(final.data$rural)
final.data$targetgroup<-as.factor(final.data$targetgroup)
final.data$selfemployhh<-as.factor(final.data$selfemployhh)
final.data$housingown<-as.factor(final.data$housingown)
final.data$liqassets<-as.factor(final.data$liqassets)
final.data$anyvehicle<-as.factor(final.data$anyvehicle)
final.data$foodsufficient<-as.ordered(final.data$foodsufficient)
final.data$grocerylistfreq<-as.factor(final.data$grocerylistfreq)
final.data$anyvegetarian<-as.factor(final.data$anyvegetarian)
final.data$nutritioneduc<-as.factor(final.data$nutritioneduc)
final.data$eathealthyhh<-as.factor(final.data$eathealthyhh)
final.data$adltfscat<-as.ordered(final.data$adltfscat)
final.data$dietstatuspr<-as.factor(final.data$dietstatuspr)
# structure of data
str(final.data)
#Exploratory data analysis
#install.packages("DataExplorer")
library(DataExplorer)
basic_eda <- function(data)
{
head(data)
df_status(data)
freq(data)
profiling_num(data)
plot_num(data)
describe(data)
}
basic_eda(final.data)
library(ggplot2)
g2<-ggplot(final.data) +
geom_bar(aes(primstoresnaptype,totalpaid, fill =primstoresnaptype), stat = "summary", fun.y = "mean")
g2 + labs(title = "Total expenditure by store types", xlab="Store type", ylab="Weekly food expenditure $")
g3<-ggplot(data =final.data) +
geom_bar(aes(region,ebt_snapamt, fill=region), stat = "summary", fun.y = "mean")
g3 + labs(x="Region ", y="Weekly food expenditure",title = "Food expenditure by regions")
g4<-ggplot(data =final.data) +
geom_bar(aes(rural,totalpaid, fill=rural), stat = "summary", fun.y = "mean")
g4 + labs(x="Rural ", y="Weekly food expenditure",title = "Food expenditure by rural and urban region")
g5<-ggplot(final.data, aes(x=adltfscat, y=totalpaid, group=rural)) +
geom_line(aes(color=rural))+
geom_point(aes(color=rural))
g5+labs(x="Food security ", y="Weekly food expenditure",title = "Food expenditure with food security levels")
g6<-ggplot(data =final.data) +
geom_bar(aes(hhsize,totalpaid, fill=hhsize), stat = "summary", fun.y = "mean")
g6 + labs(x="HH size ", y="Weekly food expenditure",title = "Food expenditure by HH size")
#predicting model using mechine learning
library(tidyverse)
library(caret)
library(randomForest)
require(e1071)
library(DataExplorer)
set.seed(1337)
# tainControl function
train_control<-trainControl(method = "cv", number=10)
# create an index to partition data
index <- createDataPartition(final.data$total.paid, p=0.75, list=FALSE)
# spliting data in to training and testing groups
trainSet <- final.data[ index,]
testSet <- final.data[-index,]
#Feature selection using rfe in caret
#control <- rfeControl(functions = rfFuncs,method = "repeatedcv",repeats = 3,verbose = FALSE)
outcomeName<-'total.paid'
control <- rfeControl(functions = rfFuncs,
method = "repeatedcv",
repeats = 3,
verbose = FALSE)
predictors<-names(trainSet)[!names(trainSet) %in% outcomeName]
spend_Pred_Profile <- rfe(trainSet[,predictors], trainSet[,outcomeName],
rfeControl = control)
spend_Pred_Profile
# Total potential predictors
#predictors<-c("hhsize", "region", "rural", "itemstot", "anyvegetarian","inchhavg_r", "liqassets", "selfemployhh", "anyvehicle", "largeexp","adltfscat", "foodsufficient", "dietstatuspr", "grocerylistfreq", "primstoresnaptype", "primstoredist_d", "nutritioneduc")
# Using several combinations of explatory variabls here I finalize following variables in the final model.
predictors<-c("hhsize", "itemstot", "inchhavg_r", "grocerylistfreq", "primstoredist_d")
names(getModelInfo())
#random forest
model_rf<-train(total.paid~hhsize+itemstot +inchhavg_r+grocerylistfreq+primstoredist_d,method="rf", data=trainSet, trControl=train_control, na.action = na.omit)
model_rf
save(model_rf, file="RandomF.rda")
model_rf<-train(trainSet[,predictors],trainSet[,outcomeName],method='rf')
save(model_rf, file="RandomForest.rda")
print(model_rf)
confusionMatrix(model_rf)
#Creating grid
#Checking variable importance for GLM
varImp(object=model_rf)
#rf variable importance
plot(model_rf)
plot(varImp(object=model_rf),main="Random forest - Variable Importance")
#Predictions
predictions_rf<-predict.train(object=model_rf,testSet[,predictors],type="raw")
table(predictions_rf)
# Confusion matrix
confusionMatrix(predictions_rf,testSet[,outcomeName])
#Using gbm method
model_gbm<-train(trainSet[,predictors],trainSet[,outcomeName],method='gbm')
print(model_gbm)
#Checking variable importance for GBM
#Variable Importance
varImp(object=model_gbm)
plot(varImp(object=model_gbm),main="GBM - Variable Importance")
#Prediction with GBM
predictions_gbm<-predict.train(object=model_gbm,testSet[,predictors],type="raw")
table(predictions_gbm)
confusionMatrix(predictions_gbm,testSet[,outcomeName])
# Now Using nnet method
model_nnet<-train(trainSet[,predictors],trainSet[,outcomeName],method='nnet')
print(model_nnet)
plot(model_nnet)
# prediction with nnet
predictions_nnet<-predict.train(object=model_nnet,testSet[,predictors],type="raw")
table(predictions_nnet)
#Confusion Matrix and Statistics
confusionMatrix(predictions_nnet,testSet[,outcomeName])
confusionMatrix(predictions_gbm,testSet[,outcomeName])
confusionMatrix(predictions_rf,testSet[,outcomeName])
table(final.data$total.paid)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{wageprc}
\alias{wageprc}
\title{wageprc}
\format{
A data frame with 286 observations.
\describe{
\item{year}{1964 to 1987}
\item{month}{1 to 12}
\item{price}{consumer price index}
\item{wage}{nominal hourly wage}
}
}
\source{
Jeffrey M. Wooldridge (2006): \emph{Introductory Econometrics: A Modern Approach},
3rd ed., Thomson South-Western.
}
\usage{
data("wageprc")
}
\description{
Wages and prices (macro).
}
\details{
Data from \emph{Economic Report of the President}, various years.
}
\section{Notes}{
These monthly data run from January 1964 through October 1987.
The consumer price index averages to 100 in 1967. An updated
set of data can be obtained electronically from
\url{https://www.govinfo.gov/app/collection/ERP/}.
}
\keyword{datasets}
| /man/wageprc.Rd | permissive | jcpernias/ec1027 | R | false | true | 857 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{wageprc}
\alias{wageprc}
\title{wageprc}
\format{
A data frame with 286 observations.
\describe{
\item{year}{1964 to 1987}
\item{month}{1 to 12}
\item{price}{consumer price index}
\item{wage}{nominal hourly wage}
}
}
\source{
Jeffrey M. Wooldrige (2006): \emph{Introductory Econometrics: A Modern Approach},
3rd ed., Thomson South-Western.
}
\usage{
data("wageprc")
}
\description{
Wages and prices (macro).
}
\details{
Data from \emph{Economic Report of the President}, various years.
}
\section{Notes}{
These monthly data run from January 1964 through October 1987.
The consumer price index averages to 100 in 1967. An updated
set of data can be obtained electronically from
\url{https://www.govinfo.gov/app/collection/ERP/}.
}
\keyword{datasets}
|
/PRUEBAS.R | no_license | abeldeandres/TFM-BigData | R | false | false | 4,880 | r | ||
## Script for generating Plot 4 of assignment 1 - Coursera exploratory data analysis
# Import data - two options offered in this script: sqldf (commented out) or read.delim
#require(sqldf)
#HHElect <- read.csv.sql("household_power_consumption.txt", sql = 'SELECT * FROM file where
#Date IN ("1/2/2007","2/2/2007")', sep=";")
#HHElect[HHElect == "?"] <- NA
HHElect <- read.delim("household_power_consumption.txt", na.strings="?", sep=";", header=T)
# Subset to 2007-02-01 and 2007-02-02. Parse the Date column ONCE instead of
# re-parsing it with as.Date for each comparison (the original parsed twice).
obs_dates <- as.Date(HHElect$Date, "%d/%m/%Y")
HHElect <- HHElect[obs_dates %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# Combined timestamp used as the x-axis of all four panels.
HHElect$Date2 <- strptime(paste(HHElect$Date, HHElect$Time), "%d/%m/%Y %H:%M:%S")
# Plot 4 to png file: 2x2 panel of active power, voltage, sub-metering,
# and reactive power over the two days.
png(filename="Plot4.png", width=480, height=480)
par(mfrow=c(2,2))
plot(HHElect$Date2, HHElect$Global_active_power, type="l", xlab="",
     ylab="Global Active Power")
plot(HHElect$Date2, HHElect$Voltage, type="l", xlab="datetime",
     ylab="Voltage")
plot(HHElect$Date2, HHElect$Sub_metering_1, type="l", xlab=" ", ylab="Energy sub metering")
lines(HHElect$Date2, y=HHElect$Sub_metering_2, ylim=c(0,40), col="red")
lines(HHElect$Date2, y=HHElect$Sub_metering_3, ylim=c(0,40), col="blue")
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=c(1,1,1),
       col=c("black", "red", "blue"), cex=0.95, bty="n")
plot(HHElect$Date2, HHElect$Global_reactive_power, type="l", xlab="datetime",
     ylab="Global_reactive_power")
dev.off()
| /SubmitScripts/Plot4.R | no_license | SFrav/ExData_Plotting1 | R | false | false | 1,469 | r | ## Script for generating Plot 4 of assignment 1 - Coursera exploratory data analysis
#Import data - two options offered in this script: sqldf or read.delim
#require(sqldf)
#HHElect <- read.csv.sql("household_power_consumption.txt", sql = 'SELECT * FROM file where
#Date IN ("1/2/2007","2/2/2007")', sep=";")
#HHElect[HHElect == "?"] <- NA
HHElect <- read.delim("household_power_consumption.txt", na.strings="?", sep=";", header=T)
#Subset to: 2007-02-01 and 2007-02-02
HHElect <- subset(HHElect, as.Date(HHElect$Date, "%d/%m/%Y")=="2007-02-01"|
as.Date(HHElect$Date, "%d/%m/%Y")=="2007-02-02")
HHElect$Date2 <- strptime(paste(HHElect$Date, HHElect$Time), "%d/%m/%Y %H:%M:%S")
#Plot 4 to png file
png(filename="Plot4.png", width=480,height=480)
par(mfrow=c(2,2))
plot(HHElect$Date2, HHElect$Global_active_power, type="l", xlab="",
ylab="Global Active Power")
plot(HHElect$Date2, HHElect$Voltage, type="l", xlab="datetime",
ylab="Voltage")
plot(HHElect$Date2,HHElect$Sub_metering_1,type="l",xlab=" ",ylab="Energy sub metering")
lines(HHElect$Date2,y=HHElect$Sub_metering_2,ylim=c(0,40),col="red")
lines(HHElect$Date2,y=HHElect$Sub_metering_3,ylim=c(0,40),col="blue")
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1),
col=c("black", "red", "blue"), cex=0.95, bty="n")
plot(HHElect$Date2, HHElect$Global_reactive_power, type="l", xlab="datetime",
ylab="Global_reactive_power")
dev.off()
|
context("regr_blackboost")
test_that("regr_blackboost", {
  requirePackagesOrSkip(c("mboost","party"), default.method = "load")
  # Parameter sets for the reference fit done directly with mboost::blackboost.
  parset.list1 = list(
    list(family=mboost::GaussReg(), tree_controls=party::ctree_control(maxdepth=2)),
    list(family=mboost::GaussReg(), tree_controls=party::ctree_control(maxdepth=4), control=mboost::boost_control(nu=0.03))
  )
  # The same parameter sets expressed in mlr's flattened learner interface.
  parset.list2 = list(
    list(family=mboost::Gaussian(), maxdepth=2),
    list(family=mboost::Gaussian(), maxdepth=4, nu=0.03)
  )
  old.predicts.list = list()
  # seq_along() instead of 1:length() -- equivalent here, robust in general.
  for (i in seq_along(parset.list1)) {
    parset = parset.list1[[i]]
    pars = list(regr.formula, data=regr.train)
    pars = c(pars, parset)
    # Re-seed before fitting AND before predicting so the reference
    # predictions match what mlr produces under the same debug seed.
    set.seed(getOption("mlr.debug.seed"))
    m = do.call(mboost::blackboost, pars)
    set.seed(getOption("mlr.debug.seed"))
    old.predicts.list[[i]] = predict(m, newdata=regr.test)[,1]
  }
  testSimpleParsets("regr.blackboost", regr.df, regr.target, regr.train.inds, old.predicts.list, parset.list2)
})
| /tests/testthat/test_regr_blackboost.R | no_license | jimhester/mlr | R | false | false | 982 | r | context("regr_blackboost")
test_that("regr_blackboost", {
requirePackagesOrSkip(c("mboost","party"), default.method = "load")
parset.list1 = list(
list(family=mboost::GaussReg(), tree_controls=party::ctree_control(maxdepth=2)),
list(family=mboost::GaussReg(), tree_controls=party::ctree_control(maxdepth=4), control=mboost::boost_control(nu=0.03))
)
parset.list2 = list(
list(family=mboost::Gaussian(), maxdepth=2),
list(family=mboost::Gaussian(), maxdepth=4, nu=0.03)
)
old.predicts.list = list()
for (i in 1:length(parset.list1)) {
parset = parset.list1[[i]]
pars = list(regr.formula, data=regr.train)
pars = c(pars, parset)
set.seed(getOption("mlr.debug.seed"))
m = do.call(mboost::blackboost, pars)
set.seed(getOption("mlr.debug.seed"))
old.predicts.list[[i]] = predict(m, newdata=regr.test)[,1]
}
testSimpleParsets("regr.blackboost", regr.df, regr.target, regr.train.inds, old.predicts.list, parset.list2)
})
|
Bom Dia
meu arquivo
| /AdrianoW.R | no_license | AdrianoW/dsrtest | R | false | false | 21 | r | Bom Dia
meu arquivo
|
# Install-if-missing pattern: require() returns FALSE (with a warning) when
# plyr is absent, triggering installation on first use.
if (!require("plyr")) {
  install.packages("plyr", dependencies = TRUE)
  library(plyr)
}
dirname <- '.'
setwd(dirname) # Set the directory where the clinical data is located for each cancer in separate folder
# One clinical file per cancer type, each in its own sub-directory.
filenames <- system("ls */nationwidechildrens.org_clinical_patient*", intern = TRUE)
## Read every clinical file and stack them, keeping the union of ALL clinical
## variables (columns) across cancer types.
for (i in seq_along(filenames)) {
  # BUG FIX: the original paste(c(dirname, filenames[i]), collapse='') glued
  # "." directly onto the relative path (".XXXX/file"), which does not
  # resolve; file.path() inserts the separator.
  f <- read.delim(file.path(dirname, filenames[i]), header = TRUE)
  rownames(f) <- f$bcr_patient_barcode
  # Drop the first two rows -- presumably repeated header/CDE rows in the
  # TCGA clinical files; TODO confirm against a sample file.
  f <- f[3:length(f$bcr_patient_barcode), ]
  if (i == 1) {
    data <- f
  } else {
    # rbind.fill.matrix() pads columns missing from one side with NA so files
    # with different clinical variables can be stacked. (Grows the matrix on
    # each iteration -- quadratic, but fine at this scale.)
    data <- rbind.fill.matrix(list(data, f))
  }
}
# First column of the stacked matrix is the patient barcode; use it as rownames.
rownames(data)<-data[,1]
#Now, converting short TCGA ids reported in clinical data to long TCGA ids reported in RNA-Seq dataset using R codes
pancan20_featureCounts_5<-as.matrix(read.table("Datasets/PANCAN20.IlluminaHiSeq_RNASeqV2.tumor_Rsubread_FeatureCounts.txt", header=1, row.names=1, nrows=5,sep='\t', check.names = F)) #getting the long TCGA IDs used in RNA-Seq dataset (only 5 rows read; just the column names are needed)
sample_names<-colnames(pancan20_featureCounts_5)
partial_sample_names<-rownames(data)
counter=0##counts how many replacements have been done
# pmatch() does unique-prefix matching: each short clinical barcode is
# replaced by the long RNA-Seq ID it uniquely prefixes, when one exists.
for (j in 1:length(partial_sample_names)){
if(!is.na(pmatch(partial_sample_names[j],sample_names))){
partial_sample_names[j]<-sample_names[pmatch(partial_sample_names[j],sample_names, duplicates.ok=F)]
counter=counter+1
}
}
#counter##clinical variables available for 6820 of the 7706 tumor samples
rownames(data)<-partial_sample_names
# NOTE(review): dimensions are hard-coded (7706 samples x 420 clinical
# variables) -- must match the actual inputs; nrow(data)/ncol(data) would be
# safer. Kept as-is to preserve behavior.
clinical_data<-matrix(NA, nrow=7706,ncol=420) ###instantiating an NA matrix
rownames(clinical_data)<-colnames(pancan20_featureCounts_5)
colnames(clinical_data)<-colnames(data)
# Copy clinical rows into the RNA-Seq-ordered matrix; samples with no
# clinical record stay NA.
for(i in 1:length(rownames(clinical_data))){
sample_id<-rownames(clinical_data)[i]
if(sample_id%in%rownames(data)){
clinical_data[sample_id,]<-data[sample_id,]
}
}
##Writing the clinical data file (transposed: clinical variables as rows, samples as columns)
write.table(t(clinical_data),file="TCGA20_clinical_data_ordered_all_clinical_variables_samples_as_columns.txt", sep='\t',col.names=NA, quote=F)
| /Codes/ProcessClinicalData.R | permissive | mumtahena/TCGA_RNASeq_Clinical | R | false | false | 2,201 | r |
if (!require("plyr")) {
install.packages("plyr", dependencies = TRUE)
library(plyr)
}
dirname='.'
setwd(dirname)#Set the directory where the clinical data is located for each cancer in separate folder
filenames<-system("ls */nationwidechildrens.org_clinical_patient*", intern=T)
##Identifying only unique column names for all the tumor samples available
for(i in 1:length(filenames)){#####iterating through each of the clinical files to create new matrix files with ALL clinical variables
f<-read.delim(paste(c(dirname,filenames[i]), collapse=''), header=1) ###reading in the filess one at a time
rownames(f)<-f$bcr_patient_barcode
f<-f[3:length(f$bcr_patient_barcode),]
if(i==1){
data<-f
}else{
data<-list(data,f)
data<-rbind.fill.matrix(data)
}
}
rownames(data)<-data[,1]
#Now, converting short TCGA ids reported in clinical data to long TCGA ids reported in RNA-Seq dataset using R codes
pancan20_featureCounts_5<-as.matrix(read.table("Datasets/PANCAN20.IlluminaHiSeq_RNASeqV2.tumor_Rsubread_FeatureCounts.txt", header=1, row.names=1, nrows=5,sep='\t', check.names = F)) #getting the long TCGA IDs used in RNA-Seq dataset
sample_names<-colnames(pancan20_featureCounts_5)
partial_sample_names<-rownames(data)
counter=0##to check how many replacement has been done
for (j in 1:length(partial_sample_names)){
if(!is.na(pmatch(partial_sample_names[j],sample_names))){
partial_sample_names[j]<-sample_names[pmatch(partial_sample_names[j],sample_names, duplicates.ok=F)]
counter=counter+1
}
}
#counter##clinical variables available for 6820 of the 7706 tumor samples
rownames(data)<-partial_sample_names
clinical_data<-matrix(NA, nrow=7706,ncol=420) ###instantiating an NA matrix
rownames(clinical_data)<-colnames(pancan20_featureCounts_5)
colnames(clinical_data)<-colnames(data)
for(i in 1:length(rownames(clinical_data))){
sample_id<-rownames(clinical_data)[i]
if(sample_id%in%rownames(data)){
clinical_data[sample_id,]<-data[sample_id,]
}
}
##Writing the clinical data file
write.table(t(clinical_data),file="TCGA20_clinical_data_ordered_all_clinical_variables_samples_as_columns.txt", sep='\t',col.names=NA, quote=F)
|
library(dplyr)
library(reshape2)
# First of all, the names of the measurements are assigned to a variable called
# "features". Those will be the variable names of the data set.
features <- readLines ("features.txt")
# Read the files with the subject's ID numbers and the group they belong
# to - training group in this case. Also, load the activities performed
# during the training.
Subjects_ID <- readLines ("subject_train.txt")
Subjects_ID <- as.numeric(Subjects_ID)
Group <- rep("Training", length(Subjects_ID))
Activity <- readLines ("Y_train.txt")
Activity <- as.numeric(Activity)
# Open the training data set and specify the column names.
trainingset <- read.table("X_train.txt", col.names = features)
# This adds to the trainingset data frame the columns indicating the activity
# performed, the ID number of each subjects and the experimental group.
trainingset <- cbind(Activity, trainingset)
trainingset <- cbind(Subjects_ID, trainingset)
trainingset <- cbind(Group, trainingset)
# By using order(), the columns "Subjects_ID" and "Activity" are rearranged.
trainingset <- trainingset[order(trainingset$Subjects_ID, trainingset$Activity, decreasing = FALSE),]
# This does pretty much the same what's been done with the training group before.
Subjects_ID <- readLines ("subject_test.txt")
Subjects_ID <- as.numeric(Subjects_ID)
Group <- rep ("Test", length(Subjects_ID))
Activity <- readLines ("Y_test.txt")
Activity <- as.numeric(Activity)
testset <- read.table ("X_test.txt", col.names = features)
testset <- cbind(Activity, testset)
testset <- cbind(Subjects_ID, testset)
testset <- cbind(Group, testset)
testset <- testset[order(testset$Subjects_ID, testset$Activity, decreasing = FALSE), ]
# The trainig data and the test data are merged.
DataSet <- rbind (trainingset, testset)
# We are asked to subset the columns corresponding to the mean and the standard
# deviation of the measurements.
DataSet_mn_std <- select(DataSet, 1:3, matches ("mean"), -matches ("meanFreq"), -matches ("angle"), matches ("std"))
# This renames the columns so the variable names are more intuitive.
DataSet_mn_std <- rename(DataSet_mn_std, mn_Body_acceleration_X = X1.tBodyAcc.mean...X, mn_Body_acceleration_Y = X2.tBodyAcc.mean...Y, mn_Body_acceleration_Z = X3.tBodyAcc.mean...Z, mn_Gravity_acceleration_X = X41.tGravityAcc.mean...X, mn_Gravity_acceleration_Y = X42.tGravityAcc.mean...Y, mn_Gravity_acceleration_Z = X43.tGravityAcc.mean...Z, mn_Jerk_body_acceleration_X = X81.tBodyAccJerk.mean...X, mn_Jerk_body_acceleration_Y = X82.tBodyAccJerk.mean...Y, mn_Jerk_body_acceleration_Z = X83.tBodyAccJerk.mean...Z, mn_Body_gyroscope_X = X121.tBodyGyro.mean...X, mn_Body_gyroscope_Y = X122.tBodyGyro.mean...Y, mn_Body_gyroscope_Z = X123.tBodyGyro.mean...Z, mn_Jerk_body_gyroscope_X = X161.tBodyGyroJerk.mean...X, mn_Jerk_body_gyroscope_Y = X162.tBodyGyroJerk.mean...Y, mn_Jerk_body_gyroscope_Z = X163.tBodyGyroJerk.mean...Z, mn_magnitude_body_acceleration = X201.tBodyAccMag.mean.., mn_magnitude_gravity_acceleration = X214.tGravityAccMag.mean.., mn_Jerk_magnitude_body_acceleration = X227.tBodyAccJerkMag.mean.., mn_magnitude_body_gyroscope = X240.tBodyGyroMag.mean.., mn_Jerk_magnitude_body_gyroscope = X253.tBodyGyroJerkMag.mean.., mn_Fourier_body_acceleration_X = X266.fBodyAcc.mean...X, mn_Fourier_body_acceleration_Y = X267.fBodyAcc.mean...Y, mn_Fourier_body_acceleration_Z = X268.fBodyAcc.mean...Z, mn_Fourier_Jerk_body_acceleration_X = X345.fBodyAccJerk.mean...X, mn_Fourier_Jerk_body_acceleration_Y = X346.fBodyAccJerk.mean...Y, mn_Fourier_Jerk_body_acceleration_Z = X347.fBodyAccJerk.mean...Z, mn_Fourier_body_gyroscope_X = X424.fBodyGyro.mean...X, mn_Fourier_body_gyroscope_Y = X425.fBodyGyro.mean...Y, mn_Fourier_body_gyroscope_Z = X426.fBodyGyro.mean...Z, mn_Fourier_magnitude_body_acceleration = X503.fBodyAccMag.mean.., mn_Fourier_Jerk_magnitude_body_acceleration = X516.fBodyBodyAccJerkMag.mean.., mn_Fourier_magnitude_body_gyroscope = X529.fBodyBodyGyroMag.mean.., mn_Fourier_Jerk_magnitude_body_gyroscope = X542.fBodyBodyGyroJerkMag.mean.., std_body_acceleration_X = 
X4.tBodyAcc.std...X, std_body_acceleration_Y = X5.tBodyAcc.std...Y, std_body_acceleration_Z = X6.tBodyAcc.std...Z, std_gravity_acceleration_X = X44.tGravityAcc.std...X, std_gravity_acceleration_Y = X45.tGravityAcc.std...Y, std_gravity_acceleration_Z = X46.tGravityAcc.std...Z, std_Jerk_body_acceleration_X = X84.tBodyAccJerk.std...X, std_Jerk_body_acceleration_Y = X85.tBodyAccJerk.std...Y, std_Jerk_body_acceleration_Z = X86.tBodyAccJerk.std...Z, std_body_gyroscope_X = X124.tBodyGyro.std...X, std_body_gyroscope_Y = X125.tBodyGyro.std...Y, std_body_gyroscope_Z = X126.tBodyGyro.std...Z, std_Jerk_body_gyroscope_X = X164.tBodyGyroJerk.std...X, std_Jerk_body_gyroscope_Y = X165.tBodyGyroJerk.std...Y, std_Jerk_body_gyroscope_Z = X166.tBodyGyroJerk.std...Z, std_magnitude_body_acceleration = X202.tBodyAccMag.std.., std_magnitude_gravity_acceleration = X215.tGravityAccMag.std.., std_Jerk_magnitude_body_acceleration = X228.tBodyAccJerkMag.std.., std_magnitude_body_gyroscope = X241.tBodyGyroMag.std.., std_Jerk_magnitude_body_gyroscope = X254.tBodyGyroJerkMag.std.., std_Fourier_body_acceleration_X = X269.fBodyAcc.std...X, std_Fourier_body_acceleration_Y = X270.fBodyAcc.std...Y, std_Fourier_body_acceleration_Z = X271.fBodyAcc.std...Z, std_Fourier_Jerk_body_acceleration_X = X348.fBodyAccJerk.std...X, std_Fourier_Jerk_body_acceleration_Y = X349.fBodyAccJerk.std...Y, std_Fourier_Jerk_body_acceleration_Z = X350.fBodyAccJerk.std...Z, std_Fourier_body_gyroscope_X = X427.fBodyGyro.std...X, std_Fourier_body_gyroscope_Y = X428.fBodyGyro.std...Y, std_Fourier_body_gyroscope_Z = X429.fBodyGyro.std...Z, std_Fourier_magnitude_body_acceleration = X504.fBodyAccMag.std.., std_Fourier_Jerk_magnitude_body_body_acceleration = X517.fBodyBodyAccJerkMag.std.., std_Fourier_magnitude_body_body_gyroscope = X530.fBodyBodyGyroMag.std.., std_Fourier_Jerk_magnitude_body_body_gyroscope = X543.fBodyBodyGyroJerkMag.std..)
# Finally, the data table is grouped by ID and activity, and the mean of those
# columns is calculated.
IndDataSet <- aggregate(. ~Subjects_ID + Activity, DataSet_mn_std, mean)
IndDataSet <- IndDataSet[order(IndDataSet$Subjects_ID, IndDataSet$Activity),]
# Tidy data is saved in a txt file.
write.table(IndDataSet, file = "TidyData.txt")
| /run_analysis.R | no_license | BarLobato/Getting-Cleaning-Data-Course-Project | R | false | false | 6,349 | r |
library(dplyr)
library(reshape2)
# First of all, the names of the measurements are assigned to a variable called
# "features". Those will be the variable names of the data set.
features <- readLines ("features.txt")
# Read the files with the subject's ID numbers and the group they belong
# to - training group in this case. Also, load the activities performed
# during the training.
Subjects_ID <- readLines ("subject_train.txt")
Subjects_ID <- as.numeric(Subjects_ID)
Group <- rep("Training", length(Subjects_ID))
Activity <- readLines ("Y_train.txt")
Activity <- as.numeric(Activity)
# Open the training data set and specify the column names.
trainingset <- read.table("X_train.txt", col.names = features)
# This adds to the trainingset data frame the columns indicating the activity
# performed, the ID number of each subjects and the experimental group.
trainingset <- cbind(Activity, trainingset)
trainingset <- cbind(Subjects_ID, trainingset)
trainingset <- cbind(Group, trainingset)
# By using order(), the columns "Subjects_ID" and "Activity" are rearranged.
trainingset <- trainingset[order(trainingset$Subjects_ID, trainingset$Activity, decreasing = FALSE),]
# This does pretty much the same what's been done with the training group before.
Subjects_ID <- readLines ("subject_test.txt")
Subjects_ID <- as.numeric(Subjects_ID)
Group <- rep ("Test", length(Subjects_ID))
Activity <- readLines ("Y_test.txt")
Activity <- as.numeric(Activity)
testset <- read.table ("X_test.txt", col.names = features)
testset <- cbind(Activity, testset)
testset <- cbind(Subjects_ID, testset)
testset <- cbind(Group, testset)
testset <- testset[order(testset$Subjects_ID, testset$Activity, decreasing = FALSE), ]
# The trainig data and the test data are merged.
DataSet <- rbind (trainingset, testset)
# We are asked to subset the columns corresponding to the mean and the standard
# deviation of the measurements.
DataSet_mn_std <- select(DataSet, 1:3, matches ("mean"), -matches ("meanFreq"), -matches ("angle"), matches ("std"))
# This renames the columns so the variable names are more intuitive.
DataSet_mn_std <- rename(DataSet_mn_std, mn_Body_acceleration_X = X1.tBodyAcc.mean...X, mn_Body_acceleration_Y = X2.tBodyAcc.mean...Y, mn_Body_acceleration_Z = X3.tBodyAcc.mean...Z, mn_Gravity_acceleration_X = X41.tGravityAcc.mean...X, mn_Gravity_acceleration_Y = X42.tGravityAcc.mean...Y, mn_Gravity_acceleration_Z = X43.tGravityAcc.mean...Z, mn_Jerk_body_acceleration_X = X81.tBodyAccJerk.mean...X, mn_Jerk_body_acceleration_Y = X82.tBodyAccJerk.mean...Y, mn_Jerk_body_acceleration_Z = X83.tBodyAccJerk.mean...Z, mn_Body_gyroscope_X = X121.tBodyGyro.mean...X, mn_Body_gyroscope_Y = X122.tBodyGyro.mean...Y, mn_Body_gyroscope_Z = X123.tBodyGyro.mean...Z, mn_Jerk_body_gyroscope_X = X161.tBodyGyroJerk.mean...X, mn_Jerk_body_gyroscope_Y = X162.tBodyGyroJerk.mean...Y, mn_Jerk_body_gyroscope_Z = X163.tBodyGyroJerk.mean...Z, mn_magnitude_body_acceleration = X201.tBodyAccMag.mean.., mn_magnitude_gravity_acceleration = X214.tGravityAccMag.mean.., mn_Jerk_magnitude_body_acceleration = X227.tBodyAccJerkMag.mean.., mn_magnitude_body_gyroscope = X240.tBodyGyroMag.mean.., mn_Jerk_magnitude_body_gyroscope = X253.tBodyGyroJerkMag.mean.., mn_Fourier_body_acceleration_X = X266.fBodyAcc.mean...X, mn_Fourier_body_acceleration_Y = X267.fBodyAcc.mean...Y, mn_Fourier_body_acceleration_Z = X268.fBodyAcc.mean...Z, mn_Fourier_Jerk_body_acceleration_X = X345.fBodyAccJerk.mean...X, mn_Fourier_Jerk_body_acceleration_Y = X346.fBodyAccJerk.mean...Y, mn_Fourier_Jerk_body_acceleration_Z = X347.fBodyAccJerk.mean...Z, mn_Fourier_body_gyroscope_X = X424.fBodyGyro.mean...X, mn_Fourier_body_gyroscope_Y = X425.fBodyGyro.mean...Y, mn_Fourier_body_gyroscope_Z = X426.fBodyGyro.mean...Z, mn_Fourier_magnitude_body_acceleration = X503.fBodyAccMag.mean.., mn_Fourier_Jerk_magnitude_body_acceleration = X516.fBodyBodyAccJerkMag.mean.., mn_Fourier_magnitude_body_gyroscope = X529.fBodyBodyGyroMag.mean.., mn_Fourier_Jerk_magnitude_body_gyroscope = X542.fBodyBodyGyroJerkMag.mean.., std_body_acceleration_X = 
X4.tBodyAcc.std...X, std_body_acceleration_Y = X5.tBodyAcc.std...Y, std_body_acceleration_Z = X6.tBodyAcc.std...Z, std_gravity_acceleration_X = X44.tGravityAcc.std...X, std_gravity_acceleration_Y = X45.tGravityAcc.std...Y, std_gravity_acceleration_Z = X46.tGravityAcc.std...Z, std_Jerk_body_acceleration_X = X84.tBodyAccJerk.std...X, std_Jerk_body_acceleration_Y = X85.tBodyAccJerk.std...Y, std_Jerk_body_acceleration_Z = X86.tBodyAccJerk.std...Z, std_body_gyroscope_X = X124.tBodyGyro.std...X, std_body_gyroscope_Y = X125.tBodyGyro.std...Y, std_body_gyroscope_Z = X126.tBodyGyro.std...Z, std_Jerk_body_gyroscope_X = X164.tBodyGyroJerk.std...X, std_Jerk_body_gyroscope_Y = X165.tBodyGyroJerk.std...Y, std_Jerk_body_gyroscope_Z = X166.tBodyGyroJerk.std...Z, std_magnitude_body_acceleration = X202.tBodyAccMag.std.., std_magnitude_gravity_acceleration = X215.tGravityAccMag.std.., std_Jerk_magnitude_body_acceleration = X228.tBodyAccJerkMag.std.., std_magnitude_body_gyroscope = X241.tBodyGyroMag.std.., std_Jerk_magnitude_body_gyroscope = X254.tBodyGyroJerkMag.std.., std_Fourier_body_acceleration_X = X269.fBodyAcc.std...X, std_Fourier_body_acceleration_Y = X270.fBodyAcc.std...Y, std_Fourier_body_acceleration_Z = X271.fBodyAcc.std...Z, std_Fourier_Jerk_body_acceleration_X = X348.fBodyAccJerk.std...X, std_Fourier_Jerk_body_acceleration_Y = X349.fBodyAccJerk.std...Y, std_Fourier_Jerk_body_acceleration_Z = X350.fBodyAccJerk.std...Z, std_Fourier_body_gyroscope_X = X427.fBodyGyro.std...X, std_Fourier_body_gyroscope_Y = X428.fBodyGyro.std...Y, std_Fourier_body_gyroscope_Z = X429.fBodyGyro.std...Z, std_Fourier_magnitude_body_acceleration = X504.fBodyAccMag.std.., std_Fourier_Jerk_magnitude_body_body_acceleration = X517.fBodyBodyAccJerkMag.std.., std_Fourier_magnitude_body_body_gyroscope = X530.fBodyBodyGyroMag.std.., std_Fourier_Jerk_magnitude_body_body_gyroscope = X543.fBodyBodyGyroJerkMag.std..)
# Finally, the data table is grouped by ID and activity, and the mean of those
# columns is calculated.
IndDataSet <- aggregate(. ~Subjects_ID + Activity, DataSet_mn_std, mean)
IndDataSet <- IndDataSet[order(IndDataSet$Subjects_ID, IndDataSet$Activity),]
# Tidy data is saved in a txt file.
write.table(IndDataSet, file = "TidyData.txt")
|
con <- file("https://github.com/junyitt/ds10_capstone/raw/master/nlist1.Rdata")
load(con, envir = .GlobalEnv)
close(con)
testnlist <- prep_nlist(testsdf, k = 3)
words.v <- as.data.frame(testnlist[[3]])[,"pre"]
set.seed(124)
sample1 <- sample(1:length(words.v), size = 1000)
testpredict <- lapply(words.v[sample1], FUN = function(word){
pred_df <- p2(word, nlist1, k = 3)
# return(pred_df)
predict <- as.data.frame(pred_df[1:3,"predict"])
df1 <- t(predict)
colnames(df1) <- c("pred1", "pred2", "pred3")
df1
})
testpredict <- do.call(rbind,testpredict)
realtest <- as.data.frame(testnlist[[3]])[sample1,"predict"]
correct1 <- testpredict[,1] == realtest
correct2 <- testpredict[,2] == realtest
correct3 <- testpredict[,3] == realtest
sum(as.logical(correct1+correct2+correct3), na.rm = T)
ss <- sum(correct1)
percentage <- correct/1000
| /4_predictiveperformance.R | no_license | junyitt/ds10_capstone | R | false | false | 899 | r | con <- file("https://github.com/junyitt/ds10_capstone/raw/master/nlist1.Rdata")
load(con, envir = .GlobalEnv)
close(con)
testnlist <- prep_nlist(testsdf, k = 3)
words.v <- as.data.frame(testnlist[[3]])[,"pre"]
set.seed(124)
sample1 <- sample(1:length(words.v), size = 1000)
testpredict <- lapply(words.v[sample1], FUN = function(word){
pred_df <- p2(word, nlist1, k = 3)
# return(pred_df)
predict <- as.data.frame(pred_df[1:3,"predict"])
df1 <- t(predict)
colnames(df1) <- c("pred1", "pred2", "pred3")
df1
})
testpredict <- do.call(rbind,testpredict)
realtest <- as.data.frame(testnlist[[3]])[sample1,"predict"]
correct1 <- testpredict[,1] == realtest
correct2 <- testpredict[,2] == realtest
correct3 <- testpredict[,3] == realtest
sum(as.logical(correct1+correct2+correct3), na.rm = T)
ss <- sum(correct1)
percentage <- correct/1000
|
library(XML)
library(digest)
library(tibble)
library(httr)
library(rvest)
library(stringr)
library(dplyr)
library(tidyverse)
#####
#링크
link = '//*[@id="content"]/section[2]/div/div[1]/section/section/div'
url = "https://www.reuters.com/news/archive/businessnews?view=page&page=1&pageSize=10" %>%
read_html() %>%
html_nodes(xpath = link) %>%
html_nodes("a") %>%
html_attr("href") %>%
unique() # 중복된 링크 제거
ru = paste0('https://www.reuters.com',url[2]) %>% read_html() %>% html_nodes(".StandardArticleBody_body p") %>% html_text(trim = TRUE)
## 각 링크 찾아가기
news = tibble()
for(n in 1:10){
exlink = paste0('https://www.reuters.com',url[n]) %>% read_html()
news.title = exlink %>% html_nodes(".ArticleHeader_headline") %>% html_text(trim = TRUE)
news.body = exlink %>% html_nodes(".StandardArticleBody_body p") %>% html_text(trim = TRUE) %>% paste0(collapse = "")
news.date = exlink %>% html_nodes(".ArticleHeader_date") %>% html_text(trim = TRUE) %>% str_trim() %>% str_split_fixed("/",3)
new = tibble(news.date[1] , news.title,news.body)
news = bind_rows(news,new)
}
#####
#for
SC.reuter = function(topic,pages){
news = tibble()
for(i in 1:pages){
link = '//*[@id="content"]/section[2]/div/div[1]/section/section/div'
url = paste0("https://www.reuters.com/news/archive/",topic,"?view=page&page=",i,"&pageSize=10") %>%
read_html() %>%
html_nodes(xpath = link) %>%
html_nodes("a") %>%
html_attr("href") %>%
unique() # 중복된 링크 제거
for(n in 1:10){
exlink = paste0('https://www.reuters.com',url[n]) %>% read_html()
news.title = exlink %>% html_nodes(".ArticleHeader_headline") %>% html_text(trim = TRUE)
news.body = exlink %>% html_nodes(".StandardArticleBody_body p") %>% html_text(trim = TRUE) %>% paste0(collapse = "")
news.date = exlink %>% html_nodes(".ArticleHeader_date") %>% html_text(trim = TRUE) %>% str_trim() %>% str_split_fixed("/",3)
new = tibble("source" = "Reueter","Catergory" = topic, "date" = news.date[1],"time" = news.date[2] , news.title,news.body)
news = bind_rows(news,new)
}
print(c(i,"/",pages))
}
return(news)
}
# 목록에서 선택형
SC.reuter.t = function(){
reuter = c('businessNews','companyNews','wealth','topNews','domesticNews','worldNews')
x = as.numeric(readline("--카테고리를 선택하라 -- \n 1 : businessNews \n 2 : companyNews \n 3 : wealth \n 4 : topNews \n 5 : domesticNews \n 6 : worldNews \n :"))
pages = as.numeric(readline("가져올 페이지 수는 ? \n :"))
topic = reuter[x]
news = tibble()
for(i in 1:pages){
link = '//*[@id="content"]/section[2]/div/div[1]/section/section/div'
url = paste0("https://www.reuters.com/news/archive/",topic,"?view=page&page=",i,"&pageSize=10") %>%
read_html() %>%
html_nodes(xpath = link) %>%
html_nodes("a") %>%
html_attr("href") %>%
unique() # 중복된 링크 제거
for(n in 1:10){
exlink = paste0('https://www.reuters.com',url[n]) %>% read_html()
news.title = exlink %>% html_nodes(".ArticleHeader_headline") %>% html_text(trim = TRUE)
news.body = exlink %>% html_nodes(".StandardArticleBody_body p") %>% html_text(trim = TRUE) %>% paste0(collapse = "")
news.date = exlink %>% html_nodes(".ArticleHeader_date") %>% html_text(trim = TRUE) %>% str_trim() %>% str_split_fixed("/",3)
new = tibble("source" = "Reueter","Catergory" = topic, "date" = news.date[1],"time" = news.date[2] , news.title,news.body)
news = bind_rows(news,new)
}
print(c(i,"/",pages))
}
return(news)
}
setwd(choose.dir())
##
reuter = c('businessNews','companyNews','wealth','topNews','domesticNews','worldNews')
SC.reuter.save = function(topic,pages){
df = SC.reuter(topic,pages)
name = paste0("Reueter",Sys.time(),topic,pages,"csv")
write.csv(df,file = name)
print("완료!")
}
SC.reuter.save('businessNews',5000)
| /R/reueter.R | no_license | suime/Crawling | R | false | false | 4,165 | r | library(XML)
library(digest)
library(tibble)
library(httr)
library(rvest)
library(stringr)
library(dplyr)
library(tidyverse)
#####
#링크
link = '//*[@id="content"]/section[2]/div/div[1]/section/section/div'
url = "https://www.reuters.com/news/archive/businessnews?view=page&page=1&pageSize=10" %>%
read_html() %>%
html_nodes(xpath = link) %>%
html_nodes("a") %>%
html_attr("href") %>%
unique() # 중복된 링크 제거
ru = paste0('https://www.reuters.com',url[2]) %>% read_html() %>% html_nodes(".StandardArticleBody_body p") %>% html_text(trim = TRUE)
## 각 링크 찾아가기
news = tibble()
for(n in 1:10){
exlink = paste0('https://www.reuters.com',url[n]) %>% read_html()
news.title = exlink %>% html_nodes(".ArticleHeader_headline") %>% html_text(trim = TRUE)
news.body = exlink %>% html_nodes(".StandardArticleBody_body p") %>% html_text(trim = TRUE) %>% paste0(collapse = "")
news.date = exlink %>% html_nodes(".ArticleHeader_date") %>% html_text(trim = TRUE) %>% str_trim() %>% str_split_fixed("/",3)
new = tibble(news.date[1] , news.title,news.body)
news = bind_rows(news,new)
}
#####
#for
SC.reuter = function(topic,pages){
news = tibble()
for(i in 1:pages){
link = '//*[@id="content"]/section[2]/div/div[1]/section/section/div'
url = paste0("https://www.reuters.com/news/archive/",topic,"?view=page&page=",i,"&pageSize=10") %>%
read_html() %>%
html_nodes(xpath = link) %>%
html_nodes("a") %>%
html_attr("href") %>%
unique() # 중복된 링크 제거
for(n in 1:10){
exlink = paste0('https://www.reuters.com',url[n]) %>% read_html()
news.title = exlink %>% html_nodes(".ArticleHeader_headline") %>% html_text(trim = TRUE)
news.body = exlink %>% html_nodes(".StandardArticleBody_body p") %>% html_text(trim = TRUE) %>% paste0(collapse = "")
news.date = exlink %>% html_nodes(".ArticleHeader_date") %>% html_text(trim = TRUE) %>% str_trim() %>% str_split_fixed("/",3)
new = tibble("source" = "Reueter","Catergory" = topic, "date" = news.date[1],"time" = news.date[2] , news.title,news.body)
news = bind_rows(news,new)
}
print(c(i,"/",pages))
}
return(news)
}
# 목록에서 선택형
SC.reuter.t = function(){
reuter = c('businessNews','companyNews','wealth','topNews','domesticNews','worldNews')
x = as.numeric(readline("--카테고리를 선택하라 -- \n 1 : businessNews \n 2 : companyNews \n 3 : wealth \n 4 : topNews \n 5 : domesticNews \n 6 : worldNews \n :"))
pages = as.numeric(readline("가져올 페이지 수는 ? \n :"))
topic = reuter[x]
news = tibble()
for(i in 1:pages){
link = '//*[@id="content"]/section[2]/div/div[1]/section/section/div'
url = paste0("https://www.reuters.com/news/archive/",topic,"?view=page&page=",i,"&pageSize=10") %>%
read_html() %>%
html_nodes(xpath = link) %>%
html_nodes("a") %>%
html_attr("href") %>%
unique() # 중복된 링크 제거
for(n in 1:10){
exlink = paste0('https://www.reuters.com',url[n]) %>% read_html()
news.title = exlink %>% html_nodes(".ArticleHeader_headline") %>% html_text(trim = TRUE)
news.body = exlink %>% html_nodes(".StandardArticleBody_body p") %>% html_text(trim = TRUE) %>% paste0(collapse = "")
news.date = exlink %>% html_nodes(".ArticleHeader_date") %>% html_text(trim = TRUE) %>% str_trim() %>% str_split_fixed("/",3)
new = tibble("source" = "Reueter","Catergory" = topic, "date" = news.date[1],"time" = news.date[2] , news.title,news.body)
news = bind_rows(news,new)
}
print(c(i,"/",pages))
}
return(news)
}
setwd(choose.dir())
##
reuter = c('businessNews','companyNews','wealth','topNews','domesticNews','worldNews')
SC.reuter.save = function(topic,pages){
df = SC.reuter(topic,pages)
name = paste0("Reueter",Sys.time(),topic,pages,"csv")
write.csv(df,file = name)
print("완료!")
}
SC.reuter.save('businessNews',5000)
|
options(shiny.maxRequestSize=30*1024^2)
library(shiny)
library(leaflet)
library(dplyr)
# Define server that analyzes the patterns of crimes in DC
shinyServer(function(input, output,session) {
# Create an output variable for problem description
output$text <- renderText({
"This project uses the dataset 'DC Bike Accidents in 2012'. The dataset contains information for 2012 bicicle accidents, including criminal patterns in DC, including CCN, Report Date, Shift, Method, Offense, Block, Ward, ANC, District, PSA, Neighborhood Cluster, Block Group, Census Tract, Voting Precinct, Latitude, Longitude, Bid, Start Date, End Date, and Object ID. Question: How Do the Patterns of Crimes in 2017 Vary at Different Time Slots and in Different Police Districts of Washington, DC? To answer this question, we analyze the types of crimes, the methods of crimes, the report frequency at different hours, and create a map for visualization. This question is a great interest to police officials in DC."
})
output$toptable <- DT::renderDataTable({
df <- read.csv('/Users/shen_sun/Desktop/GWU_Shen/week7/shen_sun/leaflet-v6/DC_Bike_Accidents_2012.csv') %>%
group_by(Main_Street)%>%summarise(Frequency=sum(Injured))
action <- DT::dataTableAjax(session, df)
DT::datatable(df, options = list(ajax = list(url = action)), escape = FALSE)
})
# Create a descriptive table for different offenses
output$map <- renderLeaflet({
# Connect to the sidebar of file input
inFile <- input$file
if(is.null(inFile))
return(NULL)
# Read input file
mydata <- read.csv(inFile$datapath)
attach(mydata)
# handle data
mydata %>% select(-GeocodeError) -> mydata
mydata <- mydata[complete.cases(mydata),]
# Filter the data for different time slots and different districts
target1 <- c(input$Day)
target2 <- c(input$Quadrant)
map_df <- filter(mydata, Day %in% target1 & Quadrant %in% target2)
# cluster
temp <- mydata %>%
group_by(Quadrant) %>%
summarise(Injured = sum(Injured), lng=mean(Longitude), lat=mean(Latitude)) %>%
filter(Quadrant %in% target2)
if(!input$Circle){
temp <- temp[1,]
temp[1,] <- NA
}
#
# Create colors with a categorical color function
color <- colorFactor(rainbow(9), map_df$On_Street)
# Create the leaflet function for data
leaflet() %>%
# Set the default view
setView(lng = -77.0369, lat = 38.9072, zoom = 12) %>%
# Provide tiles
addProviderTiles("CartoDB.Positron", options = providerTileOptions(noWrap = TRUE)) %>%
# Add circles
addCircleMarkers(
radius = 3,
lng= map_df$Longitude,
lat= map_df$Latitude,
stroke= FALSE,
fillOpacity=4,
color=color(On_Street)
) %>%
# Add circles
addCircles(radius = temp$Injured*10,
lng = temp$lng,
lat = temp$lat,
stroke = T) %>%
# Add legends for different types of crime
addLegend(
"bottomleft",
pal=color,
values=mydata$On_Street,
opacity=0.5,
title="Type of Bike accidents"
)
})
}) | /server.R | no_license | SHENSun0610/R-project- | R | false | false | 3,271 | r |
options(shiny.maxRequestSize=30*1024^2)
library(shiny)
library(leaflet)
library(dplyr)
# Define server that analyzes the patterns of crimes in DC
shinyServer(function(input, output,session) {
# Create an output variable for problem description
output$text <- renderText({
"This project uses the dataset 'DC Bike Accidents in 2012'. The dataset contains information for 2012 bicicle accidents, including criminal patterns in DC, including CCN, Report Date, Shift, Method, Offense, Block, Ward, ANC, District, PSA, Neighborhood Cluster, Block Group, Census Tract, Voting Precinct, Latitude, Longitude, Bid, Start Date, End Date, and Object ID. Question: How Do the Patterns of Crimes in 2017 Vary at Different Time Slots and in Different Police Districts of Washington, DC? To answer this question, we analyze the types of crimes, the methods of crimes, the report frequency at different hours, and create a map for visualization. This question is a great interest to police officials in DC."
})
output$toptable <- DT::renderDataTable({
df <- read.csv('/Users/shen_sun/Desktop/GWU_Shen/week7/shen_sun/leaflet-v6/DC_Bike_Accidents_2012.csv') %>%
group_by(Main_Street)%>%summarise(Frequency=sum(Injured))
action <- DT::dataTableAjax(session, df)
DT::datatable(df, options = list(ajax = list(url = action)), escape = FALSE)
})
# Create a descriptive table for different offenses
output$map <- renderLeaflet({
# Connect to the sidebar of file input
inFile <- input$file
if(is.null(inFile))
return(NULL)
# Read input file
mydata <- read.csv(inFile$datapath)
attach(mydata)
# handle data
mydata %>% select(-GeocodeError) -> mydata
mydata <- mydata[complete.cases(mydata),]
# Filter the data for different time slots and different districts
target1 <- c(input$Day)
target2 <- c(input$Quadrant)
map_df <- filter(mydata, Day %in% target1 & Quadrant %in% target2)
# cluster
temp <- mydata %>%
group_by(Quadrant) %>%
summarise(Injured = sum(Injured), lng=mean(Longitude), lat=mean(Latitude)) %>%
filter(Quadrant %in% target2)
if(!input$Circle){
temp <- temp[1,]
temp[1,] <- NA
}
#
# Create colors with a categorical color function
color <- colorFactor(rainbow(9), map_df$On_Street)
# Create the leaflet function for data
leaflet() %>%
# Set the default view
setView(lng = -77.0369, lat = 38.9072, zoom = 12) %>%
# Provide tiles
addProviderTiles("CartoDB.Positron", options = providerTileOptions(noWrap = TRUE)) %>%
# Add circles
addCircleMarkers(
radius = 3,
lng= map_df$Longitude,
lat= map_df$Latitude,
stroke= FALSE,
fillOpacity=4,
color=color(On_Street)
) %>%
# Add circles
addCircles(radius = temp$Injured*10,
lng = temp$lng,
lat = temp$lat,
stroke = T) %>%
# Add legends for different types of crime
addLegend(
"bottomleft",
pal=color,
values=mydata$On_Street,
opacity=0.5,
title="Type of Bike accidents"
)
})
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{nytcovstate}
\alias{nytcovstate}
\title{NYT COVID-19 data for the US states, current as of Friday, May 15, 2020}
\format{
A tibble with 3,974 rows and 5 columns
\describe{
\item{date}{Date in YYYY-MM-DD format (date)}
\item{state}{State name (character)}
\item{fips}{State FIPS code (character)}
\item{cases}{Cumulative N reported cases}
\item{deaths}{Cumulative N reported deaths}
}
}
\source{
The New York Times \url{https://github.com/nytimes/covid-19-data}.
For details on the methods and limitations see \url{https://github.com/nytimes/covid-19-data}.
}
\usage{
nytcovstate
}
\description{
A dataset containing US state-level data on COVID-19, collected by the New York Times.
}
\keyword{datasets}
| /man/nytcovstate.Rd | permissive | AVCDRK/covdata | R | false | true | 810 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{nytcovstate}
\alias{nytcovstate}
\title{NYT COVID-19 data for the US states, current as of Friday, May 15, 2020}
\format{
A tibble with 3,974 rows and 5 columns
\describe{
\item{date}{Date in YYYY-MM-DD format (date)}
\item{state}{State name (character)}
\item{fips}{State FIPS code (character)}
\item{cases}{Cumulative N reported cases}
\item{deaths}{Cumulative N reported deaths}
}
}
\source{
The New York Times \url{https://github.com/nytimes/covid-19-data}.
For details on the methods and limitations see \url{https://github.com/nytimes/covid-19-data}.
}
\usage{
nytcovstate
}
\description{
A dataset containing US state-level data on COVID-19, collected by the New York Times.
}
\keyword{datasets}
|
# Interactive example / manual smoke test for the peer-quiz guessing UI.
# Not called programmatically; meant to be run line by line during
# development to spin up a local app showing the guess screen for one quiz.
example.peerquiz = function() {
  # NOTE(review): hard-coded developer working directory -- adapt before running.
  setwd("D:/libraries/courserPeerquiz/peerquiz")
  set.pq.opts()
  # Load a stored peer quiz; the second call overwrites the first, so only
  # "Kap1_Software_1" is actually used below.
  pq = load.pq("p-value")
  pq = load.pq("Kap1_Software_1")
  responderid = "guest"
  # Build the answers table and pick the guess choices shown to this responder.
  adf = pq.get.answers.df(pq=pq)
  ans = select.guess.choices(adf, responderid = responderid)
  # Create and register the per-user guessing state for this quiz.
  pgu = set.pgu(new.pgu(pq=pq, ans=ans,responderid = responderid))
  app = eventsApp()
  app$ui = fluidPage(
    pq.guess.headers(),
    uiOutput("mainUI")
  )
  # Random guest id so repeated runs look like distinct users.
  app$userid = paste0("Guest_", sample.int(1e6,1))
  # Render the guessing UI only once the app session is initialized.
  appInitHandler(function(...) {
    set.pgu.ui("mainUI",pq=pq, pgu=pgu)
  })
  viewApp()
  #view.html(ui=ui)
}
# Load all stored answers for a quiz and annotate each with num_guess, the
# number of times that user's answer already appeared in the "pqguess"
# guess log. Used downstream to prefer rarely shown answers.
pq.get.answers.df = function(pq) {
restore.point("pq.get.answers.df")
adf = load.pq.answers(pq=pq)
db = get.pqdb()
# all guesses recorded so far for this quiz id
gdf = dbGet(db,"pqguess",nlist(id=pq$id))
if (NROW(gdf)>0) {
# count how often each writer's answer appears in the guess log
sgdf = gdf %>%
mutate(userid=writerid) %>%
group_by(userid) %>%
summarize(num_guess = n())
# answers never guessed get NA from the left join; treat them as 0
adf = left_join(adf, sgdf, by="userid") %>%
mutate(num_guess = ifelse(is.na(num_guess),0,num_guess))
} else {
adf$num_guess = 0
}
adf
}
# select 4 choices for the responder
# Select n answer choices for a responder: the least-guessed sample
# solution plus up to n-1 least-guessed user answers (distractors), in
# random display order. The responder's own answers are excluded. A tiny
# uniform jitter on num_guess breaks ties randomly.
select.guess.choices = function(adf, responderid, n=4) {
restore.point("select.guess.choices")
adf$row = seq_len(NROW(adf))
# least-guessed sample solution not written by the responder
sol = filter(adf, is.sol, userid != responderid)
ord = order(sol$num_guess + runif(NROW(sol),0,0.0001))
sol.row = sol$row[ord[1]]
# least-guessed distractors not written by the responder
ans = filter(adf, !is.sol, userid != responderid)
ord = order(ans$num_guess + runif(NROW(ans),0,0.0001))
# take at most the available number of distractors; the previous
# ord[1:(n-1)] produced NA indices (and NA rows) when fewer than n-1
# distractor answers existed
ans.rows = ans$row[ord[seq_len(min(n-1, NROW(ans)))]]
rows = c(sol.row, ans.rows)
rows = rows[!is.na(rows)]
# sample(x) on a length-1 numeric samples from 1:x, so only shuffle
# when there is more than one candidate row
if (length(rows) > 1) {
rows = sample(rows, replace = FALSE)
}
adf[rows,]
}
# Return the guess-task state for a user: "no", "assigned" or "submitted".
# A state is marked by the presence of a hashed marker file (digest of the
# user id) in the corresponding subdirectory of the task directory;
# "submitted" takes precedence over "assigned".
get.user.pgu.state = function(pq, userid, task.dir=pq.task.dir(pq)) {
hash = digest(userid)
for (state in c("submitted", "assigned")) {
marker = file.path(task.dir, paste0("pgu_", state), hash)
if (file.exists(marker)) return(state)
}
"no"
}
# Create a new peer-guess unit (pgu) as an environment. It bundles the quiz
# id, the responder, the selected answer rows, their count and one unique
# div id per answer for the click-ranking widget. Returned invisibly, as in
# the original assignment-as-last-expression form.
new.pgu = function(pq,responderid, ans= if(!is.null(adf)) select.guess.choices(adf = adf, responderid=responderid), num.ans = NROW(ans), adf = NULL, state="assigned", ...) {
fields = list(
id = pq$id,
state = state,
responderid = responderid,
ans = ans,
num.ans = num.ans,
ans.div.id = paste0("ans-div-id-", seq_len(NROW(ans)), "-", pq$id)
)
invisible(as.environment(fields))
}
# Register a pgu in the app-wide registry app$pgu.li, keyed by its quiz id.
# The registry is created on first use. Returns the pgu for chaining.
set.pgu = function(pgu, app=getApp()) {
registry = app[["pgu.li"]]
if (is.null(registry)) {
registry = list()
}
registry[[pgu$id]] = pgu
app$pgu.li = registry
pgu
}
# Look up the pgu registered under a quiz id; lazily create and cache one
# when none exists yet.
# NOTE(review): the fallback new.pgu(pq=pq) supplies no responderid, which
# new.pgu requires when building its field list -- likely errors for
# unknown ids; confirm intended usage.
get.pgu = function(pq=NULL,id = pq$id, app=getApp()){
if (is.null(app[["pgu.li"]])) {
app$pgu.li = list()
}
pgu = app$pgu.li[[id]]
if (is.null(pgu)) {
pgu = new.pgu(pq=pq)
app$pgu.li[[id]] = pgu
}
pgu
}
# Render the guess UI for a pgu into the given container and wire up the
# click-ranking and submit event handlers.
#
# container.id: id of the uiOutput to fill
# pq:           the peer quiz object (provides ns, id, language)
# pgu:          peer-guess unit holding the answers and ranking state
# edit:         if TRUE the responder can still rank and submit; defaults
#               to FALSE once the pgu was submitted
# show.sol:     NOTE(review): currently unused in the body -- solution
#               display is tied to edit; confirm whether it should be wired.
set.pgu.ui = function(container.id,pq, pgu = NULL, edit = !isTRUE(pgu$state=="submitted"), show.sol=!edit) {
restore.point("set.pgu.ui")
ns = pq$ns
ans = pgu$ans
ui = pgu.ui(pq=pq,pgu = pgu, edit=edit)
if (edit) {
# fired by the clickrank.js widget whenever the ranking changes
eventHandler("clickRankChange",id=pq$id,function(ranked,max_ranked, num_ranked, ...) {
restore.point("cr.clickRankChange")
ns = pq$ns
ranked = unlist(ranked)
if (length(ranked)>0) {
# JS indices are 0-based; convert to R's 1-based indexing
ranked = ranked+1
# once all but one answer are ranked, the remaining answer
# implicitly receives the last rank
if (num_ranked == pgu$num.ans-1) {
ranked = unique(c(ranked,1:pgu$num.ans))
}
}
pgu$ranked = ranked
pgu.show.ans.ranking(pgu, pq)
})
# activate the click-ranking widget on the answer divs (3 explicit ranks)
callJS("newClickRank",id=pq$id,div_ids=pgu$ans.div.id,max_ranked=3)
buttonHandler(ns("submitGuessBtn"), function(...) {
pgu.submit(pq=pq, pgu=pgu)
})
} else {
# disable click event handler
eventHandler("clickRankChange",id=pq$id,function(...) {})
pgu.show.ans.ranking(pgu, pq)
# read-only mode: inject the JS snippet that highlights the solution
ui = tagList(ui,
tags$script(HTML(pgu.show.sol(pgu,pq, return.js=TRUE)))
)
}
setUI(container.id,ui)
dsetUI(container.id,ui)
pgu
}
# Points earned for a guess: 4/2/1/0 depending on the rank the responder
# gave to the sample solution (rank 1 scores highest). Returns NULL when
# nothing has been ranked yet.
get.pgu.points = function(pgu, pq) {
if (length(pgu$ranked) == 0) {
return(NULL)
}
points.by.rank = c(4, 2, 1, 0)
rank.of.sol = which(pgu$ans$is.sol[pgu$ranked])
points.by.rank[rank.of.sol]
}
# Highlight the sample-solution answer box with a blue border. With
# return.js=TRUE the jQuery snippet is returned (for embedding in a
# <script> tag) instead of being applied directly via setHtmlCSS.
pgu.show.sol = function(pgu, pq, return.js = FALSE) {
restore.point("pgu.show.sol")
sol.ind = which(pgu$ans$is.sol)
# nothing to highlight if no answer is marked as the solution
if (length(sol.ind)==0) return()
id = pgu$ans.div.id[sol.ind]
if (return.js)
return(paste0('$("#',id,'").css({border:"4px solid #0000aa"});'))
# use the same color as the JS path; the previous value "4px solid blue;"
# differed in color and carried a stray semicolon inside the CSS value
setHtmlCSS(id=id, list(border="4px solid #0000aa"))
}
# Render the responder's current ranking into the ns("ranking") uiOutput.
# After submission (show.sol), the sample solution's line is colored blue
# and annotated with the points earned; optionally the quiz author's
# explanation UI is appended below.
pgu.show.ans.ranking = function(pgu, pq, show.sol=isTRUE(pgu$state=="submitted"), show.explain=show.sol) {
restore.point("pgu.show.ans.ranking")
ranked = pgu$ranked
ns = pq$ns
# language-dependent UI labels
labs = pq_string(pq$lang)
# console log of the current ranking (debug output)
cat("\nRanking:",paste0(ranked, collapse=", "))
if (length(ranked)==0) {
str = labs$not_yet_ranked
} else {
# one line per rank: "<rank>: Answer <answer number>"
str = paste0(seq_along(ranked), ": ",labs$Answer," ", ranked)
if (show.sol) {
# position(s) of the sample solution within the ranking
rows = which(pgu$ans$is.sol[pgu$ranked])
points = get.pgu.points(pgu=pgu,pq=pq)
str[rows] = paste0('<font color="#0000aa">', str[rows],' (',labs$sample_sol,', ', points, ' ',labs$points,')</font>')
}
str = paste0(str, collapse="<br>")
}
ranking.ui = tagList(
h4(pq_string(pq$lang)$your_ranking,":"),
p(HTML(str)),
# append the author's explanation after submission, if available
if (show.explain & !is.null(pq$explain_ui)) {
tagList(
h3(labs$explain),
pq$explain_ui
)
}
)
setUI(ns("ranking"), ranking.ui)
}
# Persist a completed guess: validates that all answers are ranked, writes
# one row per ranked answer to the "pqguess" DB table, saves the pgu as an
# RDS file under pgu_submitted/<digest(responderid)>, and updates the UI.
pgu.submit = function(pgu, pq,show.sol=TRUE,file.name = digest(pgu$responderid), show.msg =TRUE, ...) {
restore.point("pgu.submit")
ans = pgu$ans; ns = pq$ns;
# refuse to submit while some answers are still unranked
if (length(pgu$ranked) < pgu$num.ans) {
timedMessage(pq$ns("pguAlert"), html=colored.html(pq_string(pq$lang)$not_all_ranked, color="#880000"))
return()
}
pgu$state = "submitted"
# NOTE(review): the next line is a no-op (its value is discarded)
pgu$ranked
db = get.pqdb()
# one row per answer in ranked order: rank 1 = first-ranked writer
idf = data_frame(id=pq$id,writerid = ans$userid[pgu$ranked],responderid=pgu$responderid, rank=1:NROW(ans), numchoices=NROW(ans),guesstime=Sys.time())
dbInsert(db,"pqguess",idf)
dir = file.path(pq.task.dir(pq=pq),"pgu_submitted")
if (!dir.exists(dir))
dir.create(dir, recursive = TRUE)
#file.name = digest(pgu$responderid)
saveRDS(pgu, file.path(dir , file.name))
if (show.msg) {
# persistent (millis = Inf) confirmation message
timedMessage(pq$ns("pguAlert"), html=colored.html(pq_string(pq$lang)$guess_save_msg, color="#880000"),millis = Inf)
}
if (show.sol) {
# hide the submit button; reveal earned points and the sample solution
shinyEvents::setHtmlHide(pq$ns("submitGuessBtn"))
pgu.show.ans.ranking(pgu, pq)
pgu.show.sol(pgu,pq)
}
}
# HTML dependency providing the click-ranking JS and CSS assets.
# Returning an htmlDependency lets shiny deduplicate the assets and inject
# them into the page head once per page.
pq.guess.headers = function() {
restore.point("pq.guess.headers")
# An earlier implementation built <head> tags manually from the "peerquiz"
# package; that code was unreachable after the return() and was removed.
htmlDependency('clickrank-css', version = "1.0",
src = system.file('www', package = 'courserPeerquiz'),
stylesheet = 'clickrank.css', script = "clickrank.js"
)
}
# Build the full guess UI for a pgu: the question, the proposed answers in
# a two-column HTML table, the ranking display, an alert area and (in edit
# mode) the submit button.
# NOTE(review): num.cols is currently unused -- the layout is hard-wired to
# two columns.
pgu.ui = function(ans=pgu$ans,pq, pgu=get.pgu(pq), num.cols=2, add.header = TRUE, edit=TRUE) {
restore.point("pgu.ui")
ns = pq$ns
pgu$ans = ans
# one HTML div per proposed answer
divs = lapply(seq_len(NROW(ans)), quiz.ans.div, pq=pq,pgu=pgu)
# distribute answers over two columns: odd indices left, even right
is.left = seq_along(divs)%%2 == 1
left = divs[is.left]
right = divs[!is.left]
# pad the right column with an empty cell for an odd number of answers
if (length(right)<length(left)) right[[length(left)]] = ""
str = paste0('<tr><td valign="top" style="border: 0px solid #000000">',left,'</td><td valign="top" style="border: 0px solid #000000">',right,"</td></tr>")
tab = paste0('<table style="width: 100%; border-collapse:collapse;"><col width="50%"><col width="50%">', paste0(str, collapse="\n"),"</table>")
ui = withMathJaxNoHeader(tagList(
if (add.header) pq.guess.headers(),
HTML(pq$question_html),
h4(pq_string(pq$lang)$proposed_answers),
HTML(tab),
uiOutput(ns("ranking")),
uiOutput(ns("pguAlert")),
if (edit)
actionButton(ns("submitGuessBtn"),pq_string(pq$lang)$submitBtn)
))
ui
}
# Render one proposed answer as a clickable, bordered HTML div (target of
# the click-ranking widget). Returns the div as an HTML string so it can be
# embedded into the answer table markup.
quiz.ans.div = function(ans.num=1, pq, pgu=get.pgu(pq)) {
restore.point("quiz.ans.div")
answer.row = pgu$ans[ans.num,]
div.id = pgu$ans.div.id[[ans.num]]
heading = tags$h4(pq_string(pq$lang)$Answer, ans.num)
box = div(
id = div.id,
style = "margin:5px; border: 1px solid #000000; padding:10px;",
class = "clickable",
heading,
answer.row$answer.ui[[1]]
)
as.character(box)
}
| /R/pq_guess.R | no_license | skranz/courserPeerquiz | R | false | false | 8,001 | r | example.peerquiz = function() {
setwd("D:/libraries/courserPeerquiz/peerquiz")
set.pq.opts()
pq = load.pq("p-value")
pq = load.pq("Kap1_Software_1")
responderid = "guest"
adf = pq.get.answers.df(pq=pq)
ans = select.guess.choices(adf, responderid = responderid)
pgu = set.pgu(new.pgu(pq=pq, ans=ans,responderid = responderid))
app = eventsApp()
app$ui = fluidPage(
pq.guess.headers(),
uiOutput("mainUI")
)
app$userid = paste0("Guest_", sample.int(1e6,1))
appInitHandler(function(...) {
set.pgu.ui("mainUI",pq=pq, pgu=pgu)
})
viewApp()
#view.html(ui=ui)
}
pq.get.answers.df = function(pq) {
restore.point("pq.get.answers.df")
adf = load.pq.answers(pq=pq)
db = get.pqdb()
gdf = dbGet(db,"pqguess",nlist(id=pq$id))
if (NROW(gdf)>0) {
sgdf = gdf %>%
mutate(userid=writerid) %>%
group_by(userid) %>%
summarize(num_guess = n())
adf = left_join(adf, sgdf, by="userid") %>%
mutate(num_guess = ifelse(is.na(num_guess),0,num_guess))
} else {
adf$num_guess = 0
}
adf
}
# select 4 choices for the responder
select.guess.choices = function(adf, responderid, n=4) {
restore.point("select.guess.choices")
adf$row = seq_len(NROW(adf))
sol = filter(adf, is.sol, userid != responderid)
ord = order(sol$num_guess + runif(NROW(sol),0,0.0001))
sol.row = sol$row[ord[1]]
ans = filter(adf, !is.sol, userid != responderid)
ord = order(ans$num_guess + runif(NROW(ans),0,0.0001))
ans.rows = ans$row[ord[1:(n-1)]]
rows = sample(c(sol.row,ans.rows),replace = FALSE)
adf[rows,]
}
# state of pgu for user: "no", "assigned", "submitted"
get.user.pgu.state = function(pq, userid, task.dir=pq.task.dir(pq)) {
file.name = digest(userid)
if (file.exists(file.path(task.dir,"pgu_submitted", file.name))) return("submitted")
if (file.exists(file.path(task.dir,"pgu_assigned", file.name))) return("assigned")
return("no")
}
new.pgu = function(pq,responderid, ans= if(!is.null(adf)) select.guess.choices(adf = adf, responderid=responderid), num.ans = NROW(ans), adf = NULL, state="assigned", ...) {
pgu = as.environment(list(id=pq$id,state=state,responderid=responderid, ans=ans, num.ans=num.ans, ans.div.id = paste0("ans-div-id-",seq_len(NROW(ans)),"-",pq$id)))
}
set.pgu = function(pgu, app=getApp()) {
if (is.null(app[["pgu.li"]]))
app$pgu.li = list()
app$pgu.li[[pgu$id]] = pgu
pgu
}
get.pgu = function(pq=NULL,id = pq$id, app=getApp()){
if (is.null(app[["pgu.li"]]))
app$pgu.li = list()
if (is.null(app$pgu.li[[id]]))
app$pgu.li[[id]] = new.pgu(pq=pq)
app$pgu.li[[id]]
}
set.pgu.ui = function(container.id,pq, pgu = NULL, edit = !isTRUE(pgu$state=="submitted"), show.sol=!edit) {
restore.point("set.pgu.ui")
ns = pq$ns
ans = pgu$ans
ui = pgu.ui(pq=pq,pgu = pgu, edit=edit)
if (edit) {
eventHandler("clickRankChange",id=pq$id,function(ranked,max_ranked, num_ranked, ...) {
restore.point("cr.clickRankChange")
ns = pq$ns
ranked = unlist(ranked)
if (length(ranked)>0) {
ranked = ranked+1
if (num_ranked == pgu$num.ans-1) {
ranked = unique(c(ranked,1:pgu$num.ans))
}
}
pgu$ranked = ranked
pgu.show.ans.ranking(pgu, pq)
})
callJS("newClickRank",id=pq$id,div_ids=pgu$ans.div.id,max_ranked=3)
buttonHandler(ns("submitGuessBtn"), function(...) {
pgu.submit(pq=pq, pgu=pgu)
})
} else {
# disable click event handler
eventHandler("clickRankChange",id=pq$id,function(...) {})
pgu.show.ans.ranking(pgu, pq)
ui = tagList(ui,
tags$script(HTML(pgu.show.sol(pgu,pq, return.js=TRUE)))
)
}
setUI(container.id,ui)
dsetUI(container.id,ui)
pgu
}
get.pgu.points = function(pgu, pq) {
if (length(pgu$ranked)==0) return(NULL)
sol.rank = which(pgu$ans$is.sol[pgu$ranked])
c(4,2,1,0)[sol.rank]
}
pgu.show.sol = function(pgu, pq, return.js = FALSE) {
restore.point("pgu.show.sol")
sol.ind = which(pgu$ans$is.sol)
if (length(sol.ind)==0) return()
id = pgu$ans.div.id[sol.ind]
if (return.js)
return(paste0('$("#',id,'").css({border:"4px solid #0000aa"});'))
setHtmlCSS(id=id, list(border="4px solid blue;"))
}
pgu.show.ans.ranking = function(pgu, pq, show.sol=isTRUE(pgu$state=="submitted"), show.explain=show.sol) {
restore.point("pgu.show.ans.ranking")
ranked = pgu$ranked
ns = pq$ns
labs = pq_string(pq$lang)
cat("\nRanking:",paste0(ranked, collapse=", "))
if (length(ranked)==0) {
str = labs$not_yet_ranked
} else {
str = paste0(seq_along(ranked), ": ",labs$Answer," ", ranked)
if (show.sol) {
rows = which(pgu$ans$is.sol[pgu$ranked])
points = get.pgu.points(pgu=pgu,pq=pq)
str[rows] = paste0('<font color="#0000aa">', str[rows],' (',labs$sample_sol,', ', points, ' ',labs$points,')</font>')
}
str = paste0(str, collapse="<br>")
}
ranking.ui = tagList(
h4(pq_string(pq$lang)$your_ranking,":"),
p(HTML(str)),
if (show.explain & !is.null(pq$explain_ui)) {
tagList(
h3(labs$explain),
pq$explain_ui
)
}
)
setUI(ns("ranking"), ranking.ui)
}
pgu.submit = function(pgu, pq,show.sol=TRUE,file.name = digest(pgu$responderid), show.msg =TRUE, ...) {
restore.point("pgu.submit")
ans = pgu$ans; ns = pq$ns;
if (length(pgu$ranked) < pgu$num.ans) {
timedMessage(pq$ns("pguAlert"), html=colored.html(pq_string(pq$lang)$not_all_ranked, color="#880000"))
return()
}
pgu$state = "submitted"
pgu$ranked
db = get.pqdb()
idf = data_frame(id=pq$id,writerid = ans$userid[pgu$ranked],responderid=pgu$responderid, rank=1:NROW(ans), numchoices=NROW(ans),guesstime=Sys.time())
dbInsert(db,"pqguess",idf)
dir = file.path(pq.task.dir(pq=pq),"pgu_submitted")
if (!dir.exists(dir))
dir.create(dir, recursive = TRUE)
#file.name = digest(pgu$responderid)
saveRDS(pgu, file.path(dir , file.name))
if (show.msg) {
timedMessage(pq$ns("pguAlert"), html=colored.html(pq_string(pq$lang)$guess_save_msg, color="#880000"),millis = Inf)
}
if (show.sol) {
shinyEvents::setHtmlHide(pq$ns("submitGuessBtn"))
pgu.show.ans.ranking(pgu, pq)
pgu.show.sol(pgu,pq)
}
}
pq.guess.headers = function() {
restore.point("pq.guess.headers")
www.path = system.file("www",package="peerquiz")
return(
htmlDependency('clickrank-css',version="1.0", src = system.file('www', package = 'courserPeerquiz'), stylesheet = 'clickrank.css',script = "clickrank.js"
)
)
tagList(
singleton(tags$head(includeScript(file.path(www.path,"clickrank.js")))),
singleton(tags$head(includeCSS(file.path(www.path,"clickrank.css"))))
)
}
pgu.ui = function(ans=pgu$ans,pq, pgu=get.pgu(pq), num.cols=2, add.header = TRUE, edit=TRUE) {
restore.point("pgu.ui")
ns = pq$ns
pgu$ans = ans
divs = lapply(seq_len(NROW(ans)), quiz.ans.div, pq=pq,pgu=pgu)
is.left = seq_along(divs)%%2 == 1
left = divs[is.left]
right = divs[!is.left]
if (length(right)<length(left)) right[[length(left)]] = ""
str = paste0('<tr><td valign="top" style="border: 0px solid #000000">',left,'</td><td valign="top" style="border: 0px solid #000000">',right,"</td></tr>")
tab = paste0('<table style="width: 100%; border-collapse:collapse;"><col width="50%"><col width="50%">', paste0(str, collapse="\n"),"</table>")
ui = withMathJaxNoHeader(tagList(
if (add.header) pq.guess.headers(),
HTML(pq$question_html),
h4(pq_string(pq$lang)$proposed_answers),
HTML(tab),
uiOutput(ns("ranking")),
uiOutput(ns("pguAlert")),
if (edit)
actionButton(ns("submitGuessBtn"),pq_string(pq$lang)$submitBtn)
))
ui
}
quiz.ans.div = function(ans.num=1, pq, pgu=get.pgu(pq)) {
restore.point("quiz.ans.div")
ans = pgu$ans[ans.num,]
id = pgu$ans.div.id[[ans.num]]
ui = div(id = id,style="margin:5px; border: 1px solid #000000; padding:10px;", class="clickable",
tags$h4(pq_string(pq$lang)$Answer, ans.num),
ans$answer.ui[[1]]
)
as.character(ui)
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
har_agg <- function(RM, periods, iNperiods) {
.Call(`_highfrequency_har_agg`, RM, periods, iNperiods)
}
heavy_parameter_transformR_ <- function(parameters, K, p, q, O, A, B, pMax1, qMax1) {
.Call(`_highfrequency_heavy_parameter_transformR_`, parameters, K, p, q, O, A, B, pMax1, qMax1)
}
heavy_parameter_transform_RetrackR_ <- function(parameters, K, p, q, means, O, A, B, pMax1, qMax1) {
.Call(`_highfrequency_heavy_parameter_transform_RetrackR_`, parameters, K, p, q, means, O, A, B, pMax1, qMax1)
}
heavy_likelihoodR_ <- function(h, O, A, B, TT, K, pMax, qMax, data, backcast, LB, UB, llRM, lls) {
.Call(`_highfrequency_heavy_likelihoodR_`, h, O, A, B, TT, K, pMax, qMax, data, backcast, LB, UB, llRM, lls)
}
nsmaller <- function(times, lengths, start, end, max) {
.Call(`_highfrequency_nsmaller`, times, lengths, start, end, max)
}
KK <- function(x, type) {
.Call(`_highfrequency_KK`, x, type)
}
kernelEstimator <- function(a, b, na, q, adj, type, ab, ab2) {
.Call(`_highfrequency_kernelEstimator`, a, b, na, q, adj, type, ab, ab2)
}
rv <- function(a, b, na, period, tmpa, tmpb, tmpna) {
.Call(`_highfrequency_rv`, a, b, na, period, tmpa, tmpb, tmpna)
}
pcovcc <- function(a, ap, b, at, atp, bt, na, nap, nb, period) {
.Call(`_highfrequency_pcovcc`, a, ap, b, at, atp, bt, na, nap, nb, period)
}
| /R/RcppExports.R | no_license | junfanz1/highfrequency | R | false | false | 1,472 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
har_agg <- function(RM, periods, iNperiods) {
.Call(`_highfrequency_har_agg`, RM, periods, iNperiods)
}
heavy_parameter_transformR_ <- function(parameters, K, p, q, O, A, B, pMax1, qMax1) {
.Call(`_highfrequency_heavy_parameter_transformR_`, parameters, K, p, q, O, A, B, pMax1, qMax1)
}
heavy_parameter_transform_RetrackR_ <- function(parameters, K, p, q, means, O, A, B, pMax1, qMax1) {
.Call(`_highfrequency_heavy_parameter_transform_RetrackR_`, parameters, K, p, q, means, O, A, B, pMax1, qMax1)
}
heavy_likelihoodR_ <- function(h, O, A, B, TT, K, pMax, qMax, data, backcast, LB, UB, llRM, lls) {
.Call(`_highfrequency_heavy_likelihoodR_`, h, O, A, B, TT, K, pMax, qMax, data, backcast, LB, UB, llRM, lls)
}
nsmaller <- function(times, lengths, start, end, max) {
.Call(`_highfrequency_nsmaller`, times, lengths, start, end, max)
}
KK <- function(x, type) {
.Call(`_highfrequency_KK`, x, type)
}
kernelEstimator <- function(a, b, na, q, adj, type, ab, ab2) {
.Call(`_highfrequency_kernelEstimator`, a, b, na, q, adj, type, ab, ab2)
}
rv <- function(a, b, na, period, tmpa, tmpb, tmpna) {
.Call(`_highfrequency_rv`, a, b, na, period, tmpa, tmpb, tmpna)
}
pcovcc <- function(a, ap, b, at, atp, bt, na, nap, nb, period) {
.Call(`_highfrequency_pcovcc`, a, ap, b, at, atp, bt, na, nap, nb, period)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SnB.R
\name{SnB}
\alias{SnB}
\title{Cramer-von Mises statistic SnB for GOF based on the Rosenblatt transform}
\usage{
SnB(E)
}
\arguments{
\item{E}{(n x d) matrix of pseudo-observations (normalized ranks)}
}
\value{
\item{Sn}{Cramer-von Mises statistic}
}
\description{
This function computes the Cramer-von Mises statistic SnB for GOF based on the Rosenblatt transform
}
| /man/SnB.Rd | no_license | cran/HMMcopula | R | false | true | 468 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SnB.R
\name{SnB}
\alias{SnB}
\title{Cramer-von Mises statistic SnB for GOF based on the Rosenblatt transform}
\usage{
SnB(E)
}
\arguments{
\item{E}{(n x d) matrix of pseudo-observations (normalized ranks)}
}
\value{
\item{Sn}{Cramer-von Mises statistic}
}
\description{
This function computes the Cramer-von Mises statistic SnB for GOF based on the Rosenblatt transform
}
|
#'
#'
#' A script to analyse genomic distance distribution of gene pairs that encode
#' for proteins that have direct protein-protein interactions (PPI).
#'
#'
require(stringr) # for some string functionality
require(biomaRt) # to retrieve human paralogs from Ensembl
require(TxDb.Hsapiens.UCSC.hg19.knownGene)
require(ggplot2)
require(GenomicRanges)
require(rtracklayer) # to parse .bed files
require(plyr)
# Script parameters.
# To download the input files, run the script data/download.sh.
# HIPPIE confidence-score cutoff (0.72 -- TODO confirm this matches the
# HIPPIE "high confidence" tier in the HIPPIE documentation)
HIPPIE_SCORE_TH <- 0.72
HIPPIE_FILE <- "data/HIPPIE/hippie_current.txt"
# number of shuffled (random) networks used as background
N_RAND = 10
# TAD calls (IMR90, Rao et al. 2014, GSE63525) in BED format
TAD_FILE <- "data/Rao2014/GSE63525_IMR90_Arrowhead_domainlist.txt.bed"
# prefix for all output files
outPrefix <- "results/PPI_genomics"
# create output directory if it does not exist yet
dir.create(dirname(outPrefix), recursive=TRUE, showWarnings = FALSE)
#' Add linear distance between genes.
#'
#' Distance is measured from the start of each region and reported in
#' kilobases. If the two genes are on different chromosomes, NA is reported.
#'
#' @param genePairs a \code{data.frame} where each row is a gene pair with
#' the first two columns holding gene IDs
#' @param tssGR a \code{\link{GRanges}} object with genes. The names should
#' match the gene ids in \code{genePairs}.
#' @return a \code{data.frame} with the same data as \code{genePairs} but
#' with an additional column \code{dist} holding the pairwise distances in kb.
addPairDistKb <- function(genePairs, tssGR){
g1 <- tssGR[genePairs[,1]]
g2 <- tssGR[genePairs[,2]]
# pairs on different chromosomes have no defined linear distance
onSameChrom <- as.character(seqnames(g1)) == as.character(seqnames(g2))
# absolute start-to-start distance, converted from bp to kb
distKb <- abs(start(g2) - start(g1)) / 1000
distKb[!onSameChrom] <- NA
genePairs[, "dist"] <- distKb
return(genePairs)
}
#=======================================================================
# Analyse genomic distance distribution of PPI and non PPI gene pairs
#=======================================================================
#-------------------------------------------------------------------
# get tssGR for ENSG
#-------------------------------------------------------------------
# Seqinfo (chromosome names/lengths) for hg19, used by all GRanges below
seqInfo <- seqinfo(TxDb.Hsapiens.UCSC.hg19.knownGene)
# connect to the GRCh37 (hg19) Ensembl BioMart, human genes dataset
ensemblGRCh37 <- useMart(host="grch37.ensembl.org", biomart="ENSEMBL_MART_ENSEMBL",dataset="hsapiens_gene_ensembl", verbose=FALSE)
geneAttributes = c("ensembl_gene_id", "hgnc_symbol", "chromosome_name", "start_position", "end_position", "strand", "status", "gene_biotype")
geneFilters="chromosome_name"
# read "normal" human chromosome names (without fixes and patches)
geneValues=c(1:22, "X", "Y")
allGenes = getBM(attributes=geneAttributes, mart=ensemblGRCh37, filters=geneFilters, values=geneValues)
# unique gene entry by ENSG ID symbol:
genes = allGenes[!duplicated(allGenes$ensembl_gene_id),]
# Build a GRanges of gene start coordinates (one point per gene), named by
# ENSG id. NOTE(review): start_position is used for both start and end, so
# for minus-strand genes this is the gene start, not the strand-aware TSS
# -- confirm that this is intended.
tssGR = GRanges(
paste0("chr", genes$chromosome_name),
IRanges(genes$start_position, genes$start_position),
strand = ifelse(genes$strand == 1, '+', '-'),
names = genes$ensembl_gene_id,
genes[,c("hgnc_symbol", "status", "gene_biotype")],
seqinfo=seqInfo
)
names(tssGR) = genes$ensembl_gene_id
tssGR <- sort(tssGR)
#-------------------------------------------------------------------
# get mapping of entrez IDs to ENGS from ENSEMBL
#-------------------------------------------------------------------
# BioMart query: Entrez gene id -> Ensembl gene id mapping (chr 1-22, X, Y)
entrezAttributes <- c("entrezgene", "ensembl_gene_id")
entrezFilters <- c("chromosome_name", "with_entrezgene")
entrezValues <- list("chromosome_name"=c(1:22, "X", "Y"), with_entrezgene=TRUE)
entrezToEnsgDF = getBM(attributes=entrezAttributes, mart=ensemblGRCh37, filters=entrezFilters, values=entrezValues)
# take only unique entrez IDs
# (keeps the first Ensembl id per Entrez id; 1:n mappings are collapsed)
entrezToEnsgDF <- entrezToEnsgDF[!duplicated(entrezToEnsgDF$entrezgene),]
#-----------------------------------------------------------------------
# Parse HIPPIE
#-----------------------------------------------------------------------
# Read the raw HIPPIE table; columns 2 and 4 are matched as Entrez gene
# ids, column 5 is the confidence score, and columns 1/3 are protein names
# of the form NAME_SUFFIX (the part before "_" is kept as the symbol).
hippieDF <- read.table(HIPPIE_FILE, header=FALSE, sep="\t", quote="")
# get index in mapping table for each entrez gene ID in HIPPIE
idxG1 <- match(as.character(hippieDF[,2]), entrezToEnsgDF$entrezgene)
idxG2 <- match(as.character(hippieDF[,4]), entrezToEnsgDF$entrezgene)
# one row per interaction, with both partners mapped to ENSG ids
hippie <- data.frame(
g1_ENSG = entrezToEnsgDF$ensembl_gene_id[idxG1],
g2_ENSG = entrezToEnsgDF$ensembl_gene_id[idxG2],
symbol1 = str_split_fixed(as.character(hippieDF[,1]), "_", 2)[,1],
symbol2 = str_split_fixed(as.character(hippieDF[,3]), "_", 2)[,1],
score = hippieDF[,5],
stringsAsFactors=FALSE)
message("INFO: After parsing: ", nrow(hippie))
# filter out interactions that could not be mapped to ENSG
hippie <- hippie[!is.na(hippie[,1]) & !is.na(hippie[,2]),]
message("INFO: After ENSG mapping: ", nrow(hippie))
# filter out interactions below the score threshold
hippie <- hippie[hippie$score >= HIPPIE_SCORE_TH,]
#-----------------------------------------------------------------------
# generate random interaction network
#-----------------------------------------------------------------------
# Background model: replicate the PPI pairs N_RAND times and permute the
# second partner, which breaks the g1-g2 association while keeping each
# gene's overall occurrence counts.
randNet <- hippie[rep(1:nrow(hippie), N_RAND) ,c("g1_ENSG", "g2_ENSG", "score")]
randNet[,2] <- sample(randNet[,2])
#-----------------------------------------------------------------------
# combine HIPPIE and random interactions
#-----------------------------------------------------------------------
# Stack real PPI pairs and the N_RAND shuffled replicates into one table.
pairsDF <- rbind(
hippie[,c("g1_ENSG", "g2_ENSG", "score")],
randNet
)
# label the two groups; row order is hippie rows first, then randNet rows
pairsDF$group <- rep(c("PPI", "shuffled"), c(nrow(hippie), nrow(randNet)))
# replicate id: 1 for the real network, 1..N_RAND for the shuffled copies
# (relies on randNet holding nrow(hippie) rows per replicate, in order)
pairsDF$replicate <- rep(c(1, 1:N_RAND), each=nrow(hippie))
message("INFO: After filtering score >= ", HIPPIE_SCORE_TH, " : ", sum(pairsDF$group == "PPI"), " and shuffled: ",sum(pairsDF$group == "shuffled"))
#-----------------------------------------------------------------------
# Annotate gene pairs with genomic distance and filter for same chrom.
#-----------------------------------------------------------------------
# add distance
pairsDF <- addPairDistKb(pairsDF, tssGR)
# filter for pairs on same chromosome (with dist != NA)
pairsDF <- pairsDF[!is.na(pairsDF$dist),]
# add distance bins
# breaksCis <- seq(0, 1000, 50)
# 100 kb bins over 0-1000 kb; distBin is the lower edge of each bin
breaksCis <- seq(0, 1000, 100)
pairsDF$distBin <- as.factor(breaksCis[.bincode(pairsDF$dist, breaksCis)])
message("INFO: After filtering out different chromosomes : ",
sum(pairsDF$group == "PPI"), " and shuffled: ",
sum(pairsDF$group == "shuffled"))
message("INFO: PPI pairs with dist==0: ", sum(pairsDF$group == "PPI" & pairsDF$dist == 0))
message("INFO: PPI pairs with same ID: ", sum(pairsDF$group == "PPI" & pairsDF[,1] == pairsDF[,2]))
# filter out pairs with same ID
# (homo-dimers: both partners map to the same gene)
pairsDF <- pairsDF[!pairsDF[,1] == pairsDF[,2],]
message("INFO: After filtering out homo-dimers (pairs with same ID): ", sum(pairsDF$group == "PPI"), " and shuffled: ",sum(pairsDF$group == "shuffled"))
# restrict the analysis to pairs at most 1 Mb apart
pairsDF <- pairsDF[pairsDF$dist <= 1000,]
message("INFO: After filtering distance <= 1000kb: ", sum(pairsDF$group == "PPI"), " and shuffled: ",sum(pairsDF$group == "shuffled"))
#-----------------------------------------------------------------------
# annotate to be in same TAD
#-----------------------------------------------------------------------
# Build a GRanges of the genomic region spanned by each gene pair.
# The region runs from the smaller to the larger TSS start of the two
# genes; the chromosome is taken from the first gene (pairs are assumed to
# be on the same chromosome at this point). All columns of genePairs are
# kept as metadata columns.
getPairAsGR <- function(genePairs, tssGR){
# chromosome of the first gene of each pair
chrom = seqnames(tssGR[genePairs[,1]])
s1 = start(tssGR[genePairs[,1]])
s2 = start(tssGR[genePairs[,2]])
# pmin/pmax are vectorized; the previous row-wise apply(cbind(...), 1, min)
# performed one R-level call per pair
up = pmin(s1, s2)
down = pmax(s1, s2)
GR = GRanges(chrom, IRanges(up, down))
# keep gene IDs and all other pair annotations
mcols(GR) = genePairs
return(GR)
}
#-----------------------------------------------------------------------
# add column to indicate that query lies within at least one subject object
#-----------------------------------------------------------------------
# Adds a logical metadata column <colName> to `query` that is TRUE when the
# query range lies entirely within at least one range of `subject`.
# Note: not called in this script -- the inTAD annotation below computes
# the same countOverlaps(..., type="within") expression inline.
addWithinSubject <- function(query, subject, colName="inRegion"){
mcols(query)[, colName] = countOverlaps(query, subject, type="within") >= 1
return(query)
}
#-----------------------------------------------------------------------
# parse TADs from Rao et al.
#-----------------------------------------------------------------------
# parse TADs from bed file
# import TAD calls from the BED file (coordinates tied to hg19 via seqInfo)
tadGR <- import(TAD_FILE, seqinfo=seqInfo)
# get gene-pair spanning regions as GRanges
pairsGR <- getPairAsGR(pairsDF, tssGR)
# a pair is "in TAD" when its whole spanning region lies within >= 1 TAD
pairsDF$inTAD <- countOverlaps(pairsGR, tadGR, type="within") >= 1
# recode as a factor with "Same TAD" as the first level (plot order)
pairsDF$inTAD <- factor(pairsDF$inTAD, c(TRUE, FALSE), c("Same TAD", "Not same TAD"))
#===============================================================================
# plot geomic distance distribution
#===============================================================================
# compute p-value for distance difference between HIPPIE and shuffled
# (Wilcoxon rank-sum test on the two distance distributions)
pVal <- wilcox.test(dist ~ group, data=pairsDF)$p.value
# density-scaled histogram of pair distances, PPI vs shuffled overlaid
p <- ggplot(pairsDF, aes(dist, ..density.., fill=group, color=group)) +
geom_histogram(binwidth=50, alpha=.5, position="identity") +
labs(title=paste("p =", signif(pVal, 3)), x="Genomic distance [kb]") +
theme_bw()
ggsave(p, file=paste0(outPrefix, ".hippie_genomic_distance.v03.hist.pdf"), w=7, h=3.5)
# same histogram stratified by same-TAD status (plus a margin panel)
p <- p + facet_grid(inTAD~., margins=TRUE, scales="free_y")
ggsave(p, file=paste0(outPrefix, ".hippie_genomic_distance.v03.hist.byTAD.pdf"), w=7, h=7)
#===============================================================================
# plot percet of pairs in same TAD
#===============================================================================
# per group and replicate: percentage of pairs lying within a single TAD
repDF <- ddply(pairsDF, .(group, replicate), summarize,
N = length(inTAD),
n = sum(inTAD=="Same TAD"),
percent = n/N*100
)
# mean and sd of that percentage across replicates, per group
# (the PPI group has a single replicate, so its sd is NA)
groupDF <- ddply(repDF, .(group), summarize,
mean = mean(percent),
sd = sd(percent)
)
# association of same-TAD status with PPI vs shuffled (Fisher's exact test)
pval <- fisher.test(pairsDF$inTAD, pairsDF$group)$p.value
# bar plot of mean percentage with +/- sd error bars and the p-value label
p <- ggplot(groupDF, aes(x=group, y=mean, ymax=mean+sd, ymin=mean-sd, fill=group)) +
geom_errorbar(width=.25) +
geom_bar(stat="identity", color="black") +
geom_text(aes(label=round(mean, 2)), vjust=1.5) +
geom_text(aes(y=1.1*max(mean), x=1.5, label=paste0("p=", signif(pval, 3)))) +
labs(x="", y="Gene pairs in same TAD [%]") +
theme_bw()
ggsave(p, file=paste0(outPrefix, ".hippie_genomic_distance.v03.inTAD_by_group.barplot.pdf"), w=3.7, h=7)
#===============================================================================
# plot percet of pairs in same TAD by distance bins
#===============================================================================
# per distance bin and group: percentage of pairs within a single TAD
binDF <- ddply(pairsDF, .(group, distBin), summarize,
N = length(inTAD),
n = sum(inTAD=="Same TAD"),
percent = n/N*100
)
# dodged bars: same-TAD percentage per 100 kb distance bin, PPI vs shuffled
p <- ggplot(binDF, aes(x=distBin, y=percent, fill=group, color=group)) +
geom_bar(stat="identity", position="dodge", alpha=.5) +
labs(x="Genomic distance [kb]", y="Gene pairs in same TAD [%]") +
theme_bw()
ggsave(p, file=paste0(outPrefix, ".hippie_genomic_distance.v03.inTAD_by_distBin.barplot.pdf"), w=7, h=3.5)
| /R/genomic_distance_of_PPI.R | no_license | ibn-salem/PPIgenomics | R | false | false | 11,152 | r | #'
#'
#' A script to analyse genomic distance distribution of gene pairs that encode
#' for proteins that have direct protein-protein interactinos (PPI).
#'
#'
require(stringr) # for some string functionality
require(biomaRt) # to retrieve human paralogs from Ensembl
require(TxDb.Hsapiens.UCSC.hg19.knownGene)
require(ggplot2)
require(GenomicRanges)
require(rtracklayer) # to parse .bed files
require(plyr)
# set some parameters:
# to download thies files, run the script data/download.sh
HIPPIE_SCORE_TH <- 0.72
HIPPIE_FILE <- "data/HIPPIE/hippie_current.txt"
N_RAND = 10
TAD_FILE <- "data/Rao2014/GSE63525_IMR90_Arrowhead_domainlist.txt.bed"
outPrefix <- "results/PPI_genomics"
# create directory, if not exist
dir.create(dirname(outPrefix), recursive=TRUE, showWarnings = FALSE)
#' Add linear distance between genes.
#'
#' Distance is measured from start of each region and reproted in kilobaes. If
#' the genes are on different chromosome, NA is reported.
#'
#' @param genePair a \code{data.frames} where each row is a gene pair with the
#' first columns holding gnee IDs
#' @param tssGR a \code{\link{GRanges}} object with genes. The names should
#' match the gene ids in \code{genePairs}.
#' @return a \code{data.frame} with with the same data as \code{genePair} but
#' with an additional column \code{dist} holding the pairwise distances in kb.
addPairDistKb <- function(genePairs, tssGR){
# get chromosomes of gene pairs
chr1 <- as.character(seqnames(tssGR[genePairs[,1]]))
chr2 <- as.character(seqnames(tssGR[genePairs[,2]]))
sameChrom <- chr1 == chr2
s1 = start(tssGR[genePairs[,1]])
s2 = start(tssGR[genePairs[,2]])
# add a new column "dist" to the data.frame
genePairs[, "dist"] = ifelse(sameChrom, abs(s2-s1)/1000, NA)
return(genePairs)
}
#=======================================================================
# Analyse genomic distance distribution of PPI and non PPI gene pairs
#=======================================================================
#-------------------------------------------------------------------
# get tssGR for ENSG
#-------------------------------------------------------------------
seqInfo <- seqinfo(TxDb.Hsapiens.UCSC.hg19.knownGene)
ensemblGRCh37 <- useMart(host="grch37.ensembl.org", biomart="ENSEMBL_MART_ENSEMBL",dataset="hsapiens_gene_ensembl", verbose=FALSE)
geneAttributes = c("ensembl_gene_id", "hgnc_symbol", "chromosome_name", "start_position", "end_position", "strand", "status", "gene_biotype")
geneFilters="chromosome_name"
# read "normal" human chromosome names (without fixes and patches)
geneValues=c(1:22, "X", "Y")
allGenes = getBM(attributes=geneAttributes, mart=ensemblGRCh37, filters=geneFilters, values=geneValues)
# unique gene entry by ENSG ID symbol:
genes = allGenes[!duplicated(allGenes$ensembl_gene_id),]
# make GRanges object for all known prot coding genes
tssGR = GRanges(
paste0("chr", genes$chromosome_name),
IRanges(genes$start_position, genes$start_position),
strand = ifelse(genes$strand == 1, '+', '-'),
names = genes$ensembl_gene_id,
genes[,c("hgnc_symbol", "status", "gene_biotype")],
seqinfo=seqInfo
)
names(tssGR) = genes$ensembl_gene_id
tssGR <- sort(tssGR)
#-------------------------------------------------------------------
# get mapping of entrez IDs to ENGS from ENSEMBL
#-------------------------------------------------------------------
entrezAttributes <- c("entrezgene", "ensembl_gene_id")
entrezFilters <- c("chromosome_name", "with_entrezgene")
entrezValues <- list("chromosome_name"=c(1:22, "X", "Y"), with_entrezgene=TRUE)
entrezToEnsgDF = getBM(attributes=entrezAttributes, mart=ensemblGRCh37, filters=entrezFilters, values=entrezValues)
# take only unique entrez IDs
entrezToEnsgDF <- entrezToEnsgDF[!duplicated(entrezToEnsgDF$entrezgene),]
#-----------------------------------------------------------------------
# Parse HIPPIE
#-----------------------------------------------------------------------
hippieDF <- read.table(HIPPIE_FILE, header=FALSE, sep="\t", quote="")
# get index in mapping table for each entrez gene ID in HIPPIE
idxG1 <- match(as.character(hippieDF[,2]), entrezToEnsgDF$entrezgene)
idxG2 <- match(as.character(hippieDF[,4]), entrezToEnsgDF$entrezgene)
hippie <- data.frame(
g1_ENSG = entrezToEnsgDF$ensembl_gene_id[idxG1],
g2_ENSG = entrezToEnsgDF$ensembl_gene_id[idxG2],
symbol1 = str_split_fixed(as.character(hippieDF[,1]), "_", 2)[,1],
symbol2 = str_split_fixed(as.character(hippieDF[,3]), "_", 2)[,1],
score = hippieDF[,5],
stringsAsFactors=FALSE)
message("INFO: After parsing: ", nrow(hippie))
# filter out interactions that could not be mapped to ENSG
hippie <- hippie[!is.na(hippie[,1]) & !is.na(hippie[,2]),]
message("INFO: After ENSG mapping: ", nrow(hippie))
# filter out interactions below the score threshold
hippie <- hippie[hippie$score >= HIPPIE_SCORE_TH,]
#-----------------------------------------------------------------------
# generate random interaction network
#-----------------------------------------------------------------------
randNet <- hippie[rep(1:nrow(hippie), N_RAND) ,c("g1_ENSG", "g2_ENSG", "score")]
randNet[,2] <- sample(randNet[,2])
#-----------------------------------------------------------------------
# combine HIPPIE and random interactions
#-----------------------------------------------------------------------
pairsDF <- rbind(
hippie[,c("g1_ENSG", "g2_ENSG", "score")],
randNet
)
pairsDF$group <- rep(c("PPI", "shuffled"), c(nrow(hippie), nrow(randNet)))
pairsDF$replicate <- rep(c(1, 1:N_RAND), each=nrow(hippie))
message("INFO: After filtering score >= ", HIPPIE_SCORE_TH, " : ", sum(pairsDF$group == "PPI"), " and shuffled: ",sum(pairsDF$group == "shuffled"))
#-----------------------------------------------------------------------
# Annotate gene pairs with genomic distance and filter for same chrom.
#-----------------------------------------------------------------------
# add distance
pairsDF <- addPairDistKb(pairsDF, tssGR)
# filter for pairs on same chromosome (with dist != NA)
pairsDF <- pairsDF[!is.na(pairsDF$dist),]
# add distance bins
# breaksCis <- seq(0, 1000, 50)
breaksCis <- seq(0, 1000, 100)
pairsDF$distBin <- as.factor(breaksCis[.bincode(pairsDF$dist, breaksCis)])
message("INFO: After filtering out different chromosomes : ",
sum(pairsDF$group == "PPI"), " and shuffled: ",
sum(pairsDF$group == "shuffled"))
message("INFO: PPI pairs with dist==0: ", sum(pairsDF$group == "PPI" & pairsDF$dist == 0))
message("INFO: PPI pairs with same ID: ", sum(pairsDF$group == "PPI" & pairsDF[,1] == pairsDF[,2]))
# filter out pairs with same ID
pairsDF <- pairsDF[!pairsDF[,1] == pairsDF[,2],]
message("INFO: After filtering out homo-dimers (pairs with same ID): ", sum(pairsDF$group == "PPI"), " and shuffled: ",sum(pairsDF$group == "shuffled"))
pairsDF <- pairsDF[pairsDF$dist <= 1000,]
message("INFO: After filtering distance <= 1000kb: ", sum(pairsDF$group == "PPI"), " and shuffled: ",sum(pairsDF$group == "shuffled"))
#-----------------------------------------------------------------------
# annotate to be in same TAD
#-----------------------------------------------------------------------
getPairAsGR <- function(genePairs, tssGR) {
  # Build a GRanges spanning each gene pair, from the upstream TSS to the
  # downstream TSS. Uses the chromosome of the first gene of each pair, so it
  # assumes both genes lie on the same chromosome (pairs are pre-filtered
  # upstream by their non-NA "dist").
  chrom <- seqnames(tssGR[genePairs[, 1]])
  s1 <- start(tssGR[genePairs[, 1]])
  s2 <- start(tssGR[genePairs[, 2]])
  # vectorized pmin/pmax instead of the original row-wise
  # apply(cbind(s1, s2), 1, min/max) -- same values, O(n) and idiomatic
  up <- pmin(s1, s2)
  down <- pmax(s1, s2)
  GR <- GRanges(chrom, IRanges(up, down))
  # carry all pair annotations along as metadata columns
  mcols(GR) <- genePairs
  return(GR)
}
#-----------------------------------------------------------------------
# add column to indicate that query lies within at least one subject object
#-----------------------------------------------------------------------
addWithinSubject <- function(query, subject, colName = "inRegion") {
  # Flag every query range that lies entirely within at least one subject
  # range; the logical flag is stored as metadata column `colName`.
  hits <- countOverlaps(query, subject, type = "within")
  mcols(query)[, colName] <- hits >= 1
  query
}
#-----------------------------------------------------------------------
# parse TADs from Rao et al.
#-----------------------------------------------------------------------
# parse TAD intervals (Rao et al. domains) from BED file into a GRanges
tadGR <- import(TAD_FILE, seqinfo=seqInfo)
# get gene-pair spanning regions as GRanges
pairsGR <- getPairAsGR(pairsDF, tssGR)
# TRUE when the gene-pair spanning region lies entirely within any TAD
pairsDF$inTAD <- countOverlaps(pairsGR, tadGR, type="within") >= 1
# recode as a labelled factor for plotting and tabulation
pairsDF$inTAD <- factor(pairsDF$inTAD, c(TRUE, FALSE), c("Same TAD", "Not same TAD"))
#===============================================================================
# plot genomic distance distribution
#===============================================================================
# compute p-value for distance difference between HIPPIE and shuffled
pVal <- wilcox.test(dist ~ group, data=pairsDF)$p.value
p <- ggplot(pairsDF, aes(dist, ..density.., fill=group, color=group)) +
geom_histogram(binwidth=50, alpha=.5, position="identity") +
labs(title=paste("p =", signif(pVal, 3)), x="Genomic distance [kb]") +
theme_bw()
ggsave(p, file=paste0(outPrefix, ".hippie_genomic_distance.v03.hist.pdf"), w=7, h=3.5)
p <- p + facet_grid(inTAD~., margins=TRUE, scales="free_y")
ggsave(p, file=paste0(outPrefix, ".hippie_genomic_distance.v03.hist.byTAD.pdf"), w=7, h=7)
#===============================================================================
# plot percent of pairs in same TAD
#===============================================================================
repDF <- ddply(pairsDF, .(group, replicate), summarize,
N = length(inTAD),
n = sum(inTAD=="Same TAD"),
percent = n/N*100
)
groupDF <- ddply(repDF, .(group), summarize,
mean = mean(percent),
sd = sd(percent)
)
pval <- fisher.test(pairsDF$inTAD, pairsDF$group)$p.value
p <- ggplot(groupDF, aes(x=group, y=mean, ymax=mean+sd, ymin=mean-sd, fill=group)) +
geom_errorbar(width=.25) +
geom_bar(stat="identity", color="black") +
geom_text(aes(label=round(mean, 2)), vjust=1.5) +
geom_text(aes(y=1.1*max(mean), x=1.5, label=paste0("p=", signif(pval, 3)))) +
labs(x="", y="Gene pairs in same TAD [%]") +
theme_bw()
ggsave(p, file=paste0(outPrefix, ".hippie_genomic_distance.v03.inTAD_by_group.barplot.pdf"), w=3.7, h=7)
#===============================================================================
# plot percent of pairs in same TAD by distance bins
#===============================================================================
binDF <- ddply(pairsDF, .(group, distBin), summarize,
N = length(inTAD),
n = sum(inTAD=="Same TAD"),
percent = n/N*100
)
p <- ggplot(binDF, aes(x=distBin, y=percent, fill=group, color=group)) +
geom_bar(stat="identity", position="dodge", alpha=.5) +
labs(x="Genomic distance [kb]", y="Gene pairs in same TAD [%]") +
theme_bw()
ggsave(p, file=paste0(outPrefix, ".hippie_genomic_distance.v03.inTAD_by_distBin.barplot.pdf"), w=7, h=3.5)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BIEN.R
\name{BIEN_trait_family}
\alias{BIEN_trait_family}
\title{Download trait data for given families.}
\usage{
BIEN_trait_family(family, all.taxonomy = FALSE,
political.boundaries = FALSE, source.citation = F, ...)
}
\arguments{
\item{family}{A single family or a vector of families.}
\item{all.taxonomy}{Should full taxonomic information and TNRS output be returned? Default is FALSE.}
\item{political.boundaries}{Should political boundary information (country, state, etc.) be returned? Default is FALSE.}
\item{source.citation}{Should readable source information be downloaded for each record? Note that \code{\link{BIEN_metadata_citation}} may be more useful.}
\item{...}{Additional arguments passed to internal functions.}
}
\value{
A dataframe of all data matching the specified families.
}
\description{
BIEN_trait_family extracts all trait data for the specified families.
}
\examples{
\dontrun{
BIEN_trait_family("Poaceae")
family_vector<-c("Poaceae","Orchidaceae")
BIEN_trait_family(family_vector)}
}
\seealso{
Other trait functions: \code{\link{BIEN_trait_list}},
\code{\link{BIEN_trait_mean}},
\code{\link{BIEN_trait_species}},
\code{\link{BIEN_trait_traitbyfamily}},
\code{\link{BIEN_trait_traitbygenus}},
\code{\link{BIEN_trait_traitbyspecies}},
\code{\link{BIEN_trait_traits_per_species}},
\code{\link{BIEN_trait_trait}}
}
| /BIEN/man/BIEN_trait_family.Rd | no_license | naturalis/RBIEN | R | false | true | 1,442 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BIEN.R
\name{BIEN_trait_family}
\alias{BIEN_trait_family}
\title{Download trait data for given families.}
\usage{
BIEN_trait_family(family, all.taxonomy = FALSE,
political.boundaries = FALSE, source.citation = F, ...)
}
\arguments{
\item{family}{A single family or a vector of families.}
\item{all.taxonomy}{Should full taxonomic information and TNRS output be returned? Default is FALSE.}
\item{political.boundaries}{Should political boundary information (country, state, etc.) be returned? Default is FALSE.}
\item{source.citation}{Should readable source information be downloaded for each record? Note that \code{\link{BIEN_metadata_citation}} may be more useful.}
\item{...}{Additional arguments passed to internal functions.}
}
\value{
A dataframe of all data matching the specified families.
}
\description{
BIEN_trait_family extracts all trait data for the specified families.
}
\examples{
\dontrun{
BIEN_trait_family("Poaceae")
family_vector<-c("Poaceae","Orchidaceae")
BIEN_trait_family(family_vector)}
}
\seealso{
Other trait functions: \code{\link{BIEN_trait_list}},
\code{\link{BIEN_trait_mean}},
\code{\link{BIEN_trait_species}},
\code{\link{BIEN_trait_traitbyfamily}},
\code{\link{BIEN_trait_traitbygenus}},
\code{\link{BIEN_trait_traitbyspecies}},
\code{\link{BIEN_trait_traits_per_species}},
\code{\link{BIEN_trait_trait}}
}
|
# # The data that is used for testing is the data from the sae package.
# load("EBP/ebp_summary.RData")
# load("FH/fh_summary.RData")
# load("Direct/direct_summary.RData")
#
# # Test if return is a data.frame
# test_that("Test that the summary output works as expected", {
# # check ebp summary
# expect_equal(summary_ebp,
# capture_output_lines(summary(model_ebp),
# print = TRUE, width = 120))
# # # check fh summary
# # expect_equal(summary_fh,
# # capture_output_lines(summary(model_fh),
# # print = TRUE, width = 120))
# #
# # check direct summary
# expect_equal(summary_direct,
# capture_output_lines(summary(model_direct),
# print = TRUE, width = 120))
# }) | /tests/testthat/test_S3_methods.R | no_license | SoerenPannier/emdi | R | false | false | 833 | r | # # The data that is used for testing is the data from the sae package.
# load("EBP/ebp_summary.RData")
# load("FH/fh_summary.RData")
# load("Direct/direct_summary.RData")
#
# # Test if return is a data.frame
# test_that("Test that the summary output works as expected", {
# # check ebp summary
# expect_equal(summary_ebp,
# capture_output_lines(summary(model_ebp),
# print = TRUE, width = 120))
# # # check fh summary
# # expect_equal(summary_fh,
# # capture_output_lines(summary(model_fh),
# # print = TRUE, width = 120))
# #
# # check direct summary
# expect_equal(summary_direct,
# capture_output_lines(summary(model_direct),
# print = TRUE, width = 120))
# }) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rBuildReleaseTest-package.R
\docType{package}
\name{rBuildRelease}
\alias{rBuildRelease}
\title{Test R Build and Release}
\description{
rBuildReleaseTest: provides a test case for automated CI/CD pipelines in R
}
\details{
This package several main features:
\itemize{
\item Creating a connection to a MongoDb database using a reference type R6 class.
\item Querying a MongoDb database through a Service, with an injected context.
\item Using best practice such as unit and integration-style tests, logging, linting, etc.
\item Others... [TBC]
}
}
\section{Available functionality}{
The available objects in this package are:
\itemize{
\item \code{\link{ApplicationDbContext}}: Base class for creating an injectable MongoDb database context.
\item \code{\link{CompaniesService}}: Service for querying Companies data in a MongoDb database.
}
}
| /man/rBuildRelease.Rd | permissive | nik01010/rBuildReleaseTest | R | false | true | 935 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rBuildReleaseTest-package.R
\docType{package}
\name{rBuildRelease}
\alias{rBuildRelease}
\title{Test R Build and Release}
\description{
rBuildReleaseTest: provides a test case for automated CI/CD pipelines in R
}
\details{
This package several main features:
\itemize{
\item Creating a connection to a MongoDb database using a reference type R6 class.
\item Querying a MongoDb database through a Service, with an injected context.
\item Using best practice such as unit and integration-style tests, logging, linting, etc.
\item Others... [TBC]
}
}
\section{Available functionality}{
The available objects in this package are:
\itemize{
\item \code{\link{ApplicationDbContext}}: Base class for creating an injectable MongoDb database context.
\item \code{\link{CompaniesService}}: Service for querying Companies data in a MongoDb database.
}
}
|
## ############################################################################
##
## DISCLAIMER:
## This script has been developed for research purposes only.
## The script is provided without any warranty of any kind, either express or
## implied. The entire risk arising out of the use or performance of the sample
## script and documentation remains with you.
## In no event shall its author, or anyone else involved in the
## creation, production, or delivery of the script be liable for any damages
## whatsoever (including, without limitation, damages for loss of business
## profits, business interruption, loss of business information, or other
## pecuniary loss) arising out of the use of or inability to use the sample
## scripts or documentation, even if the author has been advised of the
## possibility of such damages.
##
## ############################################################################
##
## DESCRIPTION
## Simulates outbreaks and analyses them using EARS-Negative Binomial
##
##
## Written by: Angela Noufaily and Felipe J Colón-González
## For any problems with this code, please contact f.colon@uea.ac.uk
##
## ############################################################################
# Delete objects in environment
rm(list=ls(all=TRUE))
# Load packages
require(data.table)
require(dplyr)
require(tidyr)
require(surveillance)
require(lubridate)
require(zoo)
# FUNCTIONS THAT PRODUCE THE DATA
# DEFINING FUNCTION h
#==============
# 5-day systems
#==============
h1 <- function(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, shift, shift2) {
  # Linear predictor (log scale) for a 5-day-week syndromic series:
  # linear trend + yearly Fourier terms (k harmonics, period 52*5 days) +
  # within-week Fourier terms (k2 harmonics, period 5 days).
  # With k == 0 and k2 == 0 this collapses to the plain trend alpha + beta*t.
  t <- seq_len(N)
  if (k == 0 && k2 == 0) {
    return(alpha + beta * t)
  }
  weekly <- function(ti) {
    # within-week seasonality at (shifted) day ti
    l <- 1:k2
    sum(gama3 * cos((2 * pi * l * ti) / 5) + gama4 * sin((2 * pi * l * ti) / 5))
  }
  if (k == 0) {
    vapply(t, function(ti) {
      alpha + beta * (ti + shift) + weekly(ti + shift)
    }, numeric(1))
  } else {
    j <- 1:k
    yearly <- function(ti, ti2) {
      # NOTE(review): the sin term uses shift2 while the cos term uses shift,
      # mirroring the original model specification -- confirm the asymmetry
      # is intended (all visible call sites pass shift == shift2).
      sum(gama1 * cos((2 * pi * j * ti) / (52 * 5)) +
            gama2 * sin((2 * pi * j * ti2) / (52 * 5)))
    }
    vapply(t, function(ti) {
      alpha + beta * (ti + shift) + yearly(ti + shift, ti + shift2) +
        weekly(ti + shift)
    }, numeric(1))
  }
}
negbinNoise1 <- function(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4,
                         phi, shift, shift2) {
  # Draw a 5-day-week baseline series around the seasonal mean exp(h1(.)).
  # phi is the dispersion: phi == 1 gives Poisson noise; phi > 1 gives
  # negative binomial noise with mean mu and variance phi * mu.
  mu <- exp(h1(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, shift, shift2))
  if (phi == 1) {
    rpois(N, mu)
  } else {
    # NB parameterized so that mean = mu and variance = phi * mu
    rnbinom(N, size = mu / (phi - 1), prob = 1 / phi)
  }
}
outbreak5 <- function(currentday, weeklength, wtime, yi, interval, k, k2,
                      alpha, beta, gama1, gama2, gama3, gama4, shift, shift2,
                      phi, numoutbk, peakoutbk, meanlog, sdlog) {
  # Superimpose `numoutbk` simulated outbreaks on the 5-day-week baseline `yi`.
  #
  # Key arguments:
  #   currentday    day index from which injected cases are removed again
  #                 (so no outbreak spills past the "current" day)
  #   wtime         candidate start days for the outbreaks
  #   yi            baseline counts; alpha..gama4, shift/shift2 and phi must
  #                 match the parameters used to simulate yi via h1()
  #   interval      histogram bin width used to discretize the lognormal curve
  #   peakoutbk     scales the expected outbreak size (times the baseline sd)
  #   meanlog,sdlog parameters of the lognormal epidemic curve
  # Returns a list: yitot (baseline + outbreaks), outbreak (injected cases),
  # startoutbk, sizeoutbk, and the baseline model's sd and mean.
  N <- length(yi)
  t <- 1:N
  mu <- exp(h1(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, shift, shift2))
  s <- sqrt(mu * phi)  # baseline standard deviation under dispersion phi
  # starting times of outbreaks, sampled without replacement
  startoutbk <- sample(wtime, numoutbk, replace = FALSE)
  # outbreak size in cases: Poisson draws scaled by the baseline sd at the
  # start day, re-seeded and redrawn until the outbreak has at least 2 cases
  sizeoutbk <- rep(0, numoutbk)
  for (i in 1:numoutbk) {
    set.seed(i)
    soutbk <- 1
    sou <- 1
    while (soutbk < 2) {
      set.seed(sou)
      soutbk <- rpois(1, s[startoutbk[i]] * peakoutbk)
      sou <- sou + 1
    }
    sizeoutbk[i] <- soutbk
  }
  # distribute each outbreak's cases over time along a lognormal epi curve
  outbreak <- rep(0, 2 * N)
  for (j in 1:numoutbk) {
    set.seed(j)
    outbk <- rlnorm(sizeoutbk[j], meanlog = meanlog, sdlog = sdlog)
    h <- hist(outbk, breaks = seq(0, ceiling(max(outbk)), interval), plot = FALSE)
    cases <- h$counts
    weight <- rep(0, length(cases))
    # BUG FIX: use this outbreak's start day startoutbk[j]; the original used
    # the whole vector startoutbk, and R's `:` silently takes only its first
    # element, so for numoutbk > 1 every outbreak after the first got its
    # day-of-week weights computed from the wrong start day.
    duration <- startoutbk[j]:(startoutbk[j] + length(cases) - 1)
    dayofweek <- duration %% 5  # 0 = Friday; 1 = Monday; 2 = Tuesday; ...
    for (i in 1:length(cases)) {
      if (dayofweek[i] == 0) {weight[i] <- 1.1}
      if (dayofweek[i] == 1) {weight[i] <- 1.5}
      if (dayofweek[i] == 2) {weight[i] <- 1.1}
      if (dayofweek[i] == 3) {weight[i] <- 1}
      if (dayofweek[i] == 4) {weight[i] <- 1}
    }
    cases2 <- cases * weight
    for (l in 1:(length(cases2))) {
      outbreak[startoutbk[j] + (l - 1)] <- cases2[l] + outbreak[startoutbk[j] + (l - 1)]
    }
  }
  # remove injected cases that fall on or after the current day
  for (v in currentday:(currentday + 100)) {
    if (outbreak[v] > 0) {outbreak[v] <- 0}
  }
  outbreak <- outbreak[1:N]
  # add noise and outbreaks
  yitot <- yi + outbreak
  result <- list(yitot = yitot, outbreak = outbreak, startoutbk = startoutbk,
                 sizeoutbk = sizeoutbk, sd = s, mean = mu)
  return(result)
}
#==============
# 7-day systems
#==============
h2 <- function(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, shift) {
  # Linear predictor (log scale) for a 7-day-week syndromic series:
  # linear trend + yearly Fourier terms (k harmonics, period 52*7 days) +
  # day-of-week Fourier terms (k2 harmonics, period 7 days).
  # With k == 0 and k2 == 0 this collapses to the plain trend alpha + beta*t.
  t <- seq_len(N)
  if (k == 0 && k2 == 0) {
    return(alpha + beta * t)
  }
  weekly <- function(ti) {
    # day-of-week seasonality at (shifted) day ti
    l <- 1:k2
    sum(gama3 * cos((2 * pi * l * ti) / 7) + gama4 * sin((2 * pi * l * ti) / 7))
  }
  if (k == 0) {
    vapply(t, function(ti) {
      alpha + beta * (ti + shift) + weekly(ti + shift)
    }, numeric(1))
  } else {
    j <- 1:k
    yearly <- function(ti) {
      sum(gama1 * cos((2 * pi * j * ti) / (52 * 7)) +
            gama2 * sin((2 * pi * j * ti) / (52 * 7)))
    }
    vapply(t, function(ti) {
      alpha + beta * (ti + shift) + yearly(ti + shift) + weekly(ti + shift)
    }, numeric(1))
  }
}
negbinNoise2 <- function(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4,
                         phi, shift) {
  # Draw a 7-day-week baseline series around the seasonal mean exp(h2(.)).
  # phi is the dispersion: phi == 1 gives Poisson noise; phi > 1 gives
  # negative binomial noise with mean mu and variance phi * mu.
  mu <- exp(h2(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, shift))
  if (phi == 1) {
    rpois(N, mu)
  } else {
    # NB parameterized so that mean = mu and variance = phi * mu
    rnbinom(N, size = mu / (phi - 1), prob = 1 / phi)
  }
}
outbreak7 <- function(currentday, weeklength, wtime, yi, interval, k, k2,
                      alpha, beta, gama1, gama2, gama3, gama4, shift, phi,
                      numoutbk, peakoutbk, meanlog, sdlog) {
  # Superimpose `numoutbk` simulated outbreaks on the 7-day-week baseline `yi`.
  # Mirrors outbreak5() but with a 7-day week and weekend up-weighting.
  #
  # Key arguments:
  #   currentday    day index from which injected cases are removed again
  #   wtime         candidate start days for the outbreaks
  #   yi            baseline counts; alpha..gama4, shift and phi must match
  #                 the parameters used to simulate yi via h2()
  #   interval      histogram bin width used to discretize the lognormal curve
  #   peakoutbk     scales the expected outbreak size (times the baseline sd)
  #   meanlog,sdlog parameters of the lognormal epidemic curve
  # Returns a list: yitot (baseline + outbreaks), outbreak (injected cases),
  # startoutbk, sizeoutbk, and the baseline model's sd and mean.
  N <- length(yi)
  t <- 1:N
  mu <- exp(h2(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, shift))
  s <- sqrt(mu * phi)  # baseline standard deviation under dispersion phi
  # starting times of outbreaks, sampled without replacement
  startoutbk <- sample(wtime, numoutbk, replace = FALSE)
  # outbreak size in cases: Poisson draws scaled by the baseline sd at the
  # start day, re-seeded and redrawn until the outbreak has at least 2 cases
  sizeoutbk <- rep(0, numoutbk)
  for (i in 1:numoutbk) {
    set.seed(i)
    soutbk <- 1
    sou <- 1
    while (soutbk < 2) {
      set.seed(sou)
      soutbk <- rpois(1, s[startoutbk[i]] * peakoutbk)
      sou <- sou + 1
    }
    sizeoutbk[i] <- soutbk
  }
  # distribute each outbreak's cases over time along a lognormal epi curve
  outbreak <- rep(0, 2 * N)
  for (j in 1:numoutbk) {
    set.seed(j)
    outbk <- rlnorm(sizeoutbk[j], meanlog = meanlog, sdlog = sdlog)
    h <- hist(outbk, breaks = seq(0, ceiling(max(outbk)), interval), plot = FALSE)
    cases <- h$counts
    weight <- rep(0, length(cases))
    # BUG FIX: use this outbreak's start day startoutbk[j]; the original used
    # the whole vector startoutbk, and R's `:` silently takes only its first
    # element, so for numoutbk > 1 every outbreak after the first got its
    # day-of-week weights computed from the wrong start day.
    duration <- startoutbk[j]:(startoutbk[j] + length(cases) - 1)
    dayofweek <- duration %% 7  # 0 = Sunday; 1 = Monday; 2 = Tuesday; ...
    for (i in 1:length(cases)) {
      if (dayofweek[i] == 0) {weight[i] <- 2}
      if (dayofweek[i] == 1) {weight[i] <- 1}
      if (dayofweek[i] == 2) {weight[i] <- 1}
      if (dayofweek[i] == 3) {weight[i] <- 1}
      if (dayofweek[i] == 4) {weight[i] <- 1}
      if (dayofweek[i] == 5) {weight[i] <- 1}
      if (dayofweek[i] == 6) {weight[i] <- 2}
    }
    cases2 <- cases * weight
    for (l in 1:(length(cases2))) {
      outbreak[startoutbk[j] + (l - 1)] <- cases2[l] + outbreak[startoutbk[j] + (l - 1)]
    }
  }
  # remove injected cases that fall on or after the current day
  for (v in currentday:(currentday + 100)) {
    if (outbreak[v] > 0) {outbreak[v] <- 0}
  }
  outbreak <- outbreak[1:N]
  # add noise and outbreaks
  yitot <- yi + outbreak
  result <- list(yitot = yitot, outbreak = outbreak, startoutbk = startoutbk,
                 sizeoutbk = sizeoutbk, sd = s, mean = mu)
  return(result)
}
#==========================
# Specify the bank holidays
#==========================
# Directory holding the bank-holiday lookup table
myDir <- "/local/zck07apu/Documents/GitLab/rammie_comparison/scripts/NB/3x"
# number of simulated years
years=7
bankholidays=read.csv(file.path(myDir, "Bankholidays.csv"))
#fix(bankholidays)
# daily bank-holiday indicator for the 7-day-week systems (1 = bank holiday)
bankhols7=bankholidays$bankhol
bankhols7=as.numeric(bankhols7)
length(bankhols7)
#fix(bankhols7)
# derive the 5-day-week indicator by dropping the two weekend entries of
# each week (every 6th remaining element, removed in two passes)
bankhols5=bankhols7[-seq(6,length(bankhols7),7)]
bankhols5=bankhols5[-seq(6,length(bankhols5),6)]
bankhols5=as.numeric(bankhols5)
length(bankhols5)
#fix(bankhols5)
#=======================
# Define the data frames
#=======================
nsim=100
simulateddata1=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata2=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata3=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata4=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata5=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata8=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata9=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata10=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata11=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata12=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata13=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata14=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata15=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata17=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals1=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals2=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals3=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals4=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals5=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals8=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals9=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals10=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals11=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals12=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals13=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals14=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals15=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals17=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak1=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak2=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak3=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak4=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak5=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak8=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak9=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak10=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak11=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak12=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak13=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak14=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak15=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak17=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedzseasoutbreak6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedzseasoutbreak7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedzseasoutbreak16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
#################################
#SIMULATE SYNDROMES AND OUTBREAKS
#################################
#=====================
# 5-day week syndromes
#=====================
days5=5
N=52*days5*years
#sigid6
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50)/10
#mu=exp(h1(N=N,k=1,k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,shift=-50,shift2=-50))
out1=rep(0,N)
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=((1+(j-1)*days5*52):(20+(j-1)*days5*52)),yi=yt,interval=0.02,k=1,
k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5*80,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=6,beta=0,gama1=0.3,
gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak +out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
yt=append(yt,zeros,after=2*(s-1)+weekend[s])
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
zseasoutbreak=append(zseasoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata6[,i]=round(zt)
simulatedtotals6[,i]=round(zitot)
simulatedoutbreak6[,i]=round(zoutbreak)
simulatedzseasoutbreak6[,i]=round(zseasoutbreak)
}
#----------------------------------------------------
# Plot the datasets and outbreaks using the following
#----------------------------------------------------
#plot(1:N,yt,typ='l')
#plot(1:(52*years*7),zt,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(365,728))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(729,1092))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1093,1456))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1457,1820))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1821,2184))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(2185,2548))
#lines(1:(52*years*7),zoutbreak,col='green')
plot(1:(52*years*7),simulatedtotals6[,4],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green')
lines(1:(52*years*7),simulatedoutbreak6[,4],col='red')
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green',typ='l')
#sigid7
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=1,beta=0,gama1=0.1,gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50)
#mu=exp(h1(N=N,k=1,k2=1,alpha=1.5,beta=0,gama1=0.1,gama2=2,gama3=0.1,gama4=0.1,shift=-50,shift2=-50))
out1=rep(0,N)
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=((1+(j-1)*days5*52):(20+(j-1)*days5*52)),yi=yt,interval=0.02,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5*50,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak+out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
yt=append(yt,zeros,after=2*(s-1)+weekend[s])
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
zseasoutbreak=append(zseasoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata7[,i]=round(zt)
simulatedtotals7[,i]=round(zitot)
simulatedoutbreak7[,i]=round(zoutbreak)
simulatedzseasoutbreak7[,i]=round(zseasoutbreak)
}
plot(1:(52*years*7),simulatedtotals7[,7],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak7[,7],col='green')
lines(1:(52*years*7),simulatedoutbreak7[,7],col='red')
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak7[,4],col='green',typ='l')
#sigid8
# Simulated syndrome 8 (5-day reporting week): low-count baseline
# (negbinNoise1 scaled by 1/10) plus one injected outbreak in the final
# 49 weeks.  Re-seeding with the run index i before both the noise and
# the outbreak draw makes each run reproducible.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=0,k2=1,alpha=6,beta=0.0001,gama1=0,gama2=0,gama3=0.6,gama4=0.9,phi=1.5,shift=0,shift2=0)/10
#mu=exp(h1(N=N,k=0,k2=1,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.6,gama4=0.9,shift=0,shift2=0))
set.seed(i)
# Outbreak placed inside the last 49 weeks (wtime); parameters mirror the
# baseline so the outbreak size is scaled to the same mean process.
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=0,k2=1,alpha=6,beta=0,gama1=0,
gama2=0,gama3=0.6,gama4=0.9,phi=1.5,shift=0,shift2=0,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank holidays: the holiday itself reports zero and the following day
# carries 1.5x its counts (delayed reporting).
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Insert two zero-count weekend days after every 5-day working week so
# the stored series follows a 7-day calendar (length 52*years*7).
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata8[,i]=round(zt)
simulatedtotals8[,i]=round(zitot)
simulatedoutbreak8[,i]=round(zoutbreak)
}
# Quick visual check of run 1: baseline vs baseline + outbreak.
plot(1:(52*years*7),simulateddata8[,1],typ='l',xlim=c(2185,2548),col='green')
lines(1:(52*years*7),simulateddata8[,1]+simulatedoutbreak8[,1])
#sigid9
# Simulated syndrome 9 (5-day week): strong weekly pattern (gama1=1.5)
# with phase shifts of -150 days, plus one outbreak in the last 49 weeks.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=1.5,gama2=0.1,gama3=0.2,gama4=0.3,phi=1,shift=-150,shift2=-150)
# NOTE(review): mu is computed with gama3=0.6/gama4=0.8 while yt uses
# 0.2/0.3, and mu is not used below -- looks like a leftover; confirm.
mu=exp(h1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=1.5,gama2=0.1,gama3=0.6,gama4=0.8,shift=-150,shift2=-150))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),interval=0.25,yi=yt,k=1,k2=1,alpha=3,beta=0,gama1=1.5,
gama2=0.1,gama3=0.2,gama4=0.3,phi=1,shift=-150,shift2=-150,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank holidays: zero counts on the day, 1.5x carried to the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Pad two zero weekend days after each 5-day week (7-day calendar).
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata9[,i]=round(zt)
simulatedtotals9[,i]=round(zitot)
simulatedoutbreak9[,i]=round(zoutbreak)
}
# Quick visual check of run 1 over the monitored final year.
plot(1:(52*years*7),simulateddata9[,1],typ='l',xlim=c(2185,2548),col='green')
lines(1:(52*years*7),simulateddata9[,1]+simulatedoutbreak9[,1])
#sigid10
# Simulated syndrome 10 (5-day week): weak seasonality/weekly pattern,
# phase shift -200 days, one outbreak in the last 49 weeks.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.15,phi=1,shift=-200,shift2=-200)
#mu=exp(h1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.15,shift=-200,shift2=-200))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=3,beta=0,gama1=0.2,
gama2=0.1,gama3=0.05,gama4=0.15,phi=1,shift=-200,shift2=-200,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank holidays: zero counts on the day, 1.5x carried to the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Pad two zero weekend days after each 5-day week (7-day calendar).
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata10[,i]=round(zt)
simulatedtotals10[,i]=round(zitot)
simulatedoutbreak10[,i]=round(zoutbreak)
}
#sigid11
# Simulated syndrome 11 (5-day week): medium baseline (alpha=5), weak
# weekly/seasonal pattern, no phase shift, one outbreak in last 49 weeks.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=5,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.1,phi=1,shift=0,shift2=0)
# mu: expected value of the baseline on the same parameters (unused below).
mu=exp(h1(N=N,k=1,k2=1,alpha=5,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.1,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),interval=0.25,yi=yt,k=1,k2=1,alpha=5,beta=0,gama1=0.2,
gama2=0.1,gama3=0.05,gama4=0.1,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank holidays: zero counts on the day, 1.5x carried to the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Pad two zero weekend days after each 5-day week (7-day calendar).
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata11[,i]=round(zt)
simulatedtotals11[,i]=round(zitot)
simulatedoutbreak11[,i]=round(zoutbreak)
}
#sigid12
# Simulated syndrome 12 (5-day week): very low baseline (alpha=0.5) with
# quadratic seasonal term (k=2), one outbreak in the last 49 weeks.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,gama2=0,gama3=0.05,gama4=0.15,phi=1,shift=0,shift2=0)
#mu=exp(h1(N=N,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,gama2=0,gama3=0.05,gama4=0.15,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,
gama2=0,gama3=0.05,gama4=0.15,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank holidays: zero counts on the day, 1.5x carried to the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Pad two zero weekend days after each 5-day week (7-day calendar).
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata12[,i]=round(zt)
simulatedtotals12[,i]=round(zitot)
simulatedoutbreak12[,i]=round(zoutbreak)
}
#sigid13
# Simulated syndrome 13 (5-day week): high-mean noise (alpha=9) scaled
# down by 1/100 to give a sparse series; one outbreak in last 49 weeks.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=9,beta=0,gama1=0.5,gama2=0.2,gama3=0.2,gama4=0.5,phi=1,shift=0,shift2=0)/100
#mu=exp(h1(N=N,k=1,k2=1,alpha=9,beta=0,gama1=0.5,gama2=0.2,gama3=0.2,gama4=0.5,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=9,beta=0,gama1=0.5,
gama2=0.2,gama3=0.2,gama4=0.5,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank holidays: zero counts on the day, 1.5x carried to the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Pad two zero weekend days after each 5-day week (7-day calendar).
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata13[,i]=round(zt)
simulatedtotals13[,i]=round(zitot)
simulatedoutbreak13[,i]=round(zoutbreak)
}
# Visual checks of run 1: full series, then the monitored final year.
plot(1:length(simulatedtotals13[,1]),simulatedtotals13[,1],typ='l')
plot(1:N,simulatedtotals13[,1],typ='l',xlim=c(2206,2548),col='green')
lines(1:N,simulateddata13[,1],typ='l')
#=====================
# 7-day week syndromes
#=====================
# Seven simulated years of daily data; a "year" is 52 weeks of 7 days,
# so every series below has N = 2548 observations.
years <- 7
days7 <- 7
N <- 52 * days7 * years
#sigid1
# Simulated syndrome 1 (7-day week): weekly + seasonal pattern, phase
# shift of 29 days, one outbreak injected in the final 49 weeks.
# 7-day syndromes have no weekend closure; bank holidays double counts.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,gama3=0.5,gama4=0.4,phi=2,shift=29)
#mu=exp(h2(N=N,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,gama3=0.5,gama4=0.4,shift=29))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,
gama3=0.5,gama4=0.4,phi=2,shift=29,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank holidays report double the usual counts.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata1[,i]=round(zt)
simulatedtotals1[,i]=round(zitot)
simulatedoutbreak1[,i]=round(zoutbreak)
}
#sigid3
# Simulated syndrome 3 (7-day week): very low baseline (alpha=0.5) with
# strong weekly pattern, phase shift -167 days, one outbreak in the
# final 49 weeks.  Bank holidays double counts.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,gama2=1.4,gama3=0.5,gama4=0.4,phi=1,shift=-167)
#mu=exp(h2(N=N,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,gama2=1.4,gama3=0.5,gama4=0.4,shift=-167))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,
gama2=1.4,gama3=0.5,gama4=0.4,phi=1,shift=-167,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank holidays report double the usual counts.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata3[,i]=round(zt)
simulatedtotals3[,i]=round(zitot)
simulatedoutbreak3[,i]=round(zoutbreak)
}
#sigid4
# Simulated syndrome 4 (7-day week): flat baseline (no weekly pattern,
# gama1=gama2=0) with one outbreak in the final 49 weeks; bank holidays
# double the day's counts.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=5.5,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=5.5,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,shift=1))
set.seed(i)
# Fix: weeklength was 52*days7*12 here (stale copy from a 12-year
# variant); every sibling dataset uses 52*days7*years with years = 7.
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=5.5,beta=0,gama1=0,
gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank holidays report double the usual counts.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata4[,i]=round(zt)
simulatedtotals4[,i]=round(zitot)
simulatedoutbreak4[,i]=round(zoutbreak)
}
#sigid5
# Simulated syndrome 5 (7-day week): same shape as syndrome 4 but with a
# lower baseline (alpha=2).  One outbreak in the final 49 weeks; bank
# holidays double counts.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=2,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=2,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,shift=1))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=2,beta=0,gama1=0,
gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank holidays report double the usual counts.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata5[,i]=round(zt)
simulatedtotals5[,i]=round(zitot)
simulatedoutbreak5[,i]=round(zoutbreak)
}
#sigid14
# Simulated syndrome 14 (7-day week): strong weekly/seasonal pattern
# with overdispersion phi=4 and a small linear trend (beta=0.0005).
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=2,beta=0.0005,gama1=0.8,gama2=0.8,gama3=0.8,gama4=0.4,phi=4,shift=57)
#mu=exp(h2(N=N,k=1,k2=2,alpha=2,beta=0,gama1=0.8,gama2=0.8,gama3=0.8,gama4=0.4,shift=57))
set.seed(i)
# NOTE(review): the outbreak is generated with syndrome 1's parameters
# (alpha=6, gama1=gama2=0.2, phi=2, shift=29), not this baseline's
# (alpha=2, gama1=0.8, phi=4, shift=57).  Looks like a copy-paste slip;
# confirm whether the mismatch is intentional before changing it.
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,
gama3=0.5,gama4=0.4,phi=2,shift=29,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank holidays report double the usual counts.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata14[,i]=round(zt)
simulatedtotals14[,i]=round(zitot)
simulatedoutbreak14[,i]=round(zoutbreak)
}
#sigid15
# Simulated syndrome 15 (7-day week): extremely sparse baseline
# (alpha=0.05) with strong annual seasonality (gama3=1.8).
for(i in 1:nsim){
set.seed(i)
#yt=0.1*(negbinNoise2(N=N,k=4,k2=1,alpha=1.5,beta=0,gama1=0.1,gama2=0.1,gama3=1.8,gama4=0.1,phi=1,shift=-85)+2)
yt=1*(negbinNoise2(N=N,k=4,k2=1,alpha=0.05,beta=0,gama1=0.01,gama2=0.01,gama3=1.8,gama4=0.1,phi=1,shift=-85)+0)
set.seed(i)
# NOTE(review): the outbreak uses syndrome 14's baseline parameters
# (alpha=2, gama1=0.8, phi=4, shift=57), not this baseline's -- the same
# off-by-one copy pattern as sigid14; confirm intent.
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=2,beta=0,gama1=0.8,
gama2=0.8,gama3=0.8,gama4=0.4,phi=4,shift=57,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank holidays report double the usual counts.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata15[,i]=round(zt)
simulatedtotals15[,i]=round(zitot)
simulatedoutbreak15[,i]=round(zoutbreak)
}
# Ad-hoc diagnostic plots (kept commented for year-by-year inspection).
# The active plots below show dataset 6 (simulated earlier in the file)
# and the zitot/yt/zseasoutbreak/zoutbreak objects left over from the
# last simulation loop above -- presumably for eyeballing only.
#plot(1:N,yt,typ='l')
#plot(1:(52*years*7),zt,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(365,728))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(729,1092))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1093,1456))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1457,1820))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1821,2184))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(2185,2548))
#lines(1:(52*years*7),zoutbreak,col='green')
plot(1:(52*years*7),simulatedtotals6[,4],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green')
lines(1:(52*years*7),simulatedoutbreak6[,4],col='red')
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green',typ='l')
#sigid16
# Simulated syndrome 16 (7-day week): baseline plus one large seasonal
# outbreak per simulated year (out1, built with outbreak5 over days
# 210-230 of each year) AND one detection-target outbreak in the final
# 49 weeks (out2).  zseasoutbreak holds both outbreak components.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=3,beta=0,gama1=0.8,gama2=0.6,gama3=0.8,gama4=0.4,phi=4,shift=29)
#mu=exp(h2(N=N,k=1,k2=2,alpha=3,beta=0,gama1=0.8,gama2=0.6,gama3=0.8,gama4=0.4,shift=29))
# One broad seasonal outbreak per year, seeded per (run, year) pair so
# each year's seasonal event differs across runs.
out1=rep(0,N)
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days7*52*years,weeklength=52*days7*years,wtime=((210+(j-1)*days7*52):(230+(j-1)*days7*52)),yi=yt,interval=0.02,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days7*150,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=3,beta=0,gama1=0.8,
gama2=0.6,gama3=0.8,gama4=0.4,phi=4,shift=29,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak+out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
# Bank holidays report double the usual counts.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata16[,i]=round(zt)
simulatedtotals16[,i]=round(zitot)
simulatedoutbreak16[,i]=round(zoutbreak)
simulatedzseasoutbreak16[,i]=round(zseasoutbreak)
}
# Visual checks: last run's raw series, then stored runs 1 and 2.
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedtotals16[,1],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak16[,1],col='green')
lines(1:(52*years*7),simulatedoutbreak16[,1],col='red')
plot(1:(52*years*7),simulatedtotals16[,2],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak16[,2],col='green')
lines(1:(52*years*7),simulatedoutbreak16[,2],col='red')
#sigid17
# Simulated syndrome 17 (7-day week): no weekly pattern (gama1=gama2=0),
# strong annual seasonality with overdispersion phi=4; one outbreak in
# the final 49 weeks.  Bank holidays double counts.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.8,gama4=0.4,phi=4,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.8,gama4=0.4,shift=1))
set.seed(i)
# Fix: weeklength was 52*7*12 (stale copy from a 12-year variant); every
# sibling dataset uses 52*days7*years with years = 7.
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=6,beta=0,gama1=0,
gama2=0,gama3=0.8,gama4=0.4,phi=4,shift=1,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank holidays report double the usual counts.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata17[,i]=round(zt)
simulatedtotals17[,i]=round(zitot)
simulatedoutbreak17[,i]=round(zoutbreak)
}
#=============================
# Define the alarm data frames
#=============================
# One 0-initialised data frame per dataset (alarmall1..alarmall17):
# 49 monitored weeks x 7 days = 343 rows, one column per simulation run.
days <- 7
nsim <- 100
for (ds in 1:17) {
  assign(paste0("alarmall", ds),
         data.frame(array(0, dim = c(49 * days, nsim))))
}
###########################################
#========================================
#Implement the algorithm to data by days and record the alarms inthe above dataframes
#========================================
###########################################
# Calendar for the 7 simulated years (2010-2016).  Each year is trimmed
# to exactly 364 days (52 * 7) so the calendar matches the simulated
# series length of 2548: 31 December is dropped in 2010-2015, and
# 29 February is dropped in the leap years 2012 and 2016.
# Fixes: dropDays is built with as.Date so its class matches myDates
# (it was POSIXct before, making the %ni% comparison fragile), and the
# stray comma inside '2016-02-29,' is removed.
myDates <- seq(as.Date('2010-01-01'), as.Date('2016-12-30'), by = '1 day')
dropDays <- as.Date(c('2010-12-31', '2011-12-31', '2012-12-31',
                      '2013-12-31', '2014-12-31', '2015-12-31',
                      '2016-02-29', '2012-02-29'))
"%ni%" <- Negate("%in%")
myDates <- myDates[myDates %ni% dropDays]
# Convert to 7-day running totals
# Right-aligned rolling 7-day sum of a daily count vector; the first six
# positions are NA (fill = NA) because a full week is not yet available.
# na.rm = TRUE so NA days inside a window do not propagate.  Relies on
# zoo::rollapplyr being attached.  (Fix: spell out TRUE instead of T.)
rolling <- function(x) {
  rollapplyr(x, width = 7, FUN = sum, na.rm = TRUE, fill = NA)
}
# 7-day running totals of the baseline (outbreak-free) series, one
# column per simulation run.  Dataset 2 is not simulated in this script
# and is skipped (it was commented out in the original).
for (ds in c(1, 3:17)) {
  assign(paste0("simdata", ds),
         apply(get(paste0("simulateddata", ds)), 2, rolling))
}
# 7-day running totals of the full series (baseline + outbreaks), one
# column per simulation run; dataset 2 skipped as above.
for (ds in c(1, 3:17)) {
  assign(paste0("simtot", ds),
         apply(get(paste0("simulatedtotals", ds)), 2, rolling))
}
# Convert data to sts
# Wrap the baseline running totals into surveillance::sts objects, with
# daily epochs taken from the trimmed 364-day-per-year calendar.
for (ds in c(1, 3:17)) {
  assign(paste0("simSts", ds),
         sts(get(paste0("simdata", ds)), start = c(2010, 1), frequency = 364,
             epoch = as.numeric(as.Date(myDates)), epochAsDate = TRUE))
}
# Same sts conversion for the totals (baseline + outbreak) series.
for (ds in c(1, 3:17)) {
  assign(paste0("totSts", ds),
         sts(get(paste0("simtot", ds)), start = c(2010, 1), frequency = 364,
             epoch = as.numeric(as.Date(myDates)), epochAsDate = TRUE))
}
# Monitor the final simulated year: the last 343 rows of the 2548-day
# series (rows 2206-2548).
in2016 <- 2206:2548
# Settings for surveillance::algo.glrnb -- alpha and theta left NULL so
# they are estimated/defaulted by the algorithm; the in-control mean is
# modelled with S = 2 seasonal harmonics plus a linear trend.
control <- list(
  range = in2016,
  alpha = NULL,
  mu0   = list(S = 2, trend = TRUE),
  theta = NULL
)
# Run the GLR negative-binomial detector on every dataset and simulation
# run, save a 4x4 panel of detection plots per run, and record the 0/1
# alarm series into the alarmall* data frames.  Dataset 2 is skipped
# (never simulated in this script).
dataset_ids <- c(1, 3:17)
for (sim in seq(nsim)) {
  cat("\t", sim)
  # Detection on the 7-day running totals (with injected outbreaks).
  dets <- list()
  for (ds in dataset_ids) {
    dets[[as.character(ds)]] <-
      algo.glrnb(sts2disProg(get(paste0("totSts", ds))[, sim]),
                 control = control)
  }
  # One panel of detection plots per simulation run.
  dir.create(file.path(myDir, "plots", "totals"),
             recursive = TRUE)
  png(file.path(myDir, "plots", "totals", paste0("Sim_", sim, ".png")),
      width = 16, height = 14, units = "in", res = 300)
  par(mfrow = c(4, 4), oma = c(0, 0, 2, 0))
  for (ds in dataset_ids) {
    plot(dets[[as.character(ds)]], main = paste("Dataset", ds), legend = NULL)
  }
  title(main = list(paste("Simulation", sim, "Alpha", control$alpha),
                    cex = 2), outer = TRUE)
  dev.off()
  # Store the binary alarm indicator for the monitored range.
  for (ds in dataset_ids) {
    alarm_store <- get(paste0("alarmall", ds))
    alarm_store[, sim] <-
      as.numeric(as.vector(unlist(dets[[as.character(ds)]]$alarm)))
    assign(paste0("alarmall", ds), alarm_store)
  }
}
# Replace missing values with zero (?) -- treat missing alarm entries
# (e.g. from runs where the detector produced no value) as "no alarm".
for (ds in c(1, 3:17)) {
  cleaned <- get(paste0("alarmall", ds))
  cleaned[is.na(cleaned)] <- 0
  assign(paste0("alarmall", ds), cleaned)
}
# Compare vs data without outbreaks: re-run detection on the baseline
# (outbreak-free) series using the EARS-C algorithm; only plots are
# produced here, no alarms are stored.
# NOTE(review): `control` was built for algo.glrnb (mu0/theta fields);
# earsC uses only the fields it recognises (range, alpha) -- confirm
# that the remaining earsC settings are intended to be defaults.
ctrl_ids <- c(1, 3:17)
for (sim in seq(nsim)) {
  cat("\t", sim)
  ctrl_dets <- list()
  for (ds in ctrl_ids) {
    ctrl_dets[[as.character(ds)]] <-
      earsC(get(paste0("simSts", ds))[, sim], control = control)
  }
  dir.create(file.path(myDir, "plots", "control"),
             recursive = TRUE)
  png(file.path(myDir, "plots", "control",
                paste0("Sim_", sim, ".png")),
      width = 16, height = 14, units = "in", res = 300)
  par(mfrow = c(4, 4), oma = c(0, 0, 2, 0))
  for (ds in ctrl_ids) {
    plot(ctrl_dets[[as.character(ds)]], main = paste("Dataset", ds),
         legend = NULL)
  }
  title(main = list(paste("Simulation", sim, "Alpha", control$alpha),
                    cex = 2), outer = TRUE)
  dev.off()
}
#====================================
#====================================
#Summary
#====================================
#====================================
days=7
# FPR false positive rate: proportion of outbreak-free day/run cells in
# the final 49 monitored weeks on which an alarm was nevertheless raised.
fpr=rep(0,17)
# FPR measured against the combined outbreak + seasonal-outbreak signal
# (datasets 6, 7 and 16 only).
fprseas=rep(0,3)
# Dataset 1: count alarms on non-outbreak days over the last 49*7 rows.
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall1[nrow(alarmall1)-i+1,j]==1 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]==0)+nu
}
}
# Fix: a dangling "a=" on the line above this assignment silently
# chained into it (a = fpr[1] = ...), creating an unused variable;
# removed.
fpr[1]=nu/sum(simulatedoutbreak1[2206:2548,]==0)
# False positive rate for the remaining datasets: count alarm cells that
# do not coincide with an injected outbreak over the last 49*7 rows, and
# divide by the number of outbreak-free cells in the monitored year.
# Equivalent to the original nested j/i loops because the outbreak
# matrices have 2548 rows, so their last 343 rows are rows 2206:2548.
fp_rate <- function(alarm_df, outbreak_df) {
  ra <- (nrow(alarm_df) - 49 * 7 + 1):nrow(alarm_df)
  ro <- (nrow(outbreak_df) - 49 * 7 + 1):nrow(outbreak_df)
  false_pos <- sum(alarm_df[ra, ] == 1 & outbreak_df[ro, ] == 0)
  false_pos / sum(outbreak_df[ro, ] == 0)
}
for (ds in 2:17) {
  fpr[ds] <- fp_rate(get(paste0("alarmall", ds)),
                     get(paste0("simulatedoutbreak", ds)))
}
# Datasets 6, 7 and 16 also carry a large seasonal outbreak component;
# measure FPR against the combined outbreak + seasonal signal too.
fprseas[1] <- fp_rate(alarmall6, simulatedzseasoutbreak6)
fprseas[2] <- fp_rate(alarmall7, simulatedzseasoutbreak7)
fprseas[3] <- fp_rate(alarmall16, simulatedzseasoutbreak16)
#--------------------------------------------------------
# POD power of detection: fraction of simulation runs in which at least
# one alarm day coincides with an injected outbreak day during the final
# 49 monitored weeks.
pod <- rep(0, 17)
podseas <- rep(0, 3)
# Dataset 1: a run counts as a detection if any of its last 49*days
# alarm days overlaps a day with outbreak counts > 0.
ra1 <- (nrow(alarmall1) - 49 * days + 1):nrow(alarmall1)
ro1 <- (nrow(simulatedoutbreak1) - 49 * days + 1):nrow(simulatedoutbreak1)
hits1 <- colSums(alarmall1[ra1, ] == 1 & simulatedoutbreak1[ro1, ] > 0)
pod[1] <- mean(hits1 > 0)
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall2[nrow(alarmall2)-i+1,j]==1 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[2]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall3[nrow(alarmall3)-i+1,j]==1 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[3]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall4[nrow(alarmall4)-i+1,j]==1 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[4]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall5[nrow(alarmall5)-i+1,j]==1 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[5]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[6]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]>0)
}
mu=mu+(nu>0)
}
podseas[1]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[7]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]>0)
}
mu=mu+(nu>0)
}
podseas[2]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall8[nrow(alarmall8)-i+1,j]==1 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[8]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall9[nrow(alarmall9)-i+1,j]==1 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[9]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall10[nrow(alarmall10)-i+1,j]==1 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[10]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall11[nrow(alarmall11)-i+1,j]==1 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[11]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall12[nrow(alarmall12)-i+1,j]==1 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[12]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall13[nrow(alarmall13)-i+1,j]==1 & simulatedoutbreak13[nrow(simulatedoutbreak13)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[13]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall14[nrow(alarmall14)-i+1,j]==1 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[14]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall15[nrow(alarmall15)-i+1,j]==1 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[15]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[16]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]>0)
}
mu=mu+(nu>0)
}
podseas[3]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall17[nrow(alarmall17)-i+1,j]==1 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[17]=mu/nsim
#--------------------------------------------------------
# Sensitivity: alarmed outbreak day/simulation cells in the last 49*days
# days, divided by all outbreak cells in the whole series.
sensitivity <- rep(0, 17)
sensitivityseas <- rep(0, 3)
sensOne <- function(alarm, outbk) {
  truePos <- 0
  for (j in 1:nsim) {
    for (i in (49*days):1) {
      truePos <- truePos +
        (alarm[nrow(alarm)-i+1, j] == 1 & outbk[nrow(outbk)-i+1, j] > 0)
    }
  }
  truePos / sum(outbk > 0)
}
for (sig in 1:17) {
  sensitivity[sig] <- sensOne(get(paste0("alarmall", sig)),
                              get(paste0("simulatedoutbreak", sig)))
}
# Seasonal variants use the same alarms but the seasonal outbreak series.
seasIds <- c(6, 7, 16)
for (s in seq_along(seasIds)) {
  sensitivityseas[s] <- sensOne(get(paste0("alarmall", seasIds[s])),
                                get(paste0("simulatedzseasoutbreak", seasIds[s])))
}
#--------------------------------------------------------
# Specificity: non-alarmed non-outbreak cells in the last 49*days days,
# divided by the non-outbreak cells in the hard-coded evaluation rows
# 2206:2548 (valid for days = 7 and years = 7).
specificity <- rep(0, 17)
specificityseas <- rep(0, 3)
specOne <- function(alarm, outbk) {
  trueNeg <- 0
  for (j in 1:nsim) {
    for (i in (49*days):1) {
      trueNeg <- trueNeg +
        (alarm[nrow(alarm)-i+1, j] == 0 & outbk[nrow(outbk)-i+1, j] == 0)
    }
  }
  trueNeg / sum(outbk[2206:2548, ] == 0)
}
for (sig in 1:17) {
  specificity[sig] <- specOne(get(paste0("alarmall", sig)),
                              get(paste0("simulatedoutbreak", sig)))
}
# Seasonal variants use the same alarms but the seasonal outbreak series.
seasIds <- c(6, 7, 16)
for (s in seq_along(seasIds)) {
  specificityseas[s] <- specOne(get(paste0("alarmall", seasIds[s])),
                                get(paste0("simulatedzseasoutbreak", seasIds[s])))
}
#----------------------------------------------
# Timeliness: for each simulation, find the first alarm that lands on an
# outbreak day within the last 49*days days, and score how far into the
# outbreak (relative to its duration) that first detection happened.
# A simulation with no detection contributes a penalty of 1.
#
# This consolidates 20 near-identical copy-pasted loop blocks and fixes a
# copy-paste bug in the original signal-13 block, which indexed
# simulatedoutbreak13 using nrow(simulatedoutbreak1).
timeliness=rep(0,17)
timelinessseas=rep(0,3)
# Timeliness for one alarm/outbreak pair.
#   alarm - 0/1 alarm matrix (rows = days, cols = simulations)
#   outbk - matching matrix of simulated outbreak case counts
# Relies on globals nsim, days, years defined earlier in the script.
timelinessOne=function(alarm,outbk){
  nMissed=0
  ss=0
  # Evaluation window: the final year excluding its first 3 weeks.
  first=52*(years-1)*days+3*days+1
  last=52*days*years
  for(j in 1:nsim){
    # r2: last outbreak day in the window (scan backwards from the end).
    for(i in last:first){
      if(outbk[i,j]>0){
        r2=i
        break
      }
    }
    # r1: first outbreak day in the window (scan forwards).
    for(i in first:last){
      if(outbk[i,j]>0){
        r1=i
        break
      }
    }
    # First alarm coinciding with an outbreak day in the last 49*days days.
    detected=FALSE
    for(i in (49*days):1){
      if(alarm[nrow(alarm)-i+1,j]==1 & outbk[nrow(outbk)-i+1,j]>0){
        # Fractional position of the detection within the outbreak.
        ss=ss+(nrow(outbk)-i+1-r1)/(r2-r1+1)
        detected=TRUE
        break
      }
    }
    if(!detected){nMissed=nMissed+1}
  }
  (ss+nMissed)/nsim
}
for(sig in 1:17){
  timeliness[sig]=timelinessOne(get(paste0("alarmall",sig)),
                                get(paste0("simulatedoutbreak",sig)))
}
# Seasonal variants use the same alarms but the seasonal outbreak series.
seasIds=c(6,7,16)
for(s in seq_along(seasIds)){
  timelinessseas[s]=timelinessOne(get(paste0("alarmall",seasIds[s])),
                                  get(paste0("simulatedzseasoutbreak",seasIds[s])))
}
#==================================
# Assemble the per-signal and seasonal metric summaries and write them to
# CSV under <myDir>/output, creating the directory on first use.
sigLabels <- paste0("sigid", 1:17)
summary1 <- data.frame(fpr, pod, sensitivity, specificity, timeliness,
                       row.names = sigLabels)
summary2 <- data.frame(fprseas, podseas, sensitivityseas,
                       specificityseas, timelinessseas,
                       row.names = paste0("sigid", c(6, 7, 16)))
outDir <- file.path(myDir, "output")
if (!dir.exists(outDir)) {
  dir.create(outDir)
}
fwrite(summary1, file.path(outDir, "summaryNB-18.csv"), row.names = FALSE)
fwrite(summary2, file.path(outDir, "summarySeasNB-18.csv"), row.names = FALSE)
| /EARS/EARSNB3x.R | no_license | FelipeJColon/AlgorithmComparison | R | false | false | 86,668 | r | ## ############################################################################
##
## DISCLAIMER:
## This script has been developed for research purposes only.
## The script is provided without any warranty of any kind, either express or
## implied. The entire risk arising out of the use or performance of the sample
## script and documentation remains with you.
## In no event shall its author, or anyone else involved in the
## creation, production, or delivery of the script be liable for any damages
## whatsoever (including, without limitation, damages for loss of business
## profits, business interruption, loss of business information, or other
## pecuniary loss) arising out of the use of or inability to use the sample
## scripts or documentation, even if the author has been advised of the
## possibility of such damages.
##
## ############################################################################
##
## DESCRIPTION
## Simulates outbreaks and analyses them using EARS-Negative Binomial
##
##
## Written by: Angela Noufaily and Felipe J Colón-González
## For any problems with this code, please contact f.colon@uea.ac.uk
##
## ############################################################################
# Delete objects in environment.
# NOTE(review): clearing the workspace from inside a script is discouraged;
# kept here only for backward compatibility with the original workflow.
rm(list=ls(all=TRUE))
# Load packages. library() errors immediately if a package is missing,
# whereas the original require() calls only warn and return FALSE, letting
# the script continue and fail later with a confusing error.
library(data.table)
library(dplyr)
library(tidyr)
library(surveillance)
library(lubridate)
library(zoo)
# DEFINING FUNCTION h
#==============
# 5-day systems
#==============
# Log-scale linear predictor for the 5-day-week baseline model: linear
# trend plus yearly (period 52*5) and within-week (period 5) harmonics.
# Returns a numeric vector of length N.
# NOTE(review): two quirks of the original are reproduced on purpose:
# (a) in the yearly harmonics the cosine uses `shift` while the sine uses
# `shift2`; (b) the k==0 & k2==0 branch ignores `shift` entirely.
h1 <- function(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, shift, shift2) {
  t <- 1:N
  if (k == 0 & k2 == 0) {
    return(alpha + beta * t)
  }
  # Within-week harmonic contribution at (shifted) time u.
  weekly <- function(u) {
    l <- 1:k2
    sum(gama3 * cos((2 * pi * l * u) / 5) + gama4 * sin((2 * pi * l * u) / 5))
  }
  out <- rep(0, N)
  if (k == 0) {
    for (i in 1:N) {
      out[i] <- alpha + beta * (t[i] + shift) + weekly(t[i] + shift)
    }
  } else {
    j <- 1:k
    for (i in 1:N) {
      yearly <- sum(gama1 * cos((2 * pi * j * (t[i] + shift)) / (52 * 5)) +
                    gama2 * sin((2 * pi * j * (t[i] + shift2)) / (52 * 5)))
      out[i] <- alpha + beta * (t[i] + shift) + yearly + weekly(t[i] + shift)
    }
  }
  out
}
# Draw one simulated 5-day-week baseline series of length N.
# Counts are Poisson when phi == 1, otherwise negative binomial
# parameterised so that the variance is phi times the mean.
negbinNoise1 <- function(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, phi, shift, shift2) {
  # Daily means from the log-linear harmonic model.
  mu <- exp(h1(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, shift, shift2))
  if (phi == 1) {
    return(rpois(N, mu))
  }
  rnbinom(N, size = mu / (phi - 1), prob = 1 / phi)
}
# Inject simulated outbreak cases into a 5-day-week baseline series.
#   currentday - outbreak cases falling in currentday:(currentday+100) are zeroed
#   weeklength - unused in the visible body
#   wtime      - candidate start days from which outbreak starts are sampled
#   yi         - baseline (noise) series; its length defines N
#   interval   - bin width when histogramming lognormal case times into days
#   k,k2,alpha,beta,gama1..gama4,shift,shift2,phi - model parameters for h1()
#   numoutbk   - number of outbreaks to generate
#   peakoutbk  - scales the expected outbreak size (multiples of baseline sd)
#   meanlog,sdlog - lognormal epidemic-curve shape parameters
# Returns (invisibly, since the last statement is an assignment) a list with
# the combined series (yitot), the outbreak-only series, start days, sizes,
# and the baseline sd and mean vectors.
outbreak5=function(currentday,weeklength,wtime,yi,interval,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,shift2,phi,numoutbk,peakoutbk,meanlog,sdlog){
# theta, beta, gama1 and gama2 are the parameters of the equation for mu in Section 3.1
N=length(yi)
t=1:N
# Baseline daily mean and standard deviation (Var = phi * mu).
mu <- exp(h1(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,shift2))
s=sqrt(mu*phi)
#wtime = (currentday-49*5+1):currentday # current outbreaks
# GENERATING OUTBREAKS
# STARTING TIMES OF OUTBREAKS
startoutbk <- sample(wtime, numoutbk, replace = FALSE)
# OUTBREAK SIZE OF CASES
# Redraw (with incrementing seeds) until each outbreak has at least 2 cases.
sizeoutbk=rep(0,numoutbk)
for(i in 1:numoutbk){
set.seed(i)
soutbk=1
sou=1
while(soutbk<2){
set.seed(sou)
soutbk=rpois(1,s[startoutbk[i]]*peakoutbk)
sou=sou+1
}
sizeoutbk[i]=soutbk
}
# DISTRIBUTE THESE CASES OVER TIME USING LOGNORMAL
# Length 2*N so an outbreak starting near day N can spill past the end.
outbreak=rep(0,2*N)
for( j in 1:numoutbk){
set.seed(j)
outbk <-rlnorm(sizeoutbk[j], meanlog = meanlog, sdlog = sdlog)
#outbk <-rnorm(sizeoutbk[j], mean = meanlog2, sd = sdlog)
#h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),1),plot=FALSE)
# Bin the continuous case times into day-width bins of size `interval`.
h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),interval),plot=FALSE)
cases <- h$counts
weight=rep(0,length(cases))
# NOTE(review): uses the whole startoutbk vector, not startoutbk[j]; when
# numoutbk > 1 the ':' operator silently uses only the first element --
# confirm this is intended (it is exact only for numoutbk == 1).
duration<-startoutbk:(startoutbk+length(cases)-1)
dayofweek<-duration%%5 # 0 is friday; 1 is monday; 2 is tuesday etc.
# Weekday weighting of outbreak cases (Mondays inflated the most).
for(i in 1:length(cases)){
if(dayofweek[i]==0){weight[i]=1.1}
if(dayofweek[i]==1){weight[i]=1.5}
if(dayofweek[i]==2){weight[i]=1.1}
if(dayofweek[i]==3){weight[i]=1}
if(dayofweek[i]==4){weight[i]=1}
}
cases2 <- cases*weight
for (l in 1:(length(cases2))){
outbreak[startoutbk[j]+(l-1)]= cases2[l]+outbreak[startoutbk[j]+(l-1)]
}# l loop
}# j loop
#for(v in 1:(currentday-49*5)){if(outbreak[v]>0){outbreak[v]=0}}
# Zero any outbreak cases in the 100 days from currentday onwards.
for(v in currentday:(currentday+100)){if(outbreak[v]>0){outbreak[v]=0}}
# Truncate the padded series back to the baseline length.
outbreak=outbreak[1:N]
# ADD NOISE AND OUTBREAKS
yitot=yi+outbreak
result=list(yitot=yitot,outbreak=outbreak,startoutbk=startoutbk,sizeoutbk=sizeoutbk,sd=s,mean=mu)
#return(result)
}
#==============
# 7-day systems
#==============
# Log-scale linear predictor for the 7-day-week baseline model: linear
# trend plus yearly (period 52*7) and within-week (period 7) harmonics.
# Returns a numeric vector of length N.
# NOTE(review): as in the original, the k==0 & k2==0 branch ignores `shift`.
h2 <- function(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, shift) {
  t <- 1:N
  if (k == 0 & k2 == 0) {
    return(alpha + beta * t)
  }
  # Within-week harmonic contribution at (shifted) time u.
  weekly <- function(u) {
    l <- 1:k2
    sum(gama3 * cos((2 * pi * l * u) / 7) + gama4 * sin((2 * pi * l * u) / 7))
  }
  out <- rep(0, N)
  if (k == 0) {
    for (i in 1:N) {
      out[i] <- alpha + beta * (t[i] + shift) + weekly(t[i] + shift)
    }
  } else {
    j <- 1:k
    for (i in 1:N) {
      yearly <- sum(gama1 * cos((2 * pi * j * (t[i] + shift)) / (52 * 7)) +
                    gama2 * sin((2 * pi * j * (t[i] + shift)) / (52 * 7)))
      out[i] <- alpha + beta * (t[i] + shift) + yearly + weekly(t[i] + shift)
    }
  }
  out
}
# Draw one simulated 7-day-week baseline series of length N.
# Counts are Poisson when phi == 1, otherwise negative binomial
# parameterised so that the variance is phi times the mean.
negbinNoise2 <- function(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, phi, shift) {
  # Daily means from the log-linear harmonic model.
  mu <- exp(h2(N, k, k2, alpha, beta, gama1, gama2, gama3, gama4, shift))
  if (phi == 1) {
    return(rpois(N, mu))
  }
  rnbinom(N, size = mu / (phi - 1), prob = 1 / phi)
}
# Inject simulated outbreak cases into a 7-day-week baseline series.
# Same structure as outbreak5() but with period-7 harmonics (via h2), a
# 7-day weekday weighting (weekend days doubled), and no shift2 parameter.
#   currentday - outbreak cases falling in currentday:(currentday+100) are zeroed
#   weeklength - unused in the visible body
#   wtime      - candidate start days from which outbreak starts are sampled
#   yi         - baseline (noise) series; its length defines N
#   interval   - bin width when histogramming lognormal case times into days
#   numoutbk   - number of outbreaks; peakoutbk scales expected outbreak size
#   meanlog,sdlog - lognormal epidemic-curve shape parameters
# Returns (invisibly, since the last statement is an assignment) a list with
# the combined series (yitot), the outbreak-only series, start days, sizes,
# and the baseline sd and mean vectors.
outbreak7=function(currentday,weeklength,wtime,yi,interval,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,phi,numoutbk,peakoutbk,meanlog,sdlog){
# theta, beta, gama1 and gama2 are the parameters of the equation for mu in Section 3.1
N=length(yi)
t=1:N
# Baseline daily mean and standard deviation (Var = phi * mu).
mu <- exp(h2(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift))
s=sqrt(mu*phi)
#wtime = (currentday-49*7+1):currentday # current outbreaks
# wtime = 350*1:7 # current outbreaks
# GENERATING OUTBREAKS
# STARTING TIMES OF OUTBREAKS
startoutbk <- sample(wtime, numoutbk, replace = FALSE)
# OUTBREAK SIZE OF CASES
# Redraw (with incrementing seeds) until each outbreak has at least 2 cases.
sizeoutbk=rep(0,numoutbk)
for(i in 1:numoutbk){
set.seed(i)
soutbk=1
sou=1
while(soutbk<2){
set.seed(sou)
soutbk=rpois(1,s[startoutbk[i]]*peakoutbk)
sou=sou+1
}
sizeoutbk[i]=soutbk
}
# DISTRIBUTE THESE CASES OVER TIME USING LOGNORMAL
# Length 2*N so an outbreak starting near day N can spill past the end.
outbreak=rep(0,2*N)
for( j in 1:numoutbk){
set.seed(j)
outbk <-rlnorm(sizeoutbk[j], meanlog = meanlog, sdlog = sdlog)
#outbk <-rnorm(sizeoutbk[j], mean = meanlog2, sd = sdlog)
#h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),1),plot=FALSE)
# Bin the continuous case times into day-width bins of size `interval`.
h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),interval),plot=FALSE)
cases <- h$counts
weight=rep(0,length(cases))
# NOTE(review): uses the whole startoutbk vector, not startoutbk[j]; when
# numoutbk > 1 the ':' operator silently uses only the first element --
# confirm this is intended (it is exact only for numoutbk == 1).
duration<-startoutbk:(startoutbk+length(cases)-1)
dayofweek<-duration%%7 # 0 is sunday; 1 is monday; 2 is tuesday etc.
# Weekday weighting of outbreak cases (weekend days doubled).
for(i in 1:length(cases)){
if(dayofweek[i]==0){weight[i]=2}
if(dayofweek[i]==1){weight[i]=1}
if(dayofweek[i]==2){weight[i]=1}
if(dayofweek[i]==3){weight[i]=1}
if(dayofweek[i]==4){weight[i]=1}
if(dayofweek[i]==5){weight[i]=1}
if(dayofweek[i]==6){weight[i]=2}
}
cases2 <- cases*weight
for (l in 1:(length(cases2))){
outbreak[startoutbk[j]+(l-1)]= cases2[l]+outbreak[startoutbk[j]+(l-1)]
}# l loop
}# j loop
#for(v in (currentday-49*7):currentday){if(outbreak[v]>0){outbreak[v]=0}}
# Zero any outbreak cases in the 100 days from currentday onwards.
for(v in currentday:(currentday+100)){if(outbreak[v]>0){outbreak[v]=0}}
# Truncate the padded series back to the baseline length.
outbreak=outbreak[1:N]
# ADD NOISE AND OUTBREAKS
yitot=yi+outbreak
result=list(yitot=yitot,outbreak=outbreak,startoutbk=startoutbk,sizeoutbk=sizeoutbk,sd=s,mean=mu)
#return(result)
}
#==========================
# Specify the bank holidays
#==========================
# Location of the bank-holiday lookup used to distort the simulated series.
myDir <- "/local/zck07apu/Documents/GitLab/rammie_comparison/scripts/NB/3x"
years=7
bankholidays=read.csv(file.path(myDir, "Bankholidays.csv"))
#fix(bankholidays)
# 0/1 bank-holiday indicator, one entry per calendar day (7-day weeks).
bankhols7=bankholidays$bankhol
bankhols7=as.numeric(bankhols7)
length(bankhols7)
#fix(bankhols7)
# 5-day-week version: drop every 7th entry starting at position 6, then
# every 6th entry of what remains — presumably stripping Saturdays then
# Sundays. NOTE(review): assumes the file starts on a Monday — confirm.
bankhols5=bankhols7[-seq(6,length(bankhols7),7)]
bankhols5=bankhols5[-seq(6,length(bankhols5),6)]
bankhols5=as.numeric(bankhols5)
length(bankhols5)
#fix(bankhols5)
#=======================
# Define the data frames
#=======================
nsim=100
# One container per syndrome for: the baseline data (`simulateddata*`),
# totals = baseline + outbreaks (`simulatedtotals*`), the injected
# detection outbreaks (`simulatedoutbreak*`), and — for sigids 6, 7 and
# 16 only — the seasonal + detection outbreaks combined
# (`simulatedzseasoutbreak*`).
#
# Every container has one row per simulated day (52 weeks * 7 days *
# `years`) and one column per simulation run. Allocate through a helper
# instead of repeating the identical expression 54 times; column names
# (X1..Xnsim) produced by data.frame(array(...)) are preserved.
sim_frame <- function() {
  data.frame(array(rep(0, nsim * 52 * 7 * years), dim = c(52 * 7 * years, nsim)))
}
# assign() at top level writes into the global environment, so each name
# ends up defined exactly as the original one-per-line assignments did.
for (nm in c(paste0("simulateddata", 1:17),
             paste0("simulatedtotals", 1:17),
             paste0("simulatedoutbreak", 1:17),
             paste0("simulatedzseasoutbreak", c(6, 7, 16)))) {
  assign(nm, sim_frame())
}
#################################
#SIMULATE SYNDROMES AND OUTBREAKS
#################################
#=====================
# 5-day week syndromes
#=====================
days5=5
N=52*days5*years
#sigid6
# Syndrome 6 (5-day week): seasonal negative-binomial baseline (scaled
# down by 10) plus one large seasonal outbreak per simulated year and
# one small detection outbreak in the final 49 weeks. Bank holidays are
# zeroed with the following working day inflated 1.5x; weekends are then
# re-inserted as zeros so the stored series has a 7-day calendar.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50)/10
#mu=exp(h1(N=N,k=1,k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,shift=-50,shift2=-50))
out1=rep(0,N)
# One seasonal outbreak per year, in the first 20 working days of each
# year; seed j+years*i gives each (simulation, year) pair its own stream.
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=((1+(j-1)*days5*52):(20+(j-1)*days5*52)),yi=yt,interval=0.02,k=1,
k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5*80,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
# Detection outbreak somewhere in the last 49 working weeks.
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=6,beta=0,gama1=0.3,
gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak +out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank-holiday effect: no reporting on the holiday, backlog next day.
# NOTE(review): if the final day were a holiday, zitot[b+1] would index
# one past the end and grow the vector — confirm bankhols5 never ends
# on a holiday.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Insert two zero days (the weekend) after every 5 working days; the
# 2*(s-1) offset accounts for the zeros already inserted.
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
yt=append(yt,zeros,after=2*(s-1)+weekend[s])
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
zseasoutbreak=append(zseasoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata6[,i]=round(zt)
simulatedtotals6[,i]=round(zitot)
simulatedoutbreak6[,i]=round(zoutbreak)
simulatedzseasoutbreak6[,i]=round(zseasoutbreak)
}
#----------------------------------------------------
# Plot the datasets and outbreaks using the following
#----------------------------------------------------
#plot(1:N,yt,typ='l')
#plot(1:(52*years*7),zt,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(365,728))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(729,1092))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1093,1456))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1457,1820))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1821,2184))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(2185,2548))
#lines(1:(52*years*7),zoutbreak,col='green')
# Visual check of sigid6 (simulation 4): totals in black, seasonal +
# detection outbreaks in green, detection-only outbreaks in red.
plot(1:(52*years*7),simulatedtotals6[,4],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green')
lines(1:(52*years*7),simulatedoutbreak6[,4],col='red')
# These use the loop-carried yt/zitot etc. from the LAST sigid6 iteration.
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green',typ='l')
#sigid7
# Syndrome 7 (5-day week): low-count seasonal baseline plus one seasonal
# outbreak per year and one small detection outbreak in the final 49
# weeks. Bank holidays are zeroed with the next day inflated 1.5x, and
# weekends are re-inserted as zeros to give a 7-day calendar.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=1,beta=0,gama1=0.1,gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50)
#mu=exp(h1(N=N,k=1,k2=1,alpha=1.5,beta=0,gama1=0.1,gama2=2,gama3=0.1,gama4=0.1,shift=-50,shift2=-50))
out1=rep(0,N)
# One seasonal outbreak per year, first 20 working days of each year.
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=((1+(j-1)*days5*52):(20+(j-1)*days5*52)),yi=yt,interval=0.02,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5*50,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
# Detection outbreak in the last 49 working weeks.
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak+out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
# Bank-holiday effect: zero on the day, 1.5x backlog the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Insert weekend zeros after every 5 working days.
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
yt=append(yt,zeros,after=2*(s-1)+weekend[s])
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
zseasoutbreak=append(zseasoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata7[,i]=round(zt)
simulatedtotals7[,i]=round(zitot)
simulatedoutbreak7[,i]=round(zoutbreak)
simulatedzseasoutbreak7[,i]=round(zseasoutbreak)
}
# Visual checks for sigid7 (plots use the last loop iteration's vectors).
plot(1:(52*years*7),simulatedtotals7[,7],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak7[,7],col='green')
lines(1:(52*years*7),simulatedoutbreak7[,7],col='red')
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak7[,4],col='green',typ='l')
#sigid8
# Syndrome 8 (5-day week): baseline with day-of-week terms only
# (k=0 disables the annual seasonal component; small linear trend via
# beta) plus a single detection outbreak in the final 49 weeks. No
# seasonal outbreaks for this syndrome.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=0,k2=1,alpha=6,beta=0.0001,gama1=0,gama2=0,gama3=0.6,gama4=0.9,phi=1.5,shift=0,shift2=0)/10
#mu=exp(h1(N=N,k=0,k2=1,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.6,gama4=0.9,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=0,k2=1,alpha=6,beta=0,gama1=0,
gama2=0,gama3=0.6,gama4=0.9,phi=1.5,shift=0,shift2=0,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank-holiday effect: zero on the day, 1.5x backlog the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Insert weekend zeros after every 5 working days.
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata8[,i]=round(zt)
simulatedtotals8[,i]=round(zitot)
simulatedoutbreak8[,i]=round(zoutbreak)
}
# Visual check of the final year: baseline (green) vs baseline+outbreak.
plot(1:(52*years*7),simulateddata8[,1],typ='l',xlim=c(2185,2548),col='green')
lines(1:(52*years*7),simulateddata8[,1]+simulatedoutbreak8[,1])
#sigid9
# Syndrome 9 (5-day week): strongly seasonal baseline plus a single
# detection outbreak in the final 49 weeks; no seasonal outbreaks.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=1.5,gama2=0.1,gama3=0.2,gama4=0.3,phi=1,shift=-150,shift2=-150)
# NOTE(review): mu is computed with gama3=0.6/gama4=0.8, which differs
# from yt's 0.2/0.3, and mu is not used below — looks like leftover
# debugging; confirm before relying on it.
mu=exp(h1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=1.5,gama2=0.1,gama3=0.6,gama4=0.8,shift=-150,shift2=-150))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),interval=0.25,yi=yt,k=1,k2=1,alpha=3,beta=0,gama1=1.5,
gama2=0.1,gama3=0.2,gama4=0.3,phi=1,shift=-150,shift2=-150,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank-holiday effect: zero on the day, 1.5x backlog the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Insert weekend zeros after every 5 working days.
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata9[,i]=round(zt)
simulatedtotals9[,i]=round(zitot)
simulatedoutbreak9[,i]=round(zoutbreak)
}
# Visual check of the final year: baseline (green) vs baseline+outbreak.
plot(1:(52*years*7),simulateddata9[,1],typ='l',xlim=c(2185,2548),col='green')
lines(1:(52*years*7),simulateddata9[,1]+simulatedoutbreak9[,1])
#sigid10
# Syndrome 10 (5-day week): mildly seasonal baseline plus a single
# detection outbreak in the final 49 weeks; no seasonal outbreaks.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.15,phi=1,shift=-200,shift2=-200)
#mu=exp(h1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.15,shift=-200,shift2=-200))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=3,beta=0,gama1=0.2,
gama2=0.1,gama3=0.05,gama4=0.15,phi=1,shift=-200,shift2=-200,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank-holiday effect: zero on the day, 1.5x backlog the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Insert weekend zeros after every 5 working days.
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata10[,i]=round(zt)
simulatedtotals10[,i]=round(zitot)
simulatedoutbreak10[,i]=round(zoutbreak)
}
#sigid11
# Syndrome 11 (5-day week): moderate baseline plus a single detection
# outbreak in the final 49 weeks; no seasonal outbreaks. mu is computed
# but not used below.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=5,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.1,phi=1,shift=0,shift2=0)
mu=exp(h1(N=N,k=1,k2=1,alpha=5,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.1,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),interval=0.25,yi=yt,k=1,k2=1,alpha=5,beta=0,gama1=0.2,
gama2=0.1,gama3=0.05,gama4=0.1,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank-holiday effect: zero on the day, 1.5x backlog the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Insert weekend zeros after every 5 working days.
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata11[,i]=round(zt)
simulatedtotals11[,i]=round(zitot)
simulatedoutbreak11[,i]=round(zoutbreak)
}
#sigid12
# Syndrome 12 (5-day week): low-count baseline with two seasonal
# harmonics (k=2) plus a single detection outbreak in the final 49
# weeks; no seasonal outbreaks.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,gama2=0,gama3=0.05,gama4=0.15,phi=1,shift=0,shift2=0)
#mu=exp(h1(N=N,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,gama2=0,gama3=0.05,gama4=0.15,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,
gama2=0,gama3=0.05,gama4=0.15,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank-holiday effect: zero on the day, 1.5x backlog the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Insert weekend zeros after every 5 working days.
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata12[,i]=round(zt)
simulatedtotals12[,i]=round(zitot)
simulatedoutbreak12[,i]=round(zoutbreak)
}
#sigid13
# Syndrome 13 (5-day week): very low counts (baseline scaled down by
# 100) plus a single detection outbreak in the final 49 weeks; no
# seasonal outbreaks.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=9,beta=0,gama1=0.5,gama2=0.2,gama3=0.2,gama4=0.5,phi=1,shift=0,shift2=0)/100
#mu=exp(h1(N=N,k=1,k2=1,alpha=9,beta=0,gama1=0.5,gama2=0.2,gama3=0.2,gama4=0.5,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=9,beta=0,gama1=0.5,
gama2=0.2,gama3=0.2,gama4=0.5,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=3*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
# Bank-holiday effect: zero on the day, 1.5x backlog the next day.
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
# Insert weekend zeros after every 5 working days.
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata13[,i]=round(zt)
simulatedtotals13[,i]=round(zitot)
simulatedoutbreak13[,i]=round(zoutbreak)
}
# Visual checks for sigid13 (second plot zooms on the final weeks).
plot(1:length(simulatedtotals13[,1]),simulatedtotals13[,1],typ='l')
plot(1:N,simulatedtotals13[,1],typ='l',xlim=c(2206,2548),col='green')
lines(1:N,simulateddata13[,1],typ='l')
#=====================
# 7-day week syndromes
#=====================
# From here on N is the 7-day calendar length; no weekend insertion is
# needed, and bank holidays DOUBLE the count instead of zeroing it.
years=7
days7=7
N=52*days7*years
#sigid1
# Syndrome 1 (7-day week): seasonal baseline plus a single detection
# outbreak in the final 49 weeks; no seasonal outbreaks.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,gama3=0.5,gama4=0.4,phi=2,shift=29)
#mu=exp(h2(N=N,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,gama3=0.5,gama4=0.4,shift=29))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,
gama3=0.5,gama4=0.4,phi=2,shift=29,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank-holiday effect for 7-day syndromes: counts double on the day.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata1[,i]=round(zt)
simulatedtotals1[,i]=round(zitot)
simulatedoutbreak1[,i]=round(zoutbreak)
}
#sigid3
# Syndrome 3 (7-day week): low baseline with strong seasonality plus a
# single detection outbreak in the final 49 weeks (49*7 here is the
# same window as 49*days7 elsewhere); no seasonal outbreaks.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,gama2=1.4,gama3=0.5,gama4=0.4,phi=1,shift=-167)
#mu=exp(h2(N=N,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,gama2=1.4,gama3=0.5,gama4=0.4,shift=-167))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,
gama2=1.4,gama3=0.5,gama4=0.4,phi=1,shift=-167,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank-holiday effect: counts double on the day.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata3[,i]=round(zt)
simulatedtotals3[,i]=round(zitot)
simulatedoutbreak3[,i]=round(zoutbreak)
}
#sigid4
# Syndrome 4 (7-day week): non-seasonal baseline (k=0 — no annual sine
# terms, only day-of-week effects) plus a single detection outbreak in
# the final 49 weeks; no seasonal outbreaks. Bank holidays double the
# daily count.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=5.5,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=5.5,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,shift=1))
set.seed(i)
# weeklength uses `years` like every other sigid block; the original
# hard-coded 12 years here (52*days7*12), which looks like a stale
# copy-paste from a 12-year version of the study.
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=5.5,beta=0,gama1=0,
gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank-holiday effect: counts double on the day.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata4[,i]=round(zt)
simulatedtotals4[,i]=round(zitot)
simulatedoutbreak4[,i]=round(zoutbreak)
}
#sigid5
# Syndrome 5 (7-day week): like sigid4 but with a lower baseline level
# (alpha=2); single detection outbreak in the final 49 weeks, no
# seasonal outbreaks, bank holidays double the count.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=2,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=2,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,shift=1))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=2,beta=0,gama1=0,
gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank-holiday effect: counts double on the day.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata5[,i]=round(zt)
simulatedtotals5[,i]=round(zitot)
simulatedoutbreak5[,i]=round(zoutbreak)
}
#sigid14
# Syndrome 14 (7-day week): upward-trending seasonal baseline
# (beta=0.0005) plus a single detection outbreak in the final 49 weeks;
# no seasonal outbreaks. Bank holidays double the count.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=2,beta=0.0005,gama1=0.8,gama2=0.8,gama3=0.8,gama4=0.4,phi=4,shift=57)
#mu=exp(h2(N=N,k=1,k2=2,alpha=2,beta=0,gama1=0.8,gama2=0.8,gama3=0.8,gama4=0.4,shift=57))
set.seed(i)
# NOTE(review): the outbreak7 parameters below (alpha=6, shift=29, ...)
# are sigid1's, not this syndrome's (alpha=2, shift=57, ...). In every
# earlier sigid the outbreak call mirrors the baseline parameters —
# confirm whether this mismatch is intentional or a copy-paste slip.
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,
gama3=0.5,gama4=0.4,phi=2,shift=29,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank-holiday effect: counts double on the day.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata14[,i]=round(zt)
simulatedtotals14[,i]=round(zitot)
simulatedoutbreak14[,i]=round(zoutbreak)
}
#sigid15
# Syndrome 15 (7-day week): very sparse baseline (alpha=0.05, strong
# day-of-week pattern via gama3) plus a single detection outbreak in
# the final 49 weeks; no seasonal outbreaks. Bank holidays double the
# count.
for(i in 1:nsim){
set.seed(i)
#yt=0.1*(negbinNoise2(N=N,k=4,k2=1,alpha=1.5,beta=0,gama1=0.1,gama2=0.1,gama3=1.8,gama4=0.1,phi=1,shift=-85)+2)
yt=1*(negbinNoise2(N=N,k=4,k2=1,alpha=0.05,beta=0,gama1=0.01,gama2=0.01,gama3=1.8,gama4=0.1,phi=1,shift=-85)+0)
set.seed(i)
# NOTE(review): the outbreak7 parameters below (alpha=2, shift=57, ...)
# match sigid14's baseline, not this syndrome's — confirm whether the
# mismatch is intentional or a copy-paste slip.
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=2,beta=0,gama1=0.8,
gama2=0.8,gama3=0.8,gama4=0.4,phi=4,shift=57,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank-holiday effect: counts double on the day.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata15[,i]=round(zt)
simulatedtotals15[,i]=round(zitot)
simulatedoutbreak15[,i]=round(zoutbreak)
}
#plot(1:N,yt,typ='l')
#plot(1:(52*years*7),zt,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(365,728))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(729,1092))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1093,1456))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1457,1820))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1821,2184))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(2185,2548))
#lines(1:(52*years*7),zoutbreak,col='green')
# Visual re-check of sigid6 (simulation 4). NOTE(review): at this point
# yt/zitot/zoutbreak hold sigid15's last-iteration vectors, and
# zseasoutbreak is the stale sigid7 value — the second plot therefore
# mixes syndromes; confirm these were meant only as ad-hoc checks.
plot(1:(52*years*7),simulatedtotals6[,4],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green')
lines(1:(52*years*7),simulatedoutbreak6[,4],col='red')
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green',typ='l')
#sigid16
# Syndrome 16 (7-day week): seasonal baseline plus one seasonal
# outbreak per year (around days 210-230 of each year) and one
# detection outbreak in the final 49 weeks. Bank holidays double the
# count. NOTE(review): the seasonal outbreaks use outbreak5 (the 5-day
# generator) while the detection outbreak uses outbreak7 — confirm this
# mix is intentional for a 7-day syndrome.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=3,beta=0,gama1=0.8,gama2=0.6,gama3=0.8,gama4=0.4,phi=4,shift=29)
#mu=exp(h2(N=N,k=1,k2=2,alpha=3,beta=0,gama1=0.8,gama2=0.6,gama3=0.8,gama4=0.4,shift=29))
out1=rep(0,N)
# One seasonal outbreak per year; seed j+years*i separates the streams.
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days7*52*years,weeklength=52*days7*years,wtime=((210+(j-1)*days7*52):(230+(j-1)*days7*52)),yi=yt,interval=0.02,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days7*150,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=3,beta=0,gama1=0.8,
gama2=0.6,gama3=0.8,gama4=0.4,phi=4,shift=29,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak+out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
# Bank-holiday effect: counts double on the day.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata16[,i]=round(zt)
simulatedtotals16[,i]=round(zitot)
simulatedoutbreak16[,i]=round(zoutbreak)
simulatedzseasoutbreak16[,i]=round(zseasoutbreak)
}
# Visual checks: last-iteration vectors, then simulations 1 and 2.
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedtotals16[,1],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak16[,1],col='green')
lines(1:(52*years*7),simulatedoutbreak16[,1],col='red')
plot(1:(52*years*7),simulatedtotals16[,2],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak16[,2],col='green')
lines(1:(52*years*7),simulatedoutbreak16[,2],col='red')
#sigid17
# Syndrome 17 (7-day week): non-seasonal baseline (k=0, day-of-week
# effects only) plus a single detection outbreak in the final 49 weeks;
# no seasonal outbreaks. Bank holidays double the daily count.
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.8,gama4=0.4,phi=4,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.8,gama4=0.4,shift=1))
set.seed(i)
# weeklength uses `years` like every other sigid block; the original
# hard-coded 12 years here (52*7*12), which looks like a stale
# copy-paste from a 12-year version of the study.
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=6,beta=0,gama1=0,
gama2=0,gama3=0.8,gama4=0.4,phi=4,shift=1,numoutbk=1,peakoutbk=3*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
# Bank-holiday effect: counts double on the day.
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata17[,i]=round(zt)
simulatedtotals17[,i]=round(zitot)
simulatedoutbreak17[,i]=round(zoutbreak)
}
#=============================
# Define the alarm data frames
#=============================
days=7
nsim=100
# One alarm container per syndrome: one row per day of the final 49
# weeks, one column per simulation run. Allocate through a helper
# instead of repeating the identical expression 17 times; column names
# (X1..Xnsim) produced by data.frame(array(...)) are preserved.
alarm_frame <- function() {
  data.frame(array(rep(0, nsim * 49 * days), dim = c(49 * days, nsim)))
}
# assign() at top level writes into the global environment, so each
# alarmall1..alarmall17 is defined exactly as before.
for (nm in paste0("alarmall", 1:17)) {
  assign(nm, alarm_frame())
}
###########################################
#========================================
#Implement the algorithm to data by days and record the alarms inthe above dataframes
#========================================
###########################################
# Daily date index for the simulated series: 2010-01-01 .. 2016-12-30,
# with every 31 Dec and the leap days (29 Feb 2012/2016) removed so each
# year has exactly 364 days (52 weeks * 7 days), 2548 days in total.
# Fixes over the original: a stray comma inside '2016-02-29,' is removed,
# and dropDays is built with as.Date instead of as.POSIXct so the %in%
# comparison is between like classes (the original relied on fragile
# implicit character coercion between Date and POSIXct).
myDates <- seq(as.Date('2010-01-01'), as.Date('2016-12-30'), by = '1 day')
dropDays <- as.Date(c('2010-12-31', '2011-12-31', '2012-12-31',
                      '2013-12-31', '2014-12-31', '2015-12-31',
                      '2016-02-29', '2012-02-29'))
"%ni%" <- Negate("%in%")  # convenience operator: "not in"
myDates <- myDates[myDates %ni% dropDays]
# Convert to 7-day running totals
# Right-aligned 7-day rolling sum of a daily count vector; the first six
# positions (incomplete windows) are filled with NA.  Uses zoo::rollapplyr.
# Fix: na.rm=T replaced with na.rm=TRUE (T is a reassignable binding).
rolling <- function(x) {
  rollapplyr(x, width = 7, FUN = sum, na.rm = TRUE, fill = NA)
}
# 7-day running totals for every simulated dataset: baseline series
# (simulateddataK -> simdataK) and series with injected outbreaks
# (simulatedtotalsK -> simtotK), column-wise (one column per simulation).
# Dataset 2 is excluded, exactly as in the original (its calls were
# commented out).
for (k in c(1, 3:17)) {
  assign(paste0("simdata", k),
         apply(get(paste0("simulateddata", k)), 2, rolling))
  assign(paste0("simtot", k),
         apply(get(paste0("simulatedtotals", k)), 2, rolling))
}
# Convert data to sts
# Wrap each rolling-total matrix in a surveillance::sts object with a
# 364-day year starting 2010-01-01; epochs carry the actual calendar
# dates from myDates.  simStsK = baseline series, totStsK = series with
# injected outbreaks.  Dataset 2 is skipped (commented out in the
# original).
makeSts <- function(m) {
  sts(m, start = c(2010, 1), frequency = 364,
      epoch = as.numeric(as.Date(myDates)), epochAsDate = TRUE)
}
for (k in c(1, 3:17)) {
  assign(paste0("simSts", k), makeSts(get(paste0("simdata", k))))
  assign(paste0("totSts", k), makeSts(get(paste0("simtot", k))))
}
# Row indices of the final 49 weeks (343 days) of the 2548-day series --
# the period actually monitored for outbreaks.
in2016 <- 2206:2548
# Select range of data to monitor, algorithm and prediction interval
# NOTE(review): this control list is written for algo.glrnb (mu0/theta
# entries); the same list is also passed to earsC further below, which
# documents a different control set -- confirm earsC tolerates/ignores
# the unknown entries.
control <- list(range=in2016, alpha=NULL, mu0=list(S=2, trend=TRUE),
theta=NULL)
# Run the GLR negative-binomial detector on every outbreak-containing
# series, save a 4x4 panel plot per simulation, and record each day's
# alarm indicator into the matching alarmallK column.
ks <- c(1, 3:17)
for (sim in seq(nsim)) {
  cat("\t", sim)
  # Run detection algorithm on each dataset's column `sim`
  dets <- list()
  for (k in ks) {
    dets[[as.character(k)]] <-
      algo.glrnb(sts2disProg(get(paste0("totSts", k))[, sim]),
                 control = control)
  }
  # Plot detection results
  dir.create(file.path(myDir, "plots", "totals"),
             recursive = TRUE)
  png(file.path(myDir, "plots", "totals", paste0("Sim_", sim, ".png")),
      width = 16, height = 14, units = "in", res = 300)
  par(mfrow = c(4, 4), oma = c(0, 0, 2, 0))
  for (k in ks) {
    plot(dets[[as.character(k)]], main = paste("Dataset", k), legend = NULL)
  }
  title(main = list(paste("Simulation", sim, "Alpha", control$alpha),
                    cex = 2), outer = TRUE)
  dev.off()
  # Retrieve information about alarms (0/1 per monitored day)
  for (k in ks) {
    tmp <- get(paste0("alarmall", k))
    tmp[, sim] <- as.numeric(as.vector(unlist(dets[[as.character(k)]]$alarm)))
    assign(paste0("alarmall", k), tmp)
  }
}
# Replace missing values with zero (?)
# Any NA alarm cell (days where the detector returned no decision) is
# treated as "no alarm".  Dataset 2 is not touched, matching the
# original line-by-line version.
for (k in c(1, 3:17)) {
  tmp <- get(paste0("alarmall", k))
  tmp[is.na(tmp)] <- 0
  assign(paste0("alarmall", k), tmp)
}
# Compare vs data without outbreaks
# Re-run detection (EARS-C) on the baseline series that contain no
# injected outbreaks, saving the same 4x4 panel plot per simulation.
# Alarms from this pass are only plotted, not stored.
ksc <- c(1, 3:17)
for (sim in seq(nsim)) {
  cat("\t", sim)
  dets <- list()
  for (k in ksc) {
    dets[[as.character(k)]] <-
      earsC(get(paste0("simSts", k))[, sim], control = control)
  }
  dir.create(file.path(myDir, "plots", "control"),
             recursive = TRUE)
  png(file.path(myDir, "plots", "control",
                paste0("Sim_", sim, ".png")),
      width = 16, height = 14, units = "in", res = 300)
  par(mfrow = c(4, 4), oma = c(0, 0, 2, 0))
  for (k in ksc) {
    plot(dets[[as.character(k)]], main = paste("Dataset", k), legend = NULL)
  }
  title(main = list(paste("Simulation", sim, "Alpha", control$alpha),
                    cex = 2), outer = TRUE)
  dev.off()
}
#====================================
#====================================
#Summary
#====================================
#====================================
days <- 7
# FPR false positive rate
# -----------------------
# For each dataset: count the (day, simulation) cells in the monitored
# window (the last 49*7 rows) where an alarm was raised but no outbreak
# case was injected, then divide by the total number of outbreak-free
# cells in that window (rows 2206:2548 of the outbreak matrix).
# Fixes over the original: a stray "a=" before fpr[1] (which chained
# into an accidental assignment of fpr[1]'s value to a junk global `a`)
# has been removed, and the 20 copy-pasted loops are folded into one
# helper.
countFalseAlarms <- function(alarm, outbreak) {
  # Number of cells with alarm == 1 and outbreak == 0 over the last
  # 49*7 rows of both tables (alarm has 343 rows, outbreak 2548; the
  # nrow()-i+1 indexing aligns their final windows).
  nu <- 0
  for (j in 1:nsim) {
    for (i in (49 * 7):1) {
      nu <- nu + (alarm[nrow(alarm) - i + 1, j] == 1 &
                  outbreak[nrow(outbreak) - i + 1, j] == 0)
    }
  }
  nu
}
fpr <- rep(0, 17)
for (k in 1:17) {
  # NOTE(review): k = 2 uses alarmall2, which is never filled by the
  # detection loop (dataset 2 is commented out there) -- fpr[2] is
  # therefore computed from all-zero alarms, as in the original.
  al <- get(paste0("alarmall", k))
  ob <- get(paste0("simulatedoutbreak", k))
  fpr[k] <- countFalseAlarms(al, ob) / sum(ob[2206:2548, ] == 0)
}
# Seasonal variants: datasets 6, 7 and 16 are additionally scored
# against their seasonal-outbreak counterparts.
fprseas <- rep(0, 3)
fprSeasSets <- c(6, 7, 16)
for (s in seq_along(fprSeasSets)) {
  al <- get(paste0("alarmall", fprSeasSets[s]))
  ob <- get(paste0("simulatedzseasoutbreak", fprSeasSets[s]))
  fprseas[s] <- countFalseAlarms(al, ob) / sum(ob[2206:2548, ] == 0)
}
#--------------------------------------------------------
# POD power of detection
# Fraction of simulations in which at least one alarmed day in the
# monitored window (last 49*days rows) coincides with an injected
# outbreak day.
podOne <- function(alarm, outbreak) {
  detected <- 0
  for (j in 1:nsim) {
    hits <- 0
    for (i in (49 * days):1) {
      hits <- hits + (alarm[nrow(alarm) - i + 1, j] == 1 &
                      outbreak[nrow(outbreak) - i + 1, j] > 0)
    }
    detected <- detected + (hits > 0)
  }
  detected / nsim
}
pod <- rep(0, 17)
for (k in 1:17) {
  pod[k] <- podOne(get(paste0("alarmall", k)),
                   get(paste0("simulatedoutbreak", k)))
}
# Seasonal variants for datasets 6, 7 and 16.
pod <- pod  # (no-op; keeps pod as the last-touched metric for inspection)
podseas <- rep(0, 3)
podSeasSets <- c(6, 7, 16)
for (s in seq_along(podSeasSets)) {
  podseas[s] <- podOne(get(paste0("alarmall", podSeasSets[s])),
                       get(paste0("simulatedzseasoutbreak", podSeasSets[s])))
}
#--------------------------------------------------------
# Sensitivity
# Alarmed outbreak days in the monitored window divided by the total
# number of outbreak days.  NOTE(review): the denominator sums the WHOLE
# outbreak matrix, not just rows 2206:2548 -- this matches the original
# and is equivalent only if outbreaks are injected exclusively in the
# final window (as the wtime argument used at simulation time suggests);
# confirm against the simulation code.
sensOne <- function(alarm, outbreak) {
  hits <- 0
  for (j in 1:nsim) {
    for (i in (49 * days):1) {
      hits <- hits + (alarm[nrow(alarm) - i + 1, j] == 1 &
                      outbreak[nrow(outbreak) - i + 1, j] > 0)
    }
  }
  hits / sum(outbreak > 0)
}
sensitivity <- rep(0, 17)
for (k in 1:17) {
  sensitivity[k] <- sensOne(get(paste0("alarmall", k)),
                            get(paste0("simulatedoutbreak", k)))
}
# Seasonal variants for datasets 6, 7 and 16.
sensitivityseas <- rep(0, 3)
sensSeasSets <- c(6, 7, 16)
for (s in seq_along(sensSeasSets)) {
  sensitivityseas[s] <-
    sensOne(get(paste0("alarmall", sensSeasSets[s])),
            get(paste0("simulatedzseasoutbreak", sensSeasSets[s])))
}
#--------------------------------------------------------
# Specificity
# Non-alarmed outbreak-free days in the monitored window divided by the
# number of outbreak-free (day, simulation) cells in rows 2206:2548 of
# the outbreak matrix.
specOne <- function(alarm, outbreak) {
  quiet <- 0
  for (j in 1:nsim) {
    for (i in (49 * days):1) {
      quiet <- quiet + (alarm[nrow(alarm) - i + 1, j] == 0 &
                        outbreak[nrow(outbreak) - i + 1, j] == 0)
    }
  }
  quiet / sum(outbreak[2206:2548, ] == 0)
}
specificity <- rep(0, 17)
for (k in 1:17) {
  specificity[k] <- specOne(get(paste0("alarmall", k)),
                            get(paste0("simulatedoutbreak", k)))
}
# Seasonal variants for datasets 6, 7 and 16.
specificityseas <- rep(0, 3)
specSeasSets <- c(6, 7, 16)
for (s in seq_along(specSeasSets)) {
  specificityseas[s] <-
    specOne(get(paste0("alarmall", specSeasSets[s])),
            get(paste0("simulatedzseasoutbreak", specSeasSets[s])))
}
#----------------------------------------------
# Timeliness
# For each simulation j and dataset:
#   1) scan BACKWARD over the monitored window to find r2, the LAST day
#      with outbreak cases;
#   2) scan FORWARD over the same window to find r1, the FIRST outbreak
#      day;
#   3) scan the alarm series forward; on the first alarmed outbreak day,
#      add its position within the outbreak span, normalized by the
#      span's length, to ss;
#   4) if the whole window is scanned with no detection, count the
#      simulation in n (penalty of 1, the worst case).
# timeliness = (ss + n) / nsim, so 0 = instant detection, 1 = missed.
# NOTE(review): the code relies on R's for-loop semantics -- `i` and
# `test` retain their last values after each loop, which is what the
# `if(i==1 & test!=T)` check after loop 3 depends on.  Also, if loops
# 1-2 find no outbreak day, r1/r2 silently keep their values from the
# previous iteration/dataset; this is only safe because every simulated
# series has exactly one injected outbreak inside the monitored window
# -- confirm against the simulation code.  `years` is defined earlier
# in the file (the window arithmetic implies years == 7).
timeliness=rep(0,17)
timelinessseas=rep(0,3)
# Dataset 1
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak1[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak1[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall1[nrow(alarmall1)-i+1,j]==1 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak1)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[1]=(ss+n)/nsim
# Dataset 2.  NOTE(review): alarmall2 is never filled by the detection
# loop (dataset 2 was commented out there), so this scores all-zero
# alarms -- every simulation takes the no-detection penalty.
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak2[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak2[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall2[nrow(alarmall2)-i+1,j]==1 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak2)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[2]=(ss+n)/nsim
# Dataset 3
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak3[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak3[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall3[nrow(alarmall3)-i+1,j]==1 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak3)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[3]=(ss+n)/nsim
# Dataset 4
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak4[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak4[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall4[nrow(alarmall4)-i+1,j]==1 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak4)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[4]=(ss+n)/nsim
# Dataset 5
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak5[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak5[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall5[nrow(alarmall5)-i+1,j]==1 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak5)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[5]=(ss+n)/nsim
# Dataset 6
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak6[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak6[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak6)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[6]=(ss+n)/nsim
# Dataset 6 scored against its seasonal-outbreak counterpart
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedzseasoutbreak6[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedzseasoutbreak6[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedzseasoutbreak6)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timelinessseas[1]=(ss+n)/nsim
# Dataset 7
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak7[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak7[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak7)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[7]=(ss+n)/nsim
# Dataset 7 scored against its seasonal-outbreak counterpart
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedzseasoutbreak7[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedzseasoutbreak7[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedzseasoutbreak7)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timelinessseas[2]=(ss+n)/nsim
# Dataset 8
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak8[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak8[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall8[nrow(alarmall8)-i+1,j]==1 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak8)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[8]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak9[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak9[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall9[nrow(alarmall9)-i+1,j]==1 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak9)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[9]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak10[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak10[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall10[nrow(alarmall10)-i+1,j]==1 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak10)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[10]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak11[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak11[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall11[nrow(alarmall11)-i+1,j]==1 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak11)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[11]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak12[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak12[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall12[nrow(alarmall12)-i+1,j]==1 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak12)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[12]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak13[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak13[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall13[nrow(alarmall13)-i+1,j]==1 & simulatedoutbreak13[nrow(simulatedoutbreak1)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak13)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[13]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak14[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak14[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall14[nrow(alarmall14)-i+1,j]==1 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak14)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[14]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak15[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak15[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall15[nrow(alarmall15)-i+1,j]==1 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak15)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[15]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak16[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak16[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak16)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[16]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedzseasoutbreak16[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedzseasoutbreak16[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedzseasoutbreak16)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timelinessseas[3]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak17[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak17[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall17[nrow(alarmall17)-i+1,j]==1 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak17)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[17]=(ss+n)/nsim
# Summary tables ----
# Collect the per-signal performance metrics into two data frames -- one row
# per signal for all seventeen signals, and a second table for the three
# seasonal variants -- then write both to <myDir>/output as CSV.
summary1 <- data.frame(fpr, pod, sensitivity, specificity, timeliness)
rownames(summary1) <- paste0("sigid", 1:17)

summary2 <- data.frame(fprseas, podseas, sensitivityseas,
                       specificityseas, timelinessseas)
rownames(summary2) <- c("sigid6", "sigid7", "sigid16")

# Create the output directory on first use.
out_dir <- file.path(myDir, "output")
if (!dir.exists(out_dir)) {
  dir.create(out_dir)
}
fwrite(summary1, file.path(out_dir, "summaryNB-18.csv"),
       row.names = FALSE)
fwrite(summary2, file.path(out_dir, "summarySeasNB-18.csv"),
       row.names = FALSE)
|
#' Evaluate an expression, capture everything it prints, and forward the
#' captured text to the client console as an out-of-band message.
#'
#' @param expr Expression to capture; taken unevaluated via substitute() and
#'   evaluated in the caller's frame, so it sees the caller's variables.
#' @param terminate String appended after the captured output
#'   (default: a newline).
#' @return The value of \code{expr}, invisibly (so the result is not
#'   printed a second time).
rcloud.out <- function(expr, terminate="\n") {
    expr <- substitute(expr)
    rval <- NULL
    file <- textConnection("rval", "w", local = TRUE)
    sink(file)
    # Guarantee the sink and the text connection are released even if
    # eval() throws.
    on.exit({ sink(); close(file) })
    v <- withVisible(eval(expr, parent.frame()))
    if (v$visible) print(v$value)
    on.exit()
    sink()
    # Bug fix: the original cleared the on.exit() handler and then only
    # called sink(), so the text connection was never closed on the success
    # path -- a connection leak.  close() also flushes any final partial
    # line into rval before we read it.
    close(file)
    self.oobSend(list("console.out", paste0(paste(as.character(rval), collapse="\n"), terminate)))
    invisible(v$value)
}
| /rcloud.support/R/output.R | permissive | cscheid/rcloud | R | false | false | 409 | r | rcloud.out <- function(expr, terminate="\n") {
expr <- substitute(expr)
rval <- NULL
file <- textConnection("rval", "w", local = TRUE)
sink(file)
on.exit({ sink(); close(file) })
v <- withVisible(eval(expr, parent.frame()))
if (v$visible) print(v$value)
on.exit()
sink()
self.oobSend(list("console.out", paste0(paste(as.character(rval), collapse="\n"), terminate)))
invisible(v$value)
}
|
# Ensemble of model predictions for the Rossmann competition: blend one
# xgboost submission with three H2O random-forest submissions.
# NOTE(review): relies on readr::read_csv() having been attached earlier;
# setwd() to a user-specific path is kept as in the original script.
setwd("Documents/Project Team/Rossman")
# Importing prediction csv from different models.
pred1 <- read_csv("xgb7.csv")
pred2 <- read_csv("h2o_random_forest2.csv")
# Bug fix: the original assigned all three forest files to pred2, so the
# blend below failed on the missing pred3/pred4.  File-to-variable order is
# preserved from the original read sequence -- confirm the intended
# weight-to-model mapping.
pred3 <- read_csv("h2o_random_forest1.csv")
pred4 <- read_csv("h2o_random_forest3.csv")
# After a few tries, settled on these weights: 80% xgboost, 20% split
# across the three random forests (30/30/40).
pred <- 0.8*pred1$Sales + 0.2*(0.3*pred2$Sales + 0.3*pred3$Sales + 0.4*pred4$Sales)
submission <- data.frame(Id=pred1$Id, Sales=pred)
write.csv(submission, "combined1.csv", row.names = FALSE)
| /Ensembling.R | no_license | leotrj/Rossmann-Store-Challenge | R | false | false | 490 | r | setwd("Documents/Project Team/Rossman")
#Importing prediction csv from different models
pred1 <- read_csv("xgb7.csv")
pred2 <- read_csv("h2o_random_forest2.csv")
pred2 <- read_csv("h2o_random_forest1.csv")
pred2 <- read_csv("h2o_random_forest3.csv")
#After few tries, decided with the following weights
pred <- 0.8*pred1$Sales + 0.2*(0.3*pred2$Sales + 0.3*pred3$Sales + 0.4*pred4$Sales)
submission <- data.frame(Id=pred1$Id, Sales=pred)
write.csv(submission, "combined1.csv",row.names=F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.R
\name{dlGoogle}
\alias{dlGoogle}
\title{Download file from Google Drive}
\usage{
dlGoogle(
url,
archive = NULL,
targetFile = NULL,
checkSums,
messSkipDownload,
destinationPath,
type = NULL,
overwrite,
needChecksums,
verbose = getOption("reproducible.verbose", 1),
team_drive = NULL,
...
)
}
\arguments{
\item{url}{The url (link) to the file.}
\item{archive}{Optional character string giving the path of an archive
containing \code{targetFile}, or a vector giving a set of nested archives
(e.g., \code{c("xxx.tar", "inner.zip", "inner.rar")}). If there is/are (an) inner
archive(s), but they are unknown, the function will try all until it finds
the \code{targetFile}. See table in \code{\link[=preProcess]{preProcess()}}. If it is \code{NA},
then it will \emph{not} attempt to see it as an archive, even if it has archive-like
file extension (e.g., \code{.zip}). This may be useful when an R function
is expecting an archive directly.}
\item{targetFile}{Character string giving the filename (without relative or
absolute path) to the eventual file
(raster, shapefile, csv, etc.) after downloading and extracting from a zip
or tar archive. This is the file \emph{before} it is passed to
\code{postProcess}. The internal checksumming does not checksum
the file after it is \code{postProcess}ed (e.g., cropped/reprojected/masked).
Using \code{Cache} around \code{prepInputs} will do a sufficient job in these cases.
See table in \code{\link[=preProcess]{preProcess()}}.}
\item{destinationPath}{Character string of a directory in which to download
and save the file that comes from \code{url} and is also where the function
will look for \code{archive} or \code{targetFile}. NOTE (still experimental):
To prevent repeated downloads in different locations, the user can also set
\code{options("reproducible.inputPaths")} to one or more local file paths to
search for the file before attempting to download. Default for that option is
\code{NULL} meaning do not search locally.}
\item{overwrite}{Logical. Should downloading and all the other actions occur
even if they pass the checksums or the files are all there.}
\item{verbose}{Numeric, -1 silent (where possible), 0 being very quiet,
1 showing more messaging, 2 being more messaging, etc.
Default is 1. Above 3 will output much more information about the internals of
Caching, which may help diagnose Caching challenges. Can set globally with an
option, e.g., \verb{options('reproducible.verbose' = 0) to reduce to minimal}}
\item{...}{Not used here. Only used to allow other arguments to other fns to not fail.}
}
\description{
Download file from Google Drive
}
\author{
Eliot McIntire and Alex Chubaty
}
\keyword{internal}
| /man/dlGoogle.Rd | no_license | PredictiveEcology/reproducible | R | false | true | 2,793 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.R
\name{dlGoogle}
\alias{dlGoogle}
\title{Download file from Google Drive}
\usage{
dlGoogle(
url,
archive = NULL,
targetFile = NULL,
checkSums,
messSkipDownload,
destinationPath,
type = NULL,
overwrite,
needChecksums,
verbose = getOption("reproducible.verbose", 1),
team_drive = NULL,
...
)
}
\arguments{
\item{url}{The url (link) to the file.}
\item{archive}{Optional character string giving the path of an archive
containing \code{targetFile}, or a vector giving a set of nested archives
(e.g., \code{c("xxx.tar", "inner.zip", "inner.rar")}). If there is/are (an) inner
archive(s), but they are unknown, the function will try all until it finds
the \code{targetFile}. See table in \code{\link[=preProcess]{preProcess()}}. If it is \code{NA},
then it will \emph{not} attempt to see it as an archive, even if it has archive-like
file extension (e.g., \code{.zip}). This may be useful when an R function
is expecting an archive directly.}
\item{targetFile}{Character string giving the filename (without relative or
absolute path) to the eventual file
(raster, shapefile, csv, etc.) after downloading and extracting from a zip
or tar archive. This is the file \emph{before} it is passed to
\code{postProcess}. The internal checksumming does not checksum
the file after it is \code{postProcess}ed (e.g., cropped/reprojected/masked).
Using \code{Cache} around \code{prepInputs} will do a sufficient job in these cases.
See table in \code{\link[=preProcess]{preProcess()}}.}
\item{destinationPath}{Character string of a directory in which to download
and save the file that comes from \code{url} and is also where the function
will look for \code{archive} or \code{targetFile}. NOTE (still experimental):
To prevent repeated downloads in different locations, the user can also set
\code{options("reproducible.inputPaths")} to one or more local file paths to
search for the file before attempting to download. Default for that option is
\code{NULL} meaning do not search locally.}
\item{overwrite}{Logical. Should downloading and all the other actions occur
even if they pass the checksums or the files are all there.}
\item{verbose}{Numeric, -1 silent (where possible), 0 being very quiet,
1 showing more messaging, 2 being more messaging, etc.
Default is 1. Above 3 will output much more information about the internals of
Caching, which may help diagnose Caching challenges. Can set globally with an
option, e.g., \verb{options('reproducible.verbose' = 0) to reduce to minimal}}
\item{...}{Not used here. Only used to allow other arguments to other fns to not fail.}
}
\description{
Download file from Google Drive
}
\author{
Eliot McIntire and Alex Chubaty
}
\keyword{internal}
|
##
## Problem 2 in Programming Assign
##
best <- function(state, outcome) {
## Read outcome data
outcomeData <- read.csv("outcome-of-care-measures.csv",
colClasses = "character")
## Check that state and outcome are valid
## state validity check
if(!(state %in% outcomeData$State))
{
stop("invalid state")
}
## outcome validity check
if(!(outcome %in% c("heart attack", "heart failure", "pneumonia")))
{
stop("invalid outcome")
}
## Return hospital name in that state with lowest 30-day death
## rate
## find the subset with the hospitals in the specified state
outcomesInState = outcomeData[outcomeData$State == state,]
## get the column index representing the outcomes
outcomeIndex = NULL
if (outcome == "heart attack")
{
outcomeIndex = 11
}
else if (outcome == "heart failure")
{
outcomeIndex = 17
}
else if (outcome == "pneumonia")
{
outcomeIndex = 23
}
outcomesInState
## transform the column to numeric
outcomesInState[, outcomeIndex] = suppressWarnings(
as.numeric(outcomesInState[, outcomeIndex]))
## Data that is marked as "Not Available" will be replaced with NA in the
## outcomeIndex column. Now, we know that the rest of the columns are
## character vectors only, based on the way the data was made to be read
## from the csv file. So, we have just that one column with NA values and
## thus a call to complete.cases() will take out rows that have NA values in
## in that column. We are guaranteed to not have NA values in the other
## columns by virtue of having imported the data as character vectors
relevantRowVector = complete.cases(outcomesInState)
## make sure we have non-NA data that we are sorting through
outcomeDataComplete = outcomesInState[relevantRowVector,]
## sort in ascending order. First, based on the mortality rate and then
## based on hospital names
sortedOutcomeData = outcomeDataComplete[
order(outcomeDataComplete[,outcomeIndex],
outcomeDataComplete[,2]),]
## the first row has the best hospital. Return the name of the hospital
bestHospital = sortedOutcomeData[1,2]
bestHospital
} | /ProgrammingAssignment3/best.R | no_license | skdb2015/datasciencecoursera | R | false | false | 2,440 | r | ##
## Problem 2 in Programming Assign
##
best <- function(state, outcome) {
## Read outcome data
outcomeData <- read.csv("outcome-of-care-measures.csv",
colClasses = "character")
## Check that state and outcome are valid
## state validity check
if(!(state %in% outcomeData$State))
{
stop("invalid state")
}
## outcome validity check
if(!(outcome %in% c("heart attack", "heart failure", "pneumonia")))
{
stop("invalid outcome")
}
## Return hospital name in that state with lowest 30-day death
## rate
## find the subset with the hospitals in the specified state
outcomesInState = outcomeData[outcomeData$State == state,]
## get the column index representing the outcomes
outcomeIndex = NULL
if (outcome == "heart attack")
{
outcomeIndex = 11
}
else if (outcome == "heart failure")
{
outcomeIndex = 17
}
else if (outcome == "pneumonia")
{
outcomeIndex = 23
}
outcomesInState
## transform the column to numeric
outcomesInState[, outcomeIndex] = suppressWarnings(
as.numeric(outcomesInState[, outcomeIndex]))
## Data that is marked as "Not Available" will be replaced with NA in the
## outcomeIndex column. Now, we know that the rest of the columns are
## character vectors only, based on the way the data was made to be read
## from the csv file. So, we have just that one column with NA values and
## thus a call to complete.cases() will take out rows that have NA values in
## in that column. We are guaranteed to not have NA values in the other
## columns by virtue of having imported the data as character vectors
relevantRowVector = complete.cases(outcomesInState)
## make sure we have non-NA data that we are sorting through
outcomeDataComplete = outcomesInState[relevantRowVector,]
## sort in ascending order. First, based on the mortality rate and then
## based on hospital names
sortedOutcomeData = outcomeDataComplete[
order(outcomeDataComplete[,outcomeIndex],
outcomeDataComplete[,2]),]
## the first row has the best hospital. Return the name of the hospital
bestHospital = sortedOutcomeData[1,2]
bestHospital
} |
library(tidyverse)
library(stringdist)
library(rvest)
raw_stories_no_covid <- read_csv(here::here("data", "cumulative cash bail no covid story urls.csv"),
col_types = cols(
stories_id = col_double(),
publish_date = col_datetime(format = "%m/%d/%Y %H:%M"),
title = col_character(),
url = col_character(),
language = col_character(),
ap_syndicated = col_logical(),
themes = col_character(),
media_id = col_double(),
media_name = col_character(),
media_url = col_character()
)) %>%
mutate(file = "cumulative cash bail no covid story urls.csv")
raw_stories <- read_csv(here::here("data", "cumulative cash bail story urls.csv"),
col_types = cols(
stories_id = col_double(),
publish_date = col_datetime(format = "%m/%d/%Y %H:%M"),
title = col_character(),
url = col_character(),
language = col_character(),
ap_syndicated = col_logical(),
themes = col_character(),
media_id = col_double(),
media_name = col_character(),
media_url = col_character()
)) %>%
mutate(file = "cumulative cash bail story urls.csv")
raw_stories <- full_join(raw_stories_no_covid, raw_stories,
by = c("stories_id", "publish_date",
"title", "url", "language", "ap_syndicated",
"themes", "media_id", "media_name", "media_url"))
# mediacloud sources
files <- list.files(here::here("data", "sources"),
full.names = TRUE, pattern = "*.csv")
sources <- map_dfr(files, read_csv,
.id = "source_group",
col_types = cols(
.default = col_character(),
media_id = col_double(),
editor_notes = col_logical(),
stories_per_day = col_double(),
first_story = col_date(format = "")
)) %>%
mutate(source_group = factor(
source_group, labels = map_chr(files, ~ str_split(., "\\(|\\)")[[1]][2])
)) %>%
arrange(source_group) %>%
group_by_at(vars(-source_group)) %>%
filter(row_number() == 1) %>% # some sources belong to > 1 group
ungroup()
# merge with stories
stories <- raw_stories %>%
distinct() %>% # remove any initial duplicates (none)
left_join(sources, by = "media_id", suffix = c("", "_source")) %>%
mutate(
date = lubridate::date(publish_date),
raw_title = str_to_lower(title) %>%
str_remove_all("[[:punct:]]") %>%
str_squish()
)
stories_no_dupes <- stories %>%
group_by(raw_title) %>% # remove duplicate titles w/out punctuation, capitals
arrange(-ap_syndicated, source_group) %>%
mutate(n = n(), duplicate = n() > 1) %>%
filter(row_number() == 1) %>%
ungroup()
# calculate a measure of distance between titles
title_dist <- stringdistmatrix(stories_no_dupes$raw_title, stories_no_dupes$raw_title,
method = "lcs", useNames = "strings")
# kind of normalize it be number of characters
# (just ad hoc for later checking manually)
total_chars <- outer(stories_no_dupes$raw_title, stories_no_dupes$raw_title,
FUN = function(x, y) nchar(paste(x, y)))
dist_meas <- title_dist / total_chars
# the upper bound is arbitrary
poss_dupes <- apply(dist_meas, 1, function(x) any(between(x, 1e-12, .05)))
# these can now be filtered manually
stories_no_dupes <- stories_no_dupes %>%
mutate(poss_dupe = poss_dupes)
# same news source with different urls, or reported at the exact same time
# left in others that weren't from the same source and posted at slightly different times
duplicate_stories <- c(
1221383338, 1236247116, 1503596450, 1230784162, 1239841475,
1236912331, 1532884548, 1181500976, 1172022793, 605323993,
1498847812, 1298474367, 826172229, 1484194571,
657168505, 804324704
)
stories_no_dupes <- stories_no_dupes %>%
filter(!stories_id %in% duplicate_stories)
# attempt to download html from url
# commented out and read in when already downloaded
safe_full_text <- possibly(read_html, otherwise = NA)
full_texts_html <- map(stories_no_dupes$url, safe_full_text)
safe_write_xml <- possibly(write_xml, otherwise = NA)
walk2(full_texts_html, stories_no_dupes$stories_id, # not writing directly to project directory??
~safe_write_xml(.x, file = str_glue("~/Google Drive/Projects/COVID/Project13 - Jails:Prisons/data/cash_bail_full",
"/text_{.y}.xml")))
full_texts_html <- map(stories_no_dupes$stories_id,
~safe_full_text(here::here("data", "cash_bail_full",
str_glue("text_{.}.xml"))))
# extract the p elements -- generally have the article text
safe_get_text <- possibly(~xml_text(xml_find_all(.x, "//p")), otherwise = NA)
full_texts <- map(full_texts_html, safe_get_text)
stories_no_dupes <- stories_no_dupes %>%
mutate(full_text = map_chr(full_texts, ~ reduce(., paste, .init = "")))
work <- stories_no_dupes %>%
select(date, media_name, url, raw_title, full_text) %>%
mutate(
text = str_remove_all(full_text, "(\\n)|(\\t)") %>%
str_to_lower() %>%
str_remove_all("©|°|\\$|~|\\|") %>%
str_remove_all("[0-9]") %>%
str_squish(),
first_letters = str_sub(text, 1, 15),
last_letters = str_sub(text, nchar(text) - 15, nchar(text))
)
to_remove <- work %>%
group_by(first_letters) %>%
mutate(n = n()) %>%
filter(row_number() == 1) %>%
arrange(desc(n))
to_remove2 <- work %>%
group_by(last_letters) %>%
mutate(n = n()) %>%
filter(row_number() == 1) %>%
arrange(desc(n))
strings_to_remove <- c( # manually extracted from the beginning or end of the stories that share beginnings/ends
"have an existing account? already have a subscription? don't have an account? get the news let friends in your social network know what you are reading about",
"a link has been sent to your friends email address a link has been posted to your facebook feed to find out more about facebook commenting please read the conversation guidelines and faqs welcome to our new and improved comments which are for subscribers only this is a test to see whether we can improve the experience for you you do not need a facebook profile to participate you will need to register before adding a comment typed comments will be lost if you are not logged in please be polite its ok to disagree with someones ideas but personal attacks insults threats hate speech advocating violence and other violations can result in a ban if you see comments in violation of our community guidelines please report them with help from the cdc we answer some of googles most searched questions about the coronavirus crisis",
"we invite you to use our commenting platform to engage in insightful conversations about issues in our community although we do not prescreen comments we reserve the right at all times to remove any information or materials that are unlawful threatening abusive libelous defamatory obscene vulgar pornographic profane indecent or otherwise objectionable to us and to disclose any information necessary to satisfy the law regulation or government request we might permanently block any user who abuses these conditions if you see comments that you find offensive please use the flag as inappropriate feature by hovering over the right side of the post and pulling down on the arrow that appears or contact our editors by emailing moderatorscngcom this website uses cookies to improve your experience by continuing to use the site you accept our privacy policy and cookie policy",
"get breaking news in your browser. click here to turn on notifications",
"notifications can be turned off anytime in the browser settings",
"check the status of the virus in your state with your state health departments websites by tapping below download the brand new wusa app here sign up for the get up dc newsletter your forecast your commute your news",
"settings cancel set",
"thanks for contacting us. we've received your submission",
"would you like to receive local news notifications on your desktop",
"watch videos",
" ad choices",
"(about us)",
"sign in manage newsletters",
"filed under",
"not a member? register",
"this material may not be published, broadcast, rewritten, or redistributed. fox news network, llc. all rights reserved",
"all market data delayed minutes",
"fox news flash top headlines are here check out whats clicking on foxnewscom get all the latest news on coronavirus and more delivered daily to your inbox sign up here",
"support us about us, contact us, staff, careers, circulation, privacy, terms",
"help or sign in with a social account: don't have an account yet? sign up › get the most out of your experience with a personalized all-access pass to everything local on events, music, restaurants, news and more. enter your email or sign up with a social account to get started already registered? login",
"the lens in-depth news and investigations for new orleans",
"this content is being provided for free as a public service to our readers during the coronavirus outbreak",
"sign in to your forbes account or register for instructions on how to disable your ad blocker, click here. if this is your first time registering, please check your inbox for more information about the benefits of your forbes account and what you can do next",
"rigorous nonprofit news for vermont",
"watch cbsn live by",
"help us keep reporting",
"triblive's daily and weekly email newsletters deliver the news you want and information you need, right to your inbox",
"all rights reserved",
"free daily headlines",
"dear readers, we need your help. the coronavirus crisis in portland is a major threat to the mercury's ability to keep the city informed. we pride ourselves on having navigated many storms in the world of independent local media, but this time is different. % of our revenue—from advertising, ticketing fees, and our own events—is directly tied to people getting together in groups. the coronavirus situation has virtually eliminated this income all at once. at a time when the city needs local coverage more than ever, we're asking for your help to support continued coverage or everything happening in portland. you can make one-time or recurring donations. we can't say enough how much we appreciate your support. thank you",
"dear readers, the coronavirus pandemic has caused widespread disruption to the lives of everyone in tampa bay and to so many businesses in our community. here at the tampa bay times, we continue to provide free, up-to-date information at tampabay.com/coronavirus as a public service. but we need your help. please consider supporting us by subscribing or donating, and by sharing our work. thank you",
"subscribe donate newsletters editor’s note: the salt lake tribune is providing readers free access to critical local stories about the coronavirus during this time of heightened concern. see more coverage here",
"get the latest news delivered daily we invite you to use our commenting platform to engage in insightful conversations about issues in our community although we do not prescreen comments we reserve the right at all times to remove any information or materials that are unlawful threatening abusive libelous defamatory obscene vulgar pornographic profane indecent or otherwise objectionable to us and to disclose any information necessary to satisfy the law regulation or government request we might permanently block any user who abuses these conditions if you see comments that you find offensive please use the flag as inappropriate feature by hovering over the right side of the post and pulling down on the arrow that appears or contact our editors by emailing moderatorscngcom this website uses cookies to improve your experience by continuing to use the site you accept our privacy policy and cookie policy",
"usa today network choose the plan thats right for you digital access or digital and print delivery",
"your california privacy rights privacy policy gannett",
"do not sell my personal information cookie policy do not sell my personal information privacy policy terms of service",
"your california privacy rights / privacy policy gannett usa today network",
"choose the plan thats right for you. digital access or digital and print delivery",
"original content available for noncommercial use under a creative commons license except where noted",
"hearst television participates in various affiliate marketing programs, which means we may get paid commissions on purchases made through our links to retailer sites",
"all rights reservedterms of useprivacy noticeyour ad choicessitemapcalifornia privacy rightsdo not sell my personal information would you like to receive desktop browser notifications about breaking news and other major stories? not nowyes please",
"you must log in to post a comment",
"note to readers: if you purchase something through one of our affiliate links we may earn a commission",
"registration on or use of this site constitutes acceptance of our user agreement, privacy policy and cookie statement, and your california privacy rights",
"advance local media llc. all rights reserved (about us). the material on this site may not be reproduced, distributed, transmitted, cached or otherwise used, except with the prior written permission of advance local. community rules apply to all content you upload or otherwise submit to this site. ad choices",
"get up-to-the-minute news sent straight to your device",
"get up to speed with our essential california newsletter, sent six days a week",
"sign up for the latest news, best stories and what they mean for you, plus answers to your questions",
"subscribe for unlimited access",
"follow the latest on the outbreak with our newsletter every weekday all stories in the newsletter are free to access by signing up you agree to our terms of use and privacy policy follow the latest on the outbreak with our newsletter every weekday all stories in the newsletter are free to access by signing up you agree to our terms of use and privacy policy",
"click here to access the online public inspection file viewers with disabilities can get assistance accessing this station's fcc public inspection file by contacting the station with the information listed below. questions or concerns relating to the accessibility of the fcc's online public file system should be directed to the fcc",
"view the discussion thread",
"accessibility tools",
"readers around grass valley and nevada county make the unions work possible your financial contribution supports our efforts to deliver quality locally relevant journalism now more than ever your support is critical to help us keep our community informed about the evolving coronavirus pandemic and the impact it is having locally every contribution however large or small will make a difference your donation will help us continue to cover covid and our other vital local news get immediate access to organizations and people in our area that need your help or can provide help during the coronavirus crisis start a dialogue stay on topic and be civil if you dont follow the rules your comment may be deleted card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone work for the best boss you invest hrs daily and make an extra seeking a contractor for an inground fiberglass spa install andor sq ft building enclosure real estate agents save covid = stress destresser = no fees functiond s id var jsijs=dgetelementsbytagnamesifdgetelementbyididreturnjs=dcreateelementsjsid=idjssrc=embedscribblelivecomwidgetsembedjsijsparentnodeinsertbeforejs ijsdocument script scrbbljs thuh l frih l sath l sunh l monh l card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card 
vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone classifieds jobs real estate rentals autos business service directory pets photos for sale merchandise garage sales contact us contribute subscribe subscriber services about us comment policy advertise newsletter signup magazines cookie list sierra sun tahoe daily tribune tahoecom the wildwood independent swift communications inc",
"want the latest news and weather updates",
"watch live",
"copyright the associated press",
"the associated press contributed to this report",
"quotes delayed at least minutes. real-time quotes provided by bats bzx real-time price. market data provided by interactive data (terms & conditions). powered and implemented by interactive data managed solutions. company fundamental data provided by morningstar. earnings estimates data provided by zacks. mutual fund and etf data provided by lipper. economic data provided by econoday. dow jones & company terms & conditions. this material may not be published, broadcast, rewritten, or redistributed. fox news network, llc. all rights reserved. faq - updated privacy policy",
"this material may not be published, broadcast, rewritten or redistributed",
"start a dialogue, stay on topic and be civil",
"if you don't follow the rules, your comment may be deleted",
"classifieds jobs real estate rentals autos business & service directory pets photos for sale merchandise garage sales contact us contribute subscribe subscriber services about us comment policy advertise newsletter signup magazines cookie list sierra sun tahoe daily tribune tahoe.com the wildwood independent - swift communications, inc",
"you must log in to post a comment",
"this website uses cookies to improve your experience. by continuing to use the site, you accept our privacy policy and cookie policy",
"do not sell my personal information",
"cookie policy",
"privacy policy",
"terms of service",
"wilmington tv. . contact@wilm-tv.com capitol broadcasting company wilm-tv terms of use fcc/eeo reportsite developed and hosted by impact media solutions",
"for more information, go to",
"sign up for our newsletters",
"associated press and may not be published, broadcast, rewritten, or redistributed. associated press text, photo, graphic, audio and/or video material shall not be published, broadcast, rewritten for broadcast or publication or redistributed directly or indirectly in any medium. neither these ap materials nor any portion thereof may be stored in a computer except for personal and noncommercial use. the ap will not be held liable for any delays, inaccuracies, errors or omissions therefrom or in the transmission or delivery of all or any part thereof or for any damages arising from any of the foregoing",
"would you like to receive desktop browser notifications about breaking news and other major stories",
"choose the plan that’s right for you. digital access or digital and print delivery",
"original content available for non-commercial use under a creative commons license, except where noted",
"check back later for updates to this story. get morning report and other email newsletters",
"the material on this site may not be reproduced, distributed, transmitted, cached or otherwise used, except with the prior written permission of advance local",
"community rules apply to all content you upload or otherwise submit to this site",
"join our facebook group for the latest updates on coronavirus",
"x trending:",
"you are now logged in. forgot your password? create new account",
"accessibility terms of use",
"get morning report and other email newsletters",
"this story has been shared",
"not nowyes please",
"manage newsletters",
"all stories in the newsletter are free to access",
"by signing up you agree to our terms of use",
"click to read more and view comments",
"click to hide terms of use",
"washington weather summary: degrees washington",
"to keep our community informed of the most urgent coronavirus news, our critical updates are free to read. for more in-depth coverage and analysis, subscribe now",
"already a subscriber?",
"sign up now to get the most recent coronavirus headlines and other important local and national news sent to your email inbox daily",
"log in or activate your account",
"have an upcoming event? click below to share it with the community! plus, after your event is approved, log back into your user dashboard for an opportunity to enhance your listing",
"get the latest local and national news",
"please log in, or sign up for a new account and purchase a subscription to continue reading",
"the best local, regional and national news in sports, politics, business and more",
"on your next view you will be asked to",
"subscribe today for unlimited access",
"if you're a current print subscriber, you can opt-in for all access at any time",
"sorry, an error occurred",
"we hope that you enjoy our free content",
"thank you for reading",
"if you previously used a social network to login to wral.com, click the “forgot your password” link to reset your password",
"orry, no promotional deals were found matching that code",
"please subscribe or activate your digital account today",
"stories about the coronavirus pandemic are free to read as a public service",
"if this coverage is important to you, consider supporting local journalism by subscribing",
"follow the latest on the outbreak with our newsletter every weekday",
"please donate to keep us thriving through this crisis and beyond",
"become a donor and go ad-free",
"get the latest updates in news, food, music and culture, and receive special offers direct to your inbox",
"get the latest news delivered daily! we invite you to use our commenting platform to engage in insightful conversations about issues in our community. although we do not pre-screen comments, we reserve the right at all times to remove any information or materials that are unlawful, threatening, abusive, libelous, defamatory, obscene, vulgar, pornographic, profane, indecent or otherwise objectionable to us, and to disclose any information necessary to satisfy the law, regulation, or government request. we might permanently block any user who abuses these conditions. if you see comments that you find offensive, please use the \"flag as inappropriate\" feature by hovering over the right side of the post, and pulling down on the arrow that appears. or, contact our editors by emailing",
"get social working for tips are you a covid- expert, public health worker, medical provider, elected official, employer, business owner, or patient? we’d like to include your expertise, data, experiences, concerns, or anonymous tips related to covid- in our reporting. click to connect with our newsroom"
)
# Wrap each boilerplate string in coll() so that str_remove_all()
# matches it as a literal string (no regex interpretation).
strings_to_remove_coll <- map(strings_to_remove, coll)
# Strip every boilerplate string from the article text, then recompute
# the leading/trailing ~40 characters so any remaining shared
# boilerplate can be spotted in the next check.
work <- mutate(
  work,
  new_text = reduce(strings_to_remove_coll, str_remove_all, .init = text),
  first_letters = str_sub(new_text, 1, 40),
  last_letters = str_sub(new_text, nchar(new_text) - 40, nchar(new_text))
)
# Check again: one representative row per distinct opening/closing
# snippet, with n = how many articles share it. Frequently shared
# snippets are candidates for the boilerplate-removal list.
to_remove <- work %>%
  group_by(first_letters) %>%
  mutate(n = n()) %>%
  slice(1) %>%
  arrange(desc(n))

to_remove2 <- work %>%
  group_by(last_letters) %>%
  mutate(n = n()) %>%
  slice(1) %>%
  arrange(desc(n))
# revised text is in column new_text
# Join the cleaned text back onto the story metadata. The join keys are
# spelled out explicitly (the columns `work` inherited from
# stories_no_dupes) rather than relying on left_join()'s silent
# natural-join behavior, which breaks if either frame gains a column.
stories_no_dupes <- left_join(
  stories_no_dupes, work,
  by = c("date", "media_name", "url", "raw_title", "full_text")
)
# Persist the final dataset in both binary and portable form.
write_rds(stories_no_dupes, here::here("data", "cash_bail_full.rds"))
write_csv(stories_no_dupes, here::here("data", "cash_bail_full.csv"))
library(tidyverse)
library(stringdist)
library(rvest)
# MediaCloud export: cash-bail stories that do NOT mention covid.
# A `file` column records which export each row came from.
no_covid_file <- "cumulative cash bail no covid story urls.csv"
no_covid_col_types <- cols(
  stories_id = col_double(),
  publish_date = col_datetime(format = "%m/%d/%Y %H:%M"),
  title = col_character(),
  url = col_character(),
  language = col_character(),
  ap_syndicated = col_logical(),
  themes = col_character(),
  media_id = col_double(),
  media_name = col_character(),
  media_url = col_character()
)
raw_stories_no_covid <- here::here("data", no_covid_file) %>%
  read_csv(col_types = no_covid_col_types) %>%
  mutate(file = no_covid_file)
# MediaCloud export: the full cumulative cash-bail story list.
# A `file` column records which export each row came from.
covid_file <- "cumulative cash bail story urls.csv"
covid_col_types <- cols(
  stories_id = col_double(),
  publish_date = col_datetime(format = "%m/%d/%Y %H:%M"),
  title = col_character(),
  url = col_character(),
  language = col_character(),
  ap_syndicated = col_logical(),
  themes = col_character(),
  media_id = col_double(),
  media_name = col_character(),
  media_url = col_character()
)
raw_stories <- here::here("data", covid_file) %>%
  read_csv(col_types = covid_col_types) %>%
  mutate(file = covid_file)
# Combine the two exports. Joining on every column except `file` means
# a story present in both keeps a single row, with the unmatched `file`
# columns carried through under default .x/.y suffixes.
story_join_cols <- c(
  "stories_id", "publish_date", "title", "url", "language",
  "ap_syndicated", "themes", "media_id", "media_name", "media_url"
)
raw_stories <- full_join(raw_stories_no_covid, raw_stories, by = story_join_cols)
# mediacloud sources
# Gather every per-group source list exported from MediaCloud.
# NOTE(review): list.files() treats `pattern` as a regex, so "*.csv" is
# not a glob; "\\.csv$" would be the precise form -- confirm intent.
files <- list.files(here::here("data", "sources"),
full.names = TRUE, pattern = "*.csv")
# Stack all source files into one frame. `.id = "source_group"` records
# each row's file index, which is then relabeled with the text between
# parentheses in the corresponding file name.
sources <- map_dfr(files, read_csv,
.id = "source_group",
col_types = cols(
.default = col_character(),
media_id = col_double(),
editor_notes = col_logical(),
stories_per_day = col_double(),
first_story = col_date(format = "")
)) %>%
mutate(source_group = factor(
source_group, labels = map_chr(files, ~ str_split(., "\\(|\\)")[[1]][2])
)) %>%
# Sort by group, then keep one row per source (grouping on every column
# except source_group), so each source keeps its first group only.
arrange(source_group) %>%
group_by_at(vars(-source_group)) %>%
filter(row_number() == 1) %>% # some sources belong to > 1 group
ungroup()
# merge with stories
# Attach source metadata to each story and build a normalized title
# (lowercase, punctuation stripped, whitespace squished) for de-duping.
stories <- raw_stories %>%
distinct() %>% # remove any initial duplicates (none)
left_join(sources, by = "media_id", suffix = c("", "_source")) %>%
mutate(
date = lubridate::date(publish_date),
raw_title = str_to_lower(title) %>%
str_remove_all("[[:punct:]]") %>%
str_squish()
)
# Keep one row per normalized title. arrange() sorts the whole frame
# (it ignores grouping by default), so within each title group
# row_number() == 1 favors AP-syndicated rows (-TRUE sorts before
# -FALSE), then the earliest source_group factor level.
stories_no_dupes <- stories %>%
group_by(raw_title) %>% # remove duplicate titles w/out punctuation, capitals
arrange(-ap_syndicated, source_group) %>%
mutate(n = n(), duplicate = n() > 1) %>%
filter(row_number() == 1) %>%
ungroup()
# Calculate a measure of distance between titles: longest-common-
# subsequence edit distance between every pair of normalized titles.
title_dist <- stringdistmatrix(stories_no_dupes$raw_title, stories_no_dupes$raw_title,
                               method = "lcs", useNames = "strings")
# Kind of normalize it by number of characters
# (just ad hoc for later checking manually).
total_chars <- outer(stories_no_dupes$raw_title, stories_no_dupes$raw_title,
                     FUN = function(x, y) nchar(paste(x, y)))
dist_meas <- title_dist / total_chars
# Flag a title when any OTHER title is very close to it. The lower
# bound excludes the zero self-distance on the diagonal; the 0.05 upper
# bound is arbitrary. A vectorized matrix comparison replaces the
# previous row-wise apply() (same inclusive bounds as dplyr::between());
# na.rm = TRUE keeps rows with NA distances from flagging as NA.
poss_dupes <- rowSums(dist_meas >= 1e-12 & dist_meas <= .05, na.rm = TRUE) > 0
# these can now be filtered manually
stories_no_dupes <- stories_no_dupes %>%
  mutate(poss_dupe = poss_dupes)
# Manually confirmed duplicates: same news source under different urls,
# or the same story reported at the exact same time. Pairs from
# different sources posted at slightly different times were left in.
duplicate_stories <- c(
  1221383338, 1236247116, 1503596450, 1230784162,
  1239841475, 1236912331, 1532884548, 1181500976,
  1172022793, 605323993, 1498847812, 1298474367,
  826172229, 1484194571, 657168505, 804324704
)
stories_no_dupes <- filter(stories_no_dupes, !(stories_id %in% duplicate_stories))
# attempt to download html from url
# commented out and read in when already downloaded
# possibly() wrappers return NA instead of erroring on bad urls/files.
safe_full_text <- possibly(read_html, otherwise = NA)
full_texts_html <- map(stories_no_dupes$url, safe_full_text)
safe_write_xml <- possibly(write_xml, otherwise = NA)
# NOTE(review): pages are written to an absolute Google Drive path but
# re-read below via here::here("data", "cash_bail_full", ...); this
# assumes the files get synced/moved into the project -- confirm.
# str_glue() here concatenates its two template arguments.
walk2(full_texts_html, stories_no_dupes$stories_id, # not writing directly to project directory??
~safe_write_xml(.x, file = str_glue("~/Google Drive/Projects/COVID/Project13 - Jails:Prisons/data/cash_bail_full",
"/text_{.y}.xml")))
# Re-read the saved pages so the script can be re-run without downloading.
full_texts_html <- map(stories_no_dupes$stories_id,
~safe_full_text(here::here("data", "cash_bail_full",
str_glue("text_{.}.xml"))))
# extract the p elements -- generally have the article text
safe_get_text <- possibly(~xml_text(xml_find_all(.x, "//p")), otherwise = NA)
full_texts <- map(full_texts_html, safe_get_text)
# Collapse each page's paragraphs into one string. reduce(paste,
# .init = "") yields a leading space between pieces, and failed
# downloads (NA) become the literal text " NA" -- presumably acceptable
# since later cleaning squishes whitespace; verify downstream handling.
stories_no_dupes <- stories_no_dupes %>%
mutate(full_text = map_chr(full_texts, ~ reduce(., paste, .init = "")))
# Working copy for boilerplate detection: normalize the scraped text,
# then grab short opening/closing snippets so articles that share site
# boilerplate can be grouped and inspected.
work <- stories_no_dupes %>%
select(date, media_name, url, raw_title, full_text) %>%
mutate(
# drop newlines/tabs, lowercase, strip a few symbols and all digits
text = str_remove_all(full_text, "(\\n)|(\\t)") %>%
str_to_lower() %>%
str_remove_all("©|°|\\$|~|\\|") %>%
str_remove_all("[0-9]") %>%
str_squish(),
# first/last ~15 characters used as grouping keys below
first_letters = str_sub(text, 1, 15),
last_letters = str_sub(text, nchar(text) - 15, nchar(text))
)
# One representative row per distinct opening/closing snippet, with
# n = number of articles sharing it; frequently shared snippets flag
# site boilerplate worth adding to the removal list below.
to_remove <- work %>%
  group_by(first_letters) %>%
  mutate(n = n()) %>%
  slice(1) %>%
  arrange(desc(n))

to_remove2 <- work %>%
  group_by(last_letters) %>%
  mutate(n = n()) %>%
  slice(1) %>%
  arrange(desc(n))
# Boilerplate strings hand-collected from article beginnings/ends that
# shared identical short prefixes or suffixes (per to_remove /
# to_remove2). Each entry is removed verbatim from every article text.
# NOTE(review): a few entries appear twice (e.g. "you must log in to
# post a comment") -- harmless for str_remove_all(), just redundant.
strings_to_remove <- c( # manually extracted from the beginning or end of the stories that share beginnings/ends
"have an existing account? already have a subscription? don't have an account? get the news let friends in your social network know what you are reading about",
"a link has been sent to your friends email address a link has been posted to your facebook feed to find out more about facebook commenting please read the conversation guidelines and faqs welcome to our new and improved comments which are for subscribers only this is a test to see whether we can improve the experience for you you do not need a facebook profile to participate you will need to register before adding a comment typed comments will be lost if you are not logged in please be polite its ok to disagree with someones ideas but personal attacks insults threats hate speech advocating violence and other violations can result in a ban if you see comments in violation of our community guidelines please report them with help from the cdc we answer some of googles most searched questions about the coronavirus crisis",
"we invite you to use our commenting platform to engage in insightful conversations about issues in our community although we do not prescreen comments we reserve the right at all times to remove any information or materials that are unlawful threatening abusive libelous defamatory obscene vulgar pornographic profane indecent or otherwise objectionable to us and to disclose any information necessary to satisfy the law regulation or government request we might permanently block any user who abuses these conditions if you see comments that you find offensive please use the flag as inappropriate feature by hovering over the right side of the post and pulling down on the arrow that appears or contact our editors by emailing moderatorscngcom this website uses cookies to improve your experience by continuing to use the site you accept our privacy policy and cookie policy",
"get breaking news in your browser. click here to turn on notifications",
"notifications can be turned off anytime in the browser settings",
"check the status of the virus in your state with your state health departments websites by tapping below download the brand new wusa app here sign up for the get up dc newsletter your forecast your commute your news",
"settings cancel set",
"thanks for contacting us. we've received your submission",
"would you like to receive local news notifications on your desktop",
"watch videos",
" ad choices",
"(about us)",
"sign in manage newsletters",
"filed under",
"not a member? register",
"this material may not be published, broadcast, rewritten, or redistributed. fox news network, llc. all rights reserved",
"all market data delayed minutes",
"fox news flash top headlines are here check out whats clicking on foxnewscom get all the latest news on coronavirus and more delivered daily to your inbox sign up here",
"support us about us, contact us, staff, careers, circulation, privacy, terms",
"help or sign in with a social account: don't have an account yet? sign up › get the most out of your experience with a personalized all-access pass to everything local on events, music, restaurants, news and more. enter your email or sign up with a social account to get started already registered? login",
"the lens in-depth news and investigations for new orleans",
"this content is being provided for free as a public service to our readers during the coronavirus outbreak",
"sign in to your forbes account or register for instructions on how to disable your ad blocker, click here. if this is your first time registering, please check your inbox for more information about the benefits of your forbes account and what you can do next",
"rigorous nonprofit news for vermont",
"watch cbsn live by",
"help us keep reporting",
"triblive's daily and weekly email newsletters deliver the news you want and information you need, right to your inbox",
"all rights reserved",
"free daily headlines",
"dear readers, we need your help. the coronavirus crisis in portland is a major threat to the mercury's ability to keep the city informed. we pride ourselves on having navigated many storms in the world of independent local media, but this time is different. % of our revenue—from advertising, ticketing fees, and our own events—is directly tied to people getting together in groups. the coronavirus situation has virtually eliminated this income all at once. at a time when the city needs local coverage more than ever, we're asking for your help to support continued coverage or everything happening in portland. you can make one-time or recurring donations. we can't say enough how much we appreciate your support. thank you",
"dear readers, the coronavirus pandemic has caused widespread disruption to the lives of everyone in tampa bay and to so many businesses in our community. here at the tampa bay times, we continue to provide free, up-to-date information at tampabay.com/coronavirus as a public service. but we need your help. please consider supporting us by subscribing or donating, and by sharing our work. thank you",
"subscribe donate newsletters editor’s note: the salt lake tribune is providing readers free access to critical local stories about the coronavirus during this time of heightened concern. see more coverage here",
"get the latest news delivered daily we invite you to use our commenting platform to engage in insightful conversations about issues in our community although we do not prescreen comments we reserve the right at all times to remove any information or materials that are unlawful threatening abusive libelous defamatory obscene vulgar pornographic profane indecent or otherwise objectionable to us and to disclose any information necessary to satisfy the law regulation or government request we might permanently block any user who abuses these conditions if you see comments that you find offensive please use the flag as inappropriate feature by hovering over the right side of the post and pulling down on the arrow that appears or contact our editors by emailing moderatorscngcom this website uses cookies to improve your experience by continuing to use the site you accept our privacy policy and cookie policy",
"usa today network choose the plan thats right for you digital access or digital and print delivery",
"your california privacy rights privacy policy gannett",
"do not sell my personal information cookie policy do not sell my personal information privacy policy terms of service",
"your california privacy rights / privacy policy gannett usa today network",
"choose the plan thats right for you. digital access or digital and print delivery",
"original content available for noncommercial use under a creative commons license except where noted",
"hearst television participates in various affiliate marketing programs, which means we may get paid commissions on purchases made through our links to retailer sites",
"all rights reservedterms of useprivacy noticeyour ad choicessitemapcalifornia privacy rightsdo not sell my personal information would you like to receive desktop browser notifications about breaking news and other major stories? not nowyes please",
"you must log in to post a comment",
"note to readers: if you purchase something through one of our affiliate links we may earn a commission",
"registration on or use of this site constitutes acceptance of our user agreement, privacy policy and cookie statement, and your california privacy rights",
"advance local media llc. all rights reserved (about us). the material on this site may not be reproduced, distributed, transmitted, cached or otherwise used, except with the prior written permission of advance local. community rules apply to all content you upload or otherwise submit to this site. ad choices",
"get up-to-the-minute news sent straight to your device",
"get up to speed with our essential california newsletter, sent six days a week",
"sign up for the latest news, best stories and what they mean for you, plus answers to your questions",
"subscribe for unlimited access",
"follow the latest on the outbreak with our newsletter every weekday all stories in the newsletter are free to access by signing up you agree to our terms of use and privacy policy follow the latest on the outbreak with our newsletter every weekday all stories in the newsletter are free to access by signing up you agree to our terms of use and privacy policy",
"click here to access the online public inspection file viewers with disabilities can get assistance accessing this station's fcc public inspection file by contacting the station with the information listed below. questions or concerns relating to the accessibility of the fcc's online public file system should be directed to the fcc",
"view the discussion thread",
"accessibility tools",
"readers around grass valley and nevada county make the unions work possible your financial contribution supports our efforts to deliver quality locally relevant journalism now more than ever your support is critical to help us keep our community informed about the evolving coronavirus pandemic and the impact it is having locally every contribution however large or small will make a difference your donation will help us continue to cover covid and our other vital local news get immediate access to organizations and people in our area that need your help or can provide help during the coronavirus crisis start a dialogue stay on topic and be civil if you dont follow the rules your comment may be deleted card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone work for the best boss you invest hrs daily and make an extra seeking a contractor for an inground fiberglass spa install andor sq ft building enclosure real estate agents save covid = stress destresser = no fees functiond s id var jsijs=dgetelementsbytagnamesifdgetelementbyididreturnjs=dcreateelementsjsid=idjssrc=embedscribblelivecomwidgetsembedjsijsparentnodeinsertbeforejs ijsdocument script scrbbljs thuh l frih l sath l sunh l monh l card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card 
vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone card vfcounter vfcommentscountdisplaynone classifieds jobs real estate rentals autos business service directory pets photos for sale merchandise garage sales contact us contribute subscribe subscriber services about us comment policy advertise newsletter signup magazines cookie list sierra sun tahoe daily tribune tahoecom the wildwood independent swift communications inc",
"want the latest news and weather updates",
"watch live",
"copyright the associated press",
"the associated press contributed to this report",
"quotes delayed at least minutes. real-time quotes provided by bats bzx real-time price. market data provided by interactive data (terms & conditions). powered and implemented by interactive data managed solutions. company fundamental data provided by morningstar. earnings estimates data provided by zacks. mutual fund and etf data provided by lipper. economic data provided by econoday. dow jones & company terms & conditions. this material may not be published, broadcast, rewritten, or redistributed. fox news network, llc. all rights reserved. faq - updated privacy policy",
"this material may not be published, broadcast, rewritten or redistributed",
"start a dialogue, stay on topic and be civil",
"if you don't follow the rules, your comment may be deleted",
"classifieds jobs real estate rentals autos business & service directory pets photos for sale merchandise garage sales contact us contribute subscribe subscriber services about us comment policy advertise newsletter signup magazines cookie list sierra sun tahoe daily tribune tahoe.com the wildwood independent - swift communications, inc",
"you must log in to post a comment",
"this website uses cookies to improve your experience. by continuing to use the site, you accept our privacy policy and cookie policy",
"do not sell my personal information",
"cookie policy",
"privacy policy",
"terms of service",
"wilmington tv. . contact@wilm-tv.com capitol broadcasting company wilm-tv terms of use fcc/eeo reportsite developed and hosted by impact media solutions",
"for more information, go to",
"sign up for our newsletters",
"associated press and may not be published, broadcast, rewritten, or redistributed. associated press text, photo, graphic, audio and/or video material shall not be published, broadcast, rewritten for broadcast or publication or redistributed directly or indirectly in any medium. neither these ap materials nor any portion thereof may be stored in a computer except for personal and noncommercial use. the ap will not be held liable for any delays, inaccuracies, errors or omissions therefrom or in the transmission or delivery of all or any part thereof or for any damages arising from any of the foregoing",
"would you like to receive desktop browser notifications about breaking news and other major stories",
"choose the plan that’s right for you. digital access or digital and print delivery",
"original content available for non-commercial use under a creative commons license, except where noted",
"check back later for updates to this story. get morning report and other email newsletters",
"the material on this site may not be reproduced, distributed, transmitted, cached or otherwise used, except with the prior written permission of advance local",
"community rules apply to all content you upload or otherwise submit to this site",
"join our facebook group for the latest updates on coronavirus",
"x trending:",
"you are now logged in. forgot your password? create new account",
"accessibility terms of use",
"get morning report and other email newsletters",
"this story has been shared",
"not nowyes please",
"manage newsletters",
"all stories in the newsletter are free to access",
"by signing up you agree to our terms of use",
"click to read more and view comments",
"click to hide terms of use",
"washington weather summary: degrees washington",
"to keep our community informed of the most urgent coronavirus news, our critical updates are free to read. for more in-depth coverage and analysis, subscribe now",
"already a subscriber?",
"sign up now to get the most recent coronavirus headlines and other important local and national news sent to your email inbox daily",
"log in or activate your account",
"have an upcoming event? click below to share it with the community! plus, after your event is approved, log back into your user dashboard for an opportunity to enhance your listing",
"get the latest local and national news",
"please log in, or sign up for a new account and purchase a subscription to continue reading",
"the best local, regional and national news in sports, politics, business and more",
"on your next view you will be asked to",
"subscribe today for unlimited access",
"if you're a current print subscriber, you can opt-in for all access at any time",
"sorry, an error occurred",
"we hope that you enjoy our free content",
"thank you for reading",
"if you previously used a social network to login to wral.com, click the “forgot your password” link to reset your password",
"orry, no promotional deals were found matching that code",
"please subscribe or activate your digital account today",
"stories about the coronavirus pandemic are free to read as a public service",
"if this coverage is important to you, consider supporting local journalism by subscribing",
"follow the latest on the outbreak with our newsletter every weekday",
"please donate to keep us thriving through this crisis and beyond",
"become a donor and go ad-free",
"get the latest updates in news, food, music and culture, and receive special offers direct to your inbox",
"get the latest news delivered daily! we invite you to use our commenting platform to engage in insightful conversations about issues in our community. although we do not pre-screen comments, we reserve the right at all times to remove any information or materials that are unlawful, threatening, abusive, libelous, defamatory, obscene, vulgar, pornographic, profane, indecent or otherwise objectionable to us, and to disclose any information necessary to satisfy the law, regulation, or government request. we might permanently block any user who abuses these conditions. if you see comments that you find offensive, please use the \"flag as inappropriate\" feature by hovering over the right side of the post, and pulling down on the arrow that appears. or, contact our editors by emailing",
"get social working for tips are you a covid- expert, public health worker, medical provider, elected official, employer, business owner, or patient? we’d like to include your expertise, data, experiences, concerns, or anonymous tips related to covid- in our reporting. click to connect with our newsroom"
)
# for exact matching
strings_to_remove_coll <- map(strings_to_remove, coll)
# remove those strings and try again to see if anything else pops up
work <- work %>%
mutate(
new_text = reduce(strings_to_remove_coll, str_remove_all, .init = text),
first_letters = str_sub(new_text, 1, 40),
last_letters = str_sub(new_text, nchar(new_text) - 40, nchar(new_text))
)
# check again
to_remove <- work %>%
group_by(first_letters) %>%
mutate(n = n()) %>%
filter(row_number() == 1) %>%
arrange(desc(n))
to_remove2 <- work %>%
group_by(last_letters) %>%
mutate(n = n()) %>%
filter(row_number() == 1) %>%
arrange(desc(n))
# revised text is in column new_text
stories_no_dupes <- left_join(stories_no_dupes, work)
write_rds(stories_no_dupes, here::here("data", "cash_bail_full.rds"))
write_csv(stories_no_dupes, here::here("data", "cash_bail_full.csv"))
|
# sourced by drug_server.R ------------------------------------------------
fn_filter_drug <- function(.x, .cor = 0.2, .fdr = 0.05) {
.x %>% dplyr::filter(abs(cor_sprm) > .cor, fdr < .fdr)
}
fn_filter_drug_ctrp <- function(.x, .cor = 0.2, .fdr = 0.05) {
.x %>% dplyr::filter(abs(cor_sprm) > .cor, p_val < .fdr)
}
# GDSC --------------------------------------------------------------------
gdsc_plot <- function(tcga_path, gs) {
t_gdsc <- readr::read_rds(file.path(tcga_path, "Drug", "drug_target_gdsc.rds.gz")) %>%
tidyr::unnest() %>%
dplyr::select(drug_name, target_pathway) %>%
dplyr::distinct() %>%
dplyr::group_by(target_pathway) %>%
dplyr::mutate(count = n()) %>%
dplyr::ungroup()
print(glue::glue("{paste0(rep('-', 10), collapse = '')} Start Load GDSC @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
drug_gdsc <- readr::read_rds(file.path(tcga_path, "Drug", "gdsc_exp_spearman.rds.gz"))
print(glue::glue("{paste0(rep('-', 10), collapse = '')} End Load GDSC @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
print(glue::glue("{paste0(rep('-', 10), collapse = '')} Start GDSC Plot @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
drug_gdsc %>%
dplyr::filter(symbol %in% gs) %>%
dplyr::mutate(cor_drug = purrr::map(.x = drug, .f = fn_filter_drug)) %>%
tidyr::unnest(cor_drug) -> gdsc_gene_list_sig_drug
gdsc_gene_list_sig_drug %>%
dplyr::mutate(
fdr = ifelse(-log10(fdr) > 40, 40, -log10(fdr)),
cor_sprm = ifelse(cor_sprm > 0.4, 0.4, cor_sprm),
cor_sprm = ifelse(cor_sprm < -0.4, -0.4, cor_sprm)
) %>%
dplyr::left_join(t_gdsc, by = "drug_name") -> gdsc_plot_ready
gdsc_plot_ready %>%
dplyr::group_by(symbol) %>%
dplyr::summarise(cor_sum = sum(cor_sprm)) %>%
dplyr::arrange(cor_sum) -> gdsc_gene_rank
gdsc_plot_ready %>%
dplyr::distinct(drug_name, target_pathway, count) %>%
dplyr::group_by(target_pathway) %>%
dplyr::mutate(per = n() / count) %>%
dplyr::arrange(per) %>%
dplyr::ungroup() %>%
dplyr::select(drug_name, target_pathway, per) -> gdsc_drug_per
gdsc_plot_ready %>%
dplyr::left_join(gdsc_drug_per, by = c("drug_name", "target_pathway")) %>%
dplyr::group_by(drug_name) %>%
dplyr::mutate(drug_count = n()) %>%
dplyr::ungroup() %>%
dplyr::arrange(per, target_pathway, drug_count) %>%
dplyr::select(drug_name, target_pathway, drug_count, per) %>%
dplyr::distinct() %>%
dplyr::mutate(target_pathway = stringr::str_to_title(target_pathway)) -> gdsc_drug_rank_pre
gdsc_drug_rank_pre %>%
dplyr::distinct(target_pathway, per) %>%
dplyr::arrange(per, target_pathway) -> .foo
pathway_color <-
.foo %>%
dplyr::mutate(color = ggthemes::gdocs_pal()(nrow(.foo)))
gdsc_drug_rank_pre %>%
dplyr::select(-per) %>%
dplyr::left_join(pathway_color, by = "target_pathway") -> drug_rank
p <-
gdsc_plot_ready %>%
ggplot(aes(x = symbol, y = drug_name, color = cor_sprm)) +
geom_point(aes(size = fdr)) +
scale_x_discrete(limits = gdsc_gene_rank$symbol, expand = c(0.012, 0.012)) +
scale_y_discrete(limits = drug_rank$drug_name, expand = c(0.012, 0.012), position = "right") +
scale_color_gradient2(
name = "Spearman Correlation",
high = "red",
mid = "white",
low = "blue"
) +
scale_size_continuous(
name = "FDR"
) +
# ggthemes::theme_gdocs() +
theme(
panel.background = element_rect(color = "black", fill = "white", size = 0.1),
panel.grid = element_line(colour = "grey", linetype = "dashed"),
panel.grid.major = element_line(colour = "grey", linetype = "dashed", size = 0.2),
axis.title = element_blank(),
axis.text.x = element_text(size = 9, angle = 90, hjust = 1, vjust = 0.5),
axis.text.y = element_text(size = 10, color = drug_rank$color),
axis.ticks = element_line(color = "black"),
legend.position = "bottom",
legend.direction = "horizontal",
legend.text = element_text(size = 10),
legend.title = element_text(size = 10),
# legend.key.width = unit(1,"cm"),
# legend.key.heigh = unit(0.3,"cm"),
legend.key = element_rect(fill = "white", colour = "black")
) + guides(
color = guide_colorbar(
title.position = "top",
title.hjust = 0.5,
barheight = 0.5,
barwidth = 10
)
)
print(glue::glue("{paste0(rep('-', 10), collapse = '')} End GDSC Plot @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
p
}
# CTRP --------------------------------------------------------------------
ctrp_plot <- function(tcga_path, gs) {
t_ctrp <- readr::read_rds(file.path(tcga_path, "Drug", "drug_target_ctrp.rds.gz")) %>%
tidyr::unnest() %>%
dplyr::select(drug_name, target_pathway) %>%
dplyr::distinct()
print(glue::glue("{paste0(rep('-', 10), collapse = '')} Start Load CTRP @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
drug_ctrp <- readr::read_rds(file.path(tcga_path, "Drug", "ctrp_exp_spearman.rds.gz"))
print(glue::glue("{paste0(rep('-', 10), collapse = '')} End Load CTRP @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
print(glue::glue("{paste0(rep('-', 10), collapse = '')} Start CTRP plot @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
drug_ctrp %>%
dplyr::filter(symbol %in% gs) %>%
dplyr::mutate(cor_drug = purrr::map(.x = drug, .f = fn_filter_drug_ctrp)) %>%
tidyr::unnest(cor_drug) -> ctrp_gene_list_sig_drug
ctrp_gene_list_sig_drug %>%
dplyr::mutate(
p_val = ifelse(-log10(p_val) > 50, 50, -log10(p_val)),
cor_sprm = ifelse(cor_sprm > 0.5, 0.5, cor_sprm),
cor_sprm = ifelse(cor_sprm < -0.5, -0.5, cor_sprm)
) -> ctrp_plot_ready
ctrp_plot_ready %>%
dplyr::group_by(symbol) %>%
dplyr::summarise(cor_sum = sum(cor_sprm)) %>%
dplyr::arrange(cor_sum) -> ctrp_gene_rank
p <- ctrp_plot_ready %>%
ggplot(aes(x = symbol, y = drug_name, color = cor_sprm)) +
geom_point(aes(size = p_val)) +
scale_x_discrete(limits = ctrp_gene_rank$symbol, expand = c(0.012, 0.012)) +
scale_y_discrete(
# limits = drug_rank$drug_name,
expand = c(0.012, 0.012),
position = "right"
) +
scale_color_gradient2(
name = "Spearman Correlation",
high = "red",
mid = "white",
low = "blue"
) +
scale_size_continuous(
name = "FDR"
) +
# ggthemes::theme_gdocs() +
theme(
panel.background = element_rect(color = "black", fill = "white", size = 0.1),
panel.grid = element_line(colour = "grey", linetype = "dashed"),
panel.grid.major = element_line(colour = "grey", linetype = "dashed", size = 0.2),
axis.title = element_blank(),
axis.text.x = element_text(
size = 9,
angle = 90,
hjust = 1,
vjust = 0.5
),
axis.text.y = element_text(
# color = drug_rank$color,
size = 10
),
axis.ticks = element_line(color = "black"),
legend.position = "bottom",
legend.direction = "horizontal",
legend.text = element_text(size = 10),
legend.title = element_text(size = 10),
# legend.key.width = unit(1,"cm"),
# legend.key.heigh = unit(0.3,"cm"),
legend.key = element_rect(fill = "white", colour = "black")
) + guides(
color = guide_colorbar(
title.position = "top",
title.hjust = 0.5,
barheight = 0.5,
barwidth = 10
)
)
print(glue::glue("{paste0(rep('-', 10), collapse = '')} End CTRP plot @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
p
} | /functions/drug_analysis.R | permissive | COMODr/GSCALite | R | false | false | 7,629 | r |
# sourced by drug_server.R ------------------------------------------------
fn_filter_drug <- function(.x, .cor = 0.2, .fdr = 0.05) {
.x %>% dplyr::filter(abs(cor_sprm) > .cor, fdr < .fdr)
}
fn_filter_drug_ctrp <- function(.x, .cor = 0.2, .fdr = 0.05) {
.x %>% dplyr::filter(abs(cor_sprm) > .cor, p_val < .fdr)
}
# GDSC --------------------------------------------------------------------
gdsc_plot <- function(tcga_path, gs) {
t_gdsc <- readr::read_rds(file.path(tcga_path, "Drug", "drug_target_gdsc.rds.gz")) %>%
tidyr::unnest() %>%
dplyr::select(drug_name, target_pathway) %>%
dplyr::distinct() %>%
dplyr::group_by(target_pathway) %>%
dplyr::mutate(count = n()) %>%
dplyr::ungroup()
print(glue::glue("{paste0(rep('-', 10), collapse = '')} Start Load GDSC @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
drug_gdsc <- readr::read_rds(file.path(tcga_path, "Drug", "gdsc_exp_spearman.rds.gz"))
print(glue::glue("{paste0(rep('-', 10), collapse = '')} End Load GDSC @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
print(glue::glue("{paste0(rep('-', 10), collapse = '')} Start GDSC Plot @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
drug_gdsc %>%
dplyr::filter(symbol %in% gs) %>%
dplyr::mutate(cor_drug = purrr::map(.x = drug, .f = fn_filter_drug)) %>%
tidyr::unnest(cor_drug) -> gdsc_gene_list_sig_drug
gdsc_gene_list_sig_drug %>%
dplyr::mutate(
fdr = ifelse(-log10(fdr) > 40, 40, -log10(fdr)),
cor_sprm = ifelse(cor_sprm > 0.4, 0.4, cor_sprm),
cor_sprm = ifelse(cor_sprm < -0.4, -0.4, cor_sprm)
) %>%
dplyr::left_join(t_gdsc, by = "drug_name") -> gdsc_plot_ready
gdsc_plot_ready %>%
dplyr::group_by(symbol) %>%
dplyr::summarise(cor_sum = sum(cor_sprm)) %>%
dplyr::arrange(cor_sum) -> gdsc_gene_rank
gdsc_plot_ready %>%
dplyr::distinct(drug_name, target_pathway, count) %>%
dplyr::group_by(target_pathway) %>%
dplyr::mutate(per = n() / count) %>%
dplyr::arrange(per) %>%
dplyr::ungroup() %>%
dplyr::select(drug_name, target_pathway, per) -> gdsc_drug_per
gdsc_plot_ready %>%
dplyr::left_join(gdsc_drug_per, by = c("drug_name", "target_pathway")) %>%
dplyr::group_by(drug_name) %>%
dplyr::mutate(drug_count = n()) %>%
dplyr::ungroup() %>%
dplyr::arrange(per, target_pathway, drug_count) %>%
dplyr::select(drug_name, target_pathway, drug_count, per) %>%
dplyr::distinct() %>%
dplyr::mutate(target_pathway = stringr::str_to_title(target_pathway)) -> gdsc_drug_rank_pre
gdsc_drug_rank_pre %>%
dplyr::distinct(target_pathway, per) %>%
dplyr::arrange(per, target_pathway) -> .foo
pathway_color <-
.foo %>%
dplyr::mutate(color = ggthemes::gdocs_pal()(nrow(.foo)))
gdsc_drug_rank_pre %>%
dplyr::select(-per) %>%
dplyr::left_join(pathway_color, by = "target_pathway") -> drug_rank
p <-
gdsc_plot_ready %>%
ggplot(aes(x = symbol, y = drug_name, color = cor_sprm)) +
geom_point(aes(size = fdr)) +
scale_x_discrete(limits = gdsc_gene_rank$symbol, expand = c(0.012, 0.012)) +
scale_y_discrete(limits = drug_rank$drug_name, expand = c(0.012, 0.012), position = "right") +
scale_color_gradient2(
name = "Spearman Correlation",
high = "red",
mid = "white",
low = "blue"
) +
scale_size_continuous(
name = "FDR"
) +
# ggthemes::theme_gdocs() +
theme(
panel.background = element_rect(color = "black", fill = "white", size = 0.1),
panel.grid = element_line(colour = "grey", linetype = "dashed"),
panel.grid.major = element_line(colour = "grey", linetype = "dashed", size = 0.2),
axis.title = element_blank(),
axis.text.x = element_text(size = 9, angle = 90, hjust = 1, vjust = 0.5),
axis.text.y = element_text(size = 10, color = drug_rank$color),
axis.ticks = element_line(color = "black"),
legend.position = "bottom",
legend.direction = "horizontal",
legend.text = element_text(size = 10),
legend.title = element_text(size = 10),
# legend.key.width = unit(1,"cm"),
# legend.key.heigh = unit(0.3,"cm"),
legend.key = element_rect(fill = "white", colour = "black")
) + guides(
color = guide_colorbar(
title.position = "top",
title.hjust = 0.5,
barheight = 0.5,
barwidth = 10
)
)
print(glue::glue("{paste0(rep('-', 10), collapse = '')} End GDSC Plot @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
p
}
# CTRP --------------------------------------------------------------------
ctrp_plot <- function(tcga_path, gs) {
t_ctrp <- readr::read_rds(file.path(tcga_path, "Drug", "drug_target_ctrp.rds.gz")) %>%
tidyr::unnest() %>%
dplyr::select(drug_name, target_pathway) %>%
dplyr::distinct()
print(glue::glue("{paste0(rep('-', 10), collapse = '')} Start Load CTRP @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
drug_ctrp <- readr::read_rds(file.path(tcga_path, "Drug", "ctrp_exp_spearman.rds.gz"))
print(glue::glue("{paste0(rep('-', 10), collapse = '')} End Load CTRP @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
print(glue::glue("{paste0(rep('-', 10), collapse = '')} Start CTRP plot @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
drug_ctrp %>%
dplyr::filter(symbol %in% gs) %>%
dplyr::mutate(cor_drug = purrr::map(.x = drug, .f = fn_filter_drug_ctrp)) %>%
tidyr::unnest(cor_drug) -> ctrp_gene_list_sig_drug
ctrp_gene_list_sig_drug %>%
dplyr::mutate(
p_val = ifelse(-log10(p_val) > 50, 50, -log10(p_val)),
cor_sprm = ifelse(cor_sprm > 0.5, 0.5, cor_sprm),
cor_sprm = ifelse(cor_sprm < -0.5, -0.5, cor_sprm)
) -> ctrp_plot_ready
ctrp_plot_ready %>%
dplyr::group_by(symbol) %>%
dplyr::summarise(cor_sum = sum(cor_sprm)) %>%
dplyr::arrange(cor_sum) -> ctrp_gene_rank
p <- ctrp_plot_ready %>%
ggplot(aes(x = symbol, y = drug_name, color = cor_sprm)) +
geom_point(aes(size = p_val)) +
scale_x_discrete(limits = ctrp_gene_rank$symbol, expand = c(0.012, 0.012)) +
scale_y_discrete(
# limits = drug_rank$drug_name,
expand = c(0.012, 0.012),
position = "right"
) +
scale_color_gradient2(
name = "Spearman Correlation",
high = "red",
mid = "white",
low = "blue"
) +
scale_size_continuous(
name = "FDR"
) +
# ggthemes::theme_gdocs() +
theme(
panel.background = element_rect(color = "black", fill = "white", size = 0.1),
panel.grid = element_line(colour = "grey", linetype = "dashed"),
panel.grid.major = element_line(colour = "grey", linetype = "dashed", size = 0.2),
axis.title = element_blank(),
axis.text.x = element_text(
size = 9,
angle = 90,
hjust = 1,
vjust = 0.5
),
axis.text.y = element_text(
# color = drug_rank$color,
size = 10
),
axis.ticks = element_line(color = "black"),
legend.position = "bottom",
legend.direction = "horizontal",
legend.text = element_text(size = 10),
legend.title = element_text(size = 10),
# legend.key.width = unit(1,"cm"),
# legend.key.heigh = unit(0.3,"cm"),
legend.key = element_rect(fill = "white", colour = "black")
) + guides(
color = guide_colorbar(
title.position = "top",
title.hjust = 0.5,
barheight = 0.5,
barwidth = 10
)
)
print(glue::glue("{paste0(rep('-', 10), collapse = '')} End CTRP plot @ {Sys.time()} {paste0(rep('-', 10), collapse = '')}"))
p
} |
##' NCBI Database API - Get NCBI taxonomy information from given NCBI taxonomy IDs
##'
##' Get NCBI taxonomy information.
##' @title Get NCBI taxonomy information
##' @param NCBITaxoIDs A vector of NCBI taxonomy IDs.
##' @inheritParams getNCBIGenesInfo
##' @return A list containing taxonomy information for each ID.
##' @examples
##' ## with two cores
##' tax3 <- getNCBITaxo(c('9606', '511145', '797302'), n = 2)
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @importFrom RCurl postForm
##' @importFrom xml2 read_xml xml_children xml_text
##' @importFrom foreach foreach %do% %dopar%
##' @importFrom doParallel registerDoParallel stopImplicitCluster
##' @importFrom ParaMisc CutSeqEqu
##' @references Entrez Programming Utilities Help \url{http://www.ncbi.nlm.nih.gov/books/NBK25499/}
##' @export
##'
##'
getNCBITaxo <- function(NCBITaxoIDs, n = 1, maxEach = 10000) {
## register multiple core
registerDoParallel(cores = n)
##~~~~~~~~~~~~~~~~~~~~~~~~~EPost~~~~~~~~~~~~~~~~~~~~~~~
## compress taxonomy IDs
taxoIDs <- paste(NCBITaxoIDs, collapse = ',')
infoPostPara <- list(db = 'taxonomy', id = taxoIDs)
infoPost <- EPostNCBI(infoPostPara)
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~ESummary~~~~~~~~~~~~~~~~~~~~~~~~~
cutMat <- CutSeqEqu(length(NCBITaxoIDs), maxEach)
## The start number is from 0.
cutMat <- cutMat - 1
## fetch url base
fetchUrlBase <- EUrl('efetch')
key = infoPost$QueryKey
webEnv = infoPost$WebEnv
taxoInfo <- foreach (i = 1:ncol(cutMat), .combine = c) %do% {
eachFetchStr <- postForm(uri = fetchUrlBase,
db = 'taxonomy',
query_key = key,
WebEnv = webEnv,
retstart = cutMat[1, i],
retmax = maxEach,
retmode = 'xml')
eachFetchXml <- read_xml(eachFetchStr)
childXml <- xml_find_all(eachFetchXml, 'Taxon')
eachInfo <- foreach(j = 1 : length(childXml)) %dopar% {
singleInfo <- singleTaxoInfo(childXml[[j]])
return(singleInfo)
}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
return(eachInfo)
}
names(taxoInfo) <- NCBITaxoIDs
## stop multiple core
stopImplicitCluster()
return(taxoInfo)
}
##' NCBI Database API - Get NCBI taxonomy information from given NCBI taxonomy IDs
##'
##' Get taxonomy information form single NCBI taxonomy ID.
##' @title Get single NCBI taxonomy information
##' @param taxoXml Taxonomy xml data
##' @return A matrix of taxonomy information
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @keywords internal
##'
##'
singleTaxoInfo <- function(taxoXml) {
taxoPrefix <- './/'
taxoItems <- c('TaxId', 'ScientificName', 'Rank')
taxoInfo <- BatchXmlText(taxoXml, taxoPrefix, taxoItems)
taxoMat <- do.call(cbind, taxoInfo)
return(taxoMat)
}
##' NCBI Database API - Get NCBI gene or protein information from given NCBI gene IDs
##'
##' Get NCBI gene information, including gene name, description, genetic source, aliases, gene location. To retrieve thousands of proteins, use EPost to post record into the web server and then retrieve data using ESummary. If the gene ID is not found, return an error information in the list.
##' @title Get NCBI genes information
##' @param NCBIGeneIDs A vector of NCBI gene or protein IDs.
##' @param type Character string either "protein", "gene", "nuccore".
##' @param n The number of CPUs or processors, and the default value is 1.
##' @param maxEach The maximum retrieve number in each visit. The ESearch, EFetch, and ESummary, the max number in one query is 10,000.
##' @return A list containing gene information for each ID. A empty character vector (whose length is 0) will be returned for the items if the contents are not found.
##' @examples
##' gene3 <- getNCBIGenesInfo(c('100286922', '948242', '15486644'), type = 'gene', n = 2)
##' protein2 <- getNCBIGenesInfo(c('WP_084863515', 'BAI64724'), type = 'protein', n = 2)
##' nuc3 <- getNCBIGenesInfo(c('AF538355.1', 'AY560609.1', 'CP048101.1'), type = 'nuccore')
##' ## not found
##' ghostInfo <- getNCBIGenesInfo('111111111', n = 1)
##' \dontrun{
##' require(KEGGAPI)
##' ## signle genome with two plasmids
##' smuGenes <- convKEGG('smu', 'ncbi-geneid')
##' smuGeneNames <- sapply(strsplit(smuGenes[, 1], split = ':', fixed = TRUE), '[[', 2)
##' smuInfo <- getNCBIGenesInfo(smuGeneNames, n = 4)
##'
##' ## two genomes with two plasmids
##' draGenes <- convKEGG('dra', 'ncbi-geneid')
##' draGeneNames <- sapply(strsplit(draGenes[, 1], split = ':', fixed = TRUE), '[[', 2)
##' draInfo <- getNCBIGenesInfo(draGeneNames, n = 4)
##' }
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @importFrom RCurl postForm
##' @importFrom xml2 read_xml xml_children
##' @importFrom foreach foreach %do% %dopar%
##' @importFrom doParallel registerDoParallel stopImplicitCluster
##' @importFrom ParaMisc CutSeqEqu
##' @references Entrez Programming Utilities Help \url{http://www.ncbi.nlm.nih.gov/books/NBK25499/}
##' @export
##'
##'
getNCBIGenesInfo <- function(NCBIGeneIDs, type = 'gene', n = 1, maxEach = 10000) {
## register multiple core
registerDoParallel(cores = n)
##~~~~~~~~~~~~~~~~~~~~~~~~~EPost~~~~~~~~~~~~~~~~~~~~~~~
## compress gene IDs
geneIDs <- paste(NCBIGeneIDs, collapse = ',')
infoPostPara <- list(db = type, id = geneIDs)
infoPost <- EPostNCBI(infoPostPara)
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~ESummary~~~~~~~~~~~~~~~~~~~~~~~~~
cutMat <- CutSeqEqu(length(NCBIGeneIDs), maxEach)
## The start number is from 0.
cutMat <- cutMat - 1
## fetch url base
fetchUrlBase <- EUrl('esummary')
key = infoPost$QueryKey
webEnv = infoPost$WebEnv
geneInfo <- foreach (i = 1:ncol(cutMat), .combine = c) %do% {
eachFetchStr <- postForm(uri = fetchUrlBase,
db = type,
query_key = key,
WebEnv = webEnv,
retstart = cutMat[1, i],
retmax = maxEach,
retmode = 'xml')
eachFetchXml <- read_xml(eachFetchStr)
topNode <- ifelse(type == 'gene', 'DocumentSummarySet/DocumentSummary', 'DocSum')
childXml <- xml_find_all(eachFetchXml, topNode)
eachInfo <- foreach(j = 1 : length(childXml)) %dopar% {
if (type %in% c('gene')) {
singleInfo <- singleGeneInfo(childXml[[j]])
}
else if (type %in% c('protein', 'nuccore')) {
singleInfo <- singleProteinInfo(childXml[[j]])
}
else {}
return(singleInfo)
}
return(eachInfo)
}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
names(geneInfo) <- NCBIGeneIDs
## stop multiple core
stopImplicitCluster()
return(geneInfo)
}
##' NCBI Database API - Get single NCBI gene information
##'
##' Get gene information form single NCBI gene ID.
##' @title Get single NCBI gene information
##' @param geneXml Gene xml data.
##' @return A list of gene information.
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @importFrom xml2 xml_find_all xml_text
##' @importFrom magrittr %>%
##' @keywords internal
##'
##'
singleGeneInfo <- function(geneXml) {
## first check if the no candidate for input gene
errorText <- geneXml %>%
xml_find_all('error') %>%
xml_text
if (length(errorText) > 0) {
geneInfo <- errorText
return(geneInfo)
} else {}
## gene summary
docSumPrefix <- ''
docSumItems <- c('Name', 'Description', 'Chromosome', 'GeneticSource', 'MapLocation', 'OtherAliases')
geneInfo <- BatchXmlText(geneXml, docSumPrefix, docSumItems)
## gene location
## LocationHist also includes gene location which is not what we want
locPrefix <- 'GenomicInfo/GenomicInfoType/'
locItems <- c('ChrLoc', 'ChrAccVer', 'ChrStart', 'ChrStop', 'ExonCount')
locText <- BatchXmlText(geneXml, locPrefix, locItems)
locMat <- do.call(cbind, locText)
## combine summary and gene location
geneInfo$GenomicInfo = locMat
return(geneInfo)
}
##' NCBI Database API - Get single NCBI protein information
##'
##' Get gene information form single NCBI protein ID.
##' @title Get single NCBI protein information
##' @param geneXml Gene xml data.
##' @return A list of protein information.
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @importFrom xml2 xml_find_all xml_text
##' @importFrom magrittr %>%
##' @importFrom stringr str_replace
##' @keywords internal
##'
##'
singleProteinInfo <- function(proteinXml) {
## first check if the no candidate for input gene
errorText <- proteinXml %>%
xml_find_all('error') %>%
xml_text
if (length(errorText) > 0) {
proteinInfo <- errorText
return(proteinInfo)
} else {}
## protein summary
itemsAtts <- c('Caption', 'Title', 'Extra', 'Gi', 'CreateDate', 'UpdateDate', 'Flags', 'TaxId', 'Length', 'Status')
proteinInfo <- sapply(itemsAtts, function(eachAttr) {
eachAttr %>%
str_replace('Item[@Name="Attrs"]', 'Attrs', .) %>%
xml_find_all(proteinXml, .) %>%
xml_text
})
return(proteinInfo)
}
##' NCBI Database API - Get single NCBI whole genomic gene annotation
##'
##' Get whole gene annotation form single NCBI genome ID. The locus tag is used as names for each gene. If one of the gene feature value is missed, a "" (with length of 1) will return. If the genome has no gene featurs, "NULL" will be returned.
##' This function now supports two feature types, "gene" or "CDS" (coding sequence). Other features such as RNAs ("ncRNA", "rRNA", "tRNA", "tmRNA"), "misc_feature", "rep_origin", "repeat_region" are not supported yet. It is shown in E. coli K-12 MG1655 "genes" features not only includes all "CDS" and RNAs, but some sRNA ("b4714"). "misc_feature" are mainly about cryptic prophage genes, and "repeat_region" are repetitive extragentic palindromic (REP) elements.
##' @title Get single NCBI whole genomic gene annotation
##' @param genomeID Single NCBI genome ID.
##' @param type "gene" or "CDS". The KEGG database use "CDS" as the protein gene count.
##' @inheritParams getNCBIGenesInfo
##' @return A list of annotation.
##' @examples
##' ## no gene features
##' nofeature <- singleGenomeAnno('BA000048')
##'
##' \dontrun{
##' aeuGenome <- singleGenomeAnno('CP007715', n = 4)
##'
##' ## missed value is replaced by ''
##' pseudo <- singleGenomeAnno('AE001826')[54]}
##' @importFrom RCurl postForm
##' @importFrom xml2 read_xml
##' @importFrom foreach foreach %dopar%
##' @importFrom doParallel registerDoParallel stopImplicitCluster
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @export
##'
singleGenomeAnno <- function(genomeID, type = 'gene', n = 1) {
getEachAnno <- function(featureNode) {
## USE: extact annotation from each node
## INPUT: `featureNode` is the child node in xml format
## OUTPUT: A list of gene annotation
locNode <- xml_find_all(featureNode, 'GBFeature_intervals/GBInterval')
loc <- BatchXmlText(locNode, './/', c('GBInterval_from', 'GBInterval_to'))
loc <- do.call(cbind, loc)
GBNodes <- xml_find_all(featureNode, 'GBFeature_quals/GBQualifier')
GBf <- lapply(GBNodes, function(x) {
## value may be missed, for example a <psedudo>
## example: singleGenomeAnno('AE001826')[50]
eachGB <- BatchXmlText(x, '', c('GBQualifier_name', 'GBQualifier_value'))
## '' may be assighed to multiple elements
eachGB[which(sapply(eachGB, length) == 0)] <- ''
eachGB <- unlist(eachGB)
return(eachGB)
})
GBf <- do.call(rbind, GBf)
GBf[, 2] <- gsub('\\n', '', GBf[, 2])
geneAnno <- list(GBInterval = loc,
GBFeature_quals = GBf)
return(geneAnno)
}
## register multiple core
registerDoParallel(cores = n)
##~~~~~~~~~~~~~~~~~~~load in whole genomic annotation~~~~~~~~~~~~
urlBase <- EUrl('efetch')
postList <- list(db = 'nuccore',
id = genomeID,
retmode = 'xml')
annoStr <- postForm(urlBase, .params = postList)
annoXml <- read_xml(annoStr)
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## extract annotation node and keys
annoNode <- xml_find_all(annoXml, 'GBSeq/GBSeq_feature-table')
keys <- xml_text(xml_find_all(annoNode, './/GBFeature_key'))
## may be no gene features
if (length(keys) == 1) {
## only one key that is "source", and return NULL
annoList <- NULL
} else {
rightKeys <- which(keys == type)
annoChild <- xml_children(annoNode)[rightKeys]
##~~~~~~~~~~~~~~~~~~extract features~~~~~~~~~~~~~~~~~~~~~~~~~~~~
annoList <- foreach(i = 1:length(annoChild)) %dopar% {
eachAnno <- getEachAnno(annoChild[[i]])
}
locusName <- sapply(annoList, function(x) {
eachLocus <- x[[2]]
eachLocus <- eachLocus[eachLocus[, 1] == 'locus_tag', 2]
return(eachLocus)
})
names(annoList) <- locusName
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
}
## stop multiple core
stopImplicitCluster()
return(annoList)
}
| /R/getNCBI.R | no_license | YulongNiu/NCBIAPI | R | false | false | 13,149 | r | ##' NCBI Database API - Get NCBI taxonomy information from given NCBI taxonomy IDs
##'
##' Get NCBI taxonomy information.
##' @title Get NCBI taxonomy information
##' @param NCBITaxoIDs A vector of NCBI taxonomy IDs.
##' @inheritParams getNCBIGenesInfo
##' @return A list containing taxonomy information for each ID.
##' @examples
##' ## with two cores
##' tax3 <- getNCBITaxo(c('9606', '511145', '797302'), n = 2)
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @importFrom RCurl postForm
##' @importFrom xml2 read_xml xml_find_all xml_children xml_text
##' @importFrom foreach foreach %do% %dopar%
##' @importFrom doParallel registerDoParallel stopImplicitCluster
##' @importFrom ParaMisc CutSeqEqu
##' @references Entrez Programming Utilities Help \url{http://www.ncbi.nlm.nih.gov/books/NBK25499/}
##' @export
##'
##'
getNCBITaxo <- function(NCBITaxoIDs, n = 1, maxEach = 10000) {
  ## register the parallel backend; make sure it is released even if a
  ## download or parse step below throws an error
  registerDoParallel(cores = n)
  on.exit(stopImplicitCluster(), add = TRUE)
  ##~~~~~~~~~~~~~~~~~~~~~~~~~EPost~~~~~~~~~~~~~~~~~~~~~~~
  ## post all taxonomy IDs in one request; the Entrez history server
  ## returns a QueryKey/WebEnv pair reused by the EFetch calls below
  taxoIDs <- paste(NCBITaxoIDs, collapse = ',')
  infoPostPara <- list(db = 'taxonomy', id = taxoIDs)
  infoPost <- EPostNCBI(infoPostPara)
  ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  ##~~~~~~~~~~~~~~~~~~~~~~EFetch~~~~~~~~~~~~~~~~~~~~~~~~~~
  ## split the query into batches of at most `maxEach` records
  cutMat <- CutSeqEqu(length(NCBITaxoIDs), maxEach)
  ## NCBI `retstart` is 0-based
  cutMat <- cutMat - 1
  ## fetch url base
  fetchUrlBase <- EUrl('efetch')
  key <- infoPost$QueryKey
  webEnv <- infoPost$WebEnv
  taxoInfo <- foreach(i = seq_len(ncol(cutMat)), .combine = c) %do% {
    eachFetchStr <- postForm(uri = fetchUrlBase,
                             db = 'taxonomy',
                             query_key = key,
                             WebEnv = webEnv,
                             retstart = cutMat[1, i],
                             retmax = maxEach,
                             retmode = 'xml')
    eachFetchXml <- read_xml(eachFetchStr)
    ## one <Taxon> node per taxonomy ID in this batch
    childXml <- xml_find_all(eachFetchXml, 'Taxon')
    ## parse each <Taxon> node in parallel
    eachInfo <- foreach(j = seq_along(childXml)) %dopar% {
      singleInfo <- singleTaxoInfo(childXml[[j]])
      return(singleInfo)
    }
    ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    return(eachInfo)
  }
  ## NOTE(review): assumes NCBI returns one record per requested ID, in
  ## request order -- otherwise names will be misaligned
  names(taxoInfo) <- NCBITaxoIDs
  return(taxoInfo)
}
##' NCBI Database API - Get NCBI taxonomy information from given NCBI taxonomy IDs
##'
##' Extract the taxonomy record held in one NCBI taxonomy XML node.
##' @title Get single NCBI taxonomy information
##' @param taxoXml Taxonomy xml data
##' @return A matrix of taxonomy information
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @keywords internal
##'
##'
singleTaxoInfo <- function(taxoXml) {
  ## fields of interest, searched among all descendant nodes ('.//')
  fieldNames <- c('TaxId', 'ScientificName', 'Rank')
  fieldText <- BatchXmlText(taxoXml, './/', fieldNames)
  ## bind the extracted fields column-wise into a single matrix
  return(do.call(cbind, fieldText))
}
##' NCBI Database API - Get NCBI gene or protein information from given NCBI gene IDs
##'
##' Get NCBI gene information, including gene name, description, genetic source, aliases, and gene location. To retrieve thousands of proteins, use EPost to post the records to the web server and then retrieve the data using ESummary. If a gene ID is not found, an error message is returned in the list.
##' @title Get NCBI genes information
##' @param NCBIGeneIDs A vector of NCBI gene or protein IDs.
##' @param type Character string either "protein", "gene", "nuccore".
##' @param n The number of CPUs or processors, and the default value is 1.
##' @param maxEach The maximum retrieve number in each visit. The ESearch, EFetch, and ESummary, the max number in one query is 10,000.
##' @return A list containing gene information for each ID. An empty character vector (whose length is 0) will be returned for the items if the contents are not found.
##' @examples
##' gene3 <- getNCBIGenesInfo(c('100286922', '948242', '15486644'), type = 'gene', n = 2)
##' protein2 <- getNCBIGenesInfo(c('WP_084863515', 'BAI64724'), type = 'protein', n = 2)
##' nuc3 <- getNCBIGenesInfo(c('AF538355.1', 'AY560609.1', 'CP048101.1'), type = 'nuccore')
##' ## not found
##' ghostInfo <- getNCBIGenesInfo('111111111', n = 1)
##' \dontrun{
##' require(KEGGAPI)
##' ## signle genome with two plasmids
##' smuGenes <- convKEGG('smu', 'ncbi-geneid')
##' smuGeneNames <- sapply(strsplit(smuGenes[, 1], split = ':', fixed = TRUE), '[[', 2)
##' smuInfo <- getNCBIGenesInfo(smuGeneNames, n = 4)
##'
##' ## two genomes with two plasmids
##' draGenes <- convKEGG('dra', 'ncbi-geneid')
##' draGeneNames <- sapply(strsplit(draGenes[, 1], split = ':', fixed = TRUE), '[[', 2)
##' draInfo <- getNCBIGenesInfo(draGeneNames, n = 4)
##' }
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @importFrom RCurl postForm
##' @importFrom xml2 read_xml xml_children
##' @importFrom foreach foreach %do% %dopar%
##' @importFrom doParallel registerDoParallel stopImplicitCluster
##' @importFrom ParaMisc CutSeqEqu
##' @references Entrez Programming Utilities Help \url{http://www.ncbi.nlm.nih.gov/books/NBK25499/}
##' @export
##'
##'
getNCBIGenesInfo <- function(NCBIGeneIDs, type = 'gene', n = 1, maxEach = 10000) {
## register multiple core
registerDoParallel(cores = n)
##~~~~~~~~~~~~~~~~~~~~~~~~~EPost~~~~~~~~~~~~~~~~~~~~~~~
## compress gene IDs
geneIDs <- paste(NCBIGeneIDs, collapse = ',')
infoPostPara <- list(db = type, id = geneIDs)
infoPost <- EPostNCBI(infoPostPara)
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~ESummary~~~~~~~~~~~~~~~~~~~~~~~~~
cutMat <- CutSeqEqu(length(NCBIGeneIDs), maxEach)
## The start number is from 0.
cutMat <- cutMat - 1
## fetch url base
fetchUrlBase <- EUrl('esummary')
key = infoPost$QueryKey
webEnv = infoPost$WebEnv
geneInfo <- foreach (i = 1:ncol(cutMat), .combine = c) %do% {
eachFetchStr <- postForm(uri = fetchUrlBase,
db = type,
query_key = key,
WebEnv = webEnv,
retstart = cutMat[1, i],
retmax = maxEach,
retmode = 'xml')
eachFetchXml <- read_xml(eachFetchStr)
topNode <- ifelse(type == 'gene', 'DocumentSummarySet/DocumentSummary', 'DocSum')
childXml <- xml_find_all(eachFetchXml, topNode)
eachInfo <- foreach(j = 1 : length(childXml)) %dopar% {
if (type %in% c('gene')) {
singleInfo <- singleGeneInfo(childXml[[j]])
}
else if (type %in% c('protein', 'nuccore')) {
singleInfo <- singleProteinInfo(childXml[[j]])
}
else {}
return(singleInfo)
}
return(eachInfo)
}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
names(geneInfo) <- NCBIGeneIDs
## stop multiple core
stopImplicitCluster()
return(geneInfo)
}
##' NCBI Database API - Get single NCBI gene information
##'
##' Extract gene information from a single NCBI gene summary XML node.
##' @title Get single NCBI gene information
##' @param geneXml Gene xml data.
##' @return A list of gene information, or the error text when the ID was
##'   not found.
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @importFrom xml2 xml_find_all xml_text
##' @importFrom magrittr %>%
##' @keywords internal
##'
##'
singleGeneInfo <- function(geneXml) {
  ## an unknown ID yields an <error> node; return its text verbatim
  errMsg <- geneXml %>%
    xml_find_all('error') %>%
    xml_text
  if (length(errMsg) > 0) {
    return(errMsg)
  }
  ## summary fields live directly under the document node (empty prefix)
  summaryFields <- c('Name', 'Description', 'Chromosome', 'GeneticSource',
                     'MapLocation', 'OtherAliases')
  geneInfo <- BatchXmlText(geneXml, '', summaryFields)
  ## gene location: read GenomicInfo only; LocationHist also carries gene
  ## locations, which are not wanted here
  locFields <- c('ChrLoc', 'ChrAccVer', 'ChrStart', 'ChrStop', 'ExonCount')
  locParts <- BatchXmlText(geneXml, 'GenomicInfo/GenomicInfoType/', locFields)
  ## one column per location field, bound into a single matrix
  geneInfo$GenomicInfo <- do.call(cbind, locParts)
  geneInfo
}
##' NCBI Database API - Get single NCBI protein information
##'
##' Get protein information from a single NCBI protein summary XML node.
##' @title Get single NCBI protein information
##' @param proteinXml Protein xml data.
##' @return A named character vector of protein information, or the error
##'   text when the ID was not found.
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @importFrom xml2 xml_find_all xml_text
##' @importFrom magrittr %>%
##' @keywords internal
##'
##'
singleProteinInfo <- function(proteinXml) {
  ## first check whether there is no candidate for the input ID
  errorText <- proteinXml %>%
    xml_find_all('error') %>%
    xml_text
  if (length(errorText) > 0) {
    proteinInfo <- errorText
    return(proteinInfo)
  }
  ## protein summary: each value lives in an <Item Name="..."> child node
  itemsAtts <- c('Caption', 'Title', 'Extra', 'Gi', 'CreateDate', 'UpdateDate',
                 'Flags', 'TaxId', 'Length', 'Status')
  proteinInfo <- sapply(itemsAtts, function(eachAttr) {
    ## build the XPath directly (replaces the former str_replace() trick,
    ## which produced the same 'Item[@Name="<attr>"]' string indirectly)
    proteinXml %>%
      xml_find_all(sprintf('Item[@Name="%s"]', eachAttr)) %>%
      xml_text
  })
  return(proteinInfo)
}
##' NCBI Database API - Get single NCBI whole genomic gene annotation
##'
##' Get whole gene annotation from single NCBI genome ID. The locus tag is used as names for each gene. If one of the gene feature values is missed, a "" (with length of 1) will return. If the genome has no gene features, "NULL" will be returned.
##' This function now supports two feature types, "gene" or "CDS" (coding sequence). Other features such as RNAs ("ncRNA", "rRNA", "tRNA", "tmRNA"), "misc_feature", "rep_origin", "repeat_region" are not supported yet. It is shown in E. coli K-12 MG1655 "genes" features not only includes all "CDS" and RNAs, but some sRNA ("b4714"). "misc_feature" are mainly about cryptic prophage genes, and "repeat_region" are repetitive extragentic palindromic (REP) elements.
##' @title Get single NCBI whole genomic gene annotation
##' @param genomeID Single NCBI genome ID.
##' @param type "gene" or "CDS". The KEGG database use "CDS" as the protein gene count.
##' @inheritParams getNCBIGenesInfo
##' @return A list of annotation, one element per feature, named by locus tag.
##' @examples
##' ## no gene features
##' nofeature <- singleGenomeAnno('BA000048')
##'
##' \dontrun{
##' aeuGenome <- singleGenomeAnno('CP007715', n = 4)
##'
##' ## missed value is replaced by ''
##' pseudo <- singleGenomeAnno('AE001826')[54]}
##' @importFrom RCurl postForm
##' @importFrom xml2 read_xml
##' @importFrom foreach foreach %dopar%
##' @importFrom doParallel registerDoParallel stopImplicitCluster
##' @author Yulong Niu \email{niuylscu@@gmail.com}
##' @export
##'
singleGenomeAnno <- function(genomeID, type = 'gene', n = 1) {
  getEachAnno <- function(featureNode) {
    ## USE: extract annotation from each node
    ## INPUT: `featureNode` is the child node in xml format
    ## OUTPUT: A list of gene annotation
    ## genomic interval(s) of the feature: from/to coordinates as text
    locNode <- xml_find_all(featureNode, 'GBFeature_intervals/GBInterval')
    loc <- BatchXmlText(locNode, './/', c('GBInterval_from', 'GBInterval_to'))
    loc <- do.call(cbind, loc)
    ## qualifier name/value pairs (e.g. locus_tag, gene, product)
    GBNodes <- xml_find_all(featureNode, 'GBFeature_quals/GBQualifier')
    GBf <- lapply(GBNodes, function(x) {
      ## value may be missed, for example a <pseudo> qualifier
      ## example: singleGenomeAnno('AE001826')[50]
      eachGB <- BatchXmlText(x, '', c('GBQualifier_name', 'GBQualifier_value'))
      ## '' may be assigned to multiple elements
      eachGB[which(sapply(eachGB, length) == 0)] <- ''
      eachGB <- unlist(eachGB)
      return(eachGB)
    })
    GBf <- do.call(rbind, GBf)
    ## strip newlines that EFetch embeds inside long qualifier values
    ## (the regex '\\n' matches a literal newline character)
    GBf[, 2] <- gsub('\\n', '', GBf[, 2])
    geneAnno <- list(GBInterval = loc,
                     GBFeature_quals = GBf)
    return(geneAnno)
  }
  ## register multiple core
  registerDoParallel(cores = n)
  ##~~~~~~~~~~~~~~~~~~~load in whole genomic annotation~~~~~~~~~~~~
  urlBase <- EUrl('efetch')
  postList <- list(db = 'nuccore',
                   id = genomeID,
                   retmode = 'xml')
  annoStr <- postForm(urlBase, .params = postList)
  annoXml <- read_xml(annoStr)
  ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  ## extract annotation node and keys (one GBFeature_key per feature)
  annoNode <- xml_find_all(annoXml, 'GBSeq/GBSeq_feature-table')
  keys <- xml_text(xml_find_all(annoNode, './/GBFeature_key'))
  ## may be no gene features
  if (length(keys) == 1) {
    ## only one key that is "source", and return NULL
    annoList <- NULL
  } else {
    ## keep only features matching the requested type ('gene' or 'CDS')
    rightKeys <- which(keys == type)
    annoChild <- xml_children(annoNode)[rightKeys]
    ##~~~~~~~~~~~~~~~~~~extract features~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    annoList <- foreach(i = 1:length(annoChild)) %dopar% {
      eachAnno <- getEachAnno(annoChild[[i]])
    }
    ## name each feature by its locus_tag qualifier value
    ## NOTE(review): assumes every feature carries exactly one locus_tag
    ## qualifier -- a feature without one would yield a zero-length name here
    locusName <- sapply(annoList, function(x) {
      eachLocus <- x[[2]]
      eachLocus <- eachLocus[eachLocus[, 1] == 'locus_tag', 2]
      return(eachLocus)
    })
    names(annoList) <- locusName
    ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  }
  ## stop multiple core
  stopImplicitCluster()
  return(annoList)
}
|
#' Get (individual) treatment effect draws from bartcFit posterior
#'
#' CTE = Conditional Treatment Effects (usually used to generate (C)ATE or ATT)
#' \code{newdata} specifies the conditions, if unspecified it defaults to the original data.
#' Assumes treated column is either an integer column of 1's (treated) and 0's (nontreated) or logical indicating treatment if TRUE.
#'
#' @param model A supported Bayesian model fit that can provide fits and predictions.
#' @param treatment Not used. Treatment variable specified by \code{bartcFit} object.
#' @param newdata Not used. extracts treatment effects already calculated by \code{bartcFit} object.
#' @param subset Either "treated", "nontreated", or "all". Default is "all".
#' @param common_support_method Either "sd", or "chisq". Default is unspecified, and no common support calculation is done.
#' @param cutoff Cutoff for common support (if in use).
#' @param ... Arguments to be passed to \code{tidybayes::fitted_draws} typically scale for \code{BART} models.
#'
#' @return A tidy data frame (tibble) with treatment effect values.
#' @export
#'
treatment_effects.bartcFit <- function(model, treatment = NULL, newdata = NULL, subset = "all", common_support_method, cutoff, ...) {
  stopifnot(is.null(treatment), is.null(newdata))
  ## validate `subset` early: previously a typo fell through both branches
  ## below and silently returned the unfiltered draws
  subset <- match.arg(subset, c("all", "treated", "nontreated"))
  # update specified common support arguments
  if (missing(common_support_method)) {
    commonSup.rule <- "none"
    commonSup.cut <- NA_real_
    if (!missing(cutoff)) warning("Argument cutoff ignored as common_support_method unspecified.")
  } else {
    ## validate the method: previously an unrecognized value passed straight
    ## through and, with `cutoff` missing, switch() yielded a NULL cutoff
    commonSup.rule <- match.arg(common_support_method, c("sd", "chisq"))
    if (missing(cutoff)) {
      commonSup.cut <- switch(commonSup.rule,
                              sd = 1,
                              chisq = 0.05
      )
      warning("Default value for cutoff used.")
    } else {
      commonSup.cut <- cutoff
    }
  }
  ## apply the common-support rule via refit (no re-estimation of the model)
  refitmodel <- bartCause::refit(model, newresp = NULL, commonSup.rule = commonSup.rule, commonSup.cut = commonSup.cut)
  # extract treatment effect: one row of metadata per observation
  rowinfo <- dplyr::tibble(.row = seq_along(refitmodel$commonSup.sub), treated = model$trt)
  if (commonSup.rule != "none") {
    rowinfo <- rowinfo %>% dplyr::mutate(supported = refitmodel$commonSup.sub)
  }
  ## join individual conditional effects (icate) with individual effects (ite)
  ## draw-by-draw, then attach the per-observation metadata
  te_df <- tidy_draws(refitmodel, type = "icate", fitstage = "response", sample = "all") %>%
    dplyr::left_join(tidy_draws(refitmodel, type = "ite", fitstage = "response"),
                     by = dplyr::join_by(".chain", ".iteration", ".draw", ".row")) %>%
    dplyr::left_join(rowinfo, by = dplyr::join_by(.row))
  ## optionally restrict to one treatment arm
  if (subset == "treated") {
    te_df <- te_df %>% dplyr::filter(!!as.symbol("treated") == 1)
  } else if (subset == "nontreated") {
    te_df <- te_df %>% dplyr::filter(!!as.symbol("treated") == 0)
  }
  return(te_df)
}
| /R/treatment-effects-bartCause.R | permissive | bonStats/tidytreatment | R | false | false | 2,769 | r |
#' Get (individual) treatment effect draws from bartcFit posterior
#'
#' CTE = Conditional Treatment Effects (usually used to generate (C)ATE or ATT)
#' \code{newdata} specifies the conditions, if unspecified it defaults to the original data.
#' Assumes treated column is either an integer column of 1's (treated) and 0's (nontreated) or logical indicating treatment if TRUE.
#'
#' @param model A supported Bayesian model fit that can provide fits and predictions.
#' @param treatment Not used. Treatment variable specified by \code{bartcFit} object.
#' @param newdata Not used. extracts treatment effects already calculated by \code{bartcFit} object.
#' @param subset Either "treated", "nontreated", or "all". Default is "all".
#' @param common_support_method Either "sd", or "chisq". Default is unspecified, and no common support calculation is done.
#' @param cutoff Cutoff for common support (if in use).
#' @param ... Arguments to be passed to \code{tidybayes::fitted_draws} typically scale for \code{BART} models.
#'
#' @return A tidy data frame (tibble) with treatment effect values.
#' @export
#'
treatment_effects.bartcFit <- function(model, treatment = NULL, newdata = NULL, subset = "all", common_support_method, cutoff, ...) {
  stopifnot(is.null(treatment), is.null(newdata))
  ## validate `subset` early: previously a typo fell through both branches
  ## below and silently returned the unfiltered draws
  subset <- match.arg(subset, c("all", "treated", "nontreated"))
  # update specified common support arguments
  if (missing(common_support_method)) {
    commonSup.rule <- "none"
    commonSup.cut <- NA_real_
    if (!missing(cutoff)) warning("Argument cutoff ignored as common_support_method unspecified.")
  } else {
    ## validate the method: previously an unrecognized value passed straight
    ## through and, with `cutoff` missing, switch() yielded a NULL cutoff
    commonSup.rule <- match.arg(common_support_method, c("sd", "chisq"))
    if (missing(cutoff)) {
      commonSup.cut <- switch(commonSup.rule,
                              sd = 1,
                              chisq = 0.05
      )
      warning("Default value for cutoff used.")
    } else {
      commonSup.cut <- cutoff
    }
  }
  ## apply the common-support rule via refit (no re-estimation of the model)
  refitmodel <- bartCause::refit(model, newresp = NULL, commonSup.rule = commonSup.rule, commonSup.cut = commonSup.cut)
  # extract treatment effect: one row of metadata per observation
  rowinfo <- dplyr::tibble(.row = seq_along(refitmodel$commonSup.sub), treated = model$trt)
  if (commonSup.rule != "none") {
    rowinfo <- rowinfo %>% dplyr::mutate(supported = refitmodel$commonSup.sub)
  }
  ## join individual conditional effects (icate) with individual effects (ite)
  ## draw-by-draw, then attach the per-observation metadata
  te_df <- tidy_draws(refitmodel, type = "icate", fitstage = "response", sample = "all") %>%
    dplyr::left_join(tidy_draws(refitmodel, type = "ite", fitstage = "response"),
                     by = dplyr::join_by(".chain", ".iteration", ".draw", ".row")) %>%
    dplyr::left_join(rowinfo, by = dplyr::join_by(.row))
  ## optionally restrict to one treatment arm
  if (subset == "treated") {
    te_df <- te_df %>% dplyr::filter(!!as.symbol("treated") == 1)
  } else if (subset == "nontreated") {
    te_df <- te_df %>% dplyr::filter(!!as.symbol("treated") == 0)
  }
  return(te_df)
}
|
## Load packages: arrow for feather I/O, rugarch for univariate GARCH specs,
## rmgarch for the multivariate (DCC / GO-GARCH) models.
library(arrow)
library(rugarch)
library(rmgarch)
#unlink(pkgFile)
## NOTE(review): hard-coded local path -- adjust before running elsewhere
path <- "C:/Users/Lazar/Desktop/Financial Volatility/Assignment/data.feather"
data_full <- arrow::read_feather(path)
data_full <- data.frame(data_full)
## Split by timestamp in column DT: estimation sample (through 2019-10-31),
## validation window (November 2019) and prediction window (December 2019).
data <- data_full[1:which(data_full$DT == '2019-10-31 17:00:00'),]
data_val <- data_full[which(data_full$DT == '2019-11-01 11:00:00'):which(data_full$DT == '2019-11-29 17:00:00'),]
data_pred <- data_full[which(data_full$DT == '2019-12-02 11:00:00'):which(data_full$DT == '2019-12-31 17:00:00'),]
## Univariate margin shared by every series below: sGARCH variance model
## with rugarch's default mean specification.
uspec <- ugarchspec(variance.model = list(model = 'sGARCH'))
# DCC 1,1 3 Stocks
number_ticks <- function(n) {function(limits) pretty(limits, n)}
## Symmetric DCC(1,1) with multivariate Student-t innovations; the three
## margins share the same sGARCH spec (`uspec`) and are fit on columns 2:4.
DCC11spec <- dccspec(multispec(c(uspec, uspec, uspec)), distribution = 'mvt', model = 'DCC')
dcc11fit <- dccfit(DCC11spec, data = data[,2:4])
## Fitted conditional covariance / correlation arrays, indexed [i, j, t].
varcovDCC11 <- rcov(dcc11fit)
cormatDCC11 <- rcor(dcc11fit)
library(ggplot2)
library(xtable)
# Model Summary
## show() prints the fit report; its value is captured here as well
summaryDCC11 <- show(dcc11fit)
coefDCC11 <- coef(dcc11fit)
# Conditional Variance plot
## NOTE(review): scale_color_discrete labels are matched to the colour levels
## in alphabetical order ('blue', 'darkgreen', 'red'), not to the order the
## series are drawn (red, darkgreen, blue) -- confirm the EEM/SPY/EZU labels
## line up with the intended columns of data[, 2:4].
DCC11_var <- ggplot(data = data.frame('rcov' = rcov(dcc11fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from DCC GARCH (1,1)') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcov' = rcov(dcc11fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot
DCC11_cor <- ggplot(data = data.frame('rcor' = rcor(dcc11fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from DCC GARCH (1,1)') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc11fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcor' = rcor(dcc11fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot
DCC11_cov <- ggplot(data = data.frame('rcov' = rcov(dcc11fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from DCC GARCH (1,1)') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcov' = rcov(dcc11fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# DCC 1,1 Normal with GARCH 1,1
## Same DCC(1,1) model as above but with multivariate normal innovations,
## for comparison against the Student-t fit.
DCC11spec_n <- dccspec(multispec(c(uspec, uspec, uspec)), distribution = 'mvnorm', model = 'DCC')
dcc11fit_n <- dccfit(DCC11spec_n, data = data[,2:4])
## Fitted conditional covariance / correlation arrays, indexed [i, j, t].
varcovDCC11_n <- rcov(dcc11fit_n)
cormatDCC11_n <- rcor(dcc11fit_n)
# Model Summary
summaryDCC11_n <- show(dcc11fit_n)
coefDCC11_n <- coef(dcc11fit_n)
# Conditional Variance plot
DCC11_var_n <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_n)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from DCC GARCH (1,1) with normal distribution') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_n)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcov' = rcov(dcc11fit_n)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot
DCC11_cor_n <- ggplot(data = data.frame('rcor' = rcor(dcc11fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from DCC GARCH (1,1) with normal distribution') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc11fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcor' = rcor(dcc11fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot
DCC11_cov_n <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from DCC GARCH (1,1) with normal distribution') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcov' = rcov(dcc11fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Flexible DCC GARCH 1,1 model
## FDCC(1,1) (Billio-Caporin-Gobbo flexible DCC) with normal innovations;
## each series is put in its own group (groups = 1:3).
DCC11spec_f <- dccspec(multispec(c(uspec, uspec, uspec)), distribution = 'mvnorm', model = 'FDCC', groups = seq(1,3))
dcc11fit_f <- dccfit(DCC11spec_f, data = data[,2:4])
## Fitted conditional covariance / correlation arrays, indexed [i, j, t].
varcovDCC11_f <- rcov(dcc11fit_f)
cormatDCC11_f <- rcor(dcc11fit_f)
# Model Summary
summaryDCC11_f <- show(dcc11fit_f)
coefDCC11_f <- coef(dcc11fit_f)
# Conditional Variance plot
## Plot titles corrected: this section fits the *Flexible* DCC model, but the
## titles had been copy-pasted from the asymmetric-DCC section below.
DCC11_f_var <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_f)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Flexible DCC GARCH (1,1)') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_f)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcov' = rcov(dcc11fit_f)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot
DCC11_f_cor <- ggplot(data = data.frame('rcor' = rcor(dcc11fit_f)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Flexible DCC GARCH (1,1)') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc11fit_f)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcor' = rcor(dcc11fit_f)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot
DCC11_f_cov <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_f)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Flexible DCC GARCH (1,1)') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_f)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcov' = rcov(dcc11fit_f)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Asymetric DCC GARCH 1,1 model
## aDCC(1,1) with multivariate Student-t innovations.
## NOTE(review): "Asymetric" in the plot title strings below is a misspelling
## of "Asymmetric" (user-visible); left untouched here.
DCC11spec_a <- dccspec(multispec(c(uspec, uspec, uspec)), distribution = 'mvt', model = "aDCC")
dcc11fit_a <- dccfit(DCC11spec_a, data = data[,2:4])
## Fitted conditional covariance / correlation arrays, indexed [i, j, t].
varcovDCC11_a <- rcov(dcc11fit_a)
cormatDCC11_a <- rcor(dcc11fit_a)
# Model Summary
summaryDCC11_a <- show(dcc11fit_a)
coefDCC11_a <- coef(dcc11fit_a)
# Conditional Variance plot
DCC11_a_var <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_a)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Asymetric DCC GARCH (1,1)') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_a)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcov' = rcov(dcc11fit_a)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot
DCC11_a_cor <- ggplot(data = data.frame('rcor' = rcor(dcc11fit_a)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Asymetric DCC GARCH (1,1)') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc11fit_a)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcor' = rcor(dcc11fit_a)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot
DCC11_a_cov <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_a)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Asymetric DCC GARCH (1,1)') +
  theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_a)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
  geom_line(data = data.frame('rcov' = rcov(dcc11fit_a)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# GO-GARCH (1,1)
# BUG FIX: the original called gogarchspec(mmean.model = 'AR'). 'mmean.model'
# is a typo that matches no formal argument of gogarchspec(), so the AR mean
# specification was silently dropped and the default mean model was used.
# The correct argument is mean.model = list(model = 'AR').
GGARCHspec <- gogarchspec(mean.model = list(model = 'AR'))
GGARCHfit <- gogarchfit(GGARCHspec, data[,2:4])
# Fitted conditional covariance and correlation arrays (N x N x T)
varcovGGARCH <- rcov(GGARCHfit)
cormatGGARCH <- rcor(GGARCHfit)
# Model summary (show() prints and returns invisibly) and coefficients
summaryGGARCH <- show(GGARCHfit)
coefGGARCH <- coef(GGARCHfit)
# Conditional variance paths from the GO-GARCH fit: diagonal slices rcov[i,i,]
# for the three series (EEM, SPY, EZU).
GGARCH_var <- ggplot(data = data.frame('rcov' = rcov(GGARCHfit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from GO-GARCH') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(GGARCHfit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(GGARCHfit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Pairwise conditional correlation paths (off-diagonal rcor slices)
GGARCH_cor <- ggplot(data = data.frame('rcor' = rcor(GGARCHfit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from GO-GARCH') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(GGARCHfit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(GGARCHfit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Pairwise conditional covariance paths (off-diagonal rcov slices)
GGARCH_cov <- ggplot(data = data.frame('rcov' = rcov(GGARCHfit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from GO-GARCH') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(GGARCHfit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(GGARCHfit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Finding optimal univariate settings as basis for the multivariate models.
uspec <- ugarchspec(variance.model = list(model = 'sGARCH'))
# FIX: ugarchfit() has no 'distribution' argument -- the original passed
# distribution = 'mvt' (a *multivariate* distribution name), which was silently
# swallowed by '...'.  A univariate error distribution must be set via
# ugarchspec(distribution.model = ...), so the stray argument is dropped here.
ugarchfit(spec = uspec, data[,2])
ugarchfit(spec = uspec, data[,3])
ugarchfit(spec = uspec, data[,4])
# Sign Bias in 1 Series ('EZU') and weak significance in 'EEM' (10%)
# Candidate variance models; the three trailing 'fGARCH' entries are paired
# with the submodels below (GARCH / TGARCH / GJRGARCH).
models_list <- list(c('sGARCH','gjrGARCH', 'eGARCH', 'iGARCH', 'csGARCH', 'apARCH', 'fGARCH', 'fGARCH', 'fGARCH'))
submodels_list <- list(c('GARCH','TGARCH','GJRGARCH'))
# One row per candidate model, one column per series.
coef_sums <- matrix(NA, nrow = length(models_list[[1]]), ncol = 3)
rownames(coef_sums) <- c('sGARCH','gjrGARCH', 'eGARCH', 'iGARCH', 'csGARCH', 'apARCH', 'fGARCH','fTGARCH','fGJRGARCH')
colnames(coef_sums) <- c('EEM', 'SPY', 'EZU')
BIC_mat <- matrix(NA, nrow = length(models_list[[1]]), ncol = 3)
rownames(BIC_mat) <- c('sGARCH','gjrGARCH', 'eGARCH', 'iGARCH', 'csGARCH', 'apARCH', 'fGARCH','fTGARCH','fGJRGARCH')
colnames(BIC_mat) <- c('EEM', 'SPY', 'EZU')
# Fit each candidate model per series on the training sample, then evaluate
# BIC on the validation sample with the estimated parameters held fixed.
for (y in 2:length(data)){
  for (i in seq_len(length(models_list[[1]]))){
    if (i >= 7){
      # fGARCH family: select the submodel; omega fixed at 0 as in the original search.
      fit <- ugarchfit(ugarchspec(variance.model = list(model = models_list[[1]][i], submodel = submodels_list[[1]][i-6]), distribution.model = 'std', fixed.pars=list(omega=0)), data[,y])
      # Sum of all coefficients except the last (shape) -- weak-stationarity check.
      coef_sums[i,y-1] <- sum(coef(fit)[-length(coef(fit))])
      # BUG FIX: fixed.pars must be a *named* list.  The original passed
      # list(coef(fit)) -- a one-element unnamed list holding the whole
      # coefficient vector -- so no parameter was actually fixed on the
      # validation sample.  as.list(coef(fit)) preserves the names.
      fit_val <- ugarchfit(ugarchspec(variance.model = list(model = models_list[[1]][i], submodel = submodels_list[[1]][i-6]), fixed.pars = as.list(coef(fit)), distribution.model = 'std'), data_val[,y])
      BIC_mat[i,y-1] <- infocriteria(fit_val)[2]
    } else {
      fit <- ugarchfit(ugarchspec(variance.model = list(model = models_list[[1]][i]), distribution.model = 'std'), data[,y])
      # Same named-list fix for the validation refit.
      fit_val <- ugarchfit(ugarchspec(variance.model = list(model = models_list[[1]][i]), fixed.pars = as.list(coef(fit)), distribution.model = 'std'), data_val[,y])
      BIC_mat[i,y-1] <- infocriteria(fit_val)[2]
      coef_sums[i,y-1] <- sum(coef(fit)[-length(coef(fit))])
    }
  }
}
coef_sums # Check weak stationarity (persistence sums should be < 1)
BIC_mat # Check BIC values on the validation sample
fit <- ugarchfit(ugarchspec(variance.model = list(model = 'fGARCH', submodel = 'TGARCH'), distribution.model = 'std'), data[,4])
(coef(fit)[5] - coef(fit)[7] + coef(fit)[6]) < 1 # TGARCH stationary (Zakoian version 1994)
fit <- ugarchfit(ugarchspec(variance.model = list(model = 'eGARCH'), distribution.model = 'std'), data[,2])
(coef(fit)[5] - coef(fit)[7] + coef(fit)[6]) < 1 # EGARCH stationary
# SYNTAX FIX: the original ugarchspec() call for data[,3] contained a stray
# double comma ("list(model = 'eGARCH'),, distribution.model"), passing an
# empty (missing) argument; removed.
fit <- ugarchfit(ugarchspec(variance.model = list(model = 'eGARCH'), distribution.model = 'std'), data[,3])
(coef(fit)[5] - coef(fit)[7] + coef(fit)[6]) < 1 # EGARCH stationary
min(BIC_mat[,1]) # Optimal Stationary Specification: EGARCH
min(BIC_mat[,2]) # Optimal Stationary Specification: EGARCH
min(BIC_mat[,3]) # Optimal Stationary Specification: fGARCH
# Optimal univariate specifications, reused by every multivariate model below.
uspec_opt1 <- ugarchspec(variance.model = list(model = 'eGARCH'), distribution.model = 'std')
uspec_opt2 <- ugarchspec(variance.model = list(model = 'eGARCH'), distribution.model = 'std')
uspec_opt3 <- ugarchspec(variance.model = list(model = 'fGARCH', submodel = 'TGARCH'), distribution.model = 'std')
# DCC(1,1) built on the three optimal univariate specs, multivariate Student-t errors.
DCC_opt_spec <- dccspec(
  multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)),
  model = 'DCC',
  distribution = 'mvt'
)
dcc_opt_fit <- dccfit(DCC_opt_spec, data = data[, 2:4])
# Fitted conditional covariance / correlation arrays.
varcovDCC_opt <- rcov(dcc_opt_fit)
cormatDCC_opt <- rcor(dcc_opt_fit)
# Printed summary and estimated coefficients.
summaryDCC_opt <- show(dcc_opt_fit)
coefDCC_opt <- coef(dcc_opt_fit)
# Conditional variance paths from the optimal-spec DCC(1,1) fit (diagonal rcov slices).
DCC_opt_var <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Pairwise conditional correlation paths (off-diagonal rcor slices).
DCC_opt_cor <- ggplot(data = data.frame('rcor' = rcor(dcc_opt_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc_opt_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc_opt_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Pairwise conditional covariance paths (off-diagonal rcov slices).
DCC_opt_cov <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# DCC(1,1) with the optimal univariate models, multivariate *normal* errors.
DCC_opt_spec_n <- dccspec(
  multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)),
  model = 'DCC',
  distribution = 'mvnorm'
)
dcc_opt_fit_n <- dccfit(DCC_opt_spec_n, data = data[, 2:4])
# Fitted conditional covariance / correlation arrays.
varcovDCC_opt_n <- rcov(dcc_opt_fit_n)
cormatDCC_opt_n <- rcor(dcc_opt_fit_n)
# Printed summary and estimated coefficients.
summaryDCC_opt_n <- show(dcc_opt_fit_n)
coefDCC_opt_n <- coef(dcc_opt_fit_n)
# Conditional variance paths from the normal-error DCC fit (diagonal rcov slices).
DCC_opt_var_n <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from DCC(1,1) and optimal univariate models with normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Pairwise conditional correlation paths (off-diagonal rcor slices).
DCC_opt_cor_n <- ggplot(data = data.frame('rcor' = rcor(dcc_opt_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from DCC(1,1) and optimal univariate models with normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc_opt_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc_opt_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Pairwise conditional covariance paths (off-diagonal rcov slices).
DCC_opt_cov_n <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from DCC(1,1) and optimal univariate models with normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Asymmetric DCC (aDCC) on the optimal univariate specs, multivariate Student-t errors.
DCC_opt_a_spec <- dccspec(
  multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)),
  model = "aDCC",
  distribution = 'mvt'
)
dcc_opt_a_fit <- dccfit(DCC_opt_a_spec, data = data[, 2:4])
# Fitted conditional covariance / correlation arrays.
varcovDCC_a_opt <- rcov(dcc_opt_a_fit)
cormatDCC_a_opt <- rcor(dcc_opt_a_fit)
# Printed summary and estimated coefficients.
summaryDCC_a_opt <- show(dcc_opt_a_fit)
coefDCC_a_opt <- coef(dcc_opt_a_fit)
# Conditional variance paths from the aDCC fit (diagonal rcov slices).
DCC_a_opt_var <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Asymetric DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Pairwise conditional correlation paths (off-diagonal rcor slices).
DCC_a_opt_cor <- ggplot(data = data.frame('rcor' = rcor(dcc_opt_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Asymetric DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc_opt_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc_opt_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Pairwise conditional covariance paths (off-diagonal rcov slices).
DCC_a_opt_cov <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Asymetric DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Flexible DCC (FDCC) with each of the three series in its own correlation group.
DCC_opt_f_spec <- dccspec(
  multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)),
  model = "FDCC",
  groups = c(1, 2, 3)
)
dcc_opt_f_fit <- dccfit(DCC_opt_f_spec, data = data[, 2:4])
# Fitted conditional covariance / correlation arrays.
varcovDCC_f_opt <- rcov(dcc_opt_f_fit)
cormatDCC_f_opt <- rcor(dcc_opt_f_fit)
# Printed summary and estimated coefficients.
summaryDCC_f_opt <- show(dcc_opt_f_fit)
coefDCC_f_opt <- coef(dcc_opt_f_fit)
# Conditional variance paths from the FDCC fit (diagonal rcov slices).
DCC_f_opt_var <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Flexible DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Pairwise conditional correlation paths (off-diagonal rcor slices).
DCC_f_opt_cor <- ggplot(data = data.frame('rcor' = rcor(dcc_opt_f_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Flexible DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc_opt_f_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc_opt_f_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Pairwise conditional covariance paths (off-diagonal rcov slices).
DCC_f_opt_cov <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Flexible DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Time-varying (DCC-style) Student-t copula over three sGARCH(1,1) margins.
CGARCH11spec <- cgarchspec(
  multispec(c(uspec, uspec, uspec)),
  distribution.model = list(copula = c('mvt'), time.varying = TRUE)
)
cgarch11_fit <- cgarchfit(CGARCH11spec, data = data[, 2:4])
# Fitted conditional covariance / correlation arrays.
cgarch11_cov <- rcov(cgarch11_fit)
cgarch11_cor <- rcor(cgarch11_fit)
# Printed summary and estimated coefficients.
summaryCGARCH11 <- show(cgarch11_fit)
coefCGARCH11 <- coef(cgarch11_fit)
# Conditional variance paths from the copula-GARCH fit (diagonal rcov slices).
CGARCH11_var <- ggplot(data = data.frame('rcov' = rcov(cgarch11_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Pairwise conditional correlation paths (off-diagonal rcor slices).
CGARCH11_cor <- ggplot(data = data.frame('rcor' = rcor(cgarch11_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch11_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch11_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Pairwise conditional covariance paths (off-diagonal rcov slices).
CGARCH11_cov <- ggplot(data = data.frame('rcov' = rcov(cgarch11_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Time-varying Gaussian copula over the same three sGARCH(1,1) margins.
CGARCH11spec_n <- cgarchspec(
  multispec(c(uspec, uspec, uspec)),
  distribution.model = list(copula = c('mvnorm'), time.varying = TRUE)
)
cgarch11_fit_n <- cgarchfit(CGARCH11spec_n, data = data[, 2:4])
# Fitted conditional covariance / correlation arrays.
cgarch11_cov_n <- rcov(cgarch11_fit_n)
cgarch11_cor_n <- rcor(cgarch11_fit_n)
# Printed summary and estimated coefficients.
summaryCGARCH11_n <- show(cgarch11_fit_n)
coefCGARCH11_n <- coef(cgarch11_fit_n)
# Conditional variance paths from the Gaussian-copula fit (diagonal rcov slices).
CGARCH11_var_n <- ggplot(data = data.frame('rcov' = rcov(cgarch11_fit_n)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Copula with GARCH(1,1) and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_fit_n)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_fit_n)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Pairwise conditional correlation paths (off-diagonal rcor slices).
CGARCH11_cor_n <- ggplot(data = data.frame('rcor' = rcor(cgarch11_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Copula with GARCH(1,1) and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch11_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch11_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Pairwise conditional covariance paths (off-diagonal rcov slices).
CGARCH11_cov_n <- ggplot(data = data.frame('rcov' = rcov(cgarch11_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Copula with GARCH(1,1) and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Asymmetric Copula DCC GARCH(1,1)
# BUG FIX (two issues in the original):
#  1) 'asymetric = T' was passed *inside* distribution.model, where
#     cgarchspec() defines no such element (and the name is misspelled), so
#     the asymmetry term was silently ignored.  Asymmetry is a top-level
#     cgarchspec() argument: asymmetric = TRUE.
#  2) The summary/coefficients were taken from cgarch11_fit (the symmetric
#     copula fit) instead of this asymmetric fit cgarch11_a_fit.
CGARCH11spec_a <- cgarchspec(multispec(c(uspec, uspec, uspec)), asymmetric = TRUE, distribution.model = list(copula = c('mvt'), time.varying = TRUE))
cgarch11_a_fit <- cgarchfit(CGARCH11spec_a, data = data[,2:4])
cgarch11_a_cov <- rcov(cgarch11_a_fit)
cgarch11_a_cor <- rcor(cgarch11_a_fit)
# Model summary and estimated coefficients (now from the asymmetric fit).
summaryCGARCH11_a <- show(cgarch11_a_fit)
coefCGARCH11_a <- coef(cgarch11_a_fit)
# Conditional variance paths from the asymmetric copula fit (diagonal rcov slices).
CGARCH11_a_var <- ggplot(data = data.frame('rcov' = rcov(cgarch11_a_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Asymetric Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_a_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_a_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Pairwise conditional correlation paths (off-diagonal rcor slices).
CGARCH11_a_cor <- ggplot(data = data.frame('rcor' = rcor(cgarch11_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Asymetric Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch11_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch11_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Pairwise conditional covariance paths (off-diagonal rcov slices).
CGARCH11_a_cov <- ggplot(data = data.frame('rcov' = rcov(cgarch11_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Asymetric Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Time-varying Student-t copula over the three optimal univariate margins.
CGARCH_opt_spec <- cgarchspec(
  multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)),
  distribution.model = list(copula = c('mvt'), time.varying = TRUE)
)
cgarch_opt_fit <- cgarchfit(CGARCH_opt_spec, data = data[, 2:4])
# Fitted conditional covariance / correlation arrays.
cgarch_opt_cov <- rcov(cgarch_opt_fit)
cgarch_opt_cor <- rcor(cgarch_opt_fit)
# Printed summary and estimated coefficients.
summaryCGARCH_opt <- show(cgarch_opt_fit)
coefCGARCH_opt <- coef(cgarch_opt_fit)
# Conditional variance paths from the optimal-margin copula fit (diagonal rcov slices).
CGARCH_opt_var <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Pairwise conditional correlation paths (off-diagonal rcor slices).
CGARCH_opt_cor <- ggplot(data = data.frame('rcor' = rcor(cgarch_opt_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch_opt_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch_opt_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Pairwise conditional covariance paths (off-diagonal rcov slices).
CGARCH_opt_cov <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Time-varying Gaussian copula over the three optimal univariate margins.
CGARCH_opt_spec_n <- cgarchspec(
  multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)),
  distribution.model = list(copula = c('mvnorm'), time.varying = TRUE)
)
cgarch_opt_fit_n <- cgarchfit(CGARCH_opt_spec_n, data = data[, 2:4])
# Fitted conditional covariance / correlation arrays.
cgarch_opt_cov_n <- rcov(cgarch_opt_fit_n)
cgarch_opt_cor_n <- rcor(cgarch_opt_fit_n)
# Printed summary and estimated coefficients.
summaryCGARCH_opt_n <- show(cgarch_opt_fit_n)
coefCGARCH_opt_n <- coef(cgarch_opt_fit_n)
# Conditional variance paths from the Gaussian-copula optimal-margin fit (diagonal rcov slices).
CGARCH_opt_var_n <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Copula with optimal univaraite models and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Pairwise conditional correlation paths (off-diagonal rcor slices).
CGARCH_opt_cor_n <- ggplot(data = data.frame('rcor' = rcor(cgarch_opt_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Copula with optimal univaraite models and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch_opt_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch_opt_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Pairwise conditional covariance paths (off-diagonal rcov slices).
# NOTE(review): the variable name ends in capital '_N', inconsistent with the
# lowercase '_n' suffix used everywhere else -- possibly unintentional.
CGARCH_opt_cov_N <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Copula with optimal univaraite models and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Asymmetric Copula DCC with optimal univariate models
# BUG FIX (two issues in the original):
#  1) 'asymetric = T' inside distribution.model is a misspelled, misplaced
#     argument that cgarchspec() ignores; asymmetry must be requested via the
#     top-level asymmetric = TRUE.
#  2) The summary/coefficients below previously read from cgarch11_fit (a
#     different model) instead of cgarch_opt_a_fit.
CGARCH_opt_a_spec <- cgarchspec(multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)), asymmetric = TRUE, distribution.model = list(copula = c('mvt'), time.varying = TRUE))
cgarch_opt_a_fit <- cgarchfit(CGARCH_opt_a_spec, data = data[,2:4])
cgarch_opt_a_cov <- rcov(cgarch_opt_a_fit)
cgarch_opt_a_cor <- rcor(cgarch_opt_a_fit)
# Model summary and estimated coefficients (now from the asymmetric fit).
summaryCGARCH_opt_a <- show(cgarch_opt_a_fit)
coefCGARCH_opt_a <- coef(cgarch_opt_a_fit)
# Conditional variance / correlation / covariance plots for the asymmetric
# copula fit with optimal univariate margins.
# BUG FIX: in each of the three plots the overlay geom_line()s pulled from
# cgarch_opt_fit (the symmetric copula fit) instead of cgarch_opt_a_fit, so
# every chart mixed slices from two different models.  All slices now come
# from the asymmetric fit.
CGARCH_opt_a_var <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Asymetric Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Pairwise conditional correlation paths (off-diagonal rcor slices).
CGARCH_opt_a_cor <- ggplot(data = data.frame('rcor' = rcor(cgarch_opt_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Asymetric Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch_opt_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch_opt_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Pairwise conditional covariance paths (off-diagonal rcov slices).
CGARCH_opt_a_cov <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Asymetric Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# In-sample information criteria for all fitted multivariate models.
# infocriteria() returns per-observation (normalised) values; BIC in column 1,
# AIC in column 2 (infocriteria index 1 = Akaike, index 2 = Bayes).
infoIC_table_sample <- matrix(NA, nrow = 15, ncol = 2)
colnames(infoIC_table_sample) <- c('BIC', 'AIC')
rownames(infoIC_table_sample) <- c('DCC11', 'DCC11_N','DCC11_F', 'DCC11_A', 'GGARCH11', 'DCC11_opt', 'DCC11_opt_n', 'DCC11_OPT_A','DCC11_OPT_F', 'CGARCH11', 'CGARCH11_n', 'CGARCH11_A', 'CGARCH11_opt', 'CGARCH11_opt_n', 'CGARCH11_opt_A')
infoIC_table_sample[1,1] <- infocriteria(dcc11fit)[2]
infoIC_table_sample[1,2] <- infocriteria(dcc11fit)[1]
infoIC_table_sample[2,1] <- infocriteria(dcc11fit_n)[2]
infoIC_table_sample[2,2] <- infocriteria(dcc11fit_n)[1]
infoIC_table_sample[3,1] <- infocriteria(dcc11fit_f)[2]
infoIC_table_sample[3,2] <- infocriteria(dcc11fit_f)[1]
infoIC_table_sample[4,1] <- infocriteria(dcc11fit_a)[2]
infoIC_table_sample[4,2] <- infocriteria(dcc11fit_a)[1]
# GO-GARCH has no infocriteria() method, so compute AIC/BIC by hand.
# BUG FIX: likelihood() already returns the LOG-likelihood, so the original
# log(likelihood(...)) double-logged it. Also divide by the sample size so the
# values are on the same per-observation scale as infocriteria().
gg_ll_is <- likelihood(GGARCHfit)
gg_k_is <- length(coef(GGARCHfit))
infoIC_table_sample[5,2] <- (2*gg_k_is - 2*gg_ll_is) / nrow(data)
infoIC_table_sample[5,1] <- (gg_k_is*log(nrow(data)) - 2*gg_ll_is) / nrow(data)
infoIC_table_sample[6,1] <- infocriteria(dcc_opt_fit)[2]
infoIC_table_sample[6,2] <- infocriteria(dcc_opt_fit)[1]
infoIC_table_sample[7,1] <- infocriteria(dcc_opt_fit_n)[2]
infoIC_table_sample[7,2] <- infocriteria(dcc_opt_fit_n)[1]
infoIC_table_sample[8,1] <- infocriteria(dcc_opt_a_fit)[2]
infoIC_table_sample[8,2] <- infocriteria(dcc_opt_a_fit)[1]
infoIC_table_sample[9,1] <- infocriteria(dcc_opt_f_fit)[2]
infoIC_table_sample[9,2] <- infocriteria(dcc_opt_f_fit)[1]
# Copula-GARCH rows are transcribed from show() output because no
# infocriteria() method exists; NOTE(review): these constants must be
# re-transcribed whenever the copula models are refit.
infoIC_table_sample[10,1] <- -34.245 # as seen from the show() option as no infocriteria() available for Copula-Garch
infoIC_table_sample[10,2] <- -34.284
infoIC_table_sample[11,1] <- -34.217
infoIC_table_sample[11,2] <- -34.255
infoIC_table_sample[12,1] <- -34.245
infoIC_table_sample[12,2] <- -34.284
infoIC_table_sample[13,1] <- -34.883
infoIC_table_sample[13,2] <- -34.934
infoIC_table_sample[14,1] <- -34.842
infoIC_table_sample[14,2] <- -34.891
infoIC_table_sample[15,1] <- -34.883
infoIC_table_sample[15,2] <- -34.934
# Filter every candidate model through the validation window. Passing the
# in-sample fit via the `fit` argument reuses the already-estimated parameters
# instead of re-estimating them from scratch.
validation_models <- list()
validation_models$dcc11fit_val <- dccfit(DCC11spec, data = data_val[,2:4], fit = dcc11fit)
validation_models$dcc11fit_val_n <- dccfit(DCC11spec_n, data = data_val[,2:4], fit = dcc11fit_n)
validation_models$dcc11fit_f_val <- dccfit(DCC11spec_f, data = data_val[,2:4], fit = dcc11fit_f)
validation_models$dcc11fit_a_val <- dccfit(DCC11spec_a, data = data_val[,2:4], fit = dcc11fit_a)
# NOTE(review): gogarchfit is given no previous fit here, so the GO-GARCH
# model is fully re-estimated on the validation window — confirm intended.
validation_models$GGARCHfit_val <- gogarchfit(GGARCHspec, data_val[,2:4])
validation_models$dcc_opt_fit_val <- dccfit(DCC_opt_spec, data = data_val[,2:4], fit = dcc_opt_fit)
validation_models$dcc_opt_fit_val_n <- dccfit(DCC_opt_spec_n, data = data_val[,2:4], fit = dcc_opt_fit_n)
validation_models$dcc_opt_a_fit_val <- dccfit(DCC_opt_a_spec, data = data_val[,2:4], fit = dcc_opt_a_fit)
# BUG FIX: this model was previously evaluated on data_pred rather than the
# validation window used by every other model, making its IC incomparable.
validation_models$dcc_opt_f_fit_val <- dccfit(DCC_opt_f_spec, data = data_val[, 2:4], fit = dcc_opt_f_fit)
validation_models$cgarch11_fit_val <- cgarchfit(CGARCH11spec, data = data_val[,2:4], fit = cgarch11_fit)
validation_models$cgarch11_fit_val_n <- cgarchfit(CGARCH11spec_n, data = data_val[,2:4], fit = cgarch11_fit_n)
validation_models$cgarch11_a_fit_val <- cgarchfit(CGARCH11spec_a, data = data_val[,2:4], fit = cgarch11_a_fit)
validation_models$cgarch_opt_fit_val <- cgarchfit(CGARCH_opt_spec, data = data_val[,2:4], fit = cgarch_opt_fit)
validation_models$cgarch_opt_fit_val_n <- cgarchfit(CGARCH_opt_spec_n, data = data_val[,2:4], fit = cgarch_opt_fit_n)
validation_models$cgarch_opt_a_fit_val <- cgarchfit(CGARCH_opt_a_spec, data = data_val[,2:4], fit = cgarch_opt_a_fit)
# Validation-sample information criteria, same layout as infoIC_table_sample
# (BIC in column 1, AIC in column 2; infocriteria index 1 = Akaike, 2 = Bayes).
infoIC_table <- matrix(NA, nrow = 15, ncol = 2)
colnames(infoIC_table) <- c('BIC', 'AIC')
rownames(infoIC_table) <- c('DCC11', 'DCC11_N','DCC11_F', 'DCC11_A', 'GGARCH11', 'DCC11_opt', 'DCC11_opt_n', 'DCC11_OPT_A','DCC11_OPT_F', 'CGARCH11', 'CGARCH11_n', 'CGARCH11_A', 'CGARCH11_opt', 'CGARCH11_opt_n', 'CGARCH11_opt_A')
infoIC_table[1,1] <- infocriteria(validation_models$dcc11fit_val)[2]
infoIC_table[1,2] <- infocriteria(validation_models$dcc11fit_val)[1]
infoIC_table[2,1] <- infocriteria(validation_models$dcc11fit_val_n)[2]
infoIC_table[2,2] <- infocriteria(validation_models$dcc11fit_val_n)[1]
infoIC_table[3,1] <- infocriteria(validation_models$dcc11fit_f_val)[2]
infoIC_table[3,2] <- infocriteria(validation_models$dcc11fit_f_val)[1]
infoIC_table[4,1] <- infocriteria(validation_models$dcc11fit_a_val)[2]
infoIC_table[4,2] <- infocriteria(validation_models$dcc11fit_a_val)[1]
# GO-GARCH AIC/BIC computed by hand (no infocriteria() method).
# BUG FIX: likelihood() already returns the LOG-likelihood, so the original
# log(likelihood(...)) double-logged it. Also divide by the sample size so the
# values are on the same per-observation scale as infocriteria().
gg_ll_val <- likelihood(validation_models$GGARCHfit_val)
gg_k_val <- length(coef(validation_models$GGARCHfit_val))
infoIC_table[5,2] <- (2*gg_k_val - 2*gg_ll_val) / nrow(data_val)
infoIC_table[5,1] <- (gg_k_val*log(nrow(data_val)) - 2*gg_ll_val) / nrow(data_val)
infoIC_table[6,1] <- infocriteria(validation_models$dcc_opt_fit_val)[2]
infoIC_table[6,2] <- infocriteria(validation_models$dcc_opt_fit_val)[1]
infoIC_table[7,1] <- infocriteria(validation_models$dcc_opt_fit_val_n)[2]
infoIC_table[7,2] <- infocriteria(validation_models$dcc_opt_fit_val_n)[1]
infoIC_table[8,1] <- infocriteria(validation_models$dcc_opt_a_fit_val)[2]
infoIC_table[8,2] <- infocriteria(validation_models$dcc_opt_a_fit_val)[1]
infoIC_table[9,1] <- infocriteria(validation_models$dcc_opt_f_fit_val)[2]
infoIC_table[9,2] <- infocriteria(validation_models$dcc_opt_f_fit_val)[1]
# Copula-GARCH rows transcribed from show() output; NOTE(review): these
# constants must be re-transcribed whenever the copula models are refit.
infoIC_table[10,1] <- -36.792 # as seen from the show() option as no infocriteria() available for Copula-Garch
infoIC_table[10,2] <- -36.969
infoIC_table[11,1] <- -36.794
infoIC_table[11,2] <- -36.962
infoIC_table[12,1] <- -36.792
infoIC_table[12,2] <- -36.969
infoIC_table[13,1] <- -37.174
infoIC_table[13,2] <- -37.402
infoIC_table[14,1] <- -37.162
infoIC_table[14,2] <- -37.381
infoIC_table[15,1] <- -37.174
infoIC_table[15,2] <- -37.402
# TOP 3 MODELS FOR PREDICTIONS
# Rank models by their criterion in column 1: Rfast::nth finds the k-th
# smallest value (descending = F, NAs removed) and which() maps it back to the
# model's row. Each which() auto-prints the matching row name at the console.
# NOTE(review): ties would print more than one row per rank — confirm values
# are distinct.
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 1, descending = F, na.rm = T))
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 2, descending = F, na.rm = T))
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 3, descending = F, na.rm = T))
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 4, descending = F, na.rm = T))
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 5, descending = F, na.rm = T))
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 6, descending = F, na.rm = T))
# Same ranking on the in-sample table, for comparison.
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 1, descending = F, na.rm = T))
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 2, descending = F, na.rm = T))
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 3, descending = F, na.rm = T))
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 4, descending = F, na.rm = T))
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 5, descending = F, na.rm = T))
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 6, descending = F, na.rm = T))
# Ranking results (manually recorded from the console output above):
# In - Sample
# DCC 1,1 with optimal univariate models
# Asymetric DCC 1,1 with optimal univariate models
# DCC 1,1 with GARCH 1,1
# With AIC
# Copula DCC 1,1 with optimal univariate models
# Asymetric Copula DCC 1,1 with optimal univariate models
# DCC 1,1 with optimal univariate models
# Predictions: pre-allocate one container per candidate model holding, for
# every prediction timestamp, the nine one-step-ahead series (three variances,
# three covariances, three correlations) plus the timestamp.
forecast_cols <- c('varEEM', 'varSPY', 'varEZU', 'cov(EEM,SPY)', 'cov(EEM, EZU)', 'cov(SPY, EZU)', 'cor(EEM,SPY)', 'cor(EEM, EZU)', 'cor(SPY, EZU)', 'DT')
empty_forecast_frame <- function() {
  # Nine NA columns followed by the prediction-window timestamps.
  frame <- data.frame(matrix(NA, nrow = nrow(data_pred), ncol = 9))
  frame$DT <- data_pred$DT
  colnames(frame) <- forecast_cols
  frame
}
rolling_predictions1 <- empty_forecast_frame()
rolling_predictions2 <- empty_forecast_frame()
rolling_predictions3 <- empty_forecast_frame()
# Rolling one-step-ahead forecasts for the three selected models: at each step
# the model is re-estimated on data_full with the remaining prediction window
# held out (out.sample), then a single-step forecast is extracted.
one_step_stats <- function(fit) {
  # Forecast ONCE and reuse the arrays. PERFORMANCE FIX: the original re-ran
  # dccforecast() for every extracted element (18 forecasts per model per step).
  fc <- dccforecast(fit, n.ahead = 1)
  cov_fc <- rcov(fc)[[1]]
  cor_fc <- rcor(fc)[[1]]
  # Linear indices into the 3x3 arrays, matching the column layout:
  # variances (1,1) (2,2) (3,3), covariances (1,2) (1,3) (2,3), correlations.
  c(cov_fc[1], cov_fc[5], cov_fc[9],
    cov_fc[2], cov_fc[3], cov_fc[6],
    cor_fc[2], cor_fc[3], cor_fc[6])
}
start_time <- Sys.time()
for (i in seq_len(nrow(data_pred))){
  out_sample <- nrow(data_pred) - i + 1
  fit_pred1 <- dccfit(DCC_opt_spec, data = data_full[,2:4], out.sample = out_sample)
  fit_pred2 <- dccfit(DCC_opt_a_spec, data = data_full[,2:4], out.sample = out_sample)
  fit_pred3 <- dccfit(DCC11spec, data = data_full[,2:4], out.sample = out_sample)
  rolling_predictions1[i, 1:9] <- one_step_stats(fit_pred1)
  rolling_predictions2[i, 1:9] <- one_step_stats(fit_pred2)
  rolling_predictions3[i, 1:9] <- one_step_stats(fit_pred3)
}
end_time <- Sys.time() # Approx: 3.4-3.5 hours
end_time - start_time
library(mvtnorm)
library(quadprog)
n1 <- 0                 # offset into the forecast window (none used here)
n2 <- nrow(data_pred)   # number of one-step-ahead forecasts
k <- 3                  # Number of assets
a <- matrix(, n2, k)    # GMV weights from the optimal-DCC forecasts
b <- matrix(, n2, k)    # GMV weights from the asymmetric optimal-DCC forecasts
d <- matrix(, n2, k)    # GMV weights from the DCC(1,1) forecasts
perf <- matrix(, n2, 3)
# For each forecast date, rebuild the 3x3 covariance matrix from the stored
# series (columns 1-3 variances, 4-6 covariances) and solve for the global
# minimum-variance weights under a full-investment constraint (sum to one).
for (i in (n1 + 1):(n1 + n2))
{
  gmv_weights <- function(preds) {
    # Column-major fill reproduces the symmetric covariance matrix.
    sigma <- matrix(unlist(preds[i, c(1, 4, 5, 4, 2, 6, 5, 6, 3)]), nrow = k, ncol = k)
    solve.QP(Dmat = sigma, dvec = rep(0, k), Amat = matrix(1, nrow = k, ncol = 1),
             bvec = 1, meq = 1)$solution
  }
  a[i - n1, ] <- gmv_weights(rolling_predictions1)
  b[i - n1, ] <- gmv_weights(rolling_predictions2)
  d[i - n1, ] <- gmv_weights(rolling_predictions3)
}
# Simulations: containers for the simulation-based one-step-ahead forecasts of
# the two copula-GARCH models (same nine-series layout as the rolling frames).
sim_cols <- c('varEEM', 'varSPY', 'varEZU', 'cov(EEM,SPY)', 'cov(EEM, EZU)', 'cov(SPY, EZU)', 'cor(EEM,SPY)', 'cor(EEM, EZU)', 'cor(SPY, EZU)', 'DT')
simulation_predictions1 <- data.frame(matrix(NA, nrow = nrow(data_pred), ncol = 9), data_pred$DT)
colnames(simulation_predictions1) <- sim_cols
simulation_predictions2 <- simulation_predictions1
start_time <- Sys.time()
# Extend the estimation sample with the validation window and refit the two
# copula models once; these fits seed the first simulation step below.
# NOTE(review): `data` is permanently mutated here — later code reading `data`
# sees train + validation rows.
data = rbind(data, data_val)
CGARCH_opt_fit <- cgarchfit(CGARCH_opt_spec, data[,2:4])
CGARCH_opt_fit_n <- cgarchfit(CGARCH_opt_spec_n, data[,2:4])
# One-step-ahead forecasts for the two copula models via simulation: at each
# step the model is refit and one period is simulated (m.sim = 1000 draws)
# conditional on the most recent state.
sim_stats <- function(sim) {
  # Collapse the simulated 3x3x1 covariance/correlation arrays into the nine
  # stored series (variances, covariances, correlations).
  cov_s <- rcov(sim)
  cor_s <- rcor(sim)
  c(cov_s[1,1,1], cov_s[2,2,1], cov_s[3,3,1],
    cov_s[1,2,1], cov_s[1,3,1], cov_s[2,3,1],
    cor_s[1,2,1], cor_s[1,3,1], cor_s[2,3,1])
}
# Assemble a symmetric 3x3 correlation matrix from its three off-diagonals.
corr_from <- function(r12, r13, r23) {
  matrix(c(1, r12, r13, r12, 1, r23, r13, r23, 1), nrow = 3, ncol = 3)
}
for (i in 1:nrow(data_pred)){
  if (i == 1){
    # Seed the first step from the last fitted state of the train+val fit.
    # BUG FIX: the original indexed the time dimension with length(data), but
    # length() of a data.frame is its number of COLUMNS; the last observation
    # is at nrow(data).
    last_t <- nrow(data)
    CGARCH_opt_sim_t <- cgarchsim(CGARCH_opt_fit, n.sim = 1, m.sim = 1000,
      presigma = t(as.matrix(sqrt(c(rcov(CGARCH_opt_fit)[1,1,last_t], rcov(CGARCH_opt_fit)[2,2,last_t], rcov(CGARCH_opt_fit)[3,3,last_t])))),
      prereturns = as.matrix(data[nrow(data), 2:4]),
      preR = corr_from(rcor(CGARCH_opt_fit)[1,2,last_t], rcor(CGARCH_opt_fit)[1,3,last_t], rcor(CGARCH_opt_fit)[2,3,last_t]),
      preQ = CGARCH_opt_fit@mfit$Qt[[length(CGARCH_opt_fit@mfit$Qt)]],
      preZ = tail(CGARCH_opt_fit@mfit$Z, 1), rseed = 8)
    # BUG FIX: prereturns for the normal-copula model previously read
    # data_pred[nrow(data), ] — the wrong frame; use the last in-sample return.
    CGARCH_opt_sim_n <- cgarchsim(CGARCH_opt_fit_n, n.sim = 1, m.sim = 1000,
      presigma = t(as.matrix(sqrt(c(rcov(CGARCH_opt_fit_n)[1,1,last_t], rcov(CGARCH_opt_fit_n)[2,2,last_t], rcov(CGARCH_opt_fit_n)[3,3,last_t])))),
      prereturns = as.matrix(data[nrow(data), 2:4]),
      preR = corr_from(rcor(CGARCH_opt_fit_n)[1,2,last_t], rcor(CGARCH_opt_fit_n)[1,3,last_t], rcor(CGARCH_opt_fit_n)[2,3,last_t]),
      preQ = CGARCH_opt_fit_n@mfit$Qt[[length(CGARCH_opt_fit_n@mfit$Qt)]],
      preZ = tail(CGARCH_opt_fit_n@mfit$Z, 1), rseed = 8)
  } else {
    # Refit with the prediction observations seen so far, then simulate one
    # step conditional on the previous simulated state.
    # NOTE(review): data_pred[1:i, ] includes observation i — the very period
    # being forecast; confirm the look-ahead is intended (1:(i-1) otherwise).
    CGARCH_opt_fit <- cgarchfit(CGARCH_opt_spec, data = rbind(data[,2:4], data_pred[1:i,2:4]))
    CGARCH_opt_fit_n <- cgarchfit(CGARCH_opt_spec_n, data = rbind(data[,2:4], data_pred[1:i,2:4]))
    CGARCH_opt_sim_t <- cgarchsim(CGARCH_opt_fit, n.sim = 1, m.sim = 1000,
      presigma = as.matrix(sqrt(simulation_predictions1[i-1,1:3])),
      prereturns = as.matrix(data_pred[i-1, 2:4]),
      preR = corr_from(simulation_predictions1[i-1,7], simulation_predictions1[i-1,8], simulation_predictions1[i-1,9]),
      preQ = CGARCH_opt_fit@mfit$Qt[[length(CGARCH_opt_fit@mfit$Qt)]],
      preZ = tail(CGARCH_opt_fit@mfit$Z, 1), rseed = 8)
    CGARCH_opt_sim_n <- cgarchsim(CGARCH_opt_fit_n, n.sim = 1, m.sim = 1000,
      presigma = as.matrix(sqrt(simulation_predictions2[i-1,1:3])),
      prereturns = as.matrix(data_pred[i-1, 2:4]),
      preR = corr_from(simulation_predictions2[i-1,7], simulation_predictions2[i-1,8], simulation_predictions2[i-1,9]),
      preQ = CGARCH_opt_fit_n@mfit$Qt[[length(CGARCH_opt_fit_n@mfit$Qt)]],
      preZ = tail(CGARCH_opt_fit_n@mfit$Z, 1), rseed = 8)
  }
  simulation_predictions1[i, 1:9] <- sim_stats(CGARCH_opt_sim_t)
  simulation_predictions2[i, 1:9] <- sim_stats(CGARCH_opt_sim_n)
}
end_time <- Sys.time()
end_time - start_time # Approx 8 hours. (BUG FIX: was start_time - end_time, a negative duration.)
# Portfolio with Simulated Data: same GMV optimisation as above, applied to
# the simulation-based copula forecasts.
n1 <- 0
n2 <- nrow(data_pred)
k <- 3  # Number of assets
# NOTE(review): `t` and `n` shadow base::t(); function calls still resolve to
# the base function, but the names are kept only because later export code
# refers to them.
t <- matrix(, n2, k)    # GMV weights from the t-copula simulations
n <- matrix(, n2, k)    # GMV weights from the normal-copula simulations
perf <- matrix(, n2, 3)
for (i in (n1 + 1):(n1 + n2))
{
  gmv_sim_weights <- function(preds) {
    # Rebuild the symmetric 3x3 covariance matrix (column-major fill).
    sigma <- matrix(unlist(preds[i, c(1, 4, 5, 4, 2, 6, 5, 6, 3)]), nrow = k, ncol = k)
    solve.QP(Dmat = sigma, dvec = rep(0, k), Amat = matrix(1, nrow = k, ncol = 1),
             bvec = 1, meq = 1)$solution
  }
  t[i - n1, ] <- gmv_sim_weights(simulation_predictions1)  # Global minimum variance portfolio
  n[i - n1, ] <- gmv_sim_weights(simulation_predictions2)
}
# Exporting data: one workbook per rolling-forecast model.
library("writexl")
rolling_outputs <- list(
  DCC_opt_spec = rolling_predictions1,
  DCC_opt_a_spec = rolling_predictions2,
  DCC11spec = rolling_predictions3
)
for (sheet_name in names(rolling_outputs)) {
  write_xlsx(rolling_outputs[[sheet_name]],
             paste0("C:/Users/Lazar/Desktop/Financial Volatility/Assignment/", sheet_name, ".xlsx"))
}
# Re-anchor `data` to the in-sample window (it was extended with the
# validation rows earlier) and export the fitted conditional moments of the
# three in-sample DCC models.
data <- data_full[1:which(data_full$DT == '2019-10-31 17:00:00'),]
fit_result_cols <- c('varEEM', 'varSPY', 'varEZU', 'cov(EEM,SPY)', 'cov(EEM, EZU)', 'cov(SPY, EZU)', 'cor(EEM,SPY)', 'cor(EEM, EZU)', 'cor(SPY, EZU)', 'DT')
fitted_moments <- function(fit) {
  # Nine fitted series (variances, covariances, correlations) plus timestamps.
  res <- data.frame(rcov(fit)[1,1,], rcov(fit)[2,2,], rcov(fit)[3,3,],
                    rcov(fit)[1,2,], rcov(fit)[1,3,], rcov(fit)[2,3,],
                    rcor(fit)[1,2,], rcor(fit)[1,3,], rcor(fit)[2,3,],
                    data$DT)
  colnames(res) <- fit_result_cols
  res
}
dcc_opt_fit_results <- fitted_moments(dcc_opt_fit)
dcc_opt_a_fit_results <- fitted_moments(dcc_opt_a_fit)
dcc11fit_results <- fitted_moments(dcc11fit)
write_xlsx(dcc_opt_fit_results,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc_opt_fit_results.xlsx")
write_xlsx(dcc_opt_a_fit_results,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc_opt_a_fit_results.xlsx")
write_xlsx(dcc11fit_results,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc11fit_results.xlsx")
# Convert each weight matrix to a data frame with asset-name columns, then
# export one workbook per strategy.
as_weights_frame <- function(w) {
  w <- data.frame(w)
  colnames(w) <- c('EEM', 'SPY', 'EZU')
  w
}
a <- as_weights_frame(a)
b <- as_weights_frame(b)
d <- as_weights_frame(d)
t <- as_weights_frame(t)
n <- as_weights_frame(n)
write_xlsx(a,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc_opt_weights.xlsx")
write_xlsx(b,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc_opt_a_weights.xlsx")
write_xlsx(d,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc11_weights.xlsx")
write_xlsx(t,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/simulation_predictions1_weights.xlsx")
write_xlsx(n,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/simulation_predictions2_weights.xlsx")
# Persist the simulation-based copula-GARCH one-step-ahead forecasts.
library("writexl")
write_xlsx(simulation_predictions1,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/simulation_predictions1.xlsx")
write_xlsx(simulation_predictions2,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/simulation_predictions2.xlsx")
| /R_models_code.R | permissive | patricklucescu/financial_volatility | R | false | false | 69,496 | r | library(arrow)
library(rugarch)
library(rmgarch)
#unlink(pkgFile)
# Load the returns data and split it on fixed timestamps into training,
# validation and prediction windows (column 1 is the DT timestamp; columns
# 2:4 are the three asset return series used throughout).
# NOTE(review): the path is machine-specific; parameterise before sharing.
path <- "C:/Users/Lazar/Desktop/Financial Volatility/Assignment/data.feather"
data_full <- arrow::read_feather(path)
data_full <- data.frame(data_full)
data <- data_full[1:which(data_full$DT == '2019-10-31 17:00:00'),]
data_val <- data_full[which(data_full$DT == '2019-11-01 11:00:00'):which(data_full$DT == '2019-11-29 17:00:00'),]
data_pred <- data_full[which(data_full$DT == '2019-12-02 11:00:00'):which(data_full$DT == '2019-12-31 17:00:00'),]
# Baseline univariate specification reused for all three assets: standard
# GARCH(1,1) variance with the ugarchspec default mean model.
uspec <- ugarchspec(variance.model = list(model = 'sGARCH'))
# DCC 1,1 3 Stocks
# Breaks helper for ggplot scales: returns a closure that computes roughly
# `n` "pretty" axis break points for the axis limits it is given.
number_ticks <- function(n) {
  function(limits) {
    pretty(limits, n)
  }
}
# DCC(1,1) with multivariate Student-t errors over three GARCH(1,1) margins.
DCC11spec <- dccspec(multispec(c(uspec, uspec, uspec)), distribution = 'mvt', model = 'DCC')
dcc11fit <- dccfit(DCC11spec, data = data[,2:4])
# Fitted conditional covariance and correlation arrays (3 x 3 x T).
varcovDCC11 <- rcov(dcc11fit)
cormatDCC11 <- rcor(dcc11fit)
library(ggplot2)
library(xtable)
# Model Summary
# NOTE(review): show() prints the summary to the console; the assigned value
# is its (possibly invisible/NULL) return — confirm it is used later.
summaryDCC11 <- show(dcc11fit)
coefDCC11 <- coef(dcc11fit)
# Conditional Variance plot: the three diagonal series of rcov overlaid,
# coloured per asset via the scale_color_discrete labels.
DCC11_var <- ggplot(data = data.frame('rcov' = rcov(dcc11fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from DCC GARCH (1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc11fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot: the three off-diagonal series of rcor.
DCC11_cor <- ggplot(data = data.frame('rcor' = rcor(dcc11fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from DCC GARCH (1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc11fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc11fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot: the three off-diagonal series of rcov.
DCC11_cov <- ggplot(data = data.frame('rcov' = rcov(dcc11fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from DCC GARCH (1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc11fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# DCC 1,1 Normal with GARCH 1,1: same structure as above but with a
# multivariate normal distribution instead of Student-t.
DCC11spec_n <- dccspec(multispec(c(uspec, uspec, uspec)), distribution = 'mvnorm', model = 'DCC')
dcc11fit_n <- dccfit(DCC11spec_n, data = data[,2:4])
varcovDCC11_n <- rcov(dcc11fit_n)
cormatDCC11_n <- rcor(dcc11fit_n)
# Model Summary
summaryDCC11_n <- show(dcc11fit_n)
coefDCC11_n <- coef(dcc11fit_n)
# Conditional Variance plot (diagonal series of rcov, one colour per asset).
DCC11_var_n <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_n)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from DCC GARCH (1,1) with normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_n)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc11fit_n)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot (off-diagonal series of rcor).
DCC11_cor_n <- ggplot(data = data.frame('rcor' = rcor(dcc11fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from DCC GARCH (1,1) with normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc11fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc11fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot (off-diagonal series of rcov).
DCC11_cov_n <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from DCC GARCH (1,1) with normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc11fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Flexible DCC (FDCC) GARCH(1,1): group-wise correlation dynamics; here each
# asset is its own group (groups = seq(1,3)).
DCC11spec_f <- dccspec(multispec(c(uspec, uspec, uspec)), distribution = 'mvnorm', model = 'FDCC', groups = seq(1,3))
dcc11fit_f <- dccfit(DCC11spec_f, data = data[,2:4])
varcovDCC11_f <- rcov(dcc11fit_f)
cormatDCC11_f <- rcor(dcc11fit_f)
# Model Summary
summaryDCC11_f <- show(dcc11fit_f)
coefDCC11_f <- coef(dcc11fit_f)
# Conditional Variance plot (BUG FIX: the titles of all three plots said
# "Asymetric DCC" but this is the FLEXIBLE DCC model; relabelled).
DCC11_f_var <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_f)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Flexible DCC GARCH (1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_f)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc11fit_f)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot
DCC11_f_cor <- ggplot(data = data.frame('rcor' = rcor(dcc11fit_f)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Flexible DCC GARCH (1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc11fit_f)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc11fit_f)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot
DCC11_f_cov <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_f)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Flexible DCC GARCH (1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_f)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc11fit_f)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Asymmetric DCC (aDCC) GARCH(1,1): adds an asymmetry term to the correlation
# dynamics (negative shocks may raise correlations more than positive ones).
DCC11spec_a <- dccspec(multispec(c(uspec, uspec, uspec)), distribution = 'mvt', model = "aDCC")
dcc11fit_a <- dccfit(DCC11spec_a, data = data[,2:4])
varcovDCC11_a <- rcov(dcc11fit_a)
cormatDCC11_a <- rcor(dcc11fit_a)
# Model Summary
summaryDCC11_a <- show(dcc11fit_a)
coefDCC11_a <- coef(dcc11fit_a)
# Conditional Variance plot (FIX: corrected the "Asymetric" spelling in the
# user-facing plot titles below).
DCC11_a_var <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_a)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Asymmetric DCC GARCH (1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_a)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc11fit_a)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot
DCC11_a_cor <- ggplot(data = data.frame('rcor' = rcor(dcc11fit_a)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Asymmetric DCC GARCH (1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc11fit_a)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc11fit_a)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot
DCC11_a_cov <- ggplot(data = data.frame('rcov' = rcov(dcc11fit_a)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Asymmetric DCC GARCH (1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc11fit_a)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc11fit_a)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# GO-GARCH (1,1): generalized orthogonal GARCH on the three return series.
# BUG FIX: the argument was misspelled 'mmean.model', so gogarchspec()
# silently absorbed it via '...' and used the default (constant) mean instead
# of the intended AR mean. gogarchspec() expects mean.model as a list, e.g.
# list(model = 'AR').
GGARCHspec <- gogarchspec(mean.model = list(model = 'AR'))
GGARCHfit <- gogarchfit(GGARCHspec, data[,2:4])
# Time-varying covariance / correlation arrays (assets x assets x time).
varcovGGARCH <- rcov(GGARCHfit)
cormatGGARCH <- rcor(GGARCHfit)
# Model Summary
# NOTE(review): show() prints the summary; the assigned value is its
# (invisible) return -- verify a summary *object* is actually needed later.
summaryGGARCH <- show(GGARCHfit)
coefGGARCH <- coef(GGARCHfit)
# Conditional Variance plot: diagonal elements rcov()[i,i,] of the GO-GARCH
# covariance array over time, one coloured line per asset.
# NOTE(review): labels are assigned to colour levels in alphabetical order
# ('blue','darkgreen','red') by scale_color_discrete -- confirm the
# series/label pairing is intended.
GGARCH_var <- ggplot(data = data.frame('rcov' = rcov(GGARCHfit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from GO-GARCH') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(GGARCHfit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(GGARCHfit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot: pairwise conditional correlations over time.
GGARCH_cor <- ggplot(data = data.frame('rcor' = rcor(GGARCHfit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from GO-GARCH') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(GGARCHfit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(GGARCHfit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot: pairwise conditional covariances over time.
GGARCH_cov <- ggplot(data = data.frame('rcov' = rcov(GGARCHfit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from GO-GARCH') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(GGARCHfit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(GGARCHfit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Finding Optimal Univariate Settings as basis for multivariate models.
# Fit a plain sGARCH(1,1) to each of the three series and print the result
# (used for sign-bias diagnostics; see comment below at L9599).
uspec <- ugarchspec(variance.model = list(model = 'sGARCH'))
# FIX: dropped the stray 'distribution = "mvt"' argument -- ugarchfit() has
# no such parameter ('mvt' is a *multivariate* distribution name), so it was
# silently swallowed by '...' and had no effect. Distributions are set in
# ugarchspec() via 'distribution.model'.
ugarchfit(spec = uspec, data[,2])
ugarchfit(spec = uspec, data[,3])
ugarchfit(spec = uspec, data[,4])
# Sign Bias in 1 Series ('EZU') and weak significance in 'EEM' (10%)
# Candidate variance models; entries 7-9 are the fGARCH family with the
# submodels listed below (GARCH, TGARCH, GJRGARCH).
models_list <- list(c('sGARCH','gjrGARCH', 'eGARCH', 'iGARCH', 'csGARCH', 'apARCH', 'fGARCH', 'fGARCH', 'fGARCH'))
submodels_list <- list(c('GARCH','TGARCH','GJRGARCH'))
# Number of candidate models (clearer than lengths(models_list), which
# returns the element lengths of the outer list).
n_models <- length(models_list[[1]])
# coef_sums: sum of the fitted coefficients (excluding the last, the
# distribution shape parameter) per model/series -- weak-stationarity check.
coef_sums <- matrix(NA, nrow = n_models, ncol = 3)
rownames(coef_sums) <- c('sGARCH','gjrGARCH', 'eGARCH', 'iGARCH', 'csGARCH', 'apARCH', 'fGARCH','fTGARCH','fGJRGARCH')
colnames(coef_sums) <- c('EEM', 'SPY', 'EZU')
# BIC_mat: out-of-sample BIC, computed by re-evaluating the in-sample fit
# (all parameters held fixed) on the validation set data_val.
BIC_mat <- matrix(NA, nrow = n_models, ncol = 3)
rownames(BIC_mat) <- c('sGARCH','gjrGARCH', 'eGARCH', 'iGARCH', 'csGARCH', 'apARCH', 'fGARCH','fTGARCH','fGJRGARCH')
colnames(BIC_mat) <- c('EEM', 'SPY', 'EZU')
for (y in 2:length(data)){
  for (i in seq_len(n_models)){
    if (i >= 7){
      # fGARCH family: pick the matching submodel; omega fixed at 0.
      fit <- ugarchfit(ugarchspec(variance.model = list(model = models_list[[1]][i], submodel = submodels_list[[1]][i-6]), distribution.model = 'std', fixed.pars=list(omega=0)), data[,y])
      coef_sums[i,y-1] <- sum(coef(fit)[-length(coef(fit))])
      # BUG FIX: fixed.pars must be a *named* list of parameters. The
      # original list(coef(fit)) wrapped the whole coefficient vector as a
      # single unnamed element, so no parameters were actually fixed in the
      # validation refit. as.list(coef(fit)) preserves the names.
      fit_val <- ugarchfit(ugarchspec(variance.model = list(model = models_list[[1]][i], submodel = submodels_list[[1]][i-6]), fixed.pars = as.list(coef(fit)), distribution.model = 'std'), data_val[,y])
      BIC_mat[i,y-1] <- infocriteria(fit_val)[2]
    } else {
      fit <- ugarchfit(ugarchspec(variance.model = list(model = models_list[[1]][i]), distribution.model = 'std'), data[,y])
      fit_val <- ugarchfit(ugarchspec(variance.model = list(model = models_list[[1]][i]), fixed.pars = as.list(coef(fit)), distribution.model = 'std'), data_val[,y])
      BIC_mat[i,y-1] <- infocriteria(fit_val)[2]
      coef_sums[i,y-1] <- sum(coef(fit)[-length(coef(fit))])
    }
  }
}
coef_sums # Check weak stationarity
BIC_mat # Check BIC values
# Persistence / stationarity sanity checks on the chosen asymmetric models.
# NOTE(review): the same coefficient-index formula (coef 5 - coef 7 + coef 6)
# is applied to both the TGARCH and EGARCH fits; confirm those indices are
# (alpha1, beta1, gamma1) in each parameterization before relying on them.
fit <- ugarchfit(ugarchspec(variance.model = list(model = 'fGARCH', submodel = 'TGARCH'), distribution.model = 'std'), data[,4])
(coef(fit)[5] - coef(fit)[7] + coef(fit)[6]) < 1 # TGARCH stationary (Zakoian version 1994)
fit <- ugarchfit(ugarchspec(variance.model = list(model = 'eGARCH'), distribution.model = 'std'), data[,2])
(coef(fit)[5] - coef(fit)[7] + coef(fit)[6]) < 1 # EGARCH stationary
# FIX: removed a stray duplicated comma that passed an empty (missing)
# argument into ugarchspec().
fit <- ugarchfit(ugarchspec(variance.model = list(model = 'eGARCH'), distribution.model = 'std'), data[,3])
(coef(fit)[5] - coef(fit)[7] + coef(fit)[6]) < 1 # EGARCH stationary
min(BIC_mat[,1]) # Optimal Stationary Specification: EGARCH
min(BIC_mat[,2]) # Optimal Stationary Specification: EGARCH
min(BIC_mat[,3]) # Optimal Stationary Specification: fGARCH
# Optimal univariate specs per series, reused by the multivariate models below.
uspec_opt1 <- ugarchspec(variance.model = list(model = 'eGARCH'), distribution.model = 'std')
uspec_opt2 <- ugarchspec(variance.model = list(model = 'eGARCH'), distribution.model = 'std')
uspec_opt3 <- ugarchspec(variance.model = list(model = 'fGARCH', submodel = 'TGARCH'), distribution.model = 'std')
# DCC optimum 3 Stocks: DCC(1,1) with the per-series optimal univariate
# specs and a multivariate-t distribution.
DCC_opt_spec <- dccspec(multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)), distribution = 'mvt', model = 'DCC')
dcc_opt_fit <- dccfit(DCC_opt_spec, data = data[,2:4])
# Time-varying covariance / correlation arrays (assets x assets x time).
varcovDCC_opt <- rcov(dcc_opt_fit)
cormatDCC_opt <- rcor(dcc_opt_fit)
# Model Summary (show() prints; the assigned value is its invisible return)
summaryDCC_opt <- show(dcc_opt_fit)
coefDCC_opt <- coef(dcc_opt_fit)
# Conditional Variance plot: diagonal rcov()[i,i,] per asset over time.
# NOTE(review): legend labels pair with colour levels alphabetically
# ('blue','darkgreen','red') -- confirm the series/label mapping.
DCC_opt_var <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot: pairwise rcor()[i,j,] over time.
DCC_opt_cor <- ggplot(data = data.frame('rcor' = rcor(dcc_opt_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc_opt_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc_opt_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot: pairwise rcov()[i,j,] over time.
DCC_opt_cov <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# DCC optimum 3 Stocks and Normal Distribution: same as above but with a
# multivariate-normal DCC distribution (for comparison against mvt).
DCC_opt_spec_n <- dccspec(multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)), distribution = 'mvnorm', model = 'DCC')
dcc_opt_fit_n <- dccfit(DCC_opt_spec_n, data = data[,2:4])
varcovDCC_opt_n <- rcov(dcc_opt_fit_n)
cormatDCC_opt_n <- rcor(dcc_opt_fit_n)
# Model Summary
summaryDCC_opt_n <- show(dcc_opt_fit_n)
coefDCC_opt_n <- coef(dcc_opt_fit_n)
# Conditional Variance plot (diagonal of rcov over time, per asset).
DCC_opt_var_n <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from DCC(1,1) and optimal univariate models with normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot (pairwise conditional correlations).
DCC_opt_cor_n <- ggplot(data = data.frame('rcor' = rcor(dcc_opt_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from DCC(1,1) and optimal univariate models with normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc_opt_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc_opt_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot (pairwise conditional covariances).
DCC_opt_cov_n <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from DCC(1,1) and optimal univariate models with normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Asymetric DCC optimum 3 Stocks: aDCC(1,1) with the optimal univariate
# specs and a multivariate-t distribution.
DCC_opt_a_spec <- dccspec(multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)), distribution = 'mvt', model = "aDCC")
dcc_opt_a_fit <- dccfit(DCC_opt_a_spec, data = data[,2:4])
varcovDCC_a_opt <- rcov(dcc_opt_a_fit)
cormatDCC_a_opt <- rcor(dcc_opt_a_fit)
# Model Summary
summaryDCC_a_opt <- show(dcc_opt_a_fit)
coefDCC_a_opt <- coef(dcc_opt_a_fit)
# Conditional Variance plot (diagonal of rcov over time, per asset).
DCC_a_opt_var <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Asymetric DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot (pairwise conditional correlations).
DCC_a_opt_cor <- ggplot(data = data.frame('rcor' = rcor(dcc_opt_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Asymetric DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc_opt_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc_opt_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot (pairwise conditional covariances).
DCC_a_opt_cov <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Asymetric DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Flexible DCC optimum 3 Stocks: FDCC model; groups = c(1,2,3) places each
# asset in its own group (asset-specific dynamics).
DCC_opt_f_spec <- dccspec(multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)), model = "FDCC", groups = c(1,2,3))
dcc_opt_f_fit <- dccfit(DCC_opt_f_spec, data = data[,2:4])
varcovDCC_f_opt <- rcov(dcc_opt_f_fit)
cormatDCC_f_opt <- rcor(dcc_opt_f_fit)
# Model Summary
summaryDCC_f_opt <- show(dcc_opt_f_fit)
coefDCC_f_opt <- coef(dcc_opt_f_fit)
# Conditional Variance plot (diagonal of rcov over time, per asset).
DCC_f_opt_var <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Flexible DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot (pairwise conditional correlations).
DCC_f_opt_cor <- ggplot(data = data.frame('rcor' = rcor(dcc_opt_f_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Flexible DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(dcc_opt_f_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(dcc_opt_f_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot (pairwise conditional covariances).
DCC_f_opt_cov <- ggplot(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Flexible DCC(1,1) and optimal univariate models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(dcc_opt_f_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Copula DCC GARCH(1,1): time-varying t-copula over three sGARCH(1,1)
# margins (uspec). NOTE(review): 'T' should ideally be spelled TRUE.
CGARCH11spec <- cgarchspec(multispec(c(uspec, uspec, uspec)), distribution.model = list(copula = c('mvt'), time.varying = T))
cgarch11_fit <- cgarchfit(CGARCH11spec, data = data[,2:4])
cgarch11_cov <- rcov(cgarch11_fit)
cgarch11_cor <- rcor(cgarch11_fit)
# Model Summary
summaryCGARCH11 <- show(cgarch11_fit)
coefCGARCH11 <- coef(cgarch11_fit)
# Conditional Variance plot (diagonal of rcov over time, per asset).
CGARCH11_var <- ggplot(data = data.frame('rcov' = rcov(cgarch11_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot (pairwise conditional correlations).
CGARCH11_cor <- ggplot(data = data.frame('rcor' = rcor(cgarch11_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch11_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch11_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot (pairwise conditional covariances).
CGARCH11_cov <- ggplot(data = data.frame('rcov' = rcov(cgarch11_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Copula DCC GARCH(1,1) with normal distribution: same as above but with a
# time-varying Gaussian copula (for comparison against the t-copula).
CGARCH11spec_n <- cgarchspec(multispec(c(uspec, uspec, uspec)), distribution.model = list(copula = c('mvnorm'), time.varying = T))
cgarch11_fit_n <- cgarchfit(CGARCH11spec_n, data = data[,2:4])
cgarch11_cov_n <- rcov(cgarch11_fit_n)
cgarch11_cor_n <- rcor(cgarch11_fit_n)
# Model Summary
summaryCGARCH11_n <- show(cgarch11_fit_n)
coefCGARCH11_n <- coef(cgarch11_fit_n)
# Conditional Variance plot (diagonal of rcov over time, per asset).
CGARCH11_var_n <- ggplot(data = data.frame('rcov' = rcov(cgarch11_fit_n)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Copula with GARCH(1,1) and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_fit_n)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_fit_n)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot (pairwise conditional correlations).
CGARCH11_cor_n <- ggplot(data = data.frame('rcor' = rcor(cgarch11_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Copula with GARCH(1,1) and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch11_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch11_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot (pairwise conditional covariances).
CGARCH11_cov_n <- ggplot(data = data.frame('rcov' = rcov(cgarch11_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Copula with GARCH(1,1) and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Asymetric Copula DCC GARCH(1,1)
# BUG FIX: asymmetry is a *top-level* cgarchspec() argument ('asymmetric'),
# not a member of distribution.model. The original misspelled it
# ('asymetric') and placed it inside distribution.model, where it was
# silently ignored -- so the fitted model was NOT asymmetric. Also T -> TRUE.
CGARCH11spec_a <- cgarchspec(multispec(c(uspec, uspec, uspec)), asymmetric = TRUE, distribution.model = list(copula = c('mvt'), time.varying = TRUE))
cgarch11_a_fit <- cgarchfit(CGARCH11spec_a, data = data[,2:4])
cgarch11_a_cov <- rcov(cgarch11_a_fit)
cgarch11_a_cor <- rcor(cgarch11_a_fit)
# Model Summary
# BUG FIX: summary/coef previously referenced the *symmetric* fit
# 'cgarch11_fit' (copy-paste); they now report the asymmetric fit computed
# above.
summaryCGARCH11_a <- show(cgarch11_a_fit)
coefCGARCH11_a <- coef(cgarch11_a_fit)
# Conditional Variance plot (diagonal of rcov over time, per asset).
CGARCH11_a_var <- ggplot(data = data.frame('rcov' = rcov(cgarch11_a_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Asymetric Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_a_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_a_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot (pairwise conditional correlations).
CGARCH11_a_cor <- ggplot(data = data.frame('rcor' = rcor(cgarch11_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Asymetric Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch11_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch11_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot (pairwise conditional covariances).
CGARCH11_a_cov <- ggplot(data = data.frame('rcov' = rcov(cgarch11_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Asymetric Copula with GARCH(1,1)') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch11_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch11_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Copula DCC with optimal models: time-varying t-copula over the per-series
# optimal univariate specs (eGARCH, eGARCH, fGARCH/TGARCH).
CGARCH_opt_spec <- cgarchspec(multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)), distribution.model = list(copula = c('mvt'), time.varying = T))
cgarch_opt_fit <- cgarchfit(CGARCH_opt_spec, data = data[,2:4])
cgarch_opt_cov <- rcov(cgarch_opt_fit)
cgarch_opt_cor <- rcor(cgarch_opt_fit)
# Model Summary
summaryCGARCH_opt <- show(cgarch_opt_fit)
coefCGARCH_opt <- coef(cgarch_opt_fit)
# Conditional Variance plot (diagonal of rcov over time, per asset).
CGARCH_opt_var <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot (pairwise conditional correlations).
CGARCH_opt_cor <- ggplot(data = data.frame('rcor' = rcor(cgarch_opt_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch_opt_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch_opt_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot (pairwise conditional covariances).
CGARCH_opt_cov <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Copula DCC with optimal models and a *Gaussian* copula (comparison run).
CGARCH_opt_spec_n <- cgarchspec(multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)), distribution.model = list(copula = c('mvnorm'), time.varying = T))
cgarch_opt_fit_n <- cgarchfit(CGARCH_opt_spec_n, data = data[,2:4])
cgarch_opt_cov_n <- rcov(cgarch_opt_fit_n)
cgarch_opt_cor_n <- rcor(cgarch_opt_fit_n)
# Model Summary
summaryCGARCH_opt_n <- show(cgarch_opt_fit_n)
coefCGARCH_opt_n <- coef(cgarch_opt_fit_n)
# Conditional Variance plot (diagonal of rcov over time, per asset).
CGARCH_opt_var_n <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Copula with optimal univaraite models and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot (pairwise conditional correlations).
CGARCH_opt_cor_n <- ggplot(data = data.frame('rcor' = rcor(cgarch_opt_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Copula with optimal univaraite models and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch_opt_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch_opt_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot (pairwise conditional covariances).
# NOTE(review): variable name ends in '_N' (capital) unlike the '_n' suffix
# used elsewhere -- confirm downstream references.
CGARCH_opt_cov_N <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Copula with optimal univaraite models and normal distribution') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_fit_n)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# Asymetric Copula DCC with optimal models
# BUG FIX: asymmetry is a *top-level* cgarchspec() argument ('asymmetric'),
# not a member of distribution.model; the original misspelling/misplacement
# was silently ignored, so the fitted model was NOT asymmetric. T -> TRUE.
CGARCH_opt_a_spec <- cgarchspec(multispec(c(uspec_opt1, uspec_opt2, uspec_opt3)), asymmetric = TRUE, distribution.model = list(copula = c('mvt'), time.varying = TRUE))
cgarch_opt_a_fit <- cgarchfit(CGARCH_opt_a_spec, data = data[,2:4])
cgarch_opt_a_cov <- rcov(cgarch_opt_a_fit)
cgarch_opt_a_cor <- rcor(cgarch_opt_a_fit)
# Model Summary
# BUG FIX: summary/coef previously referenced 'cgarch11_fit' (copy-paste);
# they now report the asymmetric optimal-spec fit computed above.
summaryCGARCH_opt_a <- show(cgarch_opt_a_fit)
coefCGARCH_opt_a <- coef(cgarch_opt_a_fit)
# Conditional Variance plot (diagonal of rcov over time, per asset).
# BUG FIX: overlay geoms below previously mixed in series from the
# *symmetric* fit 'cgarch_opt_fit'; all layers now use cgarch_opt_a_fit.
CGARCH_opt_a_var <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[1,1,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Variance') + ggtitle('Conditional Variance from Asymetric Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[2,2,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[3,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Variance", labels = c("EEM", "SPY", 'EZU'))
# Correlation Plot (pairwise conditional correlations).
CGARCH_opt_a_cor <- ggplot(data = data.frame('rcor' = rcor(cgarch_opt_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcor)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Correlation') + ggtitle('Conditional Correlation from Asymetric Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcor' = rcor(cgarch_opt_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcor' = rcor(cgarch_opt_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Correlation", labels = c("cor(EEM,SPY)", "cor(EEM,EZU)", 'cor(SPY,EZU)'))
# Conditional Covariance plot (pairwise conditional covariances).
CGARCH_opt_a_cov <- ggplot(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[1,2,], 'time' = data['DT']), aes(x = DT, y = rcov)) + geom_line(aes(colour = 'red')) + xlab('Date') + ylab('Conditional Covariance') + ggtitle('Conditional Covariance from Asymetric Copula with optimal univaraite models') +
theme(plot.title = element_text(hjust = 0.5)) + scale_x_datetime(limits = c(min(data$DT), max(data$DT)), breaks=number_ticks(10)) + geom_line(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[1,3,], 'time' = data['DT']), aes(colour = 'darkgreen'), alpha = 0.5) +
geom_line(data = data.frame('rcov' = rcov(cgarch_opt_a_fit)[2,3,], 'time' = data['DT']), aes(colour = 'blue'), alpha = 0.5) + scale_color_discrete(name = "Conditional Covariance", labels = c("cov(EEM,SPY)", "cov(EEM,EZU)", 'cov(SPY,EZU)'))
# In-sample information criteria (column 1 = BIC, column 2 = AIC).
# rugarch's infocriteria() returns criteria normalised by the sample size.
infoIC_table_sample <- matrix(NA, nrow = 15, ncol = 2)
colnames(infoIC_table_sample) <- c('BIC', 'AIC')
rownames(infoIC_table_sample) <- c('DCC11', 'DCC11_N','DCC11_F', 'DCC11_A', 'GGARCH11', 'DCC11_opt', 'DCC11_opt_n', 'DCC11_OPT_A','DCC11_OPT_F', 'CGARCH11', 'CGARCH11_n', 'CGARCH11_A', 'CGARCH11_opt', 'CGARCH11_opt_n', 'CGARCH11_opt_A')
infoIC_table_sample[1,1] <- infocriteria(dcc11fit)[2]
infoIC_table_sample[1,2] <- infocriteria(dcc11fit)[1]
infoIC_table_sample[2,1] <- infocriteria(dcc11fit_n)[2]
infoIC_table_sample[2,2] <- infocriteria(dcc11fit_n)[1]
infoIC_table_sample[3,1] <- infocriteria(dcc11fit_f)[2]
infoIC_table_sample[3,2] <- infocriteria(dcc11fit_f)[1]
infoIC_table_sample[4,1] <- infocriteria(dcc11fit_a)[2]
infoIC_table_sample[4,2] <- infocriteria(dcc11fit_a)[1]
# GO-GARCH has no infocriteria() method, so AIC/BIC are computed manually.
# FIX: likelihood() already returns the LOG-likelihood, so the original
# log(likelihood(...)) double-logged it. The result is also divided by the
# sample size so it is comparable to the normalised infocriteria() rows.
infoIC_table_sample[5,2] <- (2*length(coef(GGARCHfit)) - 2*likelihood(GGARCHfit)) / nrow(data)
infoIC_table_sample[5,1] <- (length(coef(GGARCHfit))*log(nrow(data)) - 2*likelihood(GGARCHfit)) / nrow(data)
infoIC_table_sample[6,1] <- infocriteria(dcc_opt_fit)[2]
infoIC_table_sample[6,2] <- infocriteria(dcc_opt_fit)[1]
infoIC_table_sample[7,1] <- infocriteria(dcc_opt_fit_n)[2]
infoIC_table_sample[7,2] <- infocriteria(dcc_opt_fit_n)[1]
infoIC_table_sample[8,1] <- infocriteria(dcc_opt_a_fit)[2]
infoIC_table_sample[8,2] <- infocriteria(dcc_opt_a_fit)[1]
infoIC_table_sample[9,1] <- infocriteria(dcc_opt_f_fit)[2]
infoIC_table_sample[9,2] <- infocriteria(dcc_opt_f_fit)[1]
# Copula-GARCH rows are transcribed from show() output, as no infocriteria()
# method exists for cgarchfit objects.
infoIC_table_sample[10,1] <- -34.245 # as seen from the show() option as no infocriteria() available for Copula-Garch
infoIC_table_sample[10,2] <- -34.284
infoIC_table_sample[11,1] <- -34.217
infoIC_table_sample[11,2] <- -34.255
infoIC_table_sample[12,1] <- -34.245
infoIC_table_sample[12,2] <- -34.284
infoIC_table_sample[13,1] <- -34.883
infoIC_table_sample[13,2] <- -34.934
infoIC_table_sample[14,1] <- -34.842
infoIC_table_sample[14,2] <- -34.891
infoIC_table_sample[15,1] <- -34.883
infoIC_table_sample[15,2] <- -34.934
# Re-evaluate every fitted specification on the validation window using the
# in-sample parameter estimates (the 'fit =' argument filters with fixed
# parameters instead of re-estimating). GO-GARCH is refit directly.
validation_models = list()
validation_models$dcc11fit_val <- dccfit(DCC11spec, data = data_val[,2:4], fit = dcc11fit)
validation_models$dcc11fit_val_n <- dccfit(DCC11spec_n, data = data_val[,2:4], fit = dcc11fit_n)
validation_models$dcc11fit_f_val <- dccfit(DCC11spec_f, data = data_val[,2:4], fit = dcc11fit_f)
validation_models$dcc11fit_a_val <- dccfit(DCC11spec_a, data = data_val[,2:4], fit = dcc11fit_a)
validation_models$GGARCHfit_val <- gogarchfit(GGARCHspec, data_val[,2:4])
validation_models$dcc_opt_fit_val <- dccfit(DCC_opt_spec, data = data_val[,2:4], fit = dcc_opt_fit)
validation_models$dcc_opt_fit_val_n <- dccfit(DCC_opt_spec_n, data = data_val[,2:4], fit = dcc_opt_fit_n)
validation_models$dcc_opt_a_fit_val <- dccfit(DCC_opt_a_spec, data = data_val[,2:4], fit = dcc_opt_a_fit)
# FIX: this row was evaluated on data_pred instead of the validation sample,
# making its information criteria incomparable with the other 14 rows.
validation_models$dcc_opt_f_fit_val <- dccfit(DCC_opt_f_spec, data = data_val[,2:4], fit = dcc_opt_f_fit)
validation_models$cgarch11_fit_val <- cgarchfit(CGARCH11spec, data = data_val[,2:4], fit = cgarch11_fit)
validation_models$cgarch11_fit_val_n <- cgarchfit(CGARCH11spec_n, data = data_val[,2:4], fit = cgarch11_fit_n)
validation_models$cgarch11_a_fit_val <- cgarchfit(CGARCH11spec_a, data = data_val[,2:4], fit = cgarch11_a_fit)
validation_models$cgarch_opt_fit_val <- cgarchfit(CGARCH_opt_spec, data = data_val[,2:4], fit = cgarch_opt_fit)
validation_models$cgarch_opt_fit_val_n <- cgarchfit(CGARCH_opt_spec_n, data = data_val[,2:4], fit = cgarch_opt_fit_n)
validation_models$cgarch_opt_a_fit_val <- cgarchfit(CGARCH_opt_a_spec, data = data_val[,2:4], fit = cgarch_opt_a_fit)
# Validation-sample information criteria (column 1 = BIC, column 2 = AIC),
# same layout as infoIC_table_sample.
infoIC_table <- matrix(NA, nrow = 15, ncol = 2)
colnames(infoIC_table) <- c('BIC', 'AIC')
rownames(infoIC_table) <- c('DCC11', 'DCC11_N','DCC11_F', 'DCC11_A', 'GGARCH11', 'DCC11_opt', 'DCC11_opt_n', 'DCC11_OPT_A','DCC11_OPT_F', 'CGARCH11', 'CGARCH11_n', 'CGARCH11_A', 'CGARCH11_opt', 'CGARCH11_opt_n', 'CGARCH11_opt_A')
infoIC_table[1,1] <- infocriteria(validation_models$dcc11fit_val)[2]
infoIC_table[1,2] <- infocriteria(validation_models$dcc11fit_val)[1]
infoIC_table[2,1] <- infocriteria(validation_models$dcc11fit_val_n)[2]
infoIC_table[2,2] <- infocriteria(validation_models$dcc11fit_val_n)[1]
infoIC_table[3,1] <- infocriteria(validation_models$dcc11fit_f_val)[2]
infoIC_table[3,2] <- infocriteria(validation_models$dcc11fit_f_val)[1]
infoIC_table[4,1] <- infocriteria(validation_models$dcc11fit_a_val)[2]
infoIC_table[4,2] <- infocriteria(validation_models$dcc11fit_a_val)[1]
# GO-GARCH AIC/BIC computed manually (no infocriteria() method).
# FIX: likelihood() already returns the LOG-likelihood (the original wrapped
# it in log() a second time); divide by the validation-sample size so the
# values are comparable with the normalised infocriteria() rows.
infoIC_table[5,2] <- (2*length(coef(validation_models$GGARCHfit_val)) - 2*likelihood(validation_models$GGARCHfit_val)) / nrow(data_val)
infoIC_table[5,1] <- (length(coef(validation_models$GGARCHfit_val))*log(nrow(data_val)) - 2*likelihood(validation_models$GGARCHfit_val)) / nrow(data_val)
infoIC_table[6,1] <- infocriteria(validation_models$dcc_opt_fit_val)[2]
infoIC_table[6,2] <- infocriteria(validation_models$dcc_opt_fit_val)[1]
infoIC_table[7,1] <- infocriteria(validation_models$dcc_opt_fit_val_n)[2]
infoIC_table[7,2] <- infocriteria(validation_models$dcc_opt_fit_val_n)[1]
infoIC_table[8,1] <- infocriteria(validation_models$dcc_opt_a_fit_val)[2]
infoIC_table[8,2] <- infocriteria(validation_models$dcc_opt_a_fit_val)[1]
infoIC_table[9,1] <- infocriteria(validation_models$dcc_opt_f_fit_val)[2]
infoIC_table[9,2] <- infocriteria(validation_models$dcc_opt_f_fit_val)[1]
# Copula-GARCH rows transcribed from show() output.
infoIC_table[10,1] <- -36.792 # as seen from the show() option as no infocriteria() available for Copula-Garch
infoIC_table[10,2] <- -36.969
infoIC_table[11,1] <- -36.794
infoIC_table[11,2] <- -36.962
infoIC_table[12,1] <- -36.792
infoIC_table[12,2] <- -36.969
infoIC_table[13,1] <- -37.174
infoIC_table[13,2] <- -37.402
infoIC_table[14,1] <- -37.162
infoIC_table[14,2] <- -37.381
infoIC_table[15,1] <- -37.174
infoIC_table[15,2] <- -37.402
# TOP 3 MODELS FOR PREDICTIONS
# Rank the candidate models by the first column (BIC) of each table:
# Rfast::nth(..., k, descending = F) returns the k-th smallest value and
# which() recovers the row (model) attaining it. First block ranks the
# validation table, second block the in-sample table.
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 1, descending = F, na.rm = T))
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 2, descending = F, na.rm = T))
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 3, descending = F, na.rm = T))
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 4, descending = F, na.rm = T))
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 5, descending = F, na.rm = T))
which(infoIC_table[,1] == Rfast::nth(infoIC_table[,1], 6, descending = F, na.rm = T))
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 1, descending = F, na.rm = T))
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 2, descending = F, na.rm = T))
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 3, descending = F, na.rm = T))
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 4, descending = F, na.rm = T))
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 5, descending = F, na.rm = T))
which(infoIC_table_sample[,1] == Rfast::nth(infoIC_table_sample[,1], 6, descending = F, na.rm = T))
# In-sample ranking (BIC) found by the author:
# 1. DCC 1,1 with optimal univariate models
# 2. Asymmetric DCC 1,1 with optimal univariate models
# 3. DCC 1,1 with GARCH 1,1
# With AIC instead:
# 1. Copula DCC 1,1 with optimal univariate models
# 2. Asymmetric Copula DCC 1,1 with optimal univariate models
# 3. DCC 1,1 with optimal univariate models
# Predictions: pre-allocate one result frame per candidate model for the
# rolling one-step-ahead forecasts — nine numeric columns (variances,
# covariances, correlations) plus the forecast timestamp in 'DT'.
forecast_columns <- c('varEEM', 'varSPY', 'varEZU', 'cov(EEM,SPY)', 'cov(EEM, EZU)', 'cov(SPY, EZU)', 'cor(EEM,SPY)', 'cor(EEM, EZU)', 'cor(SPY, EZU)', 'DT')
# Build an empty forecast frame aligned with the supplied timestamps.
empty_forecast_frame <- function(timestamps) {
  frame <- data.frame(matrix(NA, nrow = length(timestamps), ncol = 9))
  frame$DT <- timestamps
  colnames(frame) <- forecast_columns
  frame
}
rolling_predictions1 <- empty_forecast_frame(data_pred$DT)
rolling_predictions2 <- empty_forecast_frame(data_pred$DT)
rolling_predictions3 <- empty_forecast_frame(data_pred$DT)
# Rolling one-step-ahead forecasts for the three selected DCC models.
# Each model is refit on an expanding window (out.sample shrinks by one per
# step) and a SINGLE dccforecast() is computed per model per iteration.
# FIX(performance): the original recomputed the identical dccforecast()
# object 9 times per model per iteration (27 redundant fits-worth of work),
# which dominated the reported 3.4-3.5 hour runtime.
#
# Store the 3x3 one-step-ahead covariance/correlation forecast of 'fit'
# into row i of 'preds' (linear indices into the 3x3 matrix: 1/5/9 are the
# diagonal variances, 2/3/6 the off-diagonal elements).
store_forecast <- function(preds, i, fit) {
  fc <- dccforecast(fit, n.ahead = 1)
  cov_fc <- rcov(fc)[[1]]
  cor_fc <- rcor(fc)[[1]]
  preds[i,1] <- cov_fc[1]   # var EEM
  preds[i,2] <- cov_fc[5]   # var SPY
  preds[i,3] <- cov_fc[9]   # var EZU
  preds[i,4] <- cov_fc[2]   # cov(EEM,SPY)
  preds[i,5] <- cov_fc[3]   # cov(EEM,EZU)
  preds[i,6] <- cov_fc[6]   # cov(SPY,EZU)
  preds[i,7] <- cor_fc[2]   # cor(EEM,SPY)
  preds[i,8] <- cor_fc[3]   # cor(EEM,EZU)
  preds[i,9] <- cor_fc[6]   # cor(SPY,EZU)
  preds
}
start_time <- Sys.time()
for (i in 1:nrow(data_pred)){
  out_sample <- nrow(data_pred) - i + 1
  fit_pred1 <- dccfit(DCC_opt_spec, data = data_full[,2:4], out.sample = out_sample)
  fit_pred2 <- dccfit(DCC_opt_a_spec, data = data_full[,2:4], out.sample = out_sample)
  fit_pred3 <- dccfit(DCC11spec, data = data_full[,2:4], out.sample = out_sample)
  rolling_predictions1 <- store_forecast(rolling_predictions1, i, fit_pred1)
  rolling_predictions2 <- store_forecast(rolling_predictions2, i, fit_pred2)
  rolling_predictions3 <- store_forecast(rolling_predictions3, i, fit_pred3)
}
end_time <- Sys.time() # Approx: 3.4-3.5 hours
end_time - start_time
# Global-minimum-variance portfolio weights from the rolling forecasts.
# For each out-of-sample period the 3x3 forecast covariance matrix is
# assembled (column-major) from the stored variances/covariances and passed
# to solve.QP() as Dmat; the constraints Amat/bvec/meq = 1 force the weights
# to sum to one (fully invested, no other constraints).
library(mvtnorm)
library(quadprog)
n1=0                 # offset into the prediction window (0 = start)
n2=nrow(data_pred)   # number of out-of-sample periods
k=3 #Number of assets
a=matrix(,n2,k)      # weights from DCC_opt forecasts
b=matrix(,n2,k)      # weights from asymmetric DCC_opt forecasts
d=matrix(,n2,k)      # weights from DCC(1,1) forecasts
perf=matrix(,n2,3)   # NOTE(review): allocated but never filled in this chunk
for (i in (n1+1):(n1+n2))
{
a[i-n1,] = solve.QP(Dmat=array(c(rolling_predictions1[i,1], rolling_predictions1[i,4], rolling_predictions1[i,5], rolling_predictions1[i,4], rolling_predictions1[i,2], rolling_predictions1[i,6], rolling_predictions1[i,5], rolling_predictions1[i,6], rolling_predictions1[i,3]), dim = c(3,3)), dvec=array(0, dim = c(k,1)), Amat=t(array(1, dim = c(1,k))), bvec=1, meq = 1)$solution #Global minimum variance portfolio
b[i-n1,] = solve.QP(Dmat=array(c(rolling_predictions2[i,1], rolling_predictions2[i,4], rolling_predictions2[i,5], rolling_predictions2[i,4], rolling_predictions2[i,2], rolling_predictions2[i,6], rolling_predictions2[i,5], rolling_predictions2[i,6], rolling_predictions2[i,3]), dim = c(3,3)), dvec=array(0, dim = c(k,1)), Amat=t(array(1, dim = c(1,k))), bvec=1, meq = 1)$solution
d[i-n1,] = solve.QP(Dmat=array(c(rolling_predictions3[i,1], rolling_predictions3[i,4], rolling_predictions3[i,5], rolling_predictions3[i,4], rolling_predictions3[i,2], rolling_predictions3[i,6], rolling_predictions3[i,5], rolling_predictions3[i,6], rolling_predictions3[i,3]), dim = c(3,3)), dvec=array(0, dim = c(k,1)), Amat=t(array(1, dim = c(1,k))), bvec=1, meq = 1)$solution
}
# Simulations
# Set-up for one-step-ahead simulation from the two Copula-GARCH models:
# result frames mirror the rolling-forecast frames (9 moments + timestamp).
simulation_predictions1 <- data.frame(data = matrix(NA, nrow = nrow(data_pred), ncol = 9))
simulation_predictions2 <- data.frame(data = matrix(NA, nrow = nrow(data_pred), ncol = 9))
simulation_predictions1$Date <- data_pred$DT
simulation_predictions2$Date <- data_pred$DT
colnames(simulation_predictions1) <- c('varEEM', 'varSPY', 'varEZU', 'cov(EEM,SPY)', 'cov(EEM, EZU)', 'cov(SPY, EZU)', 'cor(EEM,SPY)', 'cor(EEM, EZU)', 'cor(SPY, EZU)', 'DT')
colnames(simulation_predictions2) <- c('varEEM', 'varSPY', 'varEZU', 'cov(EEM,SPY)', 'cov(EEM, EZU)', 'cov(SPY, EZU)', 'cor(EEM,SPY)', 'cor(EEM, EZU)', 'cor(SPY, EZU)', 'DT')
start_time <- Sys.time()
# NOTE: this OVERWRITES 'data' with the combined estimation + validation
# sample; every later use of 'data' refers to the enlarged window.
data = rbind(data, data_val)
CGARCH_opt_fit <- cgarchfit(CGARCH_opt_spec, data[,2:4])
CGARCH_opt_fit_n <- cgarchfit(CGARCH_opt_spec_n, data[,2:4])
# Rolling one-step-ahead simulations from the t- and normal-copula models.
# Fixes relative to the original:
#  - the "last observation" index into rcov()/rcor() was length(data), which
#    for a data.frame is the number of COLUMNS (4), not the last row; it is
#    now nrow(data).
#  - prereturns for the normal-copula model read data_pred[nrow(data), ],
#    an out-of-range row; it now uses the last row of the estimation sample,
#    matching the t-copula call.
# NOTE(review): in the else-branch the models are refit on data_pred[1:i, ],
# i.e. including period i itself before simulating it — confirm this
# look-ahead is intended (1:(i-1) would be strictly out-of-sample).
#
# Store moments of the first simulation draw into row i of 'preds'.
store_sim <- function(preds, i, sim) {
  cov_s <- rcov(sim)
  cor_s <- rcor(sim)
  preds[i,1] <- cov_s[1,1,1]
  preds[i,2] <- cov_s[2,2,1]
  preds[i,3] <- cov_s[3,3,1]
  preds[i,4] <- cov_s[1,2,1]
  preds[i,5] <- cov_s[1,3,1]
  preds[i,6] <- cov_s[2,3,1]
  preds[i,7] <- cor_s[1,2,1]
  preds[i,8] <- cor_s[1,3,1]
  preds[i,9] <- cor_s[2,3,1]
  preds
}
# 1x3 row vector of conditional sigmas at time index idx of a fitted model.
last_sigma <- function(fit, idx) {
  cov_arr <- rcov(fit)
  t(as.matrix(sqrt(c(cov_arr[1,1,idx], cov_arr[2,2,idx], cov_arr[3,3,idx]))))
}
# 3x3 conditional correlation matrix at time index idx of a fitted model.
last_cor_matrix <- function(fit, idx) {
  cor_arr <- rcor(fit)
  matrix(c(1, cor_arr[1,2,idx], cor_arr[1,3,idx],
           cor_arr[2,1,idx], 1, cor_arr[2,3,idx],
           cor_arr[3,1,idx], cor_arr[3,2,idx], 1), nrow = 3, ncol = 3)
}
for (i in 1:nrow(data_pred)){
  if (i == 1){
    # First step: seed the simulation with the last fitted state.
    last_obs <- nrow(data)
    CGARCH_opt_sim_t <- cgarchsim(CGARCH_opt_fit, n.sim = 1, m.sim = 1000,
                                  presigma = last_sigma(CGARCH_opt_fit, last_obs),
                                  prereturns = as.matrix(data[last_obs, 2:4]),
                                  preR = last_cor_matrix(CGARCH_opt_fit, last_obs),
                                  preQ = CGARCH_opt_fit@mfit$Qt[[length(CGARCH_opt_fit@mfit$Qt)]],
                                  preZ = tail(CGARCH_opt_fit@mfit$Z, 1), rseed = 8)
    CGARCH_opt_sim_n <- cgarchsim(CGARCH_opt_fit_n, n.sim = 1, m.sim = 1000,
                                  presigma = last_sigma(CGARCH_opt_fit_n, last_obs),
                                  prereturns = as.matrix(data[last_obs, 2:4]),
                                  preR = last_cor_matrix(CGARCH_opt_fit_n, last_obs),
                                  preQ = CGARCH_opt_fit_n@mfit$Qt[[length(CGARCH_opt_fit_n@mfit$Qt)]],
                                  preZ = tail(CGARCH_opt_fit_n@mfit$Z, 1), rseed = 8)
  } else {
    # Subsequent steps: refit on the expanding window, then seed the
    # simulation with the previous step's simulated moments and returns.
    CGARCH_opt_fit <- cgarchfit(CGARCH_opt_spec, data = rbind(data[,2:4], data_pred[1:i,2:4]))
    CGARCH_opt_fit_n <- cgarchfit(CGARCH_opt_spec_n, data = rbind(data[,2:4], data_pred[1:i,2:4]))
    CGARCH_opt_sim_t <- cgarchsim(CGARCH_opt_fit, n.sim = 1, m.sim = 1000,
                                  presigma = as.matrix(sqrt(simulation_predictions1[i-1,1:3])),
                                  prereturns = as.matrix(data_pred[i-1, 2:4]),
                                  preR = matrix(c(1, simulation_predictions1[i-1,7], simulation_predictions1[i-1,8],
                                                  simulation_predictions1[i-1,7], 1, simulation_predictions1[i-1,9],
                                                  simulation_predictions1[i-1,8], simulation_predictions1[i-1,9], 1), nrow = 3, ncol = 3),
                                  preQ = CGARCH_opt_fit@mfit$Qt[[length(CGARCH_opt_fit@mfit$Qt)]],
                                  preZ = tail(CGARCH_opt_fit@mfit$Z, 1), rseed = 8)
    CGARCH_opt_sim_n <- cgarchsim(CGARCH_opt_fit_n, n.sim = 1, m.sim = 1000,
                                  presigma = as.matrix(sqrt(simulation_predictions2[i-1,1:3])),
                                  prereturns = as.matrix(data_pred[i-1, 2:4]),
                                  preR = matrix(c(1, simulation_predictions2[i-1,7], simulation_predictions2[i-1,8],
                                                  simulation_predictions2[i-1,7], 1, simulation_predictions2[i-1,9],
                                                  simulation_predictions2[i-1,8], simulation_predictions2[i-1,9], 1), nrow = 3, ncol = 3),
                                  preQ = CGARCH_opt_fit_n@mfit$Qt[[length(CGARCH_opt_fit_n@mfit$Qt)]],
                                  preZ = tail(CGARCH_opt_fit_n@mfit$Z, 1), rseed = 8)
  }
  simulation_predictions1 <- store_sim(simulation_predictions1, i, CGARCH_opt_sim_t)
  simulation_predictions2 <- store_sim(simulation_predictions2, i, CGARCH_opt_sim_n)
}
end_time <- Sys.time()
# FIX: the original printed start_time - end_time, a negative duration.
end_time - start_time # Approx 8 hours.
# Portfolio with Simulated Data
# Same global-minimum-variance QP as above, but with the simulated
# Copula-GARCH covariance forecasts as Dmat.
n1=0
n2=nrow(data_pred)
k=3 #Number of assets
# NOTE(review): 't' and 'n' shadow base::t() here. The later calls of the
# transpose function t(...) still resolve to base::t because R searches for
# a *function* when a name is used in call position, but renaming these
# matrices would be safer.
t=matrix(,n2,k)      # weights from the t-copula simulations
n=matrix(,n2,k)      # weights from the normal-copula simulations
perf=matrix(,n2,3)   # NOTE(review): allocated but never filled in this chunk
for (i in (n1+1):(n1+n2))
{
t[i-n1,] = solve.QP(Dmat=array(c(simulation_predictions1[i,1], simulation_predictions1[i,4], simulation_predictions1[i,5], simulation_predictions1[i,4], simulation_predictions1[i,2], simulation_predictions1[i,6], simulation_predictions1[i,5], simulation_predictions1[i,6], simulation_predictions1[i,3]), dim = c(3,3)), dvec=array(0, dim = c(k,1)), Amat=t(array(1, dim = c(1,k))), bvec=1, meq = 1)$solution #Global minimum variance portfolio
n[i-n1,] = solve.QP(Dmat=array(c(simulation_predictions2[i,1], simulation_predictions2[i,4], simulation_predictions2[i,5], simulation_predictions2[i,4], simulation_predictions2[i,2], simulation_predictions2[i,6], simulation_predictions2[i,5], simulation_predictions2[i,6], simulation_predictions2[i,3]), dim = c(3,3)), dvec=array(0, dim = c(k,1)), Amat=t(array(1, dim = c(1,k))), bvec=1, meq = 1)$solution
}
# Exporting data
# Write the rolling one-step-ahead forecasts of the three DCC models to
# Excel. NOTE(review): paths are absolute and machine-specific.
library("writexl")
write_xlsx(rolling_predictions1,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/DCC_opt_spec.xlsx")
write_xlsx(rolling_predictions2,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/DCC_opt_a_spec.xlsx")
write_xlsx(rolling_predictions3,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/DCC11spec.xlsx")
# Rebuild the estimation sample, collect the in-sample conditional moments
# of the three fitted DCC models into data frames, and export them to Excel.
data <- data_full[1:which(data_full$DT == '2019-10-31 17:00:00'),]
result_columns <- c('varEEM', 'varSPY', 'varEZU', 'cov(EEM,SPY)', 'cov(EEM, EZU)', 'cov(SPY, EZU)', 'cor(EEM,SPY)', 'cor(EEM, EZU)', 'cor(SPY, EZU)', 'DT')
# Gather variances, covariances and correlations of a fitted DCC model into
# one frame aligned with the supplied timestamps.
extract_fit_results <- function(fit, timestamps) {
  res <- data.frame(matrix(NA, nrow = length(timestamps), ncol = 9))
  res$DT <- timestamps
  colnames(res) <- result_columns
  cov_arr <- rcov(fit)
  cor_arr <- rcor(fit)
  res[['varEEM']] <- cov_arr[1,1,]
  res[['varSPY']] <- cov_arr[2,2,]
  res[['varEZU']] <- cov_arr[3,3,]
  res[['cov(EEM,SPY)']] <- cov_arr[1,2,]
  res[['cov(EEM, EZU)']] <- cov_arr[1,3,]
  res[['cov(SPY, EZU)']] <- cov_arr[2,3,]
  res[['cor(EEM,SPY)']] <- cor_arr[1,2,]
  res[['cor(EEM, EZU)']] <- cor_arr[1,3,]
  res[['cor(SPY, EZU)']] <- cor_arr[2,3,]
  res
}
dcc_opt_fit_results <- extract_fit_results(dcc_opt_fit, data$DT)
dcc_opt_a_fit_results <- extract_fit_results(dcc_opt_a_fit, data$DT)
dcc11fit_results <- extract_fit_results(dcc11fit, data$DT)
write_xlsx(dcc_opt_fit_results,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc_opt_fit_results.xlsx")
write_xlsx(dcc_opt_a_fit_results,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc_opt_a_fit_results.xlsx")
write_xlsx(dcc11fit_results,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc11fit_results.xlsx")
# Convert the five weight matrices to labelled data frames (one column per
# asset) and export all portfolio weights and simulated moments to Excel.
# Same objects, column names and output files as before.
asset_names <- c('EEM', 'SPY', 'EZU')
a <- setNames(data.frame(a), asset_names)
b <- setNames(data.frame(b), asset_names)
d <- setNames(data.frame(d), asset_names)
t <- setNames(data.frame(t), asset_names)
n <- setNames(data.frame(n), asset_names)
write_xlsx(a,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc_opt_weights.xlsx")
write_xlsx(b,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc_opt_a_weights.xlsx")
write_xlsx(d,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/dcc11_weights.xlsx")
write_xlsx(t,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/simulation_predictions1_weights.xlsx")
write_xlsx(n,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/simulation_predictions2_weights.xlsx")
library("writexl")
write_xlsx(simulation_predictions1,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/simulation_predictions1.xlsx")
write_xlsx(simulation_predictions2,"C:/Users/Lazar/Desktop/Financial Volatility/Assignment/simulation_predictions2.xlsx")
|
# Naive Bayes classification demo (e1071) on the 'bayes.csv' toy data set.
install.packages("e1071")  # NOTE(review): installing inside a script re-installs on every run
library(e1071)
getwd()
# Load the labelled examples; columns used below: Age, Income,
# JobSatisfaction, Desire, Enrolls.
b<-read.csv("bayes.csv")
b
str(b)
class(b)
# Unlabelled query record: Enrolls is left empty and will be predicted.
testset<-data.frame(Age="<=30", Income="Medium", JobSatisfaction="No", Desire="Fair", Enrolls="")
testset
# Append the query row so train and test share identical factor levels.
b<-rbind(b, testset)
b
traindata<-as.data.frame(b[1:14,])  # first 14 rows: labelled examples
testdata<-as.data.frame(b[15,])     # row 15: the appended query record
traindata
testdata
# Fit the Naive Bayes classifier on the four categorical predictors.
bayesmodel<-naiveBayes(Enrolls ~ Age + Income + JobSatisfaction + Desire, traindata)
bayesmodel
# Predict the class of the query record.
results <-predict(bayesmodel, testdata)
results
| /LAB 9/dsr-prog_1.R | no_license | shiva807/1BM17CS096_DSR | R | false | false | 451 | r | install.packages("e1071")
# Naive Bayes demo (duplicate copy of the script above, restyled): fit
# e1071::naiveBayes on the 14 labelled rows of bayes.csv and classify one
# appended, unlabelled record.
library(e1071)
getwd()
enroll_data <- read.csv("bayes.csv")
print(enroll_data)
str(enroll_data)
class(enroll_data)
# The record to classify; Enrolls is empty and will be predicted.
query_row <- data.frame(Age = "<=30", Income = "Medium", JobSatisfaction = "No", Desire = "Fair", Enrolls = "")
print(query_row)
# Append the query so train and test share identical factor levels.
enroll_data <- rbind(enroll_data, query_row)
print(enroll_data)
traindata <- as.data.frame(enroll_data[1:14, ])
testdata <- as.data.frame(enroll_data[15, ])
print(traindata)
print(testdata)
bayesmodel <- naiveBayes(Enrolls ~ Age + Income + JobSatisfaction + Desire, data = traindata)
print(bayesmodel)
results <- predict(bayesmodel, testdata)
print(results)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract.R
\name{hourly_to_daily}
\alias{hourly_to_daily}
\title{Convert hourly to daily frequency}
\usage{
hourly_to_daily(txt)
}
\arguments{
\item{txt}{String of the form 'every n hours'}
}
\value{
An equivalent string of the form 'x / day'
}
\description{
Convert hourly to daily frequency
}
| /man/hourly_to_daily.Rd | permissive | mhatrep/doseminer | R | false | true | 372 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract.R
\name{hourly_to_daily}
\alias{hourly_to_daily}
\title{Convert hourly to daily frequency}
\usage{
hourly_to_daily(txt)
}
\arguments{
\item{txt}{String of the form 'every n hours'}
}
\value{
An equivalent string of the form 'x / day'
}
\description{
Convert hourly to daily frequency
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enumeration_units.R
\name{school_districts}
\alias{school_districts}
\title{Download a school district shapefile into R}
\usage{
school_districts(state, type = "unified", cb = FALSE, year = NULL, ...)
}
\arguments{
\item{state}{The two-digit FIPS code (string) of the state you want. Can also
be state name or state abbreviation.}
\item{type}{Specify whether you want to return a unified school district (the default, \code{'unified'}),
an elementary school district (\code{'elementary'}), or a secondary school district (\code{'secondary'}).
Please note: elementary and secondary school districts do not exist in all states}
\item{cb}{if TRUE, download a generalized (1:500k)
school districts file. Defaults to FALSE (the most detailed TIGER/Line file)}
\item{year}{the data year; defaults to 2018}
\item{...}{arguments to be passed to the underlying `load_tiger` function, which is not exported.
Options include \code{class}, which can be set to \code{"sp"} (the default) or \code{"sf"} to
request sp or sf class objects, and \code{refresh}, which specifies whether or
not to re-download shapefiles (defaults to \code{FALSE}).}
}
\description{
From the US Census Bureau (see link for source):
School Districts are single-purpose administrative units within which local officials provide public
educational services for the area's residents. The Census Bureau obtains school district boundaries,
names, local education agency codes, grade ranges, and school district levels biennially from state
education officials. The Census Bureau collects this information for the primary purpose of providing the
U.S. Department of Education with annual estimates of the number of children in poverty within each
school district, county, and state. This information serves as the basis for the Department of Education to
determine the annual allocation of Title I funding to states and school districts.
}
\details{
The Census Bureau creates pseudo-unified school districts for areas in which unified school districts do
not exist. Additionally, elementary and secondary school districts do not exist in all states.
Please see the link for more information on how the Census Bureau creates the school district shapefiles.
}
\examples{
\dontrun{
library(tigris)
library(leaflet)
schools <- school_districts("Maine")
leaflet(schools) \%>\%
addProviderTiles("CartoDB.Positron") \%>\%
addPolygons(fillColor = "white",
color = "black",
weight = 0.5)
}
}
\seealso{
\url{http://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2018/TGRSHP2018_TechDoc.pdf}
Other general area functions:
\code{\link{block_groups}()},
\code{\link{blocks}()},
\code{\link{counties}()},
\code{\link{county_subdivisions}()},
\code{\link{places}()},
\code{\link{pumas}()},
\code{\link{states}()},
\code{\link{tracts}()},
\code{\link{zctas}()}
}
\concept{general area functions}
| /man/school_districts.Rd | no_license | kuriwaki/tigris | R | false | true | 2,961 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enumeration_units.R
\name{school_districts}
\alias{school_districts}
\title{Download a school district shapefile into R}
\usage{
school_districts(state, type = "unified", cb = FALSE, year = NULL, ...)
}
\arguments{
\item{state}{The two-digit FIPS code (string) of the state you want. Can also
be state name or state abbreviation.}
\item{type}{Specify whether you want to return a unified school district (the default, \code{'unified'}),
an elementary school district (\code{'elementary'}), or a secondary school district (\code{'secondary'}).
Please note: elementary and secondary school districts do not exist in all states}
\item{cb}{if TRUE, download a generalized (1:500k)
school districts file. Defaults to FALSE (the most detailed TIGER/Line file)}
\item{year}{the data year; defaults to 2018}
\item{...}{arguments to be passed to the underlying `load_tiger` function, which is not exported.
Options include \code{class}, which can be set to \code{"sp"} (the default) or \code{"sf"} to
request sp or sf class objects, and \code{refresh}, which specifies whether or
not to re-download shapefiles (defaults to \code{FALSE}).}
}
\description{
From the US Census Bureau (see link for source):
School Districts are single-purpose administrative units within which local officials provide public
educational services for the area's residents. The Census Bureau obtains school district boundaries,
names, local education agency codes, grade ranges, and school district levels biennially from state
education officials. The Census Bureau collects this information for the primary purpose of providing the
U.S. Department of Education with annual estimates of the number of children in poverty within each
school district, county, and state. This information serves as the basis for the Department of Education to
determine the annual allocation of Title I funding to states and school districts.
}
\details{
The Census Bureau creates pseudo-unified school districts for areas in which unified school districts do
not exist. Additionally, elementary and secondary school districts do not exist in all states.
Please see the link for more information on how the Census Bureau creates the school district shapefiles.
}
\examples{
\dontrun{
library(tigris)
library(leaflet)
schools <- school_districts("Maine")
leaflet(schools) \%>\%
addProviderTiles("CartoDB.Positron") \%>\%
addPolygons(fillColor = "white",
color = "black",
weight = 0.5)
}
}
\seealso{
\url{http://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2018/TGRSHP2018_TechDoc.pdf}
Other general area functions:
\code{\link{block_groups}()},
\code{\link{blocks}()},
\code{\link{counties}()},
\code{\link{county_subdivisions}()},
\code{\link{places}()},
\code{\link{pumas}()},
\code{\link{states}()},
\code{\link{tracts}()},
\code{\link{zctas}()}
}
\concept{general area functions}
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/twoParamPlot.R
\name{twoParamPlot}
\alias{twoParamPlot}
\title{Used to plot the ZOI and AUC results}
\usage{
twoParamPlot(projectName, type, ZOI = "ZOI20", AUC = "fAUC20",
ZOImin = 30, tolMax = 100, width = 6, height = 4, xlabels = "line",
xlabAngle = NA, order = NA, orderFactor = "line", overwrite = TRUE,
savePDF = TRUE, popUp = TRUE, barplot = TRUE)
}
\arguments{
\item{projectName}{the short name to be used for the project}
\item{ZOI}{specify the ZOI parameter to be plotted ("ZOI20", "ZOI50" or "ZOI80"), default = "ZOI20".}
\item{AUC}{specify the AUC parameter to be plotted ("fAUC20", "fAUC50" or "fAUC80"), default = "fAUC20".}
\item{ZOImin}{minimum distance from the disk for resistance plot (minimum y axis value), default = 30.}
\item{tolMax}{maximum y axis value for tolerance plot. Note tolerance is converted to a percent, default = 100.}
\item{width}{a numeric value indicating the width of the pdf file generated}
\item{height}{a numeric value indicating the height of the pdf file generated}
\item{xlabels}{either a vector containing the desired x-axis labels, or a single value indicating the column name that contains the values to use (likely either the 'line' column or one of the type columns), default = "line".}
\item{xlabAngle}{indicates whether to print the x axis labels on an angle; if a number is provided this will be the angle used. The default is not to plot on an angle, default = NA.}
\item{order}{can be either "factor" or "custom". If custom, supply a numerical vector the same length as the dataframe to indicate the desired order. If factor, supply the column name in \code{orderFactor} to be used to factor.}
\item{orderFactor}{if \code{order = "factor"} supply the column name to be used to factor.}
\item{overwrite}{a logical value indicating whether to overwrite existing figures created on the same day for the same project name}
\item{savePDF}{a logical value indicating whether to save a PDF file or open a new quartz. Defaults to TRUE.}
\item{popUp}{a logical value indicating whether to pop up the figure after it has been created}
\item{barplot}{whether to plot tolerance as a barplot (barplot = TRUE) or dotplot (barplot = FALSE), default = TRUE. Only possible when \code{type = "ag"}}
\item{ZOImin}{minimum distance from the disk for resistance plot (minimum y axis value), default = 30.}
}
\value{
Either a pdf figure (projectName_ZOI-fAUC.pdf) saved to the 'figures' directory or a figure on screen
}
\description{
This function creates a pdf figure of plots showing the results of the imageJ analysis for resistance (ZOI) and tolerance (AUC).
}
\details{
Basic parameter plotting functions to plot ZOI and fAUC parameter plots. Input can be the dataframe from either \code{\link{createDataframe}} \code{type="df"} or from \code{\link{aggregateData}} \code{type=="ag"}. The default is to plot ZOI as a dotplot and tolerance as a barplot, though tolerance can also be plotted as a dotplot with \code{barplot=FALSE} (currently there is not support to plot ZOI as a barplot in this framework).
}
\author{
Aleeza C. Gerstein
}
| /man/twoParamPlot.Rd | no_license | yoavram/diskImageR | R | false | false | 3,193 | rd | % Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/twoParamPlot.R
\name{twoParamPlot}
\alias{twoParamPlot}
\title{Used to plot the ZOI and AUC results}
\usage{
twoParamPlot(projectName, type, ZOI = "ZOI20", AUC = "fAUC20",
ZOImin = 30, tolMax = 100, width = 6, height = 4, xlabels = "line",
xlabAngle = NA, order = NA, orderFactor = "line", overwrite = TRUE,
savePDF = TRUE, popUp = TRUE, barplot = TRUE)
}
\arguments{
\item{projectName}{the short name to be used for the project}
\item{ZOI}{specify the ZOI parameter to be plotted ("ZOI20", "ZOI50" or "ZOI80"), default = "ZOI20".}
\item{AUC}{specify the AUC parameterto be plotted ("fAUC20", "fAUC50" or "fAUC80"), default = "fAUC20".}
\item{ZOImin}{minimum distance from the disk for resistance plot (minimum y axis value), default = 30.}
\item{tolMax}{maximum y axis value for tolerance plot. Note tolerance is coverted to a perent, default = 100.}
\item{width}{a numeric value indicating the width of the pdf file generated}
\item{height}{a numeric value indicating the height of the pdf file generated}
\item{xlabels}{either a vector containing the desired x-axis labels, or a single value indicating the column name that contains the values to use (likely either the 'line' column or one of the type columns), default = "line".}
\item{xlabAngle}{indicates whether to print the x axis labels on a angle, if a number is provided this will be the angle used. The defauilt is not to plot on an angle, default = NA.}
\item{order}{can be either "factor" or "custom". If custom, supply a numberial vector the same length as the dataframe to indicate the desired order. If factor, supply the column name in \code{ordeFactor} to be used to factor.}
\item{orderFactor}{if \code{order = "factor"} supply the column name to be used to factor.}
\item{overwrite}{a logical value indicating whether to overwrite existing figures created on the same day for the same project name}
\item{savePDF}{a logical value indicating whether to save a PDF file or open a new quartz. Defaults to TRUE.}
\item{popUp}{a logical value indicating whether to pop up the figure after it has been created}
\item{barplot}{whether to plot tolerance as a barplot (barplot = TRUE) or dotplot (barplot = FALSE), default = TRUE. Only possible when \code{type = "ag"}}
\item{ZOImin}{minimum distance from the disk for resistance plot (minimum y axis value), default = 30.}
}
\value{
Either a pdf figure figure (projectName_ZOI-fAUC.pdf) saved to the 'figures' directory or a figure on screen
}
\description{
This function creates a pdf figure of plots showing the results of the imageJ analysis for resistance (ZOI) and tolerance (AUC).
}
\details{
Basic parameter plotting functions to plot ZOI and fAUC parameter plots. Input can be the dataframe from either \code{\link{createDataframe}} \code{type="df"} or from \code{\link{aggregateData}} \code{type=="ag"}. The default is to plot ZOI as a dotplot and tolerance as a barplot, though tolerance can also be plotted as a dotplot with \code{barplot=FALSE} (currently there is not support to plot ZOI as a barplot in this framework).
}
\author{
Aleeza C. Gerstein
}
|
#' Extract the variance-covariance matrices from a fitted heckit5rob model.
#'
#' S3 method for \code{vcov()}.
#'
#' @param object A fitted \code{heckit5rob} object carrying \code{vcov1} and
#'   \code{vcov2} components (one per regime).
#' @param ... Further arguments, currently ignored (kept for S3 method
#'   compatibility).
#' @return A list with components \code{regime1} and \code{regime2}, the
#'   variance-covariance matrices of the two regimes.
vcov.heckit5rob <- function(object, ...) {
  # Build the result directly instead of mutating an empty list; use `<-`
  # style assignment per R convention.
  list(regime1 = object$vcov1, regime2 = object$vcov2)
}
| /R/vcov.heckit5rob.R | no_license | cran/ssmrob | R | false | false | 134 | r | vcov.heckit5rob <-
function(object, ...)
{
ret=list()
ret$regime1=object$vcov1
ret$regime2=object$vcov2
return(ret)
}
|
# Fixture script: read the first 500 synthetic Argo profiles, dump the
# structure of the first NetCDF file, and tabulate key per-profile properties.
library(argoFloats)
options(warn=1)  # surface warnings as they occur rather than collecting them
data(indexSynthetic)  # NOTE(review): assumes this provides `index` used below -- confirm
n <- 500  # number of index entries (profiles) to process
s <- subset(index, 1:n)
p <- getProfiles(s)  # fetch (or locate cached) profile files for the subset
a <- readProfiles(p)  # read them into argo objects
cat("\n\n\n")
cat("+--------------------------------------------------+\n")
cat("| 1. Discover names of things in first netcdf file |\n")
cat("+--------------------------------------------------+\n")
cat("\n\n\n")
# Open the first profile's NetCDF file directly to print its variables.
f <- a[[1]][["filename"]]
library(ncdf4)
n <- nc_open(f)  # NOTE(review): reuses `n` (the count above) as the netcdf handle
print(n)
nc_close(n)
cat("\n\n\n")
cat("+-----------------------------+\n")
cat("| 2. Table of some properties |\n")
cat("+-----------------------------+\n")
cat("\n\n\n")
# Summarize each profile: short filename, realtime flag (R/SR/MR filename
# prefix), data mode, and whether each adjusted variable is entirely NA.
filename <- sapply(a[["profile"]], function(x) gsub("^.*/argo/", "", x[["filename"]][1]))
df <- data.frame(filename=filename,
                 isRealtime=grepl("^[SM]{0,1}R", filename),
                 dataMode=sapply(a[["profile"]], function(x) x[["dataMode"]][1]),
                 allNAp=sapply(a[["profile"]], function(x) all(is.na(x[["pressureAdjusted"]]))),
                 allNAS=sapply(a[["profile"]], function(x) all(is.na(x[["salinityAdjusted"]]))),
                 allNAT=sapply(a[["profile"]], function(x) all(is.na(x[["temperatureAdjusted"]]))),
                 allNAO=sapply(a[["profile"]], function(x) all(is.na(x[["oxygenAdjusted"]]))),
                 havePDM=unlist(lapply(a[["profile"]], function(x) !is.null(x[["PARAMETER_DATA_MODE"]]))))
options(width=150)  # widen console output so the table prints on one line
print(df)
| /sandbox/dek/09_adjust/09_adjust_05.R | no_license | chandra04/argoFloats | R | false | false | 1,370 | r | library(argoFloats)
options(warn=1)
data(indexSynthetic)
n <- 500
s <- subset(index, 1:n)
p <- getProfiles(s)
a <- readProfiles(p)
cat("\n\n\n")
cat("+--------------------------------------------------+\n")
cat("| 1. Discover names of things in first netcdf file |\n")
cat("+--------------------------------------------------+\n")
cat("\n\n\n")
f <- a[[1]][["filename"]]
library(ncdf4)
n <- nc_open(f)
print(n)
nc_close(n)
cat("\n\n\n")
cat("+-----------------------------+\n")
cat("| 2. Table of some properties |\n")
cat("+-----------------------------+\n")
cat("\n\n\n")
filename <- sapply(a[["profile"]], function(x) gsub("^.*/argo/", "", x[["filename"]][1]))
df <- data.frame(filename=filename,
isRealtime=grepl("^[SM]{0,1}R", filename),
dataMode=sapply(a[["profile"]], function(x) x[["dataMode"]][1]),
allNAp=sapply(a[["profile"]], function(x) all(is.na(x[["pressureAdjusted"]]))),
allNAS=sapply(a[["profile"]], function(x) all(is.na(x[["salinityAdjusted"]]))),
allNAT=sapply(a[["profile"]], function(x) all(is.na(x[["temperatureAdjusted"]]))),
allNAO=sapply(a[["profile"]], function(x) all(is.na(x[["oxygenAdjusted"]]))),
havePDM=unlist(lapply(a[["profile"]], function(x) !is.null(x[["PARAMETER_DATA_MODE"]]))))
options(width=150)
print(df)
|
# Principal-component analysis of three industrial-concentration indicators
# (QL, HHm, PR) computed from 2006 employment counts.  The input table
# appears to have CNAE divisions as rows and mesoregions as columns, each
# with a 'Total' margin; cell [88, 138] is taken as the grand total.
library(devtools)
library(data.table)
#install.packages("dplyr")
library(dplyr)
#install.packages("tidyr")
library(tidyr)
#install.packages("tibble")
library(tibble)
#devtools::install_github("hadley/tidyverse")
library(tidyverse)
library(plyr)
library(ggplot2)
## install_github("vqv/ggbiplot")
library(ggbiplot)
### setwd('~/Dropbox/tese_fabio/dados/')
library(readxl)
# Load the employment spreadsheet and use its first column as row names.
div_meso_emp_06 <- read_excel("div_meso_emp_06.xls")
View(div_meso_emp_06)
str(div_meso_emp_06)
div_meso_06 <- as.data.frame(div_meso_emp_06)
attributes(div_meso_06)
head(div_meso_06)
View(div_meso_06)
rownames(div_meso_06) <- div_meso_06[,1]
attributes(div_meso_06)
str(div_meso_06)
# Drop the identifier and "not classified" columns.
# NOTE(review): `drop = FALSE` inside c() merely adds a character element
# named "drop" to the exclusion vector; it has no subsetting effect -- confirm intent.
excluir <- c("CNAE 2.0 Div", "{ñ class}", drop = FALSE)
div_meso_06 <- div_meso_06[,!(names(div_meso_06)%in% excluir)]
#div_meso_06 <- div_meso_06[-88, ]
rownames(div_meso_06)
colnames(div_meso_06)
# specialization index: location quotient (QL)
quociente_loc1 <- (div_meso_06[ , ] / div_meso_06[ ,'Total'])
quociente_loc1t <- t(quociente_loc1)
quociente_loc1t <- as.data.frame(quociente_loc1t)
quociente_loc2 <- (div_meso_06['Total', ] / div_meso_06[88,138])
quociente_loc2t <- t(quociente_loc2)
quociente_loc2t <- as.data.frame(quociente_loc2t)
quociente_loc <- ((quociente_loc1t[ , ]) / quociente_loc2t[ , ])
quociente_loc
quociente_loc <- t(quociente_loc)
quociente_loc <- as.data.frame(quociente_loc)
# modified Hirschman-Herfindahl index (HHm)
hhm1 <- (div_meso_06[ , ] / div_meso_06[ ,'Total'])
hhm1t <- t(hhm1)
# NOTE(review): the QL block above converts the *transposed* object
# (quociente_loc1t) to a data frame; here the untransposed hhm1 is converted
# instead -- possibly a typo for `hhm1t <- as.data.frame(hhm1t)`.
hhm1 <- as.data.frame(hhm1)
hhm2 <- (div_meso_06['Total', ] / div_meso_06[88,138])
hhm2t <- t(hhm2)
hhm2t <- as.data.frame(hhm2t)
hhm <- ((hhm1t[ , ]) - hhm2t[ , ])
hhm
hhm <- t(hhm)
hhm <- as.data.frame(hhm)
# relative employment share index (PR)
pr <- ((div_meso_06[ , ]) / div_meso_06[ ,'Total'])
pr
# multivariate principal component analysis of the three indices for
# CNAE division '26'
icn <- rbind(quociente_loc['26', ], hhm['26', ], pr['26', ])
icn
pca_icn <- t(icn)
pca_icn <- as.data.frame(pca_icn)
pca_icn <- pca_icn[-138, ]   # drop row 138 (the 'Total' margin)
#pca_icn <- pca_icn %>% slice(137:n())
pca_icn
icn_pca <- prcomp(pca_icn, center = TRUE, scale. = TRUE)
icn_pca
summary(icn_pca)
ggbiplot(icn_pca)
### Saving the plot
dev.copy(png,'Figures/icn_pca.png')
dev.off()
str(icn_pca)
# Repeat the analysis with princomp() (correlation matrix) for comparison.
pca_icn2 <- princomp(pca_icn, scores=TRUE, cor=TRUE)
pca_icn2
summary(pca_icn2)
# Loadings of principal components
loadings(pca_icn2)
pca_icn2$loadings
# Scree plot of eigenvalues
plot(pca_icn2)
dev.copy(png,'Figures/pca_icn2.png')
dev.off()
screeplot(pca_icn2, type="line", main="Scree Plot")
dev.copy(png,'Figures/scrplt_pca_icn2.png')
dev.off()
# Biplot of score variables
biplot(pca_icn2)
dev.copy(png,'Figures/bplt_pca_icn2.png')
dev.off()
# Scores of the components
pca_icn2$scores[1:137, ]
| /rotina_pca_icn.R | no_license | fabiomourao/rotina_pca_icn | R | false | false | 2,757 | r | library(devtools)
library(data.table)
#install.packages("dplyr")
library(dplyr)
#install.packages("tidyr")
library(tidyr)
#install.packages("tibble")
library(tibble)
#devtools::install_github("hadley/tidyverse")
library(tidyverse)
library(plyr)
library(ggplot2)
## install_github("vqv/ggbiplot")
library(ggbiplot)
### setwd('~/Dropbox/tese_fabio/dados/')
library(readxl)
div_meso_emp_06 <- read_excel("div_meso_emp_06.xls")
View(div_meso_emp_06)
str(div_meso_emp_06)
div_meso_06 <- as.data.frame(div_meso_emp_06)
attributes(div_meso_06)
head(div_meso_06)
View(div_meso_06)
rownames(div_meso_06) <- div_meso_06[,1]
attributes(div_meso_06)
str(div_meso_06)
excluir <- c("CNAE 2.0 Div", "{ñ class}", drop = FALSE)
div_meso_06 <- div_meso_06[,!(names(div_meso_06)%in% excluir)]
#div_meso_06 <- div_meso_06[-88, ]
rownames(div_meso_06)
colnames(div_meso_06)
# índice de especialização: quociente locacional (QL)
quociente_loc1 <- (div_meso_06[ , ] / div_meso_06[ ,'Total'])
quociente_loc1t <- t(quociente_loc1)
quociente_loc1t <- as.data.frame(quociente_loc1t)
quociente_loc2 <- (div_meso_06['Total', ] / div_meso_06[88,138])
quociente_loc2t <- t(quociente_loc2)
quociente_loc2t <- as.data.frame(quociente_loc2t)
quociente_loc <- ((quociente_loc1t[ , ]) / quociente_loc2t[ , ])
quociente_loc
quociente_loc <- t(quociente_loc)
quociente_loc <- as.data.frame(quociente_loc)
# índice de Hirschman-Herfindahl modificado (HHm)
hhm1 <- (div_meso_06[ , ] / div_meso_06[ ,'Total'])
hhm1t <- t(hhm1)
hhm1 <- as.data.frame(hhm1)
hhm2 <- (div_meso_06['Total', ] / div_meso_06[88,138])
hhm2t <- t(hhm2)
hhm2t <- as.data.frame(hhm2t)
hhm <- ((hhm1t[ , ]) - hhm2t[ , ])
hhm
hhm <- t(hhm)
hhm <- as.data.frame(hhm)
# índice de participação relativa do emprego (PR)
pr <- ((div_meso_06[ , ]) / div_meso_06[ ,'Total'])
pr
# análise multivariada de componentes principais
icn <- rbind(quociente_loc['26', ], hhm['26', ], pr['26', ])
icn
pca_icn <- t(icn)
pca_icn <- as.data.frame(pca_icn)
pca_icn <- pca_icn[-138, ]
#pca_icn <- pca_icn %>% slice(137:n())
pca_icn
icn_pca <- prcomp(pca_icn, center = TRUE, scale. = TRUE)
icn_pca
summary(icn_pca)
ggbiplot(icn_pca)
### Salvando o Plot
dev.copy(png,'Figures/icn_pca.png')
dev.off()
str(icn_pca)
pca_icn2 <- princomp(pca_icn, scores=TRUE, cor=TRUE)
pca_icn2
summary(pca_icn2)
# Loadings of principal components
loadings(pca_icn2)
pca_icn2$loadings
# Scree plot of eigenvalues
plot(pca_icn2)
dev.copy(png,'Figures/pca_icn2.png')
dev.off()
screeplot(pca_icn2, type="line", main="Scree Plot")
dev.copy(png,'Figures/scrplt_pca_icn2.png')
dev.off()
# Biplot of score variables
biplot(pca_icn2)
dev.copy(png,'Figures/bplt_pca_icn2.png')
dev.off()
# Scores of the components
pca_icn2$scores[1:137, ]
|
# Create database groups, schemas, and users for each project defined in
# db/data/projects.json, then wire up permissions between them.
#
# For each project this script:
#   1. drops (if present) and recreates a DB group named after the project;
#   2. ensures each of the project's schemas exists and grants the group ALL
#      on the schema and on every table already in it;
#   3. ensures each member user exists (created with password == username)
#      and adds the user to the project's group.
#
# NOTE(review): names from projects.json are interpolated into SQL unescaped.
# They are trusted local configuration, but quoting them with
# DBI::dbQuoteIdentifier() would be safer.
source("db/src/db_connection.R")
projects <- jsonlite::read_json("db/data/projects.json")
con <- get_db_conn()
for (i in seq_along(projects)) {
  # --- Drop (if exists) and create the project's DB group --------------------
  group <- projects[[i]]$project
  DBI::dbGetQuery(con, paste0("DROP GROUP IF EXISTS ", group))
  create_group_result <- DBI::dbGetQuery(con, paste0("CREATE GROUP ", group))
  if (is.null(create_group_result)) {
    print("PROBLEM WITH CREATE GROUP")
  } else {
    print(paste0("Creation of group ", group, " successful!"))
  }
  # --- Create schemas and grant the group full access on them ----------------
  for (k in seq_along(projects[[i]]$schemas)) {
    schema <- projects[[i]]$schemas[[k]]
    # Create the schema if it does not already exist.
    DBI::dbGetQuery(con, paste0("CREATE SCHEMA IF NOT EXISTS ", schema))
    # Grant on the schema itself and on all tables currently in it.
    DBI::dbGetQuery(con, paste0("GRANT ALL ON SCHEMA ", schema, " TO ", group))
    DBI::dbGetQuery(con, paste0("GRANT ALL ON ALL TABLES IN SCHEMA ", schema, " TO ", group))
  }
  # --- Create users (if needed) and assign them to the group -----------------
  for (j in seq_along(projects[[i]]$members)) {
    user <- projects[[i]]$members[[j]]
    # Only create the role when it does not already exist.
    user_exists <- DBI::dbGetQuery(
      con, paste0("SELECT 1 FROM pg_roles WHERE rolname='", user, "'"))
    if (nrow(user_exists) > 0) {
      print(paste0("User ", user, " already exists."))
    } else {
      create_user_result <- DBI::dbGetQuery(
        con, paste0("CREATE USER ", user, " WITH PASSWORD '", user, "'"))
      # BUG FIX: this previously tested create_group_result (copy-paste error),
      # so user-creation failures were judged by the earlier group result.
      if (is.null(create_user_result)) {
        print("PROBLEM WITH CREATE USER")
      } else {
        print(paste0("Creation of user ", user, " successful!"))
      }
    }
    # Add the user to the project's group.
    assign_group_result <- DBI::dbGetQuery(con, paste0("GRANT ", group, " TO ", user))
    if (is.null(assign_group_result)) {
      print("PROBLEM WITH ASSIGN USER")
    } else {
      print(paste0("User ", user, " assigned to group ", group))
    }
  }
}
DBI::dbDisconnect(con)
| /db/src/create_db_groups.R | no_license | uva-bi-sdad/infrastructure | R | false | false | 2,370 | r | source("db/src/db_connection.R")
projects <- jsonlite::read_json("db/data/projects.json")
con <- get_db_conn()
for (i in 1:length(projects)) {
# DROP (IF EXISTS) AND CREATE DB GROUP
group <- projects[[i]]$project
drop_group_sql <- paste0("DROP GROUP IF EXISTS ", group)
DBI::dbGetQuery(con, drop_group_sql)
create_group_sql <- paste0("CREATE GROUP ", group)
create_group_result <- DBI::dbGetQuery(con, create_group_sql)
if (is.null(create_group_result) == TRUE) {
print("PROBLEM WITH CREATE GROUP")
} else {
print(paste0("Creation of group ", group, " successful!"))
}
# ASSIGN SCHEMA PERMISSIONS TO GROUP
for (k in 1:length(projects[[i]]$schemas)) {
schema <- projects[[i]]$schemas[[k]]
# CREATE SCHEMA IF NOT EXISTS
schema_create_sql <- paste0("CREATE SCHEMA IF NOT EXISTS ", schema)
create_schema_result <- DBI::dbGetQuery(con, schema_create_sql)
# ASSIGN PERMISSIONS
schema_permissions_sql <- paste0("GRANT ALL ON SCHEMA ", schema, " TO ", group)
schema_permissions_result <- DBI::dbGetQuery(con, schema_permissions_sql)
schema_table_permissions_sql <- paste0("GRANT ALL ON ALL TABLES IN SCHEMA ", schema, " TO ", group)
schema_table_permissions_result <- DBI::dbGetQuery(con, schema_table_permissions_sql)
}
# ASSIGN USERS TO DB GROUP
for (j in 1:length(projects[[i]]$members)) {
user <- projects[[i]]$members[[j]]
# CREATE USER IF NOT EXISTS
user_exists_sql <- paste0("SELECT 1 FROM pg_roles WHERE rolname='", user, "'")
user_exists <- DBI::dbGetQuery(con, user_exists_sql)
if (nrow(user_exists) > 0) {
print(paste0("User ", user, " already exists."))
} else {
create_user_sql <- paste0("CREATE USER ", user," WITH PASSWORD '", user, "'")
create_user_result <- DBI::dbGetQuery(con, create_user_sql)
if (is.null(create_group_result) == TRUE) {
print("PROBLEM WITH CREATE USER")
} else {
print(paste0("Creation of user ", user, " successful!"))
}
}
# ASSIGN USER TO GROUP
assign_group_sql <- paste0("GRANT ", group, " TO ", user)
assign_group_result <- DBI::dbGetQuery(con, assign_group_sql)
if (is.null(assign_group_result) == TRUE) {
print("PROBLEM WITH ASSIGN USER")
} else {
print(paste0("User ", user, " assigned to group ", group))
}
}
}
DBI::dbDisconnect(con)
|
#----------------------------------------------------------------------
# Purpose: This test exercises HDFS operations from R.
#----------------------------------------------------------------------
# Change into this script's own directory (the runit harness passes the
# script path via the "f" argument) so the relative source() below resolves.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
#----------------------------------------------------------------------
# Parameters for the test.
#----------------------------------------------------------------------
# Check if we are running inside the H2O network by seeing if we can touch
# the namenode.
running_inside_h2o = is.running.internal.to.h2o()
if (running_inside_h2o) {
  # HDFS locations of the iris fixtures (a single file and a directory).
  hdfs_name_node = H2O_INTERNAL_HDFS_NAME_NODE
  hdfs_iris_file = "/datasets/runit/iris_wheader.csv"
  hdfs_iris_dir = "/datasets/runit/iris_test_train"
} else {
  # No HDFS access outside the internal network; abort the test up front.
  stop("Not running on H2O internal network. No access to HDFS.")
}
#----------------------------------------------------------------------
heading("BEGIN TEST")
#----------------------------------------------------------------------
# Test body: import iris from HDFS twice -- once as a single file and once
# as a directory of files -- verifying that each import yields an H2OFrame
# with the expected 150 rows.
#
# Args:
#   conn: the H2O connection object supplied by the runit test harness.
#----------------------------------------------------------------------
check.hdfs_basic <- function(conn) {
  #----------------------------------------------------------------------
  # Single file case.
  #----------------------------------------------------------------------
  heading("Testing single file importHDFS")
  url <- sprintf("hdfs://%s%s", hdfs_name_node, hdfs_iris_file)
  iris.hex <- h2o.importFile(conn, url)
  head(iris.hex)
  tail(iris.hex)
  n <- nrow(iris.hex)
  print(n)
  if (n != 150) {
    stop("nrows is wrong")
  }
  # inherits() is the robust class test; `class(x) != "H2OFrame"` misbehaves
  # when the object carries more than one class attribute.
  if (!inherits(iris.hex, "H2OFrame")) {
    stop("iris.hex is the wrong type")
  }
  print("Import worked")

  #----------------------------------------------------------------------
  # Directory case: import every file under the directory as one frame.
  #----------------------------------------------------------------------
  heading("Testing directory importHDFS")
  url <- sprintf("hdfs://%s%s", hdfs_name_node, hdfs_iris_dir)
  iris.dir.hex <- h2o.importFile(conn, url)
  head(iris.dir.hex)
  tail(iris.dir.hex)
  n <- nrow(iris.dir.hex)
  print(n)
  if (n != 150) {
    stop("nrows is wrong")
  }
  if (!inherits(iris.dir.hex, "H2OFrame")) {
    stop("iris.dir.hex is the wrong type")
  }
  print("Import worked")

  testEnd()
}

doTest("HDFS operations", check.hdfs_basic)
| /h2o-r/tests/testdir_hdfs/runit_HDFS_basic.R | permissive | mrgloom/h2o-3 | R | false | false | 2,194 | r | #----------------------------------------------------------------------
# Purpose: This test exercises HDFS operations from R.
#----------------------------------------------------------------------
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
#----------------------------------------------------------------------
# Parameters for the test.
#----------------------------------------------------------------------
# Check if we are running inside the H2O network by seeing if we can touch
# the namenode.
running_inside_h2o = is.running.internal.to.h2o()
if (running_inside_h2o) {
hdfs_name_node = H2O_INTERNAL_HDFS_NAME_NODE
hdfs_iris_file = "/datasets/runit/iris_wheader.csv"
hdfs_iris_dir = "/datasets/runit/iris_test_train"
} else {
stop("Not running on H2O internal network. No access to HDFS.")
}
#----------------------------------------------------------------------
heading("BEGIN TEST")
check.hdfs_basic <- function(conn) {
#----------------------------------------------------------------------
# Single file cases.
#----------------------------------------------------------------------
heading("Testing single file importHDFS")
url <- sprintf("hdfs://%s%s", hdfs_name_node, hdfs_iris_file)
iris.hex <- h2o.importFile(conn, url)
head(iris.hex)
tail(iris.hex)
n <- nrow(iris.hex)
print(n)
if (n != 150) {
stop("nrows is wrong")
}
if (class(iris.hex) != "H2OFrame") {
stop("iris.hex is the wrong type")
}
print ("Import worked")
#----------------------------------------------------------------------
# Directory file cases.
#----------------------------------------------------------------------
heading("Testing directory importHDFS")
url <- sprintf("hdfs://%s%s", hdfs_name_node, hdfs_iris_dir)
iris.dir.hex <- h2o.importFile(conn, url)
head(iris.dir.hex)
tail(iris.dir.hex)
n <- nrow(iris.dir.hex)
print(n)
if (n != 150) {
stop("nrows is wrong")
}
if (class(iris.dir.hex) != "H2OFrame") {
stop("iris.dir.hex is the wrong type")
}
print ("Import worked")
testEnd()
}
doTest("HDFS operations", check.hdfs_basic)
|
# convert_small_to_large | convert_large_to_small | convert_down_across
# Round-trip fixtures for the plate-format conversion helpers.  i01/i02 are
# 1536-well reference coordinate tables ('across' and 'down' layouts); each
# i*1 object shrinks a 1536-well data frame down to 384 wells and expands it
# back, covering every combination of 'across' and 'down' data flow (the
# i31/i41 paths additionally reorder via convert_down_across in between).
i01 <- plate_coords(plate_to = 1536,
                    data_from = data.frame(y = 1:1536),
                    data_format = "across")
i02 <- plate_coords(plate_to = 1536,
                    data_from = data.frame(y = 1:1536),
                    data_format = "down")
# across to across
i11 <- convert_large_to_small(plate_from = 1536,
                              plate_to = 384,
                              data_from = data.frame(y = 1:1536),
                              in_data_flow = 'across',
                              out_data_flow = 'across',
                              is_plate_coords = FALSE)
i11 <- convert_small_to_large(plate_from = 384,
                              plate_to = 1536,
                              data_from = i11,
                              in_data_flow = 'across',
                              out_data_flow = 'across',
                              is_plate_coords = TRUE)
# down to down
i21 <- convert_large_to_small(plate_from = 1536,
                              plate_to = 384,
                              data_from = data.frame(y = 1:1536),
                              in_data_flow = 'down',
                              out_data_flow = 'down',
                              is_plate_coords = FALSE)
i21 <- convert_small_to_large(plate_from = 384,
                              plate_to = 1536,
                              data_from = i21,
                              in_data_flow = 'down',
                              out_data_flow = 'down',
                              is_plate_coords = TRUE)
# across to down
i31 <- convert_large_to_small(plate_from = 1536,
                              plate_to = 384,
                              data_from = data.frame(y = 1:1536),
                              in_data_flow = 'across',
                              out_data_flow = 'down',
                              is_plate_coords = FALSE)
i31 <- convert_down_across(plateformat = 384,
                           data_from = i31,
                           is_plate_coords = FALSE,
                           in_data_flow = 'down',
                           out_data_flow = 'across')
i31 <- convert_small_to_large(plate_from = 384,
                              plate_to = 1536,
                              data_from = i31,
                              in_data_flow = 'across',
                              out_data_flow = 'across',
                              is_plate_coords = TRUE)
# down to across
i41 <- convert_large_to_small(plate_from = 1536,
                              plate_to = 384,
                              data_from = data.frame(y = 1:1536),
                              in_data_flow = 'down',
                              out_data_flow = 'across',
                              is_plate_coords = FALSE)
i41 <- convert_down_across(plateformat = 384,
                           data_from = i41,
                           is_plate_coords = FALSE,
                           in_data_flow = 'across',
                           out_data_flow = 'down')
i41 <- convert_small_to_large(plate_from = 384,
                              plate_to = 1536,
                              data_from = i41,
                              in_data_flow = 'down',
                              out_data_flow = 'down',
                              is_plate_coords = TRUE)
| /tests/testthat/helper_convert_sl_ls_ad_da.R | permissive | sathishsrinivasank/pinerrordetector | R | false | false | 3,442 | r | # convert_small_to_large | convert_large_to_small | convert_down_across
i01 <- plate_coords(plate_to = 1536,
data_from = data.frame(y = 1:1536),
data_format = "across")
i02 <- plate_coords(plate_to = 1536,
data_from = data.frame(y = 1:1536),
data_format = "down")
# across to across
i11 <- convert_large_to_small(plate_from = 1536,
plate_to = 384,
data_from = data.frame(y = 1:1536),
in_data_flow = 'across',
out_data_flow = 'across',
is_plate_coords = FALSE)
i11 <- convert_small_to_large(plate_from = 384,
plate_to = 1536,
data_from = i11,
in_data_flow = 'across',
out_data_flow = 'across',
is_plate_coords = TRUE)
# down to down
i21 <- convert_large_to_small(plate_from = 1536,
plate_to = 384,
data_from = data.frame(y = 1:1536),
in_data_flow = 'down',
out_data_flow = 'down',
is_plate_coords = FALSE)
i21 <- convert_small_to_large(plate_from = 384,
plate_to = 1536,
data_from = i21,
in_data_flow = 'down',
out_data_flow = 'down',
is_plate_coords = TRUE)
# across to down
i31 <- convert_large_to_small(plate_from = 1536,
plate_to = 384,
data_from = data.frame(y = 1:1536),
in_data_flow = 'across',
out_data_flow = 'down',
is_plate_coords = FALSE)
i31 <- convert_down_across(plateformat = 384,
data_from = i31,
is_plate_coords = FALSE,
in_data_flow = 'down',
out_data_flow = 'across')
i31 <- convert_small_to_large(plate_from = 384,
plate_to = 1536,
data_from = i31,
in_data_flow = 'across',
out_data_flow = 'across',
is_plate_coords = TRUE)
# down to across
i41 <- convert_large_to_small(plate_from = 1536,
plate_to = 384,
data_from = data.frame(y = 1:1536),
in_data_flow = 'down',
out_data_flow = 'across',
is_plate_coords = FALSE)
i41 <- convert_down_across(plateformat = 384,
data_from = i41,
is_plate_coords = FALSE,
in_data_flow = 'across',
out_data_flow = 'down')
i41 <- convert_small_to_large(plate_from = 384,
plate_to = 1536,
data_from = i41,
in_data_flow = 'down',
out_data_flow = 'down',
is_plate_coords = TRUE)
|
# Chart and map GBIF presence records of Junco vulcani in Costa Rica: read
# the records and the canton boundaries, spatially join them, then build an
# interactive table, a seasonality chart, a pie chart, and leaflet maps.
library(dplyr)
library(ggplot2)
library(sf)
library(DT)
library(plotly)
library(leaflet)
library(jsonlite)
# Read the presence records (CSV with lon/lat columns) as point geometries.
junco_vulcani <-
  st_read(
    "https://raw.githubusercontent.com/gf0604-procesamientodatosgeograficos/2021i-datos/main/gbif/junco_vulcani-cr-registros.csv",
    options = c(
      "X_POSSIBLE_NAMES=decimalLongitude",
      "Y_POSSIBLE_NAMES=decimalLatitude"
    ),
    quiet = TRUE
  )
# Assign the CRS (EPSG:4326, WGS84)
st_crs(junco_vulcani) = 4326
# Geospatial layer of cantons
cantones <-
  st_read(
    "https://raw.githubusercontent.com/gf0604-procesamientodatosgeograficos/2021i-datos/main/ign/delimitacion-territorial-administrativa/cr_cantones_simp_wgs84.geojson",
    quiet = TRUE
  )
# Spatial join with the cantons layer to obtain each record's canton name
junco_vulcani <-
  junco_vulcani %>%
  st_join(cantones["canton"])
# Table of presence records
junco_vulcani %>%
  st_drop_geometry() %>%
  select(stateProvince, canton, species, family, eventDate) %>%
  datatable(
    colnames = c("Provincia", "Cantón", "Especies", "Familia", "Fecha"),
    options = list(
      searchHighlight = TRUE,
      language = list(url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Spanish.json'),
      pageLength = 5
    )
  )
# NOTE(review): stray top-level assignment -- this creates a global object
# named `options` and looks like a leftover fragment of the datatable() call
# above; it has no effect on the widgets.
options = list(
  searchHighlight = TRUE,
  language = list(url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Spanish.json')
)
# Seasonality chart (records per month)
junco_vulcani %>%
  st_drop_geometry() %>%
  group_by(mes = format(as.Date(eventDate, "%Y-%m-%d"), "%m")) %>%
  summarize(suma_registros = n()) %>%
  filter(!is.na(mes)) %>%
  plot_ly(x = ~ mes,
          y = ~ suma_registros,
          type="scatter", mode="markers", fill = "tozeroy", fillcolor = "green") %>%
  layout(title = "Estacionalidad",
         xaxis = list(title = "Mes"),
         yaxis = list(title = "Cantidad de registros"))
# Pie chart
View(junco_vulcani)
ex_primates_cr <- data.frame("Categoria"=rownames(junco_vulcani), junco_vulcani)
# NOTE(review): `ex_junco_vulcani` is not defined anywhere in this script;
# `ex_primates_cr` (created on the previous line) was presumably intended.
primates_cr_data <- ex_junco_vulcani[,c('Categoria','species')]
# NOTE(review): labels and values below are hard-coded (two are primate
# species names and two are blank) rather than derived from the data.
fig <-
  plot_ly(
    labels = ~ c("Ateles geoffroyi", "Cebus capucinus", "", ""),
    values = ~ c(1994, 599, 453, 1463),
    type = 'pie') %>%
  config(locale = "es") %>% layout(
    title = 'Especies de Junco Volcanico',
    xaxis = list(
      showgrid = FALSE,
      zeroline = FALSE,
      showticklabels = FALSE
    ),
    yaxis = list(
      showgrid = FALSE,
      zeroline = FALSE,
      showticklabels = FALSE
    )
  )
fig
# Map of presence records
junco_vulcani %>%
  select(stateProvince,
         canton,
         locality,
         eventDate,
         decimalLongitude,
         decimalLatitude) %>%
  leaflet() %>%
  addProviderTiles(providers$OpenStreetMap.Mapnik, group = "OpenStreetMap") %>%
  addProviderTiles(providers$Stamen.TonerLite, group = "Stamen Toner Lite") %>%
  addProviderTiles(providers$Esri.WorldImagery, group = "Imágenes de ESRI") %>%
  addCircleMarkers(
    stroke = F,
    radius = 4,
    fillColor = 'gray',
    fillOpacity = 1,
    popup = paste(
      junco_vulcani$stateProvince,
      junco_vulcani$canton,
      junco_vulcani$locality,
      junco_vulcani$eventDate,
      junco_vulcani$decimalLongitude,
      junco_vulcani$decimalLatitude,
      sep = '<br/>'
    ),
    group = "Junco vulcani"
  ) %>%
  addLayersControl(
    baseGroups = c("OpenStreetMap", "Stamen Toner Lite", "Imágenes de ESRI"),
    overlayGroups = c("Junco vulcani")
  ) %>%
  addMiniMap(
    tiles = providers$Stamen.OpenStreetMap.Mapnik,
    position = "bottomleft",
    toggleDisplay = TRUE
  )
# Map of presence records
# NOTE(review): `primates_cr` is not defined in this script; this second map
# appears to be copied from a primates script and will fail as written.
primates_cr %>%
  select(stateProvince,
         canton,
         locality,
         eventDate,
         decimalLongitude,
         decimalLatitude) %>%
  leaflet() %>%
  addProviderTiles(providers$OpenStreetMap.Mapnik, group = "OpenStreetMap") %>%
  addProviderTiles(providers$Stamen.TonerLite, group = "Stamen Toner Lite") %>%
  addProviderTiles(providers$Esri.WorldImagery, group = "Imágenes de ESRI") %>%
  addCircleMarkers(
    stroke = F,
    radius = 4,
    fillColor = 'gray',
    fillOpacity = 1,
    popup = paste(
      primates_cr$stateProvince,
      primates_cr$canton,
      primates_cr$locality,
      primates_cr$eventDate,
      primates_cr$decimalLongitude,
      primates_cr$decimalLatitude,
      sep = '<br/>'
    ),
    group = "Primates"
  ) %>%
  addLayersControl(
    baseGroups = c("OpenStreetMap", "Stamen Toner Lite", "Imágenes de ESRI"),
    overlayGroups = c("Primates")
  ) %>%
  addMiniMap(
    tiles = providers$Stamen.OpenStreetMap.Mapnik,
    position = "bottomleft",
    toggleDisplay = TRUE
  )
| /Tarea #3.R | no_license | AndresQF88/Tarea_3 | R | false | false | 4,778 | r | library(dplyr)
library(ggplot2)
library(sf)
library(DT)
library(plotly)
library(leaflet)
library(jsonlite)
junco_vulcani <-
st_read(
"https://raw.githubusercontent.com/gf0604-procesamientodatosgeograficos/2021i-datos/main/gbif/junco_vulcani-cr-registros.csv",
options = c(
"X_POSSIBLE_NAMES=decimalLongitude",
"Y_POSSIBLE_NAMES=decimalLatitude"
),
quiet = TRUE
)
# Asignación de CRS
st_crs(junco_vulcani) = 4326
# Capa geespacial de cantones
cantones <-
st_read(
"https://raw.githubusercontent.com/gf0604-procesamientodatosgeograficos/2021i-datos/main/ign/delimitacion-territorial-administrativa/cr_cantones_simp_wgs84.geojson",
quiet = TRUE
)
# Cruce espacial con la tabla de cantones, para obtener el nombre del cantón
junco_vulcani <-
junco_vulcani %>%
st_join(cantones["canton"])
# Tabla de registros de presencia
junco_vulcani %>%
st_drop_geometry() %>%
select(stateProvince, canton, species, family, eventDate) %>%
datatable(
colnames = c("Provincia", "Cantón", "Especies", "Familia", "Fecha"),
options = list(
searchHighlight = TRUE,
language = list(url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Spanish.json'),
pageLength = 5
)
)
options = list(
searchHighlight = TRUE,
language = list(url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Spanish.json')
)
# Gráfico de estacionalidad
junco_vulcani %>%
st_drop_geometry() %>%
group_by(mes = format(as.Date(eventDate, "%Y-%m-%d"), "%m")) %>%
summarize(suma_registros = n()) %>%
filter(!is.na(mes)) %>%
plot_ly(x = ~ mes,
y = ~ suma_registros,
type="scatter", mode="markers", fill = "tozeroy", fillcolor = "green") %>%
layout(title = "Estacionalidad",
xaxis = list(title = "Mes"),
yaxis = list(title = "Cantidad de registros"))
# Pie chart of species
View(junco_vulcani)
ex_primates_cr <- data.frame("Categoria"=rownames(junco_vulcani), junco_vulcani)
# Fix: the original subset referenced `ex_junco_vulcani`, which is never
# defined anywhere in this script (it would error at run time); the data
# frame created on the previous line is `ex_primates_cr`.
primates_cr_data <- ex_primates_cr[,c('Categoria','species')]
# NOTE(review): the labels/values below are hard-coded (two labels are empty
# strings and the species names are primates, not juncos) instead of being
# derived from `primates_cr_data` — confirm the intended data source.
fig <-
  plot_ly(
    labels = ~ c("Ateles geoffroyi", "Cebus capucinus", "", ""),
    values = ~ c(1994, 599, 453, 1463),
    type = 'pie') %>%
  config(locale = "es") %>% layout(
    title = 'Especies de Junco Volcanico',
    xaxis = list(
      showgrid = FALSE,
      zeroline = FALSE,
      showticklabels = FALSE
    ),
    yaxis = list(
      showgrid = FALSE,
      zeroline = FALSE,
      showticklabels = FALSE
    )
  )
fig
# Map of presence records: circle markers over three switchable base layers,
# with a popup showing the record's attributes and a mini overview map.
junco_vulcani %>%
  select(stateProvince,
         canton,
         locality,
         eventDate,
         decimalLongitude,
         decimalLatitude) %>%
  leaflet() %>%
  addProviderTiles(providers$OpenStreetMap.Mapnik, group = "OpenStreetMap") %>%
  addProviderTiles(providers$Stamen.TonerLite, group = "Stamen Toner Lite") %>%
  addProviderTiles(providers$Esri.WorldImagery, group = "Imágenes de ESRI") %>%
  addCircleMarkers(
    stroke = F,
    radius = 4,
    fillColor = 'gray',
    fillOpacity = 1,
    # popup concatenates the attribute columns, one per line
    popup = paste(
      junco_vulcani$stateProvince,
      junco_vulcani$canton,
      junco_vulcani$locality,
      junco_vulcani$eventDate,
      junco_vulcani$decimalLongitude,
      junco_vulcani$decimalLatitude,
      sep = '<br/>'
    ),
    group = "Junco vulcani"
  ) %>%
  addLayersControl(
    baseGroups = c("OpenStreetMap", "Stamen Toner Lite", "Imágenes de ESRI"),
    overlayGroups = c("Junco vulcani")
  ) %>%
  addMiniMap(
    tiles = providers$Stamen.OpenStreetMap.Mapnik,
    position = "bottomleft",
    toggleDisplay = TRUE
  )
# Map of presence records (primates)
# NOTE(review): `primates_cr` is never defined in this script, so this whole
# chain errors at run time — presumably copied from another script where a
# primates dataset was loaded; confirm the intended data object.
primates_cr %>%
  select(stateProvince,
         canton,
         locality,
         eventDate,
         decimalLongitude,
         decimalLatitude) %>%
  leaflet() %>%
  addProviderTiles(providers$OpenStreetMap.Mapnik, group = "OpenStreetMap") %>%
  addProviderTiles(providers$Stamen.TonerLite, group = "Stamen Toner Lite") %>%
  addProviderTiles(providers$Esri.WorldImagery, group = "Imágenes de ESRI") %>%
  addCircleMarkers(
    stroke = F,
    radius = 4,
    fillColor = 'gray',
    fillOpacity = 1,
    # popup concatenates the attribute columns, one per line
    popup = paste(
      primates_cr$stateProvince,
      primates_cr$canton,
      primates_cr$locality,
      primates_cr$eventDate,
      primates_cr$decimalLongitude,
      primates_cr$decimalLatitude,
      sep = '<br/>'
    ),
    group = "Primates"
  ) %>%
  addLayersControl(
    baseGroups = c("OpenStreetMap", "Stamen Toner Lite", "Imágenes de ESRI"),
    overlayGroups = c("Primates")
  ) %>%
  addMiniMap(
    tiles = providers$Stamen.OpenStreetMap.Mapnik,
    position = "bottomleft",
    toggleDisplay = TRUE
  )
|
#' Calculate perceptual-distance between two (sets of) colors
#'
#' This returns the distance, according to the `method`,
#' between corresponding hex-colors in `hex` and `hex_ref`.
#'
#' If either `hex` or `hex_ref` has length one, it is recycled to the
#' length of the other; otherwise the two vectors must be the same length.
#'
#' @param hex `character` vector of hex-colors
#' @param hex_ref `character` vector of hex-colors
#' @param method `character` method to use for distance calculation,
#'   passed to `farver::compare_color()`.
#'   One of: `"euclidean"`, `"cie1976"`, `"cie94"`, `"cie2000"`, or `"cmc"`.
#'
#' @return `numeric` vector, same length as `hex` and `hex_ref`.
#' @examples
#' pev_hex_distance("#000000", "#FFFFFF")
#' pev_hex_distance(c("#000000", "#FFFFFF"), c("#000000", "#000000"))
#' pev_hex_distance(c("#000000", "#FFFFFF"), "#000000")
#' @export
#'
pev_hex_distance <- function(hex, hex_ref, method = "cie2000") {

  # Fix: `is_hexcolor()` is vectorized, but `assertthat::assert_that()`
  # requires each assertion to be a scalar logical, so the original
  # `assert_that(is_hexcolor(hex), ...)` failed for any multi-element input
  # (including the examples above). Wrap in `all()`, consistent with
  # `pev_hex_derivative()`.
  assertthat::assert_that(
    all(is_hexcolor(hex)),
    all(is_hexcolor(hex_ref)),
    method %in% c("euclidean", "cie1976", "cie94", "cie2000", "cmc")
  )

  # recycle length-1 inputs to match the other vector
  if (identical(length(hex), 1L)) {
    hex <- rep(hex, length(hex_ref))
  }

  if (identical(length(hex_ref), 1L)) {
    hex_ref <- rep(hex_ref, length(hex))
  }

  if (!identical(length(hex), length(hex_ref))) {
    stop("Cannot reconcile length of `hex` and `hex_ref`", call. = FALSE)
  }

  # convert each hex-color into a 1x3 RGB row-matrix, the input format
  # expected by farver::compare_colour()
  list_rgb <- function(x) {
    purrr::map(x, ~t(grDevices::col2rgb(.x)))
  }

  rgb <- list_rgb(hex)
  rgb_ref <- list_rgb(hex_ref)

  # element-wise perceptual distance between corresponding colors
  distance <-
    purrr::map2_dbl(
      rgb,
      rgb_ref,
      farver::compare_colour,
      from_space = "rgb",
      method = method
    )

  distance
}
#' Calculate perceptual-derivative for sequence of hex-colors
#'
#' This assumes that `hex` represents colors on a continuous-scale
#' where the domain varies uniformly from 0 to 1.
#'
#' The derivative is estimated with second-order finite differences:
#' central differences in the interior, and one-sided three-point
#' stencils at the two endpoints. At least three colors are required.
#'
#' @inheritParams pev_hex_distance
#'
#' @return `numeric`
#' @examples
#' pev_fcont("Viridis")(seq(0, 1, by = 0.025)) %>%
#'   pev_hex_derivative()
#' @export
#'
pev_hex_derivative <- function(hex, method = "cie2000") {

  # validate arguments
  assertthat::assert_that(
    all(is_hexcolor(hex)),
    method %in% c("euclidean", "cie1976", "cie94", "cie2000", "cmc"),
    # the endpoint stencils below index hex[3] and hex[n - 2], and the
    # interior sequence seq(2, n - 1) is only valid for n >= 3
    length(hex) >= 3
  )

  n <- length(hex)
  d_distance <- numeric(n)

  # uniform spacing of the domain over [0, 1]
  d_x <- 1 / (n - 1)

  # interior indices
  i <- seq(2, n - 1)

  dist <- function(hex, hex_ref) {
    pev_hex_distance(hex, hex_ref, method = method)
  }

  # one-sided three-point stencil at the first point,
  # f'(x_1) ~ (-3 f_1 + 4 f_2 - f_3) / (2 h), written in terms of distances
  d_distance[1] <- 4 * dist(hex[2], hex[1]) - dist(hex[3], hex[1])

  # central difference in the interior: f'(x_i) ~ (f_{i+1} - f_{i-1}) / (2 h)
  d_distance[i] <- dist(hex[i + 1], hex[i - 1])

  # one-sided three-point stencil at the last point
  d_distance[n] <- 4 * dist(hex[n], hex[n - 1]) - dist(hex[n], hex[n - 2])

  # all stencils above share the common divisor 2 * d_x
  d_distance_d_x <- d_distance / (2 * d_x)

  d_distance_d_x
}
| /R/hex-distance.R | permissive | ijlyttle/paleval | R | false | false | 2,619 | r | #' Calculate perceptual-distance between two (sets of) colors
#'
#' This returns the distance, according to the `method`,
#' between corresponding hex-colors in `hex` and `hex_ref`.
#'
#' The vectors `hex` and `hex_ref` must be the same length.
#'
#' @param hex `character` vector of hex-colors
#' @param hex_ref `character` vector of hex-colors
#' @param method `character` method to use for distance calculation,
#' passed to `farver::compare_color()`.
#' One of: `"euclidean"`, `"cie1976"`, `"cie94"`, `"cie2000"`, or `"cmc"`.
#'
#' @return `numeric` vector, same length as `hex` and `hex_ref`.
#' @examples
#' pev_hex_distance("#000000", "#FFFFFF")
#' pev_hex_distance(c("#000000", "#FFFFFF"), c("#000000", "#000000"))
#' pev_hex_distance(c("#000000", "#FFFFFF"), "#000000")
#' @export
#'
pev_hex_distance <- function(hex, hex_ref, method = "cie2000") {

  # Fix: `is_hexcolor()` is vectorized, but `assertthat::assert_that()`
  # requires each assertion to be a scalar logical, so the original
  # `assert_that(is_hexcolor(hex), ...)` failed for any multi-element input
  # (including the roxygen examples). Wrap in `all()`, consistent with
  # `pev_hex_derivative()`.
  assertthat::assert_that(
    all(is_hexcolor(hex)),
    all(is_hexcolor(hex_ref)),
    method %in% c("euclidean", "cie1976", "cie94", "cie2000", "cmc")
  )

  # recycle length-1 inputs to match the other vector
  if (identical(length(hex), 1L)) {
    hex <- rep(hex, length(hex_ref))
  }

  if (identical(length(hex_ref), 1L)) {
    hex_ref <- rep(hex_ref, length(hex))
  }

  if (!identical(length(hex), length(hex_ref))) {
    stop("Cannot reconcile length of `hex` and `hex_ref`", call. = FALSE)
  }

  # convert each hex-color into a 1x3 RGB row-matrix, the input format
  # expected by farver::compare_colour()
  list_rgb <- function(x) {
    purrr::map(x, ~t(grDevices::col2rgb(.x)))
  }

  rgb <- list_rgb(hex)
  rgb_ref <- list_rgb(hex_ref)

  # element-wise perceptual distance between corresponding colors
  distance <-
    purrr::map2_dbl(
      rgb,
      rgb_ref,
      farver::compare_colour,
      from_space = "rgb",
      method = method
    )

  distance
}
#' Calculate perceptual-derivative for sequence of hex-colors
#'
#' This assumes that `hex` represents colors on a continuous-scale
#' where the domain varies uniformly from 0 to 1.
#'
#' The derivative is estimated with second-order finite differences:
#' central differences in the interior, and one-sided three-point
#' stencils at the two endpoints. At least three colors are required.
#'
#' @inheritParams pev_hex_distance
#'
#' @return `numeric`
#' @examples
#' pev_fcont("Viridis")(seq(0, 1, by = 0.025)) %>%
#'   pev_hex_derivative()
#' @export
#'
pev_hex_derivative <- function(hex, method = "cie2000") {

  # validate arguments
  assertthat::assert_that(
    all(is_hexcolor(hex)),
    method %in% c("euclidean", "cie1976", "cie94", "cie2000", "cmc"),
    # the endpoint stencils below index hex[3] and hex[n - 2], and the
    # interior sequence seq(2, n - 1) is only valid for n >= 3
    length(hex) >= 3
  )

  n <- length(hex)
  d_distance <- numeric(n)

  # uniform spacing of the domain over [0, 1]
  d_x <- 1 / (n - 1)

  # interior indices
  i <- seq(2, n - 1)

  dist <- function(hex, hex_ref) {
    pev_hex_distance(hex, hex_ref, method = method)
  }

  # one-sided three-point stencil at the first point,
  # f'(x_1) ~ (-3 f_1 + 4 f_2 - f_3) / (2 h), written in terms of distances
  d_distance[1] <- 4 * dist(hex[2], hex[1]) - dist(hex[3], hex[1])

  # central difference in the interior: f'(x_i) ~ (f_{i+1} - f_{i-1}) / (2 h)
  d_distance[i] <- dist(hex[i + 1], hex[i - 1])

  # one-sided three-point stencil at the last point
  d_distance[n] <- 4 * dist(hex[n], hex[n - 1]) - dist(hex[n], hex[n - 2])

  # all stencils above share the common divisor 2 * d_x
  d_distance_d_x <- d_distance / (2 * d_x)

  d_distance_d_x
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/printCrudeAndAdjustedModel.R
\name{printCrudeAndAdjustedModel}
\alias{printCrudeAndAdjustedModel}
\alias{rbind.printCrudeAndAdjusted}
\alias{print.printCrudeAndAdjusted}
\alias{htmlTable.printCrudeAndAdjusted}
\alias{[.printCrudeAndAdjusted}
\alias{cbind.printCrudeAndAdjusted}
\alias{knit_print.printCrudeAndAdjusted}
\alias{latex.printCrudeAndAdjusted}
\title{Output crude and adjusted model data}
\usage{
printCrudeAndAdjustedModel(model, order, digits = 2, ci_lim = c(-Inf,
Inf), sprintf_ci_str = getOption("sprintf_ci_str", "\%s to \%s"),
add_references, add_references_pos, reference_zero_effect, groups,
rowname.fn, use_labels = TRUE, desc_column = FALSE,
desc_args = caDescribeOpts(digits = digits), impute_args, ...)
\method{rbind}{printCrudeAndAdjusted}(..., alt.names, deparse.level = 1)
\method{print}{printCrudeAndAdjusted}(x, css.rgroup = "", ...)
\method{htmlTable}{printCrudeAndAdjusted}(x, css.rgroup = "", ...)
\method{[}{printCrudeAndAdjusted}(x, i, j, ...)
\method{cbind}{printCrudeAndAdjusted}(..., alt.names, deparse.level = 1)
\method{knit_print}{printCrudeAndAdjusted}(x, css.rgroup = "", ...)
\method{latex}{printCrudeAndAdjusted}(object, ...)
}
\arguments{
\item{model}{A regression model fit, i.e. the returned object from your
regression function, or the output from \code{\link{getCrudeAndAdjustedModelData}()}}
\item{order}{A vector with regular expressions for each group, use if you
want to reorder the groups in another way than what you've used in your original
function. You can also use this in order to skip certain variables from the output.}
\item{digits}{The number of digits to round to}
\item{ci_lim}{A limit vector number that specifies if any values should be
abbreviated above or below this value, for instance a value of 1000
would give a value of \code{> -1000} for a value of 1001. This gives
a prettier table when you have very wide confidence intervals.}
\item{sprintf_ci_str}{A string according to \code{\link{sprintf}()} to
write the confidence interval where the first \%s is the lower and
the second the upper. You can choose to set this through setting the option
\code{sprintf_ci_str}, e.g. \code{options(sprintf_ci_str = "\%s - \%s")}.}
\item{add_references}{True if it should use the data set to look for
references, otherwise supply the function with a vector with names.
Sometimes you want to indicate the reference row for each group.
This needs to be just as many as the groups as the order identified.
Use NA if you don't want to have a reference for that particular group.}
\item{add_references_pos}{The position where a reference should be added.
Sometimes you don't want the reference to be at the top, for instance
if you have age groups then you may have < 25, 25-39, 40-55, > 55 and
you have the reference to be 25-39 then you should set the reference
list for \code{age_groups} as \code{add_references_pos = list(age_groups = 2)}
so that you have the second group as the position for the reference.}
\item{reference_zero_effect}{Used with references, tells if zero effect
is in exponential form, i.e. \code{exp(0) = 1}, or in regular format,
i.e. \code{0 = 0} (can be set to any value)}
\item{groups}{If you wish to have other than the default \code{rgroup} names
for the grouping parameter}
\item{rowname.fn}{A function that takes a row name and sees if it needs
beautifying. The function has only one parameter the coefficients name and
should return a string or expression.}
\item{use_labels}{If the rowname.fn function doesn't change the name then
the label should be used instead of the name, that is if there is a
label and it isn't a factor.}
\item{desc_column}{Add descriptive column to the crude and adjusted table}
\item{desc_args}{The description arguments that are to be used for the
the description columns. The options/arguments should be generated by the
\code{\link{caDescribeOpts}} function.}
\item{impute_args}{A list with additional arguments if the provided input is
a imputed object. Currently the list options \code{coef_change} and
\code{variance.inflation} are supported. If you want both columns then
the simplest way is to provide the list:
\code{list(coef_change=TRUE, variance.inflation=TRUE)}.
The \code{coef_change} adds a column with the change in coefficients due to
the imputation; the "raw" model is subtracted from the imputed results.
The "raw" model is the unimputed model, \code{coef(imputed_model) - coef(raw_model)}.
The \code{variance.inflation} adds the \code{variance.inflation.impute} from the
\code{\link[Hmisc]{fit.mult.impute}()} to a separate column. See the description
for the \code{variance.inflation.impute} in in the \code{\link[Hmisc]{fit.mult.impute}()}
description.
Both arguments can be customized by providing a \code{list}. The list can have
the elements \code{type}, \code{name}, \code{out_str}, and/or \code{digits}.
The \code{type} can for \code{coef_change}/\code{variance.impute} be either
"percent" or "ratio", note that \code{variance.inflation.impute} was not
originally intended to be interpreted as \%. The default for \code{coef_change} is to
have "diff", that gives the absolute difference in the coefficient.
The \code{name} provides the column name, the \code{out_str} should be a string
that is compatible with \code{\link[base]{sprintf}()} and also contains an argument
for accepting a float value, e.g. "\%.0f\%\%" is used by default in the coef_change
column. The \code{digits} can be used if you are not using the \code{out_str}
argument, it simply specifies the number of digits to show. See the example
for how for a working example.
\emph{Note} that currently only the \code{\link[Hmisc]{fit.mult.impute}()}
is supported by this option.}
\item{...}{Passed onto the Hmisc::\code{\link[Hmisc]{latex}()} function, or to
the \code{\link[htmlTable]{htmlTable}()} via the \code{\link[base]{print}()} call. Any variables that match
the formals of \code{\link{getCrudeAndAdjustedModelData}()} are identified
and passed on in case you have provided a model and not the returned element
from the \code{\link{getCrudeAndAdjustedModelData}()} call.}
\item{alt.names}{If you don't want to use named arguments for the tspanner attribute in the rbind
or the cgroup in the cbind but a vector with names then use this argument.}
\item{deparse.level}{backward compatibility}
\item{x}{The output object from the printCrudeAndAdjustedModel function}
\item{css.rgroup}{Css style for the rgroup, if different styles are wanted for each of the
rgroups you can just specify a vector with the number of elements. Passed on to \code{\link{htmlTable}()}.}
\item{object}{The output object from the printCrudeAndAdjustedModel function}
\item{...}{outputs from printCrudeAndAdjusted. If mixed then it defaults to rbind.data.frame}
}
\value{
\code{matrix} Returns a matrix of class printCrudeAndAdjusted that
has a default print method associated with
}
\description{
Prints table for a fitted object. It prints by default a latex table but can
also be converted into a HTML table that should be more compatible with common
word processors. For details run \code{vignette("printCrudeAndAdjustedModel")}
}
\section{Warning}{
If you call this function and you've changed any of the variables
used in the original call, i.e. the premises are changed, this function will not
remember the original values and the statistics will be faulty!
}
\examples{
# simulated data to use
set.seed(10)
ds <- data.frame(
ftime = rexp(200),
fstatus = sample(0:1,200,replace=TRUE),
Variable1 = runif(200),
Variable2 = runif(200),
Variable3 = runif(200),
Variable4 = factor(sample(LETTERS[1:4], size=200, replace=TRUE)))
library(rms)
dd <- datadist(ds)
options(datadist="dd")
fit <- cph(Surv(ftime, fstatus) ~ Variable1 + Variable3 + Variable2 + Variable4,
data=ds, x=TRUE, y=TRUE)
printCrudeAndAdjustedModel(fit, order = c("Variable[12]", "Variable3"))
printCrudeAndAdjustedModel(fit,
order=c("Variable3", "Variable4"),
add_references = TRUE,
desc_column=TRUE)
# Now to a missing example
n <- 500
ds <- data.frame(
x1 = factor(sample(LETTERS[1:4], size = n, replace = TRUE)),
x2 = rnorm(n, mean = 3, 2),
x3 = factor(sample(letters[1:3], size = n, replace = TRUE)))
ds$Missing_var1 <- factor(sample(letters[1:4], size=n, replace=TRUE))
ds$Missing_var2 <- factor(sample(letters[1:4], size=n, replace=TRUE))
ds$y <- rnorm(nrow(ds)) +
(as.numeric(ds$x1)-1) * 1 +
(as.numeric(ds$Missing_var1)-1)*1 +
(as.numeric(ds$Missing_var2)-1)*.5
# Create a messy missing variable
non_random_missing <- sample(which(ds$Missing_var1 \%in\% c("b", "d")),
size = 150, replace=FALSE)
# Restrict the non-random number on the x2 variables
non_random_missing <- non_random_missing[non_random_missing \%in\%
which(ds$x2 > mean(ds$x2)*1.5) &
non_random_missing \%in\%
which(ds$x2 > mean(ds$y))]
ds$Missing_var1[non_random_missing] <- NA
# Simple missing variable
ds$Missing_var2[sample(1:nrow(ds), size=50)] <- NA
# Setup the rms environment
ddist <- datadist(ds)
options(datadist = "ddist")
impute_formula <-
as.formula(paste("~",
paste(colnames(ds),
collapse="+")))
imp_ds <- aregImpute(impute_formula, data = ds, n.impute = 10)
fmult <- fit.mult.impute(y ~ x1 + x2 + x3 +
Missing_var1 + Missing_var2,
fitter = ols, xtrans = imp_ds, data = ds)
printCrudeAndAdjustedModel(fmult,
impute_args = list(variance.inflation=TRUE,
coef_change=list(type="diff",
digits=3)))
# Use some labels to prettify the output
# for the mtcars dataset
data("mtcars")
label(mtcars$mpg) <- "Gas"
units(mtcars$mpg) <- "Miles/(US) gallon"
label(mtcars$wt) <- "Weight"
units(mtcars$wt) <- "10^3 kg" # not sure the unit is correct
mtcars$am <- factor(mtcars$am, levels=0:1, labels=c("Automatic", "Manual"))
label(mtcars$am) <- "Transmission"
mtcars$gear <- factor(mtcars$gear)
label(mtcars$gear) <- "Gears"
# Make up some data for making it slightly more interesting
mtcars$col <- factor(sample(c("red", "black", "silver"), size=NROW(mtcars), replace=TRUE))
label(mtcars$col) <- "Car color"
require(splines)
fit_mtcar <- lm(mpg ~ wt + gear + col, data=mtcars)
printCrudeAndAdjustedModel(fit_mtcar,
add_references=TRUE,
ctable=TRUE,
desc_column = TRUE,
digits=1,
desc_args = caDescribeOpts(digits = 1,
colnames = c("Avg.")))
printCrudeAndAdjustedModel(fit_mtcar,
add_references=TRUE,
desc_column=TRUE,
order=c("Interc", "gear"))
# Alternative print - just an example, doesn't make sense to skip reference
printCrudeAndAdjustedModel(fit_mtcar,
order=c("col", "gear"),
groups=c("Color", "Gears"),
add_references=c("black", NA),
ctable=TRUE)
# Now we can also combine models into one table using rbind()
mpg_model <- printCrudeAndAdjustedModel(lm(mpg ~ wt + gear + col, data=mtcars),
add_references=TRUE,
ctable=TRUE,
desc_column = TRUE,
digits=1,
desc_args = caDescribeOpts(digits = 1,
colnames = c("Avg.")))
wt_model <- printCrudeAndAdjustedModel(lm(wt ~ mpg + gear + col, data=mtcars),
add_references=TRUE,
ctable=TRUE,
desc_column = TRUE,
digits=1,
desc_args = caDescribeOpts(digits = 1,
colnames = c("Avg.")))
library(magrittr)
rbind(Miles = mpg_model, Weight = wt_model) \%>\%
htmlTable(caption="Combining models together with a table spanner element separating each model")
}
\seealso{
\code{\link[Hmisc]{latex}()} for details.
Other printCrudeAndAdjusted functions: \code{\link{prCaAddRefAndStat}},
\code{\link{prCaAddReference}},
\code{\link{prCaAddUserReferences}},
\code{\link{prCaGetImputationCols}},
\code{\link{prCaGetRowname}},
\code{\link{prCaGetVnStats}},
\code{\link{prCaPrepareCrudeAndAdjusted}},
\code{\link{prCaReorderReferenceDescribe}},
\code{\link{prCaReorder}},
\code{\link{prCaSelectAndOrderVars}},
\code{\link{prCaSetRownames}}
}
\concept{printCrudeAndAdjusted functions}
\keyword{internal}
| /man/printCrudeAndAdjustedModel.Rd | no_license | guhjy/Greg | R | false | true | 13,246 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/printCrudeAndAdjustedModel.R
\name{printCrudeAndAdjustedModel}
\alias{printCrudeAndAdjustedModel}
\alias{rbind.printCrudeAndAdjusted}
\alias{print.printCrudeAndAdjusted}
\alias{htmlTable.printCrudeAndAdjusted}
\alias{[.printCrudeAndAdjusted}
\alias{cbind.printCrudeAndAdjusted}
\alias{knit_print.printCrudeAndAdjusted}
\alias{latex.printCrudeAndAdjusted}
\title{Output crude and adjusted model data}
\usage{
printCrudeAndAdjustedModel(model, order, digits = 2, ci_lim = c(-Inf,
Inf), sprintf_ci_str = getOption("sprintf_ci_str", "\%s to \%s"),
add_references, add_references_pos, reference_zero_effect, groups,
rowname.fn, use_labels = TRUE, desc_column = FALSE,
desc_args = caDescribeOpts(digits = digits), impute_args, ...)
\method{rbind}{printCrudeAndAdjusted}(..., alt.names, deparse.level = 1)
\method{print}{printCrudeAndAdjusted}(x, css.rgroup = "", ...)
\method{htmlTable}{printCrudeAndAdjusted}(x, css.rgroup = "", ...)
\method{[}{printCrudeAndAdjusted}(x, i, j, ...)
\method{cbind}{printCrudeAndAdjusted}(..., alt.names, deparse.level = 1)
\method{knit_print}{printCrudeAndAdjusted}(x, css.rgroup = "", ...)
\method{latex}{printCrudeAndAdjusted}(object, ...)
}
\arguments{
\item{model}{A regression model fit, i.e. the returned object from your
regression function, or the output from \code{\link{getCrudeAndAdjustedModelData}()}}
\item{order}{A vector with regular expressions for each group, use if you
want to reorder the groups in another way than what you've used in your original
function. You can also use this in order to skip certain variables from the output.}
\item{digits}{The number of digits to round to}
\item{ci_lim}{A limit vector number that specifies if any values should be
abbreviated above or below this value, for instance a value of 1000
would give a value of \code{> -1000} for a value of 1001. This gives
a prettier table when you have very wide confidence intervals.}
\item{sprintf_ci_str}{A string according to \code{\link{sprintf}()} to
write the confidence interval where the first \%s is the lower and
the second the upper. You can choose to set this through setting the option
\code{sprintf_ci_str}, e.g. \code{options(sprintf_ci_str = "\%s - \%s")}.}
\item{add_references}{True if it should use the data set to look for
references, otherwise supply the function with a vector with names.
Sometimes you want to indicate the reference row for each group.
This needs to be just as many as the groups as the order identified.
Use NA if you don't want to have a reference for that particular group.}
\item{add_references_pos}{The position where a reference should be added.
Sometimes you don't want the reference to be at the top, for instance
if you have age groups then you may have < 25, 25-39, 40-55, > 55 and
you have the reference to be 25-39 then you should set the reference
list for \code{age_groups} as \code{add_references_pos = list(age_groups = 2)}
so that you have the second group as the position for the reference.}
\item{reference_zero_effect}{Used with references, tells if zero effect
is in exponential form, i.e. \code{exp(0) = 1}, or in regular format,
i.e. \code{0 = 0} (can be set to any value)}
\item{groups}{If you wish to have other than the default \code{rgroup} names
for the grouping parameter}
\item{rowname.fn}{A function that takes a row name and sees if it needs
beautifying. The function has only one parameter the coefficients name and
should return a string or expression.}
\item{use_labels}{If the rowname.fn function doesn't change the name then
the label should be used instead of the name, that is if there is a
label and it isn't a factor.}
\item{desc_column}{Add descriptive column to the crude and adjusted table}
\item{desc_args}{The description arguments that are to be used for the
the description columns. The options/arguments should be generated by the
\code{\link{caDescribeOpts}} function.}
\item{impute_args}{A list with additional arguments if the provided input is
a imputed object. Currently the list options \code{coef_change} and
\code{variance.inflation} are supported. If you want both columns then
the simplest way is to provide the list:
\code{list(coef_change=TRUE, variance.inflation=TRUE)}.
The \code{coef_change} adds a column with the change in coefficients due to
the imputation; the "raw" model is subtracted from the imputed results.
The "raw" model is the unimputed model, \code{coef(imputed_model) - coef(raw_model)}.
The \code{variance.inflation} adds the \code{variance.inflation.impute} from the
\code{\link[Hmisc]{fit.mult.impute}()} to a separate column. See the description
for the \code{variance.inflation.impute} in in the \code{\link[Hmisc]{fit.mult.impute}()}
description.
Both arguments can be customized by providing a \code{list}. The list can have
the elements \code{type}, \code{name}, \code{out_str}, and/or \code{digits}.
The \code{type} can for \code{coef_change}/\code{variance.impute} be either
"percent" or "ratio", note that \code{variance.inflation.impute} was not
originally intended to be interpreted as \%. The default for \code{coef_change} is to
have "diff", that gives the absolute difference in the coefficient.
The \code{name} provides the column name, the \code{out_str} should be a string
that is compatible with \code{\link[base]{sprintf}()} and also contains an argument
for accepting a float value, e.g. "\%.0f\%\%" is used by default in the coef_change
column. The \code{digits} can be used if you are not using the \code{out_str}
argument, it simply specifies the number of digits to show. See the example
for how for a working example.
\emph{Note} that currently only the \code{\link[Hmisc]{fit.mult.impute}()}
is supported by this option.}
\item{...}{Passed onto the Hmisc::\code{\link[Hmisc]{latex}()} function, or to
the \code{\link[htmlTable]{htmlTable}()} via the \code{\link[base]{print}()} call. Any variables that match
the formals of \code{\link{getCrudeAndAdjustedModelData}()} are identified
and passed on in case you have provided a model and not the returned element
from the \code{\link{getCrudeAndAdjustedModelData}()} call.}
\item{alt.names}{If you don't want to use named arguments for the tspanner attribute in the rbind
or the cgroup in the cbind but a vector with names then use this argument.}
\item{deparse.level}{backward compatibility}
\item{x}{The output object from the printCrudeAndAdjustedModel function}
\item{css.rgroup}{Css style for the rgroup, if different styles are wanted for each of the
rgroups you can just specify a vector with the number of elements. Passed on to \code{\link{htmlTable}()}.}
\item{object}{The output object from the printCrudeAndAdjustedModel function}
\item{...}{outputs from printCrudeAndAdjusted. If mixed then it defaults to rbind.data.frame}
}
\value{
\code{matrix} Returns a matrix of class printCrudeAndAdjusted that
has a default print method associated with
}
\description{
Prints table for a fitted object. It prints by default a latex table but can
also be converted into a HTML table that should be more compatible with common
word processors. For details run \code{vignette("printCrudeAndAdjustedModel")}
}
\section{Warning}{
If you call this function and you've changed any of the variables
used in the original call, i.e. the premises are changed, this function will not
remember the original values and the statistics will be faulty!
}
\examples{
# simulated data to use
set.seed(10)
ds <- data.frame(
ftime = rexp(200),
fstatus = sample(0:1,200,replace=TRUE),
Variable1 = runif(200),
Variable2 = runif(200),
Variable3 = runif(200),
Variable4 = factor(sample(LETTERS[1:4], size=200, replace=TRUE)))
library(rms)
dd <- datadist(ds)
options(datadist="dd")
fit <- cph(Surv(ftime, fstatus) ~ Variable1 + Variable3 + Variable2 + Variable4,
data=ds, x=TRUE, y=TRUE)
printCrudeAndAdjustedModel(fit, order = c("Variable[12]", "Variable3"))
printCrudeAndAdjustedModel(fit,
order=c("Variable3", "Variable4"),
add_references = TRUE,
desc_column=TRUE)
# Now to a missing example
n <- 500
ds <- data.frame(
x1 = factor(sample(LETTERS[1:4], size = n, replace = TRUE)),
x2 = rnorm(n, mean = 3, 2),
x3 = factor(sample(letters[1:3], size = n, replace = TRUE)))
ds$Missing_var1 <- factor(sample(letters[1:4], size=n, replace=TRUE))
ds$Missing_var2 <- factor(sample(letters[1:4], size=n, replace=TRUE))
ds$y <- rnorm(nrow(ds)) +
(as.numeric(ds$x1)-1) * 1 +
(as.numeric(ds$Missing_var1)-1)*1 +
(as.numeric(ds$Missing_var2)-1)*.5
# Create a messy missing variable
non_random_missing <- sample(which(ds$Missing_var1 \%in\% c("b", "d")),
size = 150, replace=FALSE)
# Restrict the non-random number on the x2 variables
non_random_missing <- non_random_missing[non_random_missing \%in\%
which(ds$x2 > mean(ds$x2)*1.5) &
non_random_missing \%in\%
which(ds$x2 > mean(ds$y))]
ds$Missing_var1[non_random_missing] <- NA
# Simple missing variable
ds$Missing_var2[sample(1:nrow(ds), size=50)] <- NA
# Setup the rms environment
ddist <- datadist(ds)
options(datadist = "ddist")
impute_formula <-
as.formula(paste("~",
paste(colnames(ds),
collapse="+")))
imp_ds <- aregImpute(impute_formula, data = ds, n.impute = 10)
fmult <- fit.mult.impute(y ~ x1 + x2 + x3 +
Missing_var1 + Missing_var2,
fitter = ols, xtrans = imp_ds, data = ds)
printCrudeAndAdjustedModel(fmult,
impute_args = list(variance.inflation=TRUE,
coef_change=list(type="diff",
digits=3)))
# Use some labels to prettify the output
# for the mtcars dataset
data("mtcars")
label(mtcars$mpg) <- "Gas"
units(mtcars$mpg) <- "Miles/(US) gallon"
label(mtcars$wt) <- "Weight"
units(mtcars$wt) <- "10^3 kg" # not sure the unit is correct
mtcars$am <- factor(mtcars$am, levels=0:1, labels=c("Automatic", "Manual"))
label(mtcars$am) <- "Transmission"
mtcars$gear <- factor(mtcars$gear)
label(mtcars$gear) <- "Gears"
# Make up some data for making it slightly more interesting
mtcars$col <- factor(sample(c("red", "black", "silver"), size=NROW(mtcars), replace=TRUE))
label(mtcars$col) <- "Car color"
require(splines)
fit_mtcar <- lm(mpg ~ wt + gear + col, data=mtcars)
printCrudeAndAdjustedModel(fit_mtcar,
add_references=TRUE,
ctable=TRUE,
desc_column = TRUE,
digits=1,
desc_args = caDescribeOpts(digits = 1,
colnames = c("Avg.")))
printCrudeAndAdjustedModel(fit_mtcar,
add_references=TRUE,
desc_column=TRUE,
order=c("Interc", "gear"))
# Alternative print - just an example, doesn't make sense to skip reference
printCrudeAndAdjustedModel(fit_mtcar,
order=c("col", "gear"),
groups=c("Color", "Gears"),
add_references=c("black", NA),
ctable=TRUE)
# Now we can also combine models into one table using rbind()
mpg_model <- printCrudeAndAdjustedModel(lm(mpg ~ wt + gear + col, data=mtcars),
add_references=TRUE,
ctable=TRUE,
desc_column = TRUE,
digits=1,
desc_args = caDescribeOpts(digits = 1,
colnames = c("Avg.")))
wt_model <- printCrudeAndAdjustedModel(lm(wt ~ mpg + gear + col, data=mtcars),
add_references=TRUE,
ctable=TRUE,
desc_column = TRUE,
digits=1,
desc_args = caDescribeOpts(digits = 1,
colnames = c("Avg.")))
library(magrittr)
rbind(Miles = mpg_model, Weight = wt_model) \%>\%
htmlTable(caption="Combining models together with a table spanner element separating each model")
}
\seealso{
\code{\link[Hmisc]{latex}()} for details.
Other printCrudeAndAdjusted functions: \code{\link{prCaAddRefAndStat}},
\code{\link{prCaAddReference}},
\code{\link{prCaAddUserReferences}},
\code{\link{prCaGetImputationCols}},
\code{\link{prCaGetRowname}},
\code{\link{prCaGetVnStats}},
\code{\link{prCaPrepareCrudeAndAdjusted}},
\code{\link{prCaReorderReferenceDescribe}},
\code{\link{prCaReorder}},
\code{\link{prCaSelectAndOrderVars}},
\code{\link{prCaSetRownames}}
}
\concept{printCrudeAndAdjusted functions}
\keyword{internal}
|
# Create a fresh (not-yet-existing) file dir path for a new batchtools
# registry inside the parallelMap storage dir, remember it in the
# "parallelMap.bt.reg.filedir" option, and return the path.
getBatchtoolsNewRegFileDir = function() {
  reg_file_dir = tempfile(
    pattern = "parallelMap_batchtools_reg_",
    tmpdir = getPMOptStorageDir()
  )
  options(parallelMap.bt.reg.filedir = reg_file_dir)
  reg_file_dir
}
# Return the file dir of the current batchtools registry, as stored in the
# "parallelMap.bt.reg.filedir" option by getBatchtoolsNewRegFileDir().
getBatchtoolsRegFileDir = function() {
  getOption("parallelMap.bt.reg.filedir")
}
# Load the batchtools registry from the stored file dir, forwarding only
# those user-supplied batchtools arguments that loadRegistry() accepts.
getBatchtoolsReg = function() {
  bt.args = getPMOptBatchtoolsArgs()
  accepted = intersect(names(bt.args), names(formals(batchtools::loadRegistry)))
  call.args = c(list(file.dir = getBatchtoolsRegFileDir()), bt.args[accepted])
  do.call(batchtools::loadRegistry, args = call.args)
}
| /R/batchtools.R | no_license | imbs-hl/parallelMap | R | false | false | 524 | r | getBatchtoolsNewRegFileDir = function() {
fd = tempfile(pattern = "parallelMap_batchtools_reg_",
tmpdir = getPMOptStorageDir())
options(parallelMap.bt.reg.filedir = fd)
return(fd)
}
# Return the file dir of the current batchtools registry, as stored in the
# "parallelMap.bt.reg.filedir" option by getBatchtoolsNewRegFileDir().
getBatchtoolsRegFileDir = function() {
  getOption("parallelMap.bt.reg.filedir")
}
# Load the batchtools registry from the stored file dir, forwarding only
# those user-supplied batchtools arguments that loadRegistry() accepts.
getBatchtoolsReg = function() {
  bt.args = getPMOptBatchtoolsArgs()
  accepted = intersect(names(bt.args), names(formals(batchtools::loadRegistry)))
  call.args = c(list(file.dir = getBatchtoolsRegFileDir()), bt.args[accepted])
  do.call(batchtools::loadRegistry, args = call.args)
}
|
# Extracted example script for MapGAM::colormap (help-page examples).
library(MapGAM)
### Name: colormap
### Title: Maps Predicted Values and Clusters on a Two-Dimentional Map
### Aliases: colormap
### Keywords: hplot misc smooth
### ** Examples
# Massachusetts example data: point coordinates with mercury values, plus a
# base map polygon shipped with MapGAM.
data(MAdata)
data(MAmap)
obj <- list(grid=data.frame(MAdata$Xcoord,MAdata$Ycoord),fit=MAdata$Mercury)
colormap(obj, MAmap, legend.name = "mercury")
# map the same data using a divergent color palette anchored to the median
if (require(colorspace)) {
  newpal <- diverge_hsv(201) # from the colorspace library
  colormap(obj, MAmap, legend.name = "mercury", col.seq = newpal,
	legend.add.line=round(median(obj$fit),2), anchor = TRUE)
}
| /data/genthat_extracted_code/MapGAM/examples/colormap.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 624 | r | library(MapGAM)
### Name: colormap
### Title: Maps Predicted Values and Clusters on a Two-Dimentional Map
### Aliases: colormap
### Keywords: hplot misc smooth
### ** Examples
data(MAdata)
data(MAmap)
obj <- list(grid=data.frame(MAdata$Xcoord,MAdata$Ycoord),fit=MAdata$Mercury)
colormap(obj, MAmap, legend.name = "mercury")
# map the same data using a divergent color palette anchored to the median
if (require(colorspace)) {
newpal <- diverge_hsv(201) # from the colorspace library
colormap(obj, MAmap, legend.name = "mercury", col.seq = newpal,
legend.add.line=round(median(obj$fit),2), anchor = TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{v}
\alias{v}
\title{Returns a list of column names from the data dictionary for which the column
named in the first argument is true. The first arg can be either a string or
a name. The second must be a data.frame}
\usage{
v(
var,
dat,
retcol = getOption("tb.retcol", "column"),
dictionary = get("dct0"),
asname = F
)
}
\arguments{
\item{var}{Either a string or a name, of a column in `dictionary`}
\item{dat}{An optional data.frame, to constrain which rows of the
'dictionary' object get used}
\item{retcol}{Which column to return-- by default the same as used for 'matchcol'}
\item{dictionary}{A 'data.frame' that is used as a data dictionary. It must at
minimum contain a column of column-names for the dataset for
which it is a data dictionary ('matchcol') and one or more
columns each representing a _group_ of columns in the dataset,
such that a TRUE or T value means the column whose name is
the value of 'matchcol' is the name of a column in the data
that belongs to the group defined by the grouping column.
These grouping columns are what the argument 'var' is
supposed to refer to. We will use the convention that grouping
column names begin with 'c_' but this convention is not
(currently) enforced programmatically.}
}
\description{
Returns a list of column names from the data dictionary for which the column
named in the first argument is true. The first arg can be either a string or
a name. The second must be a data.frame
}
\examples{
dct0 <- tblinfo(mtcars);
v();
# Numeric variables in mtcars that behave like discrete variables
v(c_ordinal);
# Numeric variables in mtcars
v(c_numeric);
# Variables in mtcars that only have two values, so could be encoded as
# boolean
v(c_tf);
# Non-default data dictionary
dct1 <- tblinfo(state.x77)
v(c_ordinal,dict=dct1)
v(c_factor,dict=dct1)
v(c_tf,dict=dct1)
v(c_numeric,dict=dct1)
}
| /man/v.Rd | permissive | bokov/tidbits | R | false | true | 1,957 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{v}
\alias{v}
\title{Returns a list of column names from the data dictionary for which the column
named in the first argument is true. The first arg can be either a string or
a name. The second must be a data.frame}
\usage{
v(
var,
dat,
retcol = getOption("tb.retcol", "column"),
dictionary = get("dct0"),
asname = F
)
}
\arguments{
\item{var}{Either a string or a name, of a column in `dictionary`}
\item{dat}{An optional data.frame, to constrain which rows of the
'dictionary' object get used}
\item{retcol}{Which column to return-- by default the same as used for 'matchcol'}
\item{dictionary}{A 'data.frame' that is used as a data dictionary. It must at
minimum contain a column of column-names for the dataset for
which it is a data dictionary ('matchcol') and one or more
columns each representing a _group_ of columns in the dataset,
such that a TRUE or T value means the column whose name is
the value of 'matchcol' is the name of a column in the data
that belongs to the group defined by the grouping column.
These grouping columns are what the argument 'var' is
supposed to refer to. We will use the convention that grouping
column names begin with 'c_' but this convention is not
(currently) enforced programmatically.}
}
\description{
Returns a list of column names from the data dictionary for which the column
named in the first argument is true. The first arg can be either a string or
a name. The second must be a data.frame
}
\examples{
dct0 <- tblinfo(mtcars);
v();
# Numeric variables in mtcars that behave like discrete variables
v(c_ordinal);
# Numeric variables in mtcars
v(c_numeric);
# Variables in mtcars that only have two values, so could be encoded as
# boolean
v(c_tf);
# Non-default data dictionary
dct1 <- tblinfo(state.x77)
v(c_ordinal,dict=dct1)
v(c_factor,dict=dct1)
v(c_tf,dict=dct1)
v(c_numeric,dict=dct1)
}
|
library(shiny)
library(tidyverse)
library(hkdata)
library(DT)
library(leaflet)
library(rgdal)
# for converting the northing and easting into lon
# https://stackoverflow.com/questions/36520915/converting-utms-to-lat-long-in-r
# https://medium.com/@eric_hk/dcca-boundary-map-99edb31b62ca
wgs84 = "+init=epsg:4326"
hk1980 = "+init=epsg:2326"
ConvertCoordinates <- function(easting,northing) {
out = cbind(easting,northing)
mask = !is.na(easting)
sp <- sp::spTransform(sp::SpatialPoints(list(easting[mask],northing[mask]),
proj4string=sp::CRS(hk1980)),
sp::CRS(wgs84))
out[mask,]=sp@coords
out
}
id_names <- tribble(
~id, ~datum,
"no2", "Nitrogen Dioxide (ppb)",
"no", "Nitrogen Monoxide (ppb)",
"o3", "Ozone (ppb)",
"co", "Carbon Monoxide (ppm)",
"radiation", "Ultraviolet Radiation (uW/cm^2)",
"pm10", "PM10 (ug/m^3)",
"pm25", "PM2.5 (ug/m^3)",
"pm1", "PM1 (ug/m^3)",
"temperature_45", "Temperature at 4.5m above Ground (degree Celsius)",
"humidity_45","Relative Humidity (%)",
"temperature_2", "Temperature at 2m above Ground (degree Celsius)",
"humidity_2", "Relative Humidity at 2m above Ground (%)",
"pressure", "Atospheric Pressure (hPa)",
"windspeed", "Wind Speed (m/s)",
"winddirection", "Wind Direction (bearing in degree)",
"vehiclecount_e", "Daily Cumulative Number of Vehicles (Eastbound)",
"vehiclecount_w", "Daily Cumulative Number of Vehicles (Westbound)",
"vehiclecount_in", "Daily Cumulative Number of Vehicles (In)",
"vehiclecount_out", "Daily Cumulative Number of Vehicles (Out)",
"peoplecount", "Daily Cumulative Number of Pedestrians"
)
value_ids <- c("no2", "no", "o3", "co",
"radiation",
"pm10", "pm25", "pm1",
"temperature_45",
"humidity_45",
"temperature_2",
"humidity_2",
"pressure", "windspeed")
value_names <- c("Nitrogen Dioxide (ppb)", "Nitrogen Monoxide (ppb)", "Ozone (ppb)", "Carbon Monoxide (ppm)",
"Ultraviolet Radiation (uW/cm^2)",
"PM10 (ug/m^3)", "PM2.5 (ug/m^3)", "PM1 (ug/m^3)",
"Temperature at 4.5m above Ground (degree Celsius)",
"Relative Humidity (%)",
"Temperature at 2m above Ground (degree Celsius)",
"Relative Humidity at 2m above Ground (%)",
"Atospheric Pressure (hPa)", "Wind Speed (m/s)")
##
actionButton1 <- function(inputId, label, icon = NULL, width = NULL,
status = "default", ...) {
value <- restoreInput(id = inputId, default = NULL)
cs <- sprintf("btn %s action-button", paste0("btn-", status))
tags$button(id = inputId, style = if (!is.null(width))
paste0("width: ", validateCssUnit(width), ";"), type = "button",
class = cs, `data-val` = value,
list(shiny:::validateIcon(icon), label), ...)
}
ui <- fluidPage(
titlePanel("Real-time city data collected by multi-purpose lamp posts in Kowloon East"),
# fluidRow(column(
# 12,
# textAreaInput("address", "Enter Address to Parse", cols = 200, rows = 3),
# actionButton1("submit", "Submit", status = "primary")
# )),
# hr(style = "border: 3px solid green;"),
hr(),
radioButtons("datum", "Show the Value of",
choiceNames = value_names,
choiceValues = value_ids,
inline = TRUE),
hr(),
h5("Refresh every minute."),
textOutput("update_date"),
fluidRow(column(
6,
DT::dataTableOutput("lamp_post")
),
column(
6,
leafletOutput("map")
)),
hr()
)
server <- function(input, output) {
data <- reactive({
# refresh every 30 seconds
invalidateLater(30000)
# get the real-time data, for the current minute
res <- lamp_posts_data() %>%
mutate_at(vars(hk1980_northing, hk1980_easting, value), as.numeric)
lat_lng <- ConvertCoordinates(res$hk1980_easting, res$hk1980_northing)
res$lng <- lat_lng[,"easting"]
res$lat <- lat_lng[,"northing"]
res
})
lamp_posts <- reactive({
data() %>%
select(-id, -value) %>%
unique() %>%
mutate(label = paste0("<b>", fullname_en, "</b><br>\n",
update_date, "<br>",
"lat: ", lat, "<br>",
"lng: ", lng, "<br>"))
})
output$update_date <- renderText(max(data()$update_date))
# with clickable marker
# https://www.r-graph-gallery.com/4-tricks-for-working-with-r-leaflet-and-shiny/
# create a reactive value that will store the click position
data_of_click <- reactiveValues(clickedMarker=NULL)
output$map <- renderLeaflet({
data_pol <- data() %>% filter(id == input$datum)
pal <- colorNumeric(palette = c("green", "red"), domain = data_pol$value)
leaflet() %>%
addTiles() %>%
addMarkers(data = lamp_posts(),
lng = ~lng, lat = ~lat,
layerId = ~name, popup = ~label) %>%
# color pallete
# https://stackoverflow.com/questions/48002096/how-to-change-circle-colours-in-leaflet-based-on-a-variable
addCircles(lng = data_pol$lng, lat = data_pol$lat,
opacity = 0.9, fillOpacity = 0.5,
weight = 1, radius = 100, color = pal(data_pol$value))
})
# store the click
observeEvent(input$map_marker_click,{
data_of_click$clickedMarker <- input$map_marker_click
})
output$lamp_post <- DT::renderDataTable({
data() %>%
filter(name == data_of_click$clickedMarker$id) %>%
inner_join(id_names, by = "id") %>%
select(datum, value)
})
}
shinyApp(ui, server)
| /inst/lamp_post_data/app.R | no_license | XiangdongGu/hkdata | R | false | false | 5,711 | r | library(shiny)
library(tidyverse)
library(hkdata)
library(DT)
library(leaflet)
library(rgdal)
# for converting the northing and easting into lon
# https://stackoverflow.com/questions/36520915/converting-utms-to-lat-long-in-r
# https://medium.com/@eric_hk/dcca-boundary-map-99edb31b62ca
wgs84 = "+init=epsg:4326"
hk1980 = "+init=epsg:2326"
ConvertCoordinates <- function(easting,northing) {
out = cbind(easting,northing)
mask = !is.na(easting)
sp <- sp::spTransform(sp::SpatialPoints(list(easting[mask],northing[mask]),
proj4string=sp::CRS(hk1980)),
sp::CRS(wgs84))
out[mask,]=sp@coords
out
}
id_names <- tribble(
~id, ~datum,
"no2", "Nitrogen Dioxide (ppb)",
"no", "Nitrogen Monoxide (ppb)",
"o3", "Ozone (ppb)",
"co", "Carbon Monoxide (ppm)",
"radiation", "Ultraviolet Radiation (uW/cm^2)",
"pm10", "PM10 (ug/m^3)",
"pm25", "PM2.5 (ug/m^3)",
"pm1", "PM1 (ug/m^3)",
"temperature_45", "Temperature at 4.5m above Ground (degree Celsius)",
"humidity_45","Relative Humidity (%)",
"temperature_2", "Temperature at 2m above Ground (degree Celsius)",
"humidity_2", "Relative Humidity at 2m above Ground (%)",
"pressure", "Atospheric Pressure (hPa)",
"windspeed", "Wind Speed (m/s)",
"winddirection", "Wind Direction (bearing in degree)",
"vehiclecount_e", "Daily Cumulative Number of Vehicles (Eastbound)",
"vehiclecount_w", "Daily Cumulative Number of Vehicles (Westbound)",
"vehiclecount_in", "Daily Cumulative Number of Vehicles (In)",
"vehiclecount_out", "Daily Cumulative Number of Vehicles (Out)",
"peoplecount", "Daily Cumulative Number of Pedestrians"
)
value_ids <- c("no2", "no", "o3", "co",
"radiation",
"pm10", "pm25", "pm1",
"temperature_45",
"humidity_45",
"temperature_2",
"humidity_2",
"pressure", "windspeed")
value_names <- c("Nitrogen Dioxide (ppb)", "Nitrogen Monoxide (ppb)", "Ozone (ppb)", "Carbon Monoxide (ppm)",
"Ultraviolet Radiation (uW/cm^2)",
"PM10 (ug/m^3)", "PM2.5 (ug/m^3)", "PM1 (ug/m^3)",
"Temperature at 4.5m above Ground (degree Celsius)",
"Relative Humidity (%)",
"Temperature at 2m above Ground (degree Celsius)",
"Relative Humidity at 2m above Ground (%)",
"Atospheric Pressure (hPa)", "Wind Speed (m/s)")
##
actionButton1 <- function(inputId, label, icon = NULL, width = NULL,
status = "default", ...) {
value <- restoreInput(id = inputId, default = NULL)
cs <- sprintf("btn %s action-button", paste0("btn-", status))
tags$button(id = inputId, style = if (!is.null(width))
paste0("width: ", validateCssUnit(width), ";"), type = "button",
class = cs, `data-val` = value,
list(shiny:::validateIcon(icon), label), ...)
}
ui <- fluidPage(
titlePanel("Real-time city data collected by multi-purpose lamp posts in Kowloon East"),
# fluidRow(column(
# 12,
# textAreaInput("address", "Enter Address to Parse", cols = 200, rows = 3),
# actionButton1("submit", "Submit", status = "primary")
# )),
# hr(style = "border: 3px solid green;"),
hr(),
radioButtons("datum", "Show the Value of",
choiceNames = value_names,
choiceValues = value_ids,
inline = TRUE),
hr(),
h5("Refresh every minute."),
textOutput("update_date"),
fluidRow(column(
6,
DT::dataTableOutput("lamp_post")
),
column(
6,
leafletOutput("map")
)),
hr()
)
server <- function(input, output) {
data <- reactive({
# refresh every 30 seconds
invalidateLater(30000)
# get the real-time data, for the current minute
res <- lamp_posts_data() %>%
mutate_at(vars(hk1980_northing, hk1980_easting, value), as.numeric)
lat_lng <- ConvertCoordinates(res$hk1980_easting, res$hk1980_northing)
res$lng <- lat_lng[,"easting"]
res$lat <- lat_lng[,"northing"]
res
})
lamp_posts <- reactive({
data() %>%
select(-id, -value) %>%
unique() %>%
mutate(label = paste0("<b>", fullname_en, "</b><br>\n",
update_date, "<br>",
"lat: ", lat, "<br>",
"lng: ", lng, "<br>"))
})
output$update_date <- renderText(max(data()$update_date))
# with clickable marker
# https://www.r-graph-gallery.com/4-tricks-for-working-with-r-leaflet-and-shiny/
# create a reactive value that will store the click position
data_of_click <- reactiveValues(clickedMarker=NULL)
output$map <- renderLeaflet({
data_pol <- data() %>% filter(id == input$datum)
pal <- colorNumeric(palette = c("green", "red"), domain = data_pol$value)
leaflet() %>%
addTiles() %>%
addMarkers(data = lamp_posts(),
lng = ~lng, lat = ~lat,
layerId = ~name, popup = ~label) %>%
# color pallete
# https://stackoverflow.com/questions/48002096/how-to-change-circle-colours-in-leaflet-based-on-a-variable
addCircles(lng = data_pol$lng, lat = data_pol$lat,
opacity = 0.9, fillOpacity = 0.5,
weight = 1, radius = 100, color = pal(data_pol$value))
})
# store the click
observeEvent(input$map_marker_click,{
data_of_click$clickedMarker <- input$map_marker_click
})
output$lamp_post <- DT::renderDataTable({
data() %>%
filter(name == data_of_click$clickedMarker$id) %>%
inner_join(id_names, by = "id") %>%
select(datum, value)
})
}
shinyApp(ui, server)
|
.libPaths(c("C:/Users/bhanu/Documents/R/win-library/3.3","C:/Program Files/R/R-3.3.2/library"))
library(dplyr)
library(cluster)
library(WRS2)
x<-read.csv("limitval.csv",header = FALSE)
y <- x$V1
#args <- commandArgs(TRUE)
#op <- args[1]
#str(op)
#Robost estimators(resistant to outliers) for dirty data
#Winsorized mean
wm <- winmean(y,0.2)
#median high beak even point
me <- median(y)
#huber M-estimator default K=1.28
hm <- mest(y)
#mean lowest break point
m <- mean(y,0.2)
#mad
val<- cbind(wm,hm,m)
f_val <- round(min(val))
#f_val <- round(hm)
f_val
#plot
png(filename="temp.png", width=500, height=500)
hist(y,col = "ghostwhite",border = "black",prob= TRUE,xlab = "Limit values")
lines(density(y),lwd =2,col="royalblue4")
#plot mean
abline(v = mean(y,0.2),col="green4")
#meadian
abline(v=median(y),col = "red")
#winmean
abline(v = winmean(y,0.1),col = "sienna4" )
#huber m estimator
abline(v = mest(y),col = "slateblue3")
legend(x = "topright",c("Trimmed Mean","Median(L-est)","Winsorized mean","Huber's M-esti"),pch = 1,
col = c("green4","red","sienna4","slateblue3"),lwd = c(2,2,2,2) )
dev.off() | /estimators.R | no_license | bhanu49/Application-for-HRc | R | false | false | 1,112 | r | .libPaths(c("C:/Users/bhanu/Documents/R/win-library/3.3","C:/Program Files/R/R-3.3.2/library"))
library(dplyr)
library(cluster)
library(WRS2)
x<-read.csv("limitval.csv",header = FALSE)
y <- x$V1
#args <- commandArgs(TRUE)
#op <- args[1]
#str(op)
#Robost estimators(resistant to outliers) for dirty data
#Winsorized mean
wm <- winmean(y,0.2)
#median high beak even point
me <- median(y)
#huber M-estimator default K=1.28
hm <- mest(y)
#mean lowest break point
m <- mean(y,0.2)
#mad
val<- cbind(wm,hm,m)
f_val <- round(min(val))
#f_val <- round(hm)
f_val
#plot
png(filename="temp.png", width=500, height=500)
hist(y,col = "ghostwhite",border = "black",prob= TRUE,xlab = "Limit values")
lines(density(y),lwd =2,col="royalblue4")
#plot mean
abline(v = mean(y,0.2),col="green4")
#meadian
abline(v=median(y),col = "red")
#winmean
abline(v = winmean(y,0.1),col = "sienna4" )
#huber m estimator
abline(v = mest(y),col = "slateblue3")
legend(x = "topright",c("Trimmed Mean","Median(L-est)","Winsorized mean","Huber's M-esti"),pch = 1,
col = c("green4","red","sienna4","slateblue3"),lwd = c(2,2,2,2) )
dev.off() |
#MIT License
#Copyright (c) 2021 Octavio Gonzalez-Lugo
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#@author: Octavio Gonzalez-Lugo
###############################################################################
# Loading packages
###############################################################################
library(deSolve)
library(ggplot2)
library(gridExtra)
library(grid)
library(lattice)
###############################################################################
# Plot Functions
###############################################################################
MakeActionPotentialPlot<-function(inputData,PlotTitle){
#Returns a ggplot
#inputData -> model data
#ColumNames -> Names for the columns in the data
#PlotTitle -> Title for the plot
ModelData<-inputData
graphActionPotential<-ggplot(data=ModelData,aes(x=Time,y=ActionPotential,color="ActionPotential"))+geom_line()+
labs(title="Action Potential")+
scale_color_manual(values=c("ActionPotential"="red"))+
theme(axis.title.y = element_blank(),axis.text.y = element_text(),
axis.ticks.y = element_blank(),axis.text.x = element_blank(),
axis.ticks.x = element_blank(),legend.position = "none")
graphPotassiumGating<-ggplot(data=ModelData,aes(x=Time,y=PotassiumGating,color="PotassiumGating"))+geom_line()+
labs(title="Potassium Gating")+
scale_color_manual(values=c("PotassiumGating"="blue"))+
theme(axis.title.y = element_blank(),axis.text.y = element_blank(),
axis.ticks.y = element_blank(),axis.text.x = element_blank(),
axis.ticks.x = element_blank(),axis.title.x = element_blank(),
legend.position = "none")
graphSodiumActivation<-ggplot(data=ModelData,aes(x=Time,y=SodiumChannelActivation,color="SodiumChannelActivation"))+geom_line()+
labs(title="Sodium Channel Activation")+
scale_color_manual(values=c("SodiumChannelActivation"="orange"))+
theme(axis.title.y = element_blank(),axis.text.y = element_blank(),
axis.ticks.y = element_blank(),axis.text.x = element_blank(),
axis.ticks.x = element_blank(),axis.title.x = element_blank(),
legend.position = "none")
graphSodiumInactivation<-ggplot(data=ModelData,aes(x=Time,y=SodiumChannelInactivation,color="SodiumChannelInactivation"))+geom_line()+
labs(title="Sodium Channel Inactivation")+
scale_color_manual(values=c("SodiumChannelInactivation"="cyan"))+
theme(axis.title.y = element_blank(),axis.text.y = element_blank(),
axis.ticks.y = element_blank(),axis.text.x = element_blank(),
axis.ticks.x = element_blank(),legend.position = "none")
gridarrange<-rbind(c(1,1,2),c(1,1,3),c(1,1,4))
graphContainer<-grid.arrange(graphActionPotential,graphPotassiumGating,graphSodiumActivation,graphSodiumInactivation, layout_matrix=gridarrange)
show(graphContainer)
}
###############################################################################
#Solver function
###############################################################################
solveModel<- function(Model,InitialConditions,ModelParameters,ColumnNames,MinMax){
#Solves numerically an ODE system model,returns a formated dataframe
#Model -> function, Model to be solved
#InitialConditions -> list, Initial conditions for the ODE system
#ModelParameters -> list, Parameters of the ODE model
#ColumnNames -> list, names of the columns for the dataframe
#MinMax -> bool, controlls if a minmax normalization is applied to the data.
times <- seq(0, 25, by = 0.01)
out <- ode(InitialConditions,times,Model,ModelParameters,method="rk4")
if (MinMax){
dims<-dim(out)
for (k in 2:dims[2]){
out[,k]<-MinMaxNormalization(out[,k])
}
}
ModelData<-data.frame(out)
colnames(ModelData)<-ColumnNames
ModelData
}
###############################################################################
#Models
###############################################################################
AlphaN<-function(v){
up<-0.01*(v+55)
down<-1-exp(-(v+55)/10)
an<-up/down
an
}
BetaN<-function(v){
bn<-0.125*exp(-(v+65)/80)
bn
}
AlphaM<-function(v){
up<-0.1*(v+40)
down<-1-exp(-(v+40)/10)
am<-up/down
am
}
BetaM<-function(v){
bm<-4*exp(-(v+65)/18)
bm
}
AlphaH<-function(v){
ah<-0.07*exp(-(v+65)/20)
ah
}
BetaH<-function(v){
down<-1+exp(-(v+35)/10)
bh<-1/down
bh
}
Im<-function(t,Impulse){
responce<-0
if(t<5 & t>3){
responce<-Impulse
}
else{
responce<-0
}
responce
}
ActionPotentialModel <- function(t,Y,params){
#Simple organic matter decomposition model
#t -> integration time value
#
#Y -> list Values for the function to be evaluated
# Y[1] -> Potassium channel gating
# Y[2] -> Sodium channel activation
# Y[3] -> Sodium channel inactivation
# Y[4] -> Action Potential
#
#params -> Parameters of the ODE system model
# gk -> Postassium (K) maximum conductances
# Vk -> Postassium (K) Nernst reversal potentials
# gna -> Sodium (Na) maximum conductances
# Vna -> Sodium (Na) Nernst reversal potentials
# gl -> Leak maximum conductances
# Vl -> Leak Nernst reversal potentials
# Cm -> membrane capacitance
# imp -> External Current
#
with(as.list(c(Y,params)),{
dndt=AlphaN(Y[4])*(1-Y[1])-BetaN(Y[4])*Y[1]
dmdt=AlphaM(Y[4])*(1-Y[2])-BetaM(Y[4])*Y[2]
dhdt=AlphaH(Y[4])*(1-Y[3])-BetaH(Y[4])*Y[3]
dvdt=(Im(t,imp)-gk*(Y[1]**4)*(Y[4]-Vk)-gna*(Y[2]**3)*Y[3]*(Y[4]-Vna)-gl*(Y[4]-Vl))/Cm
list(c(dndt,dmdt,dhdt,dvdt))
})
}
params <- c(gk=36,Vk=-77,gna=120,Vna=50,gl=0.3,Vl=-54.387,Cm=1,imp=35)
Y <- c(0.32,0.05,0.6,-65)
columnNames<-c("Time","PotassiumGating","SodiumChannelActivation","SodiumChannelInactivation","ActionPotential")
ActionPotentialData<-solveModel(ActionPotentialModel,Y,params,columnNames,FALSE)
MakeActionPotentialPlot(ActionPotentialData,"Action Potential")
###############################################################################
#All or nothing character of the action potential
###############################################################################
k<-1
Impulses<-seq(0, 8, by = 0.1)
maxVoltages<-rep(0,length(Impulses))
for (val in Impulses){
localParams<-c(gk=36,Vk=-77,gna=120,Vna=50,gl=0.3,Vl=-54.387,Cm=1,imp=val)
localData<-solveModel(ActionPotentialModel,Y,localParams,columnNames,FALSE)
maxVoltages[k]<-max(localData$ActionPotential)
k<-k+1
}
IVData<-Container<-matrix(0,length(Impulses),2)
IVData[,1]<-Impulses
IVData[,2]<-maxVoltages
IVData<-data.frame(IVData)
colnames(IVData)<-c("Current","Voltage")
graphIV<-ggplot(data=IVData,aes(x=Current,y=Voltage,color="Voltage"))+geom_point(shape=8)+
labs(title="Action Potential")+
scale_color_manual(values=c("Voltage"="black"))+
theme(axis.text.y = element_text(),axis.ticks.y = element_blank(),
axis.text.x = element_text(),axis.ticks.x = element_blank(),
legend.position = "none")
show(graphIV)
###############################################################################
#Membrane changes
###############################################################################
MakeVoltagePlot<-function(ModelData,Title){
graphActionPotential<-ggplot(data=ModelData,aes(x=Time,y=LowCapacitance,color="LowCapacitance"))+geom_line()+
geom_line(aes(y=NormalCapacitance,color="NormalCapacitance"))+
labs(title=Title,color="")+
scale_color_manual(values=c("LowCapacitance"="red","NormalCapacitance"="black"))+
theme(axis.title.y = element_blank(),axis.text.y = element_text(),
axis.ticks.y = element_blank(),axis.text.x = element_blank(),
axis.ticks.x = element_blank())
show(graphActionPotential)
}
makeComparisonDataFrame <- function(Capacitance,current){
params <- c(gk=36,Vk=-77,gna=120,Vna=50,gl=0.3,Vl=-54.387,Cm=Capacitance[1],imp=current)
Y <- c(0.32,0.05,0.6,-65)
columnNames<-c("Time","PotassiumGating","SodiumChannelActivation","SodiumChannelInactivation","ActionPotential")
lowCapacitance<-solveModel(ActionPotentialModel,Y,params,columnNames,FALSE)
params <- c(gk=36,Vk=-77,gna=120,Vna=50,gl=0.3,Vl=-54.387,Cm=Capacitance[2],imp=current)
Y <- c(0.32,0.05,0.6,-65)
columnNames<-c("Time","PotassiumGating","SodiumChannelActivation","SodiumChannelInactivation","ActionPotential")
normalCapacitance<-solveModel(ActionPotentialModel,Y,params,columnNames,FALSE)
capacitanceDF <- data.frame(lowCapacitance$Time,lowCapacitance$ActionPotential,normalCapacitance$ActionPotential)
colnames(capacitanceDF)<-c("Time","LowCapacitance","NormalCapacitance")
capacitanceDF
}
comparisonCapacitance<- makeComparisonDataFrame(c(0.5,1),35)
MakeVoltagePlot(comparisonCapacitance,"Normal Activation Current")
comparisonImpulse<- makeComparisonDataFrame(c(0.5,1),3.5)
MakeVoltagePlot(comparisonImpulse,"Low Activation Current")
| /Modeling/hh.r | permissive | TavoGLC/DataAnalysisByExample | R | false | false | 10,039 | r | #MIT License
#Copyright (c) 2021 Octavio Gonzalez-Lugo
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#@author: Octavio Gonzalez-Lugo
###############################################################################
# Loading packages
###############################################################################
library(deSolve)
library(ggplot2)
library(gridExtra)
library(grid)
library(lattice)
###############################################################################
# Plot Functions
###############################################################################
MakeActionPotentialPlot<-function(inputData,PlotTitle){
#Returns a ggplot
#inputData -> model data
#ColumNames -> Names for the columns in the data
#PlotTitle -> Title for the plot
ModelData<-inputData
graphActionPotential<-ggplot(data=ModelData,aes(x=Time,y=ActionPotential,color="ActionPotential"))+geom_line()+
labs(title="Action Potential")+
scale_color_manual(values=c("ActionPotential"="red"))+
theme(axis.title.y = element_blank(),axis.text.y = element_text(),
axis.ticks.y = element_blank(),axis.text.x = element_blank(),
axis.ticks.x = element_blank(),legend.position = "none")
graphPotassiumGating<-ggplot(data=ModelData,aes(x=Time,y=PotassiumGating,color="PotassiumGating"))+geom_line()+
labs(title="Potassium Gating")+
scale_color_manual(values=c("PotassiumGating"="blue"))+
theme(axis.title.y = element_blank(),axis.text.y = element_blank(),
axis.ticks.y = element_blank(),axis.text.x = element_blank(),
axis.ticks.x = element_blank(),axis.title.x = element_blank(),
legend.position = "none")
graphSodiumActivation<-ggplot(data=ModelData,aes(x=Time,y=SodiumChannelActivation,color="SodiumChannelActivation"))+geom_line()+
labs(title="Sodium Channel Activation")+
scale_color_manual(values=c("SodiumChannelActivation"="orange"))+
theme(axis.title.y = element_blank(),axis.text.y = element_blank(),
axis.ticks.y = element_blank(),axis.text.x = element_blank(),
axis.ticks.x = element_blank(),axis.title.x = element_blank(),
legend.position = "none")
graphSodiumInactivation<-ggplot(data=ModelData,aes(x=Time,y=SodiumChannelInactivation,color="SodiumChannelInactivation"))+geom_line()+
labs(title="Sodium Channel Inactivation")+
scale_color_manual(values=c("SodiumChannelInactivation"="cyan"))+
theme(axis.title.y = element_blank(),axis.text.y = element_blank(),
axis.ticks.y = element_blank(),axis.text.x = element_blank(),
axis.ticks.x = element_blank(),legend.position = "none")
gridarrange<-rbind(c(1,1,2),c(1,1,3),c(1,1,4))
graphContainer<-grid.arrange(graphActionPotential,graphPotassiumGating,graphSodiumActivation,graphSodiumInactivation, layout_matrix=gridarrange)
show(graphContainer)
}
###############################################################################
#Solver function
###############################################################################
solveModel<- function(Model,InitialConditions,ModelParameters,ColumnNames,MinMax){
#Solves numerically an ODE system model,returns a formated dataframe
#Model -> function, Model to be solved
#InitialConditions -> list, Initial conditions for the ODE system
#ModelParameters -> list, Parameters of the ODE model
#ColumnNames -> list, names of the columns for the dataframe
#MinMax -> bool, controlls if a minmax normalization is applied to the data.
times <- seq(0, 25, by = 0.01)
out <- ode(InitialConditions,times,Model,ModelParameters,method="rk4")
if (MinMax){
dims<-dim(out)
for (k in 2:dims[2]){
out[,k]<-MinMaxNormalization(out[,k])
}
}
ModelData<-data.frame(out)
colnames(ModelData)<-ColumnNames
ModelData
}
###############################################################################
#Models
###############################################################################
AlphaN<-function(v){
up<-0.01*(v+55)
down<-1-exp(-(v+55)/10)
an<-up/down
an
}
BetaN<-function(v){
bn<-0.125*exp(-(v+65)/80)
bn
}
AlphaM<-function(v){
up<-0.1*(v+40)
down<-1-exp(-(v+40)/10)
am<-up/down
am
}
BetaM<-function(v){
bm<-4*exp(-(v+65)/18)
bm
}
AlphaH<-function(v){
ah<-0.07*exp(-(v+65)/20)
ah
}
BetaH<-function(v){
down<-1+exp(-(v+35)/10)
bh<-1/down
bh
}
Im<-function(t,Impulse){
responce<-0
if(t<5 & t>3){
responce<-Impulse
}
else{
responce<-0
}
responce
}
ActionPotentialModel <- function(t,Y,params){
#Hodgkin-Huxley action potential model (derivative function for deSolve).
#(The previous header said "organic matter decomposition model" -- a
#copy/paste slip; the equations below are the HH membrane model.)
#t -> integration time value
#
#Y -> list Values for the function to be evaluated
# Y[1] -> Potassium channel gating (n)
# Y[2] -> Sodium channel activation (m)
# Y[3] -> Sodium channel inactivation (h)
# Y[4] -> Action Potential (membrane voltage)
#
#params -> Parameters of the ODE system model
# gk -> Potassium (K) maximum conductance
# Vk -> Potassium (K) Nernst reversal potential
# gna -> Sodium (Na) maximum conductance
# Vna -> Sodium (Na) Nernst reversal potential
# gl -> Leak maximum conductance
# Vl -> Leak Nernst reversal potential
# Cm -> membrane capacitance
# imp -> External stimulus amplitude, delivered through Im()
#
with(as.list(c(Y,params)),{
#Gating kinetics: dx/dt = alpha(V)*(1-x) - beta(V)*x for x in {n, m, h}.
dndt=AlphaN(Y[4])*(1-Y[1])-BetaN(Y[4])*Y[1]
dmdt=AlphaM(Y[4])*(1-Y[2])-BetaM(Y[4])*Y[2]
dhdt=AlphaH(Y[4])*(1-Y[3])-BetaH(Y[4])*Y[3]
#Membrane equation: Cm*dV/dt = I_ext - I_K (n^4) - I_Na (m^3 h) - I_leak.
dvdt=(Im(t,imp)-gk*(Y[1]**4)*(Y[4]-Vk)-gna*(Y[2]**3)*Y[3]*(Y[4]-Vna)-gl*(Y[4]-Vl))/Cm
list(c(dndt,dmdt,dhdt,dvdt))
})
}
params <- c(gk=36,Vk=-77,gna=120,Vna=50,gl=0.3,Vl=-54.387,Cm=1,imp=35)
Y <- c(0.32,0.05,0.6,-65)
columnNames<-c("Time","PotassiumGating","SodiumChannelActivation","SodiumChannelInactivation","ActionPotential")
ActionPotentialData<-solveModel(ActionPotentialModel,Y,params,columnNames,FALSE)
MakeActionPotentialPlot(ActionPotentialData,"Action Potential")
###############################################################################
#All or nothing character of the action potential
###############################################################################
# All-or-nothing demonstration: sweep the stimulus amplitude from 0 to 8
# (steps of 0.1) and record the peak membrane voltage of each simulation.
k<-1
Impulses<-seq(0, 8, by = 0.1)
maxVoltages<-rep(0,length(Impulses))
for (val in Impulses){
# Same parameters as the main run, except the stimulus amplitude `imp`.
localParams<-c(gk=36,Vk=-77,gna=120,Vna=50,gl=0.3,Vl=-54.387,Cm=1,imp=val)
localData<-solveModel(ActionPotentialModel,Y,localParams,columnNames,FALSE)
maxVoltages[k]<-max(localData$ActionPotential)
k<-k+1
}
# Assemble the current / peak-voltage pairs for the I-V plot.
# NOTE(review): `Container` is a stray alias of the same matrix and is not
# used again below -- presumably a leftover; confirm before removing.
IVData<-Container<-matrix(0,length(Impulses),2)
IVData[,1]<-Impulses
IVData[,2]<-maxVoltages
IVData<-data.frame(IVData)
colnames(IVData)<-c("Current","Voltage")
graphIV<-ggplot(data=IVData,aes(x=Current,y=Voltage,color="Voltage"))+geom_point(shape=8)+
labs(title="Action Potential")+
scale_color_manual(values=c("Voltage"="black"))+
theme(axis.text.y = element_text(),axis.ticks.y = element_blank(),
axis.text.x = element_text(),axis.ticks.x = element_blank(),
legend.position = "none")
show(graphIV)
###############################################################################
#Membrane changes
###############################################################################
MakeVoltagePlot<-function(ModelData,Title){
#Plots the low- and normal-capacitance voltage traces on one time axis.
#ModelData -> data frame with columns Time, LowCapacitance and
# NormalCapacitance (see makeComparisonDataFrame)
#Title -> character, plot title
#Low capacitance is drawn in red, normal capacitance in black.
graphActionPotential<-ggplot(data=ModelData,aes(x=Time,y=LowCapacitance,color="LowCapacitance"))+geom_line()+
geom_line(aes(y=NormalCapacitance,color="NormalCapacitance"))+
labs(title=Title,color="")+
scale_color_manual(values=c("LowCapacitance"="red","NormalCapacitance"="black"))+
theme(axis.title.y = element_blank(),axis.text.y = element_text(),
axis.ticks.y = element_blank(),axis.text.x = element_blank(),
axis.ticks.x = element_blank())
show(graphActionPotential)
}
makeComparisonDataFrame <- function(Capacitance,current){
  # Runs the action-potential model once per membrane capacitance in
  # `Capacitance` (c(low, normal)) under the same stimulus `current`, and
  # returns the two voltage traces side by side.
  #
  # Capacitance -> numeric vector of length 2: c(low Cm, normal Cm)
  # current     -> external stimulus amplitude, passed to the model as `imp`
  #
  # Returns a data.frame with columns Time, LowCapacitance and
  # NormalCapacitance, ready for MakeVoltagePlot().
  #
  # The original duplicated the whole parameter/initial-condition setup for
  # each run; it is factored into a single helper here.
  Y <- c(0.32,0.05,0.6,-65)
  columnNames <- c("Time","PotassiumGating","SodiumChannelActivation","SodiumChannelInactivation","ActionPotential")
  solveForCm <- function(Cm) {
    # One simulation; only the membrane capacitance differs between runs.
    params <- c(gk=36,Vk=-77,gna=120,Vna=50,gl=0.3,Vl=-54.387,Cm=Cm,imp=current)
    solveModel(ActionPotentialModel,Y,params,columnNames,FALSE)
  }
  lowCapacitance <- solveForCm(Capacitance[1])
  normalCapacitance <- solveForCm(Capacitance[2])
  capacitanceDF <- data.frame(lowCapacitance$Time,lowCapacitance$ActionPotential,normalCapacitance$ActionPotential)
  colnames(capacitanceDF) <- c("Time","LowCapacitance","NormalCapacitance")
  capacitanceDF
}
comparisonCapacitance<- makeComparisonDataFrame(c(0.5,1),35)
MakeVoltagePlot(comparisonCapacitance,"Normal Activation Current")
comparisonImpulse<- makeComparisonDataFrame(c(0.5,1),3.5)
MakeVoltagePlot(comparisonImpulse,"Low Activation Current")
|
runif.polygon <- function(n, win) {
  # Generates n points uniformly distributed in a polygonal window on the
  # sphere, by rejection sampling: draw uniformly on the enclosing sphere
  # and keep the points that fall inside `win` until n are collected.
  #
  # n   -> number of points required
  # win -> window of class "sphwin" with type "polygon"
  #
  # Returns a matrix with n rows (one point per row, 2 coordinate columns).
  stopifnot(inherits(win, "sphwin") && win$type=="polygon")
  esphere <- sphwin(type="sphere", rad=win$rad)
  output <- matrix(, nrow=0, ncol=2)
  while((need <- (n - nrow(output))) > 0) {
    proposed <- runif.sphere(need, win=esphere)
    accept <- in.W(points=proposed, win=win)
    if(any(accept))
      output <- rbind(output, proposed[accept, , drop=FALSE])
  }
  # Fixed: seq_len(nrow) passed the *function* `nrow` to seq_len and would
  # error if this branch were ever reached; the intent is to keep the
  # first n accepted points.
  if(nrow(output) > n)
    output <- output[seq_len(n), , drop=FALSE]
  return(output)
}
| /R/runif.polygon.R | no_license | baddstats/spherstat | R | false | false | 512 | r | runif.polygon <- function(n, win) {
stopifnot(inherits(win, "sphwin") && win$type=="polygon")
esphere <- sphwin(type="sphere", rad=win$rad)
output <- matrix(, nrow=0, ncol=2)
while((need <- (n - nrow(output))) > 0) {
proposed <- runif.sphere(need, win=esphere)
accept <- in.W(points=proposed, win=win)
if(any(accept))
output <- rbind(output, proposed[accept, , drop=FALSE])
}
# Fixed: seq_len(nrow) passed the *function* `nrow` to seq_len and would
# error if this branch were ever reached; the intent is seq_len(n).
if(nrow(output) > n)
output <- output[seq_len(n), , drop=FALSE]
return(output)
}
|
#' Update labelled data to last version
#'
#' Labelled data imported with \pkg{haven} version 1.1.2 or before or
#' created with [haven::labelled()] version 1.1.0 or before was using
#' "labelled" and "labelled_spss" classes.
#'
#' Since version 2.0.0 of these two packages, "haven_labelled" and
#' "haven_labelled_spss" are used instead.
#'
#' Since haven 2.3.0, "haven_labelled" class has been evolving
#' using now \pkg{vctrs} package.
#'
#' `update_labelled()` convert labelled vectors
#' from the old to the new classes and to reconstruct all
#' labelled vectors with the last version of the package.
#'
#' @param x An object (vector or data.frame) to convert.
#' @seealso [haven::labelled()], [haven::labelled_spss()]
#' @export
update_labelled <- function(x) {
# S3 generic: dispatches on class(x); see the vector and data.frame methods.
UseMethod("update_labelled")
}
#' @export
update_labelled.default <- function(x) {
# Fallback method: objects without a labelled class need no conversion,
# so return x unchanged.
x
}
#' @rdname update_labelled
#' @export
update_labelled.labelled <- function(x) {
  # Converts a vector carrying the pre-2.0 "labelled" class to the current
  # constructors. Hmisc objects also use the "labelled" class but carry no
  # "labels" attribute, so they are deliberately left untouched.
  if (!is.null(attr(x, "labels", exact = TRUE))) {
    if (is.null(attr(x, "na_values", exact = TRUE)) && is.null(attr(x, "na_range", exact = TRUE))) {
      x <- labelled(x, labels = attr(x, "labels", exact = TRUE), label = attr(x, "label", exact = TRUE))
    } else {
      # Fixed: na_range was previously read from the non-existent "range"
      # attribute, silently dropping the SPSS missing-value range.
      x <- labelled_spss(
        x, na_values = attr(x, "na_values", exact = TRUE), na_range = attr(x, "na_range", exact = TRUE),
        labels = attr(x, "labels", exact = TRUE), label = attr(x, "label", exact = TRUE)
      )
    }
  }
  x
}
#' @rdname update_labelled
#' @export
update_labelled.haven_labelled_spss <- function(x) {
# Rebuilds the vector with the current labelled_spss() constructor,
# carrying over value labels, the variable label and both kinds of
# user-defined missing values (na_values and na_range).
labelled_spss(
x, labels = val_labels(x), label = var_label(x),
na_values = na_values(x), na_range = na_range(x)
)
}
#' @rdname update_labelled
#' @export
update_labelled.haven_labelled <- function(x) {
# Rebuilds the vector with the current labelled() constructor, carrying
# over the value labels and the variable label.
labelled(
x, labels = val_labels(x), label = var_label(x)
)
}
#' @rdname update_labelled
#' @export
update_labelled.data.frame <- function(x) {
  # Updates every column in place via the generic; data-frame attributes
  # (names, row names, class) are preserved.
  for (j in seq_along(x)) {
    x[[j]] <- update_labelled(x[[j]])
  }
  x
}
| /R/retrocompatibility.R | no_license | henrydoth/labelled | R | false | false | 2,036 | r | #' Update labelled data to last version
#'
#' Labelled data imported with \pkg{haven} version 1.1.2 or before or
#' created with [haven::labelled()] version 1.1.0 or before was using
#' "labelled" and "labelled_spss" classes.
#'
#' Since version 2.0.0 of these two packages, "haven_labelled" and
#' "haven_labelled_spss" are used instead.
#'
#' Since haven 2.3.0, "haven_labelled" class has been evolving
#' using now \pkg{vctrs} package.
#'
#' `update_labelled()` convert labelled vectors
#' from the old to the new classes and to reconstruct all
#' labelled vectors with the last version of the package.
#'
#' @param x An object (vector or data.frame) to convert.
#' @seealso [haven::labelled()], [haven::labelled_spss()]
#' @export
update_labelled <- function(x) {
UseMethod("update_labelled")
}
#' @export
update_labelled.default <- function(x) {
# return x
x
}
#' @rdname update_labelled
#' @export
update_labelled.labelled <- function(x) {
  # Converts a vector carrying the pre-2.0 "labelled" class to the current
  # constructors. Hmisc objects also use the "labelled" class but carry no
  # "labels" attribute, so they are deliberately left untouched.
  if (!is.null(attr(x, "labels", exact = TRUE))) {
    if (is.null(attr(x, "na_values", exact = TRUE)) && is.null(attr(x, "na_range", exact = TRUE))) {
      x <- labelled(x, labels = attr(x, "labels", exact = TRUE), label = attr(x, "label", exact = TRUE))
    } else {
      # Fixed: na_range was previously read from the non-existent "range"
      # attribute, silently dropping the SPSS missing-value range.
      x <- labelled_spss(
        x, na_values = attr(x, "na_values", exact = TRUE), na_range = attr(x, "na_range", exact = TRUE),
        labels = attr(x, "labels", exact = TRUE), label = attr(x, "label", exact = TRUE)
      )
    }
  }
  x
}
#' @rdname update_labelled
#' @export
update_labelled.haven_labelled_spss <- function(x) {
labelled_spss(
x, labels = val_labels(x), label = var_label(x),
na_values = na_values(x), na_range = na_range(x)
)
}
#' @rdname update_labelled
#' @export
update_labelled.haven_labelled <- function(x) {
labelled(
x, labels = val_labels(x), label = var_label(x)
)
}
#' @rdname update_labelled
#' @export
update_labelled.data.frame <- function(x) {
x[] <- lapply(x, update_labelled)
x
}
|
#PROGENy analysis Jurkat WT vs clones
#This code follows the tutorial available here: https://bioc.ism.ac.jp/packages/3.14/bioc/vignettes/progeny/inst/doc/progenyBulk.html
#PROGENy is available here: https://saezlab.github.io/progeny/
#This script calculates PROGENy pathway scores for all pathways included in the database
#Input: Normalised expression dataframe from Jurkat_wt_vs_a7_vs_c9.R
#Input: Differential expression analysis output from Jurkat_wt_vs_clones.R
#Input: Study design
#Output: plot for NES heatmap for all samples across all pathways;
#Output: plot for NES difference for each pathway, comparing WT vs clones
#Output: pathway responsive genes for each pathway - scatterplots and heatmap only for WNT
library(progeny)
library(pheatmap)
library(tidyverse)
library(ggrepel)
library(gridExtra)
library(gplots)
library(RColorBrewer)
#load data
Normalised_counts <- read_csv("normalised_expression_jurkat_wt_vs_a7_c9.csv")
Experimental_design <- read_tsv("study_design_jurkat.txt")
diff_expr <- read_csv("Jurkat_wt_v_clones_all_genes.csv")
Normalised_counts_matrix <- Normalised_counts %>%
dplyr::mutate_if(~ any(is.na(.x)),~ if_else(is.na(.x),0,.x)) %>%
tibble::column_to_rownames(var = "geneID") %>%
as.matrix()
diff_expr_wt_vs_clones_matrix <- diff_expr %>%
dplyr::select(geneID, t) %>%
dplyr::filter(!is.na(t)) %>%
column_to_rownames(var = "geneID") %>%
as.matrix()
PathwayActivity_counts <- progeny(Normalised_counts_matrix, scale=TRUE,
organism="Human", top = 100)
Activity_counts <- as.vector(PathwayActivity_counts)
paletteLength <- 100
myColor <- colorRampPalette(c("blue", "whitesmoke", "red"))(paletteLength)
progenyBreaks <- c(seq(min(Activity_counts), 0,
length.out=ceiling(paletteLength/2) + 1),
seq(max(Activity_counts)/paletteLength,
max(Activity_counts),
length.out=floor(paletteLength/2)))
#heatmap for NES for each samples
progeny_hmap <- pheatmap(t(PathwayActivity_counts),fontsize=14,
fontsize_row = 14, fontsize_col = 14,
color=myColor, breaks = progenyBreaks,
main = "PROGENy (100) - Jurkat - WT vs clones", angle_col = 0,
treeheight_row = 0, border_color = NA)
PathwayActivity_zscore <- progeny(diff_expr_wt_vs_clones_matrix,
scale=TRUE, organism="Human", top = 100, perm = 10000, z_scores = TRUE) %>%
t()
colnames(PathwayActivity_zscore) <- "NES"
PathwayActivity_zscore_df <- as.data.frame(PathwayActivity_zscore) %>%
rownames_to_column(var = "Pathway") %>%
dplyr::arrange(NES) %>%
dplyr::mutate(Pathway = factor(Pathway))
#NES difference for all pathways comparing WT and clones
pathways_plot <- ggplot(PathwayActivity_zscore_df,aes(x = reorder(Pathway, -NES), y = NES)) +
geom_bar(aes(fill = NES), stat = "identity") +
scale_fill_gradient2(low = "blue", high = "red",
mid = "whitesmoke", midpoint = 0) +
theme_minimal(base_size = 18) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 7), limits = c(-4, 4)) +
theme(axis.title = element_text(face = "bold", size = 18),
axis.text.x = element_text(size = 18, face = "bold"),
axis.text.y = element_text(size = 14, face = "bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.title = element_text(size = 18)) +
coord_flip() +
#labs(title = "Jurkat WT vs clones - enriched pathways") +
xlab("Pathways")
#library(Cairo)
#ggsave(plot = pathways_plot, file = "./progeny_pathways.pdf",
# width = 6.5, height = 5, dpi = 1000, device = cairo_pdf)
prog_matrix <- getModel("Human", top=100) %>%
as.data.frame() %>%
tibble::rownames_to_column("GeneID")
diff_expr_wt_vs_clones_df <- diff_expr_wt_vs_clones_matrix %>%
as.data.frame() %>%
tibble::rownames_to_column("geneID") %>%
left_join(diff_expr, by = "geneID") %>%
select(c("geneID", "logFC")) %>%
rename(GeneID = geneID)
write.csv(prog_matrix, file = "jurkat_wt_vs_clones_progeny_weights.csv")
scat_plots <- progenyScatter(df = diff_expr_wt_vs_clones_df,
weight_matrix = prog_matrix,
statName = "logFC", verbose = FALSE)
#saving scatterplots for all pathways
# Save one PNG per pathway returned by progenyScatter() (weights vs logFC).
for (pathway in names(scat_plots[[1]])) {
png(filename = paste0("progeny_", pathway,
"_jurkat_wt_vs_clones.png"),
width = 750, height = 500)
plot(scat_plots[[1]][[pathway]])
# Close the png device so the file is flushed to disk before the next one.
dev.off()
}
#prepare for WNT heatmap
signif_genes <- diff_expr %>%
filter(adj.P.Val < 0.25)
wnt_genes <- prog_matrix %>%
filter(WNT != 0 ) %>%
select(c("GeneID", "WNT")) %>%
filter(GeneID %in% signif_genes$geneID) %>%
rename("geneID" = "GeneID")
merged_wnt_signif_genes <- left_join(wnt_genes, signif_genes)
colnames(Normalised_counts) <- c("geneID", "A7_1", "WT_1", "WT_2", "WT_3", "A7_2", "A7_3", "C9_1", "C9_2", "C9_3")
normalised_counts_wnt <- Normalised_counts %>%
filter(geneID %in% merged_wnt_signif_genes$geneID) %>%
as.data.frame()
rownames(normalised_counts_wnt) <- normalised_counts_wnt$geneID
normalised_counts_wnt <- normalised_counts_wnt %>%
select(-geneID) %>%
as.matrix()
myheatcolours <- rev(brewer.pal(n = 8, name = "RdBu"))
clustRows.jurkat <- hclust(as.dist(1-cor(t(normalised_counts_wnt), method="pearson")), method="complete") #cluster rows by pearson correlation
clustColumns.jurkat <- hclust(as.dist(1-cor(normalised_counts_wnt, method="pearson")), method="complete")
module.assign.jurkat <- wnt_genes %>%
mutate(module = case_when(WNT < 0 ~ 1,
WNT > 0 ~ 2)) %>%
select(-WNT) %>%
deframe()
module.colour.jurkat <- hcl.colors(length(unique(module.assign.jurkat)), palette = "viridis", rev = TRUE)
module.colour.jurkat <- module.colour.jurkat[as.vector(module.assign.jurkat)]
df_labels <- as.data.frame(colnames(normalised_counts_wnt))
df_labels$group <- c("A7", "WT", "WT", "WT", "A7", "A7", "C9", "C9", "C9")
labCol <- df_labels$group[match(colnames(normalised_counts_wnt), df_labels$'colnames(normalised_counts_wnt)') ]
#heatmap
pdf("heatmap_WNT_responsive_genes.pdf", width = 7, height = 4)
heatmap.2(normalised_counts_wnt,
dendrogram = 'column',
Rowv=as.dendrogram(clustRows.jurkat),
Colv=as.dendrogram(clustColumns.jurkat),
#RowSideColors=module.colour.jurkat,
col=myColor, scale='row', srtCol= 360, adjCol = 0.5, #labRow=NA,
density.info="none", trace="none",
cexRow=1, cexCol=1.4, margins=c(4,10), labCol = labCol,
main = "WT vs EZH2-KO\nWNT responsive genes", key.title = NA,
key.xlab = "Expression\nz-score")
dev.off()
| /Progeny_jurkat_wt_vs_clones.R | no_license | cosmintudose/RNASeq_Jurkat_pipeline | R | false | false | 6,835 | r | #PROGENy analysis Jurkat WT vs clones
#This code follows the tutorial available here: https://bioc.ism.ac.jp/packages/3.14/bioc/vignettes/progeny/inst/doc/progenyBulk.html
#PROGENy is available here: https://saezlab.github.io/progeny/
#This script calculates PROGENy pathway scores for all pathways included in the database
#Input: Normalised expression dataframe from Jurkat_wt_vs_a7_vs_c9.R
#Input: Differential expression analysis output from Jurkat_wt_vs_clones.R
#Input: Study design
#Output: plot for NES heatmap for all samples across all pathways;
#Output: plot for NES difference for each pathway, comparing WT vs clones
#Output: pathway responsive genes for each pathway - scatterplots and heatmap only for WNT
library(progeny)
library(pheatmap)
library(tidyverse)
library(ggrepel)
library(gridExtra)
library(gplots)
library(RColorBrewer)
#load data
Normalised_counts <- read_csv("normalised_expression_jurkat_wt_vs_a7_c9.csv")
Experimental_design <- read_tsv("study_design_jurkat.txt")
diff_expr <- read_csv("Jurkat_wt_v_clones_all_genes.csv")
Normalised_counts_matrix <- Normalised_counts %>%
dplyr::mutate_if(~ any(is.na(.x)),~ if_else(is.na(.x),0,.x)) %>%
tibble::column_to_rownames(var = "geneID") %>%
as.matrix()
diff_expr_wt_vs_clones_matrix <- diff_expr %>%
dplyr::select(geneID, t) %>%
dplyr::filter(!is.na(t)) %>%
column_to_rownames(var = "geneID") %>%
as.matrix()
PathwayActivity_counts <- progeny(Normalised_counts_matrix, scale=TRUE,
organism="Human", top = 100)
Activity_counts <- as.vector(PathwayActivity_counts)
paletteLength <- 100
myColor <- colorRampPalette(c("blue", "whitesmoke", "red"))(paletteLength)
progenyBreaks <- c(seq(min(Activity_counts), 0,
length.out=ceiling(paletteLength/2) + 1),
seq(max(Activity_counts)/paletteLength,
max(Activity_counts),
length.out=floor(paletteLength/2)))
#heatmap for NES for each samples
progeny_hmap <- pheatmap(t(PathwayActivity_counts),fontsize=14,
fontsize_row = 14, fontsize_col = 14,
color=myColor, breaks = progenyBreaks,
main = "PROGENy (100) - Jurkat - WT vs clones", angle_col = 0,
treeheight_row = 0, border_color = NA)
PathwayActivity_zscore <- progeny(diff_expr_wt_vs_clones_matrix,
scale=TRUE, organism="Human", top = 100, perm = 10000, z_scores = TRUE) %>%
t()
colnames(PathwayActivity_zscore) <- "NES"
PathwayActivity_zscore_df <- as.data.frame(PathwayActivity_zscore) %>%
rownames_to_column(var = "Pathway") %>%
dplyr::arrange(NES) %>%
dplyr::mutate(Pathway = factor(Pathway))
#NES difference for all pathways comparing WT and clones
pathways_plot <- ggplot(PathwayActivity_zscore_df,aes(x = reorder(Pathway, -NES), y = NES)) +
geom_bar(aes(fill = NES), stat = "identity") +
scale_fill_gradient2(low = "blue", high = "red",
mid = "whitesmoke", midpoint = 0) +
theme_minimal(base_size = 18) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 7), limits = c(-4, 4)) +
theme(axis.title = element_text(face = "bold", size = 18),
axis.text.x = element_text(size = 18, face = "bold"),
axis.text.y = element_text(size = 14, face = "bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.title = element_text(size = 18)) +
coord_flip() +
#labs(title = "Jurkat WT vs clones - enriched pathways") +
xlab("Pathways")
#library(Cairo)
#ggsave(plot = pathways_plot, file = "./progeny_pathways.pdf",
# width = 6.5, height = 5, dpi = 1000, device = cairo_pdf)
prog_matrix <- getModel("Human", top=100) %>%
as.data.frame() %>%
tibble::rownames_to_column("GeneID")
diff_expr_wt_vs_clones_df <- diff_expr_wt_vs_clones_matrix %>%
as.data.frame() %>%
tibble::rownames_to_column("geneID") %>%
left_join(diff_expr, by = "geneID") %>%
select(c("geneID", "logFC")) %>%
rename(GeneID = geneID)
write.csv(prog_matrix, file = "jurkat_wt_vs_clones_progeny_weights.csv")
scat_plots <- progenyScatter(df = diff_expr_wt_vs_clones_df,
weight_matrix = prog_matrix,
statName = "logFC", verbose = FALSE)
#saving scatterplots for all pathways
for (pathway in names(scat_plots[[1]])) {
png(filename = paste0("progeny_", pathway,
"_jurkat_wt_vs_clones.png"),
width = 750, height = 500)
plot(scat_plots[[1]][[pathway]])
dev.off()
}
#prepare for WNT heatmap
signif_genes <- diff_expr %>%
filter(adj.P.Val < 0.25)
wnt_genes <- prog_matrix %>%
filter(WNT != 0 ) %>%
select(c("GeneID", "WNT")) %>%
filter(GeneID %in% signif_genes$geneID) %>%
rename("geneID" = "GeneID")
merged_wnt_signif_genes <- left_join(wnt_genes, signif_genes)
colnames(Normalised_counts) <- c("geneID", "A7_1", "WT_1", "WT_2", "WT_3", "A7_2", "A7_3", "C9_1", "C9_2", "C9_3")
normalised_counts_wnt <- Normalised_counts %>%
filter(geneID %in% merged_wnt_signif_genes$geneID) %>%
as.data.frame()
rownames(normalised_counts_wnt) <- normalised_counts_wnt$geneID
normalised_counts_wnt <- normalised_counts_wnt %>%
select(-geneID) %>%
as.matrix()
myheatcolours <- rev(brewer.pal(n = 8, name = "RdBu"))
clustRows.jurkat <- hclust(as.dist(1-cor(t(normalised_counts_wnt), method="pearson")), method="complete") #cluster rows by pearson correlation
clustColumns.jurkat <- hclust(as.dist(1-cor(normalised_counts_wnt, method="pearson")), method="complete")
module.assign.jurkat <- wnt_genes %>%
mutate(module = case_when(WNT < 0 ~ 1,
WNT > 0 ~ 2)) %>%
select(-WNT) %>%
deframe()
module.colour.jurkat <- hcl.colors(length(unique(module.assign.jurkat)), palette = "viridis", rev = TRUE)
module.colour.jurkat <- module.colour.jurkat[as.vector(module.assign.jurkat)]
df_labels <- as.data.frame(colnames(normalised_counts_wnt))
df_labels$group <- c("A7", "WT", "WT", "WT", "A7", "A7", "C9", "C9", "C9")
labCol <- df_labels$group[match(colnames(normalised_counts_wnt), df_labels$'colnames(normalised_counts_wnt)') ]
#heatmap
pdf("heatmap_WNT_responsive_genes.pdf", width = 7, height = 4)
heatmap.2(normalised_counts_wnt,
dendrogram = 'column',
Rowv=as.dendrogram(clustRows.jurkat),
Colv=as.dendrogram(clustColumns.jurkat),
#RowSideColors=module.colour.jurkat,
col=myColor, scale='row', srtCol= 360, adjCol = 0.5, #labRow=NA,
density.info="none", trace="none",
cexRow=1, cexCol=1.4, margins=c(4,10), labCol = labCol,
main = "WT vs EZH2-KO\nWNT responsive genes", key.title = NA,
key.xlab = "Expression\nz-score")
dev.off()
|
#*************************************************
# FP-GROWTH - SPARK FRAMEWORK
#*************************************************
# En este script se usa el algoritmo de obtencion de reglas FP-GROWTH
# El motivo es realizar una comparación con los resultados obtenidos en APRIORI
#Iniciamos sesión en Spark
if (nchar(Sys.getenv("SPARK_HOME")) < 1) {
Sys.setenv(SPARK_HOME = "/Users/joseadiazg/spark-2.2.0-bin-hadoop2.7")
}
library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
sparkR.session(master = "local[*]", sparkConfig = list(spark.driver.memory = "7g"))
#Creamos un dataset para que FP-GROWTH pueda entenderlo
#No podemos tener items repetidos en una transaccion, por lo que haremos una nueva versión
items <- strsplit(as.character(finalCorpus$content), " ")
reduce_row <- function(i) {
  # Collapses a whitespace-tokenised document string to its unique tokens,
  # preserving first-occurrence order, re-joined with single spaces.
  # (FP-Growth does not allow repeated items within one transaction.)
  tokens <- strsplit(i, split = " ")[[1]]
  paste(unique(tokens), collapse = " ")
}
itemsUnique<-lapply(finalCorpus$content[1:length(finalCorpus$content)],reduce_row)
length(itemsUnique)
listUnique<-strsplit(as.character(itemsUnique[1:length(itemsUnique)]), split=" ")
for (i in 1:length(listUnique))
{
listUnique[[i]]<-fusion(listUnique[[i]], "ben", "simmons")
listUnique[[i]]<-fusion(listUnique[[i]], "donald", "trump")
listUnique[[i]]<-fusion(listUnique[[i]], "hillary", "clinton")
listUnique[[i]]<-fusion(listUnique[[i]], "bill", "clinton")
listUnique[[i]]<-fusion(listUnique[[i]], "barack", "obama")
listUnique[[i]]<-fusion(listUnique[[i]], "justin", "bieber")
listUnique[[i]]<-fusion(listUnique[[i]], "bernie", "sanders")
listUnique[[i]]<-fusion(listUnique[[i]], "ted", "cruz")
print(i)
}
#Ya tenemos items únicos, ahora los pasamos a lista de elementos
lapply(listUnique, write, "test.txt", append=TRUE, ncolumns=1000)
#Vamos a crear el tipo de datos para Spark Fp-Growth
raw_data <- read.df(
"./test.txt",
source = "csv",
schema = structType(structField("raw_items", "string")))
data <- selectExpr(raw_data, "split(raw_items, ' ') as items")
# Vamos a probar como se comporta el algoritmo para los valores de soporte de: 0.01, 0.001, 0.0001
t <- proc.time() # Inicia el cronómetro
fpm <- spark.fpGrowth(data, itemsCol="items", minSupport=0.0001, minConfidence=0.7)
association_rules <- spark.associationRules(fpm)
proc.time()-t # Detiene el cronómetro
# Para cada experimento pasamos el dataframe de Spark a DataFrame de R ya que este permite más acciones
# Collect the Spark association-rules DataFrame into a local R data.frame
# for the minSupport = 0.0001 experiment.
ar00001<-collect(association_rules)
# Fixed: object.size() was called on the misspelled name `ar0001`; measure
# the object that was actually created above.
object.size(ar00001)
#Obtenemos itemsets frecuentes
frequent_itemsets<-spark.freqItemsets(fpm)
showDF(frequent_itemsets)
# Obtenemos reglas de asociación
association_rules <- spark.associationRules(fpm)
showDF(association_rules)
| /fp-frowth.R | permissive | joseangeldiazg/twitter-text-mining | R | false | false | 2,714 | r | #*************************************************
# FP-GROWTH - SPARK FRAMEWORK
#*************************************************
# En este script se usa el algoritmo de obtencion de reglas FP-GROWTH
# El motivo es realizar una comparación con los resultados obtenidos en APRIORI
#Iniciamos sesión en Spark
if (nchar(Sys.getenv("SPARK_HOME")) < 1) {
Sys.setenv(SPARK_HOME = "/Users/joseadiazg/spark-2.2.0-bin-hadoop2.7")
}
library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
sparkR.session(master = "local[*]", sparkConfig = list(spark.driver.memory = "7g"))
#Creamos un dataset para que FP-GROWTH pueda entenderlo
#No podemos tener items repetidos en una transaccion, por lo que haremos una nueva versión
items <- strsplit(as.character(finalCorpus$content), " ")
reduce_row <- function(i) {
  # Collapses a whitespace-tokenised document string to its unique tokens,
  # preserving first-occurrence order, re-joined with single spaces.
  # (FP-Growth does not allow repeated items within one transaction.)
  tokens <- strsplit(i, split = " ")[[1]]
  paste(unique(tokens), collapse = " ")
}
itemsUnique<-lapply(finalCorpus$content[1:length(finalCorpus$content)],reduce_row)
length(itemsUnique)
listUnique<-strsplit(as.character(itemsUnique[1:length(itemsUnique)]), split=" ")
for (i in 1:length(listUnique))
{
listUnique[[i]]<-fusion(listUnique[[i]], "ben", "simmons")
listUnique[[i]]<-fusion(listUnique[[i]], "donald", "trump")
listUnique[[i]]<-fusion(listUnique[[i]], "hillary", "clinton")
listUnique[[i]]<-fusion(listUnique[[i]], "bill", "clinton")
listUnique[[i]]<-fusion(listUnique[[i]], "barack", "obama")
listUnique[[i]]<-fusion(listUnique[[i]], "justin", "bieber")
listUnique[[i]]<-fusion(listUnique[[i]], "bernie", "sanders")
listUnique[[i]]<-fusion(listUnique[[i]], "ted", "cruz")
print(i)
}
#Ya tenemos items únicos, ahora los pasamos a lista de elementos
lapply(listUnique, write, "test.txt", append=TRUE, ncolumns=1000)
#Vamos a crear el tipo de datos para Spark Fp-Growth
raw_data <- read.df(
"./test.txt",
source = "csv",
schema = structType(structField("raw_items", "string")))
data <- selectExpr(raw_data, "split(raw_items, ' ') as items")
# Vamos a probar como se comporta el algoritmo para los valores de soporte de: 0.01, 0.001, 0.0001
t <- proc.time() # Inicia el cronómetro
fpm <- spark.fpGrowth(data, itemsCol="items", minSupport=0.0001, minConfidence=0.7)
association_rules <- spark.associationRules(fpm)
proc.time()-t # Detiene el cronómetro
# Para cada experimento pasamos el dataframe de Spark a DataFrame de R ya que este permite más acciones
ar00001<-collect(association_rules)
object.size(ar0001)
#Obtenemos itemsets frecuentes
frequent_itemsets<-spark.freqItemsets(fpm)
showDF(frequent_itemsets)
# Obtenemos reglas de asociación
association_rules <- spark.associationRules(fpm)
showDF(association_rules)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/partners_objects.R
\name{UserOverrides}
\alias{UserOverrides}
\title{UserOverrides Object}
\usage{
UserOverrides(ipAddress = NULL, userId = NULL)
}
\arguments{
\item{ipAddress}{IP address to use instead of the user's geo-located IP address}
\item{userId}{Logged-in user ID to impersonate instead of the user's ID}
}
\value{
UserOverrides object
}
\description{
UserOverrides Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Values to use instead of the user's respective defaults. These are only honored by whitelisted products.
}
| /googlepartnersv2.auto/man/UserOverrides.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 649 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/partners_objects.R
\name{UserOverrides}
\alias{UserOverrides}
\title{UserOverrides Object}
\usage{
UserOverrides(ipAddress = NULL, userId = NULL)
}
\arguments{
\item{ipAddress}{IP address to use instead of the user's geo-located IP address}
\item{userId}{Logged-in user ID to impersonate instead of the user's ID}
}
\value{
UserOverrides object
}
\description{
UserOverrides Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Values to use instead of the user's respective defaults. These are only honored by whitelisted products.
}
|
library(here)
library(tidyverse)
library(readxl)
## Extract bacterial qPCR standard curves
## Standard Curve 2016-09-19 ---------------------------------------------------
get_std_0919 <- function(xcl_file){
  # Reads the 2016-09-19 bacterial qPCR standard-curve sheet and returns a
  # tidy data frame: one row per well x plate, Ct and conc as numeric, with
  # the standard ("zymo") and run date annotated.
  #
  # xcl_file -> path to the MixStudy Excel workbook
  #
  # The "skip*" names label spacer columns in the sheet layout; they are
  # dropped immediately after import.
  cols <- c("well","sample_name","conc",
            "plate1", "skip5", "plate2", "skip7", "plate3",
            paste0("skip", 9:13))
  # Fixed: a stray trailing comma after `na = "Undetermined"` passed an
  # empty positional argument into read_excel().
  read_excel(path = xcl_file, sheet = "QDNA_20160919",
             skip = 3, col_names = cols, na = "Undetermined") %>%
    select(-starts_with("skip")) %>%
    filter(sample_name %in% paste0("Std",1:7)) %>%
    gather("plate","Ct",-well, -sample_name, -conc) %>%
    mutate(conc = as.numeric(conc), Ct = as.numeric(Ct),
           std = "zymo", date = "2016-09-19")
}
## Standard Curve 2016-09-19 ---------------------------------------------------
get_std_1209 <- function(xcl_file){
# Reads the 2016-12-09 re-run sheet, which holds two standards ("shan" and
# "zymo") side by side; splits them apart, stacks them, and returns a tidy
# frame: one row per well x plate x standard with numeric conc and Ct.
#
# xcl_file -> path to the MixStudy Excel workbook
#
# NOTE(review): set_colnames() comes from magrittr, which is not attached
# at the top of this script -- confirm it is available at run time.
basic_cols <- c("well", "sample_name", "conc",
"plate1","plate2", "plate3")
cols <- c("well", "sample_name", "conc",
"plate1","skip5", "plate2", "skip7", "plate3", "skip9")
# Sheet layout: shan columns, one spacer, then zymo columns.
full_cols <- c(paste("shan",cols,sep = "_"), "skip10",
paste("zymo",cols,sep = "_"))
bac_std <-
read_excel(path = xcl_file,
sheet = "ReDo_QDNA_20161209",skip = 3,
na = "Undetermined", col_names = full_cols) %>%
filter(shan_sample_name %in% paste0("Std",1:7)) %>%
select(-contains("skip"))
shan_std <- bac_std %>% select(starts_with("shan")) %>%
set_colnames(basic_cols) %>% mutate(std = "shan")
bac_std <- bac_std %>% select(starts_with("zymo")) %>%
set_colnames(basic_cols) %>% mutate(std = "zymo") %>%
bind_rows(shan_std)
bac_std %>% gather("plate","Ct",-well, -sample_name, -conc, -std) %>%
mutate(conc = as.numeric(conc), Ct = as.numeric(Ct), date = "2016-12-09")
}
## Generating full dataset and caching
qpcrBacStd <- here("data","raw","MixStudy_Nate_20161209.xls") %>%
{full_join(get_std_0919(.), get_std_1209(.))}
saveRDS(qpcrBacStd, here("data","qpcrBacStd.RDS")) | /secondary_analysis/qpcrBacStd.R | no_license | nate-d-olson/abundance_assessment | R | false | false | 2,240 | r | library(here)
library(tidyverse)
library(readxl)
## Extract bacterial qPCR standard curves
## Standard Curve 2016-09-19 ---------------------------------------------------
get_std_0919 <- function(xcl_file){
  # Reads the 2016-09-19 bacterial qPCR standard-curve sheet and returns a
  # tidy data frame: one row per well x plate, Ct and conc as numeric, with
  # the standard ("zymo") and run date annotated.
  #
  # xcl_file -> path to the MixStudy Excel workbook
  #
  # The "skip*" names label spacer columns in the sheet layout; they are
  # dropped immediately after import.
  cols <- c("well","sample_name","conc",
            "plate1", "skip5", "plate2", "skip7", "plate3",
            paste0("skip", 9:13))
  # Fixed: a stray trailing comma after `na = "Undetermined"` passed an
  # empty positional argument into read_excel().
  read_excel(path = xcl_file, sheet = "QDNA_20160919",
             skip = 3, col_names = cols, na = "Undetermined") %>%
    select(-starts_with("skip")) %>%
    filter(sample_name %in% paste0("Std",1:7)) %>%
    gather("plate","Ct",-well, -sample_name, -conc) %>%
    mutate(conc = as.numeric(conc), Ct = as.numeric(Ct),
           std = "zymo", date = "2016-09-19")
}
## Standard Curve 2016-09-19 ---------------------------------------------------
get_std_1209 <- function(xcl_file){
basic_cols <- c("well", "sample_name", "conc",
"plate1","plate2", "plate3")
cols <- c("well", "sample_name", "conc",
"plate1","skip5", "plate2", "skip7", "plate3", "skip9")
full_cols <- c(paste("shan",cols,sep = "_"), "skip10",
paste("zymo",cols,sep = "_"))
bac_std <-
read_excel(path = xcl_file,
sheet = "ReDo_QDNA_20161209",skip = 3,
na = "Undetermined", col_names = full_cols) %>%
filter(shan_sample_name %in% paste0("Std",1:7)) %>%
select(-contains("skip"))
shan_std <- bac_std %>% select(starts_with("shan")) %>%
set_colnames(basic_cols) %>% mutate(std = "shan")
bac_std <- bac_std %>% select(starts_with("zymo")) %>%
set_colnames(basic_cols) %>% mutate(std = "zymo") %>%
bind_rows(shan_std)
bac_std %>% gather("plate","Ct",-well, -sample_name, -conc, -std) %>%
mutate(conc = as.numeric(conc), Ct = as.numeric(Ct), date = "2016-12-09")
}
## Generating full dataset and caching
qpcrBacStd <- here("data","raw","MixStudy_Nate_20161209.xls") %>%
{full_join(get_std_0919(.), get_std_1209(.))}
saveRDS(qpcrBacStd, here("data","qpcrBacStd.RDS")) |
## Summary
This assignment provides the R code required to produce four pre-defined plots.
## Plot 1
Reading the file
Reading, naming and subsetting power consumption data
```{r}
filepath<-"C:/Users/new/Downloads/exdata_data_household_power_consumption/household_power_consumption.txt"
power <- read.table(filepath,skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
```
This code calls the base plotting function
```{r}
hist(as.numeric(as.character(subpower$Global_active_power)),col="red",main="Global Active Power",xlab="Global Active Power(kilowatts)")
```
This code annotates the graph
```{r}
#title(main="Global Active Power")
``` | /Plot1.R | no_license | dans515c/ExData_Plotting1 | R | false | false | 865 | r | ## Summary
This assigment aims and goals at furnishing the R code required for plotting 4 pre-defined plots
## Plot 1
Reading the file
Reading, naming and subsetting power consumption data
```{r}
filepath<-"C:/Users/new/Downloads/exdata_data_household_power_consumption/household_power_consumption.txt"
power <- read.table(filepath,skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
```
This code Calling the basic plot function
```{r}
hist(as.numeric(as.character(subpower$Global_active_power)),col="red",main="Global Active Power",xlab="Global Active Power(kilowatts)")
```
This code annotating graph
```{r}
#title(main="Global Active Power")
``` |
test_that("dm_disambiguate_cols() works as intended", {
# FIXME: solve issue #330
skip_if_src("postgres")
expect_equivalent_dm(
expect_message(dm_disambiguate_cols(dm_for_disambiguate())),
dm_for_disambiguate_2()
)
})
| /tests/testthat/test-disambiguate.R | permissive | jawond/dm | R | false | false | 234 | r | test_that("dm_disambiguate_cols() works as intended", {
# FIXME: solve issue #330
skip_if_src("postgres")
expect_equivalent_dm(
expect_message(dm_disambiguate_cols(dm_for_disambiguate())),
dm_for_disambiguate_2()
)
})
|
# Title: Various gene selection methods.
#
# Author: M. Slawski, adapted from A-L Boulesteix
# Email: <Martin.Slawski@campus.lmu.de>
# Date of creation: 26.9.2007
#
# Brief description:
# Returns an object of class 'GeneSel'.
#
# Arguments:
# -X: matrix of variables (rows are observations,columns are variables)
# -y: survival response of class Surv
# -f: formula for survival model
# -LearningSets: output-object of method GenerateLearningsets
# (splits of dataset into training/test sets)
# -method: variable selection method to be used
# -trace: print some additional information
# -criterion: pvalue or coefficients, which shall be returned by
# the filter method?
#
# Value:
# GeneSel
#
###############################################################################
setGeneric("geneSelection", function(X, y, ...)
standardGeneric("geneSelection"))
setMethod("geneSelection", signature(X = "data.frame", y = "Surv"),
function(X, y, LearningSets, method = c("fastCox"),
criterion = c("coefficient"), trace = TRUE, ...) {
nrx <- nrow(X)
if (nrx != nrow(y))
stop("Number of rows of 'X' must agree with length of y \n")
tempcall <- as.character(match.call())
if (missing(LearningSets)) {
warning("Argument 'LearningSets' is missing; set to a row vector
with entries '1:nrow(X)' \n")
learnmatrix <- matrix(1:nrx, ncol = nrx)
} else {
learnmatrix <- LearningSets@learnmatrix
if (ncol(learnmatrix) > nrx)
stop("'LearningSets' do not match the input data \n")
}
niter <- nrow(learnmatrix)
p <- ncol(X)
outrankings <- outimportance <- matrix(nrow = niter, ncol = p)
rankings <- importance <- matrix(0, niter, p)
selfun <- switch(method, fastCox = fastCox,
stop("Invalid 'method' specified\n"))
for (i in 1:niter) {
if (trace)
cat("geneSelection: iteration", i, "\n")
outporg <- selfun(X, y, learnind = learnmatrix[i, ],
criterion = criterion, ...)
outp <- outporg@varsel
decr <- outporg@criterion != "pvalue"
outrankings[i, ] <- ord <- order(outp, decreasing = decr)
outimportance[i, ] <- outp[ord]
}
colnames(outrankings) <- paste("rank", 1:p, sep = "")
colnames(outimportance) <- paste("gene", ord, sep = "")
rownames(outrankings) <- rownames(outimportance) <-
paste("iter.", 1:niter, sep = "")
rankings <- importance <- list()
rankings[[1]] <- outrankings
importance[[1]] <- outimportance
new("GeneSel", rankings = rankings, importance = importance,
method = method, criterion = criterion)
})
setMethod("geneSelection", signature(X = "ExpressionSet", y = "character"),
function(X, y, ... ) {
Xdat <- as.data.frame(t(exprs(X)), check.names = FALSE)
geneSelection(X = Xdat, y = .fetchyFromEset(X,y), ...)
})
setMethod("geneSelection", signature(X = "ExpressionSet", y = "Surv"),
function(X, y,...) {
geneSelection(X = as.data.frame(t(exprs(X)), check.names = FALSE),
y = y, ... )})
setMethod("geneSelection", signature(X = "matrix", y = "Surv"), function(X, y,
...) {
geneSelection(X = as.data.frame(X, check.names = FALSE),
y = y, ... )
}) | /Code/surv/geneSelection.R | no_license | mywanuo/PLPPS-pipeline | R | false | false | 3,915 | r | # Title: Various gene selection methods.
#
# Author: M. Slawski, adapted from A-L Boulesteix
# Email: <Martin.Slawski@campus.lmu.de>
# Date of creation: 26.9.2007
#
# Brief description:
# Returns an object of class 'GeneSel'.
#
# Arguments:
# -X: matrix of variables (rows are observations,columns are variables)
# -y: survival response of class Surv
# -f: formula for survival model
# -LearningSets: output-object of method GenerateLearningsets
# (splits of dataset into training/test sets)
# -method: variable selection method to be used
# -trace: print some additional information
# -criterion: pvalue or coefficients, which shall be returned by
# the filter method?
#
# Value:
# GeneSel
#
###############################################################################
setGeneric("geneSelection", function(X, y, ...)
standardGeneric("geneSelection"))
setMethod("geneSelection", signature(X = "data.frame", y = "Surv"),
function(X, y, LearningSets, method = c("fastCox"),
criterion = c("coefficient"), trace = TRUE, ...) {
nrx <- nrow(X)
if (nrx != nrow(y))
stop("Number of rows of 'X' must agree with length of y \n")
tempcall <- as.character(match.call())
if (missing(LearningSets)) {
warning("Argument 'LearningSets' is missing; set to a row vector
with entries '1:nrow(X)' \n")
learnmatrix <- matrix(1:nrx, ncol = nrx)
} else {
learnmatrix <- LearningSets@learnmatrix
if (ncol(learnmatrix) > nrx)
stop("'LearningSets' do not match the input data \n")
}
niter <- nrow(learnmatrix)
p <- ncol(X)
outrankings <- outimportance <- matrix(nrow = niter, ncol = p)
rankings <- importance <- matrix(0, niter, p)
selfun <- switch(method, fastCox = fastCox,
stop("Invalid 'method' specified\n"))
for (i in 1:niter) {
if (trace)
cat("geneSelection: iteration", i, "\n")
outporg <- selfun(X, y, learnind = learnmatrix[i, ],
criterion = criterion, ...)
outp <- outporg@varsel
decr <- outporg@criterion != "pvalue"
outrankings[i, ] <- ord <- order(outp, decreasing = decr)
outimportance[i, ] <- outp[ord]
}
colnames(outrankings) <- paste("rank", 1:p, sep = "")
colnames(outimportance) <- paste("gene", ord, sep = "")
rownames(outrankings) <- rownames(outimportance) <-
paste("iter.", 1:niter, sep = "")
rankings <- importance <- list()
rankings[[1]] <- outrankings
importance[[1]] <- outimportance
new("GeneSel", rankings = rankings, importance = importance,
method = method, criterion = criterion)
})
setMethod("geneSelection", signature(X = "ExpressionSet", y = "character"),
function(X, y, ... ) {
Xdat <- as.data.frame(t(exprs(X)), check.names = FALSE)
geneSelection(X = Xdat, y = .fetchyFromEset(X,y), ...)
})
setMethod("geneSelection", signature(X = "ExpressionSet", y = "Surv"),
function(X, y,...) {
geneSelection(X = as.data.frame(t(exprs(X)), check.names = FALSE),
y = y, ... )})
setMethod("geneSelection", signature(X = "matrix", y = "Surv"), function(X, y,
...) {
geneSelection(X = as.data.frame(X, check.names = FALSE),
y = y, ... )
}) |
#' Extracts and computes information criteria and fits statistics for kfold
#' cross validated partial least squares beta regression models
#'
#' This function extracts and computes information criteria and fits statistics
#' for kfold cross validated partial least squares beta regression models for
#' both formula or classic specifications of the model.
#'
#' The Mclassed option should only set to \code{TRUE} if the response is
#' binary.
#'
#' @param pls_kfolds an object computed using \code{\link{PLS_beta_kfoldcv}}
#' @param MClassed should number of miss classed be computed
#' @return \item{list}{table of fit statistics for first group partition}
#' \item{list()}{\dots{}} \item{list}{table of fit statistics for last group
#' partition}
#' @author Frédéric Bertrand\cr
#' \email{frederic.bertrand@@utt.fr}\cr
#' \url{https://fbertran.github.io/homepage/}
#' @seealso \code{\link[plsRglm]{kfolds2coeff}},
#' \code{\link[plsRglm]{kfolds2Pressind}}, \code{\link[plsRglm]{kfolds2Press}},
#' \code{\link[plsRglm]{kfolds2Mclassedind}} and
#' \code{\link[plsRglm]{kfolds2Mclassed}} to extract and transforms results
#' from kfold cross validation.
#' @references Frédéric Bertrand, Nicolas Meyer,
#' Michèle Beau-Faller, Karim El Bayed, Izzie-Jacques Namer,
#' Myriam Maumy-Bertrand (2013). Régression Bêta
#' PLS. \emph{Journal de la Société Française de Statistique},
#' \bold{154}(3):143-159.
#' \url{http://publications-sfds.math.cnrs.fr/index.php/J-SFdS/article/view/215}
#' @keywords models regression
#' @examples
#'
#' \dontrun{
#' data("GasolineYield",package="betareg")
#' bbb <- PLS_beta_kfoldcv_formula(yield~.,data=GasolineYield,nt=3,modele="pls-beta")
#' kfolds2CVinfos_beta(bbb)
#' }
#'
kfolds2CVinfos_beta <- function(pls_kfolds,MClassed=FALSE) {
if(!(match("dataY",names(pls_kfolds$call), 0L)==0L)){
(mf <- pls_kfolds$call)
(m <- match(c("dataY", "dataX", "nt", "limQ2set", "modele", "family", "scaleX", "scaleY", "weights", "method", "sparse", "naive", "link", "link.phi", "type"), names(pls_kfolds$call), 0))
(mf <- mf[c(1, m)])
(mf$typeVC <- "none")
(mf$MClassed <- MClassed)
if (!is.null(mf$family)) {mf$modele <- "pls-glm-family"}
(mf[[1]] <- as.name("PLS_beta"))
(tempres <- eval(mf, parent.frame()))
nt <- as.numeric(as.character(pls_kfolds$call["nt"]))
computed_nt <- tempres$computed_nt
if (MClassed==TRUE) {
Mclassed_kfolds <- kfolds2Mclassed(pls_kfolds)
}
if (as.character(pls_kfolds$call["modele"]) == "pls") {
press_kfolds <- kfolds2Press(pls_kfolds)
Q2cum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2_2 <- 1-press_kfolds[[nnkk]][1:min(length(press_kfolds[[nnkk]]),computed_nt)]/tempres$RSS[1:min(length(press_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(press_kfolds[[nnkk]]),computed_nt)) {Q2cum_2[k] <- prod(press_kfolds[[nnkk]][1:k])/prod(tempres$RSS[1:k])}
Q2cum_2 <- 1 - Q2cum_2
if(length(Q2_2)<computed_nt) {Q2_2 <- c(Q2_2,rep(NA,computed_nt-length(Q2_2)))}
if(length(Q2cum_2)<computed_nt) {Q2_2cum_2 <- c(Q2cum_2,rep(NA,computed_nt-length(Q2cum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], c(NA,Q2cum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2_2[1:computed_nt]), c(NA,press_kfolds[[nnkk]][1:computed_nt]), tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt]), tempres$AIC.std[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "Q2cum_Y", "LimQ2_Y", "Q2_Y", "PRESS_Y", "RSS_Y", "R2_Y", "AIC.std"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2cum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2_2[1:computed_nt]), c(NA,press_kfolds[[nnkk]][1:computed_nt]), tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt]), tempres$AIC.std[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "MissClassed", "CV_MissClassed", "Q2cum_Y", "LimQ2_Y", "Q2_Y", "PRESS_Y", "RSS_Y", "R2_Y", "AIC.std"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
if (as.character(pls_kfolds$call["modele"]) %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
press_kfolds <- kfolds2Press(pls_kfolds)
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
if (as.character(pls_kfolds$call["modele"]) == "pls-glm-polr") {
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
if (as.character(pls_kfolds$call["modele"]) %in% c("pls-beta")) {
press_kfolds <- kfolds2Press(pls_kfolds)
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$pseudo.R2[1:computed_nt]), c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "pseudo_R2_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$pseudo.R2[1:computed_nt]), c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "pseudo_R2_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
return(CVinfos)
}
if(!(match("formula",names(pls_kfolds$call), 0L)==0L)){
(mf <- pls_kfolds$call)
(m <- match(c("formula", "data", "nt", "limQ2set", "modele", "family", "scaleX", "scaleY", "weights","subset","start","etastart","mustart","offset","control","method","contrasts","method", "sparse", "naive", "link", "link.phi", "type"), names(pls_kfolds$call), 0))
(mf <- mf[c(1, m)])
(mf$typeVC <- "none")
(mf$MClassed <- MClassed)
if (mf$modele %in% c("pls","pls-glm-logistic","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson","pls-glm-polr")){mf$family <- NULL}
(mf[[1]] <- as.name("PLS_beta_formula"))
(tempres <- eval(mf, parent.frame()))
nt <- as.numeric(as.character(pls_kfolds$call["nt"]))
computed_nt <- tempres$computed_nt
if (MClassed==TRUE) {
Mclassed_kfolds <- kfolds2Mclassed(pls_kfolds)
}
if (as.character(pls_kfolds$call["modele"]) == "pls") {
press_kfolds <- kfolds2Press(pls_kfolds)
Q2cum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2_2 <- 1-press_kfolds[[nnkk]][1:min(length(press_kfolds[[nnkk]]),computed_nt)]/tempres$RSS[1:min(length(press_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(press_kfolds[[nnkk]]),computed_nt)) {Q2cum_2[k] <- prod(press_kfolds[[nnkk]][1:k])/prod(tempres$RSS[1:k])}
Q2cum_2 <- 1 - Q2cum_2
if(length(Q2_2)<computed_nt) {Q2_2 <- c(Q2_2,rep(NA,computed_nt-length(Q2_2)))}
if(length(Q2cum_2)<computed_nt) {Q2_2cum_2 <- c(Q2cum_2,rep(NA,computed_nt-length(Q2cum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], c(NA,Q2cum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2_2[1:computed_nt]), c(NA,press_kfolds[[nnkk]][1:computed_nt]), tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt]), tempres$AIC.std[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "Q2cum_Y", "LimQ2_Y", "Q2_Y", "PRESS_Y", "RSS_Y", "R2_Y", "AIC.std"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2cum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2_2[1:computed_nt]), c(NA,press_kfolds[[nnkk]][1:computed_nt]), tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt]), tempres$AIC.std[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "MissClassed", "CV_MissClassed", "Q2cum_Y", "LimQ2_Y", "Q2_Y", "PRESS_Y", "RSS_Y", "R2_Y", "AIC.std"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
if (as.character(pls_kfolds$call["modele"]) %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
press_kfolds <- kfolds2Press(pls_kfolds)
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
if (as.character(pls_kfolds$call["modele"]) == "pls-glm-polr") {
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
if (as.character(pls_kfolds$call["modele"]) %in% c("pls-beta")) {
press_kfolds <- kfolds2Press(pls_kfolds)
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$pseudo.R2[1:computed_nt]), c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y","pseudo_R2_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$pseudo.R2[1:computed_nt]), c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "pseudo_R2_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
return(CVinfos)
}
}
| /R/kfolds2CVinfos_beta.R | no_license | fbertran/plsRbeta | R | false | false | 22,905 | r | #' Extracts and computes information criteria and fits statistics for kfold
#' cross validated partial least squares beta regression models
#'
#' This function extracts and computes information criteria and fits statistics
#' for kfold cross validated partial least squares beta regression models for
#' both formula or classic specifications of the model.
#'
#' The Mclassed option should only set to \code{TRUE} if the response is
#' binary.
#'
#' @param pls_kfolds an object computed using \code{\link{PLS_beta_kfoldcv}}
#' @param MClassed should number of miss classed be computed
#' @return \item{list}{table of fit statistics for first group partition}
#' \item{list()}{\dots{}} \item{list}{table of fit statistics for last group
#' partition}
#' @author Frédéric Bertrand\cr
#' \email{frederic.bertrand@@utt.fr}\cr
#' \url{https://fbertran.github.io/homepage/}
#' @seealso \code{\link[plsRglm]{kfolds2coeff}},
#' \code{\link[plsRglm]{kfolds2Pressind}}, \code{\link[plsRglm]{kfolds2Press}},
#' \code{\link[plsRglm]{kfolds2Mclassedind}} and
#' \code{\link[plsRglm]{kfolds2Mclassed}} to extract and transforms results
#' from kfold cross validation.
#' @references Frédéric Bertrand, Nicolas Meyer,
#' Michèle Beau-Faller, Karim El Bayed, Izzie-Jacques Namer,
#' Myriam Maumy-Bertrand (2013). Régression Bêta
#' PLS. \emph{Journal de la Société Française de Statistique},
#' \bold{154}(3):143-159.
#' \url{http://publications-sfds.math.cnrs.fr/index.php/J-SFdS/article/view/215}
#' @keywords models regression
#' @examples
#'
#' \dontrun{
#' data("GasolineYield",package="betareg")
#' bbb <- PLS_beta_kfoldcv_formula(yield~.,data=GasolineYield,nt=3,modele="pls-beta")
#' kfolds2CVinfos_beta(bbb)
#' }
#'
kfolds2CVinfos_beta <- function(pls_kfolds,MClassed=FALSE) {
## Two symmetric halves: one for models fitted through the dataY/dataX
## interface and one for models fitted through the formula interface.
## Each half refits the model on the complete data (typeVC="none") to get
## AIC/BIC/RSS/R2 etc., then merges those with the cross-validated PRESS/Q2
## (modele=="pls") or predicted Chi2/Q2Chisq (glm/polr/beta models) taken
## from pls_kfolds, producing one summary table per fold repetition.
## Note: if "modele" matches none of the handled values, CVinfos is never
## created and the final return() errors.
if(!(match("dataY",names(pls_kfolds$call), 0L)==0L)){
(mf <- pls_kfolds$call)
(m <- match(c("dataY", "dataX", "nt", "limQ2set", "modele", "family", "scaleX", "scaleY", "weights", "method", "sparse", "naive", "link", "link.phi", "type"), names(pls_kfolds$call), 0))
(mf <- mf[c(1, m)])
(mf$typeVC <- "none")
(mf$MClassed <- MClassed)
if (!is.null(mf$family)) {mf$modele <- "pls-glm-family"}
(mf[[1]] <- as.name("PLS_beta"))
## Refit on the full data in the caller's frame to collect reference criteria.
(tempres <- eval(mf, parent.frame()))
nt <- as.numeric(as.character(pls_kfolds$call["nt"]))
computed_nt <- tempres$computed_nt
if (MClassed==TRUE) {
Mclassed_kfolds <- kfolds2Mclassed(pls_kfolds)
}
## --- modele == "pls": gaussian criteria (PRESS, Q2, cumulated Q2) ---
if (as.character(pls_kfolds$call["modele"]) == "pls") {
press_kfolds <- kfolds2Press(pls_kfolds)
Q2cum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2_2 <- 1-press_kfolds[[nnkk]][1:min(length(press_kfolds[[nnkk]]),computed_nt)]/tempres$RSS[1:min(length(press_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(press_kfolds[[nnkk]]),computed_nt)) {Q2cum_2[k] <- prod(press_kfolds[[nnkk]][1:k])/prod(tempres$RSS[1:k])}
Q2cum_2 <- 1 - Q2cum_2
if(length(Q2_2)<computed_nt) {Q2_2 <- c(Q2_2,rep(NA,computed_nt-length(Q2_2)))}
## BUG FIX: the NA-padded vector used to be assigned to a misspelled name
## ("Q2_2cum_2") and silently discarded; pad Q2cum_2 itself, mirroring the
## Q2Chisqcum_2 handling in the other model branches.
if(length(Q2cum_2)<computed_nt) {Q2cum_2 <- c(Q2cum_2,rep(NA,computed_nt-length(Q2cum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], c(NA,Q2cum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2_2[1:computed_nt]), c(NA,press_kfolds[[nnkk]][1:computed_nt]), tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt]), tempres$AIC.std[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "Q2cum_Y", "LimQ2_Y", "Q2_Y", "PRESS_Y", "RSS_Y", "R2_Y", "AIC.std"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2cum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2_2[1:computed_nt]), c(NA,press_kfolds[[nnkk]][1:computed_nt]), tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt]), tempres$AIC.std[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "MissClassed", "CV_MissClassed", "Q2cum_Y", "LimQ2_Y", "Q2_Y", "PRESS_Y", "RSS_Y", "R2_Y", "AIC.std"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
## --- glm-family models: Pearson Chi2 based criteria ---
if (as.character(pls_kfolds$call["modele"]) %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
press_kfolds <- kfolds2Press(pls_kfolds)
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
## --- ordinal logistic (polr): Chi2 criteria only, no RSS/R2 ---
if (as.character(pls_kfolds$call["modele"]) == "pls-glm-polr") {
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
## --- beta regression: Chi2 criteria plus pseudo-R2 ---
if (as.character(pls_kfolds$call["modele"]) %in% c("pls-beta")) {
press_kfolds <- kfolds2Press(pls_kfolds)
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$pseudo.R2[1:computed_nt]), c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "pseudo_R2_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$pseudo.R2[1:computed_nt]), c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "pseudo_R2_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
return(CVinfos)
}
## --- formula interface: identical logic, model refitted via PLS_beta_formula ---
if(!(match("formula",names(pls_kfolds$call), 0L)==0L)){
(mf <- pls_kfolds$call)
(m <- match(c("formula", "data", "nt", "limQ2set", "modele", "family", "scaleX", "scaleY", "weights","subset","start","etastart","mustart","offset","control","method","contrasts","method", "sparse", "naive", "link", "link.phi", "type"), names(pls_kfolds$call), 0))
(mf <- mf[c(1, m)])
(mf$typeVC <- "none")
(mf$MClassed <- MClassed)
if (mf$modele %in% c("pls","pls-glm-logistic","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson","pls-glm-polr")){mf$family <- NULL}
(mf[[1]] <- as.name("PLS_beta_formula"))
(tempres <- eval(mf, parent.frame()))
nt <- as.numeric(as.character(pls_kfolds$call["nt"]))
computed_nt <- tempres$computed_nt
if (MClassed==TRUE) {
Mclassed_kfolds <- kfolds2Mclassed(pls_kfolds)
}
if (as.character(pls_kfolds$call["modele"]) == "pls") {
press_kfolds <- kfolds2Press(pls_kfolds)
Q2cum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2_2 <- 1-press_kfolds[[nnkk]][1:min(length(press_kfolds[[nnkk]]),computed_nt)]/tempres$RSS[1:min(length(press_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(press_kfolds[[nnkk]]),computed_nt)) {Q2cum_2[k] <- prod(press_kfolds[[nnkk]][1:k])/prod(tempres$RSS[1:k])}
Q2cum_2 <- 1 - Q2cum_2
if(length(Q2_2)<computed_nt) {Q2_2 <- c(Q2_2,rep(NA,computed_nt-length(Q2_2)))}
## BUG FIX: same misspelled-assignment ("Q2_2cum_2") as in the dataY branch.
if(length(Q2cum_2)<computed_nt) {Q2cum_2 <- c(Q2cum_2,rep(NA,computed_nt-length(Q2cum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], c(NA,Q2cum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2_2[1:computed_nt]), c(NA,press_kfolds[[nnkk]][1:computed_nt]), tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt]), tempres$AIC.std[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "Q2cum_Y", "LimQ2_Y", "Q2_Y", "PRESS_Y", "RSS_Y", "R2_Y", "AIC.std"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2cum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2_2[1:computed_nt]), c(NA,press_kfolds[[nnkk]][1:computed_nt]), tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt]), tempres$AIC.std[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "MissClassed", "CV_MissClassed", "Q2cum_Y", "LimQ2_Y", "Q2_Y", "PRESS_Y", "RSS_Y", "R2_Y", "AIC.std"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
if (as.character(pls_kfolds$call["modele"]) %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
press_kfolds <- kfolds2Press(pls_kfolds)
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
if (as.character(pls_kfolds$call["modele"]) == "pls-glm-polr") {
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)]))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
if (as.character(pls_kfolds$call["modele"]) %in% c("pls-beta")) {
press_kfolds <- kfolds2Press(pls_kfolds)
preChisq_kfolds <- kfolds2Chisq(pls_kfolds)
Q2Chisqcum_2=rep(NA, nt)
CVinfos <- vector("list",length(pls_kfolds[[1]]))
limQ2 <- rep(as.numeric(as.character(pls_kfolds$call["limQ2set"])),computed_nt)
for (nnkk in 1:length(pls_kfolds[[1]])) {
cat(paste("NK:", nnkk, "\n"))
Q2Chisq_2 <- 1-preChisq_kfolds[[nnkk]][1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]/tempres$ChisqPearson[1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)]
for (k in 1:min(length(preChisq_kfolds[[nnkk]]),computed_nt)) {Q2Chisqcum_2[k] <- prod(preChisq_kfolds[[nnkk]][1:k])/prod(tempres$ChisqPearson[1:k])}
Q2Chisqcum_2 <- 1 - Q2Chisqcum_2
if(length(Q2Chisq_2)<computed_nt) {Q2Chisq_2 <- c(Q2Chisq_2,rep(NA,computed_nt-length(Q2Chisq_2)))}
if(length(Q2Chisqcum_2)<computed_nt) {Q2Chisqcum_2 <- c(Q2Chisqcum_2,rep(NA,computed_nt-length(Q2Chisqcum_2)))}
if(length(press_kfolds[[nnkk]])<computed_nt) {press_kfolds[[nnkk]] <- c(press_kfolds[[nnkk]],rep(NA,computed_nt-length(press_kfolds[[nnkk]])))}
if (MClassed==FALSE) {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$pseudo.R2[1:computed_nt]), c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y","pseudo_R2_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
} else {
CVinfos[[nnkk]] <- t(rbind(tempres$AIC[1:(computed_nt+1)], tempres$BIC[1:(computed_nt+1)], tempres$MissClassed[1:(computed_nt+1)], c(NA,Mclassed_kfolds[[nnkk]][1:computed_nt]), c(NA,Q2Chisqcum_2[1:computed_nt]), c(NA,limQ2[1:computed_nt]), c(NA,Q2Chisq_2[1:computed_nt]), c(NA,preChisq_kfolds[[nnkk]][1:computed_nt]), tempres$ChisqPearson[1:(computed_nt+1)], tempres$RSS[1:(computed_nt+1)], c(NA,tempres$pseudo.R2[1:computed_nt]), c(NA,tempres$R2[1:computed_nt])))
dimnames(CVinfos[[nnkk]]) <- list(paste("Nb_Comp_",0:computed_nt,sep=""), c("AIC", "BIC", "MissClassed", "CV_MissClassed", "Q2Chisqcum_Y", "limQ2", "Q2Chisq_Y", "PREChi2_Pearson_Y", "Chi2_Pearson_Y", "RSS_Y", "pseudo_R2_Y", "R2_Y"))
CVinfos[[nnkk]] <- cbind(CVinfos[[nnkk]],tempres$ic.dof)
}
}
}
return(CVinfos)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boxplot_abundance.R
\name{boxplot_abundance}
\alias{boxplot_abundance}
\title{Abundance Boxplot}
\usage{
boxplot_abundance(d, x, y, line = NULL, violin = FALSE, na.rm = FALSE,
show.points = TRUE)
}
\arguments{
\item{d}{\code{\link{phyloseq-class}} object}
\item{x}{Metadata variable to map to the horizontal axis.}
\item{y}{OTU to map on the vertical axis}
\item{line}{The variable to map on lines}
\item{violin}{Use violin version of the boxplot}
\item{na.rm}{Remove NAs}
\item{show.points}{Include data points in the figure}
}
\value{
A \code{\link{ggplot}} plot object
}
\description{
Plot phyloseq abundances.
}
\details{
The directionality of change in paired boxplot is indicated by
the colors of the connecting lines.
}
\examples{
data(peerj32)
p <- boxplot_abundance(peerj32$phyloseq, x='time', y='Akkermansia',
line='subject')
}
\keyword{utilities}
| /man/boxplot_abundance.Rd | no_license | jykzel/microbiome | R | false | true | 947 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boxplot_abundance.R
\name{boxplot_abundance}
\alias{boxplot_abundance}
\title{Abundance Boxplot}
\usage{
boxplot_abundance(d, x, y, line = NULL, violin = FALSE, na.rm = FALSE,
show.points = TRUE)
}
\arguments{
\item{d}{\code{\link{phyloseq-class}} object}
\item{x}{Metadata variable to map to the horizontal axis.}
\item{y}{OTU to map on the vertical axis}
\item{line}{The variable to map on lines}
\item{violin}{Use violin version of the boxplot}
\item{na.rm}{Remove NAs}
\item{show.points}{Include data points in the figure}
}
\value{
A \code{\link{ggplot}} plot object
}
\description{
Plot phyloseq abundances.
}
\details{
The directionality of change in paired boxplot is indicated by
the colors of the connecting lines.
}
\examples{
data(peerj32)
p <- boxplot_abundance(peerj32$phyloseq, x='time', y='Akkermansia',
line='subject')
}
\keyword{utilities}
|
library(parallel)
bsub = new.env(hash=T)
unlink(".RData")
##Factory for a "cluster accumulator": an environment of closures that
##collects deferred function calls, serializes each call's arguments to
##disk, and submits one "R CMD BATCH clusterScript" command per call
##(grouped batchSize commands per bsub job) to an LSF cluster.
##runAll() returns the paths of the per-call output .RData files; use
##bsub$getOutputs to load them.
##NOTE(review): fp, prop, tic and toc are globals defined elsewhere in the
##project (fp is presumably a file.path alias) -- confirm before reuse.
bsub$get.bsub.accumulator <- function(funcFile, bsubCommand,
batchSize=1,
baseOut = fp(prop$tmpdir),
getInFile=bsub$getDefaultInputFile,
getOutFile=bsub$getDefaultOutputFile,
clusterScript = "./bsubScript.R")
{
##seconds between queue polls while waiting for submitted jobs to finish
sleepCheck=30
accumulator = new.env(hash=T)
##init() must be called before addCall(); runAll() flips this back to FALSE
accumulator$ready = F
##Reset the accumulator: create a fresh scratch directory and serialize the
##worker function plus any shared "otherGlobals" arguments into it so the
##cluster-side script can load them.
##NOTE(review): outdir, funcFileForObj and otherGlobalsFile are assigned
##with <<- but have no binding in the factory frame, so they escape to the
##global environment; addCall()/runAll() read them back from there --
##confirm this cross-talk is intended before refactoring.
accumulator$init <- function(func, otherGlobals=list())
{
accumulator$quantCommands <<- c()
## try(unlink(outdir, recursive = T), showWarnings = F)
outdir <<- fp(baseOut, paste0(tempdir(), bsub$get.start()))
dir.create(outdir, showWarnings = F, recursive=T)
accumulator$ready <<- T
funcFileForObj <<- fp(outdir, "func.RData")
unlink(funcFileForObj)
save(func, file = funcFileForObj)
otherGlobalsFile <<- fp(outdir, "otherGlobals.RData")
unlink(otherGlobalsFile)
save(otherGlobals, file = otherGlobalsFile)
}
##Queue one deferred call: write its argument list (plus the bookkeeping
##paths the cluster script needs) to a numbered input .RData file and
##record the matching R CMD BATCH command for later submission.
accumulator$addCall <- function(funcArgs)
{
if(!accumulator$ready)
{
stop("init should have beein called after results were collected, or at the beginning before any calls")
}
## srcfile = attr(attr(func, "srcref"), "srcfile")$filename
## print(srcfile)
commandRoot = paste0("R CMD BATCH --no-save --no-restore '--args ")
##next slot: one input/output file pair per queued call
i = length(accumulator$quantCommands)+1
inFile = getInFile(outdir, i)
outFile = getOutFile(outdir, i)
tic()
## funcArgs = eval(parse(text="funcArgs"))
##outFile and the serialized-function paths travel inside the .RData file
save(file = inFile, list = c("outFile", "funcArgs", "funcFile", "prop", "funcFileForObj", "otherGlobalsFile"))
toc()
##the input file path is handed to the cluster script via R's -I args slot
command = paste0(commandRoot, " -I",inFile, "' ",clusterScript)
print(command)
accumulator$quantCommands <<- c(accumulator$quantCommands, command)
toc()
}
##Submit every queued command, resubmitting failed ones until the failure
##count stops shrinking, then return the expected per-call output paths.
##The loadFiles argument is currently unused.
accumulator$runAll <- function(loadFiles=F)
{
logdir = fp(outdir, "outfiles")
prevFailingCount = Inf
failing = bsub$submitCommands(bsubCommand, accumulator$quantCommands, sleepCheck, logdir, batchSize=batchSize)$failingcommands
while(length(failing)>0 & length(failing)<prevFailingCount)
{
print(paste0("failures:", length(failing)))
prevFailingCount = length(failing)
failing = bsub$submitCommands(bsubCommand, failing, sleepCheck, logdir, batchSize=batchSize)$failingcommands
}
outfiles = getOutFile(outdir, 1:length(accumulator$quantCommands))
accumulator$ready <<- F
return(outfiles)
}
##flag consumed by callers: results are file paths, not in-memory objects
accumulator$outputs.files = T
return(accumulator)
}
##Given a vector of output file names from runAll(), loads each stored
##result object.  Returns a list the same length as outfiles; entries whose
##file could not be read are NA.  Safe for an empty input vector.
bsub$getOutputs <- function(outfiles)
{
    outputs <- vector("list", length(outfiles))
    ## seq_along is empty-safe, unlike the previous 1:length(outfiles),
    ## which iterated over c(1, 0) when outfiles was empty.
    for (i in seq_along(outfiles))
    {
        outFile <- outfiles[i]
        output <- try(bsub$getOutput(outFile))
        ## inherits() is the robust test for a try() failure; comparing
        ## class(x) == "..." breaks when the class vector has length > 1.
        if (inherits(output, "try-error"))
        {
            print(paste0("failing outfile: ", outFile))
            outputs[[i]] <- NA
        } else {
            outputs[[i]] <- output
        }
    }
    return(outputs)
}
##Load one cluster-job result file and return the stored result object.
bsub$getOutput <- function(outfile)
{
    ## "clusterOut" is the variable name under which bsubScript saves its
    ## result; load into a private environment instead of the function frame.
    resultEnv <- new.env()
    load(outfile, envir = resultEnv)
    get("clusterOut", envir = resultEnv)
}
##Accumulator that runs the collected calls locally with mclapply instead
##of submitting them to the cluster.  Same interface as the bsub
##accumulator: init(func, otherGlobals), addCall(funcArgs), runAll().
##runAll() returns the results directly (outputs.files = FALSE).
bsub$get.mc.accumulator <- function(batchSize=100*mc.cores, mc.cores)
{
    accumulator <- new.env(hash = TRUE)
    accumulator$ready <- FALSE
    ## Run the stored function on argument set number `ind`, appending the
    ## shared "otherGlobals" arguments to the per-call ones.
    accumulator$.funcWrapper <- function(ind)
    {
        argz <- accumulator$funcArgs[[ind]]
        argz <- c(argz, accumulator$otherGlobals)
        do.call(accumulator$func, argz)
    }
    ## Reset the accumulator for a new round of calls.  `accumulator` is an
    ## environment, so plain $<- mutates it in place for all closures.
    accumulator$init <- function(func, otherGlobals = list())
    {
        accumulator$func <- func
        accumulator$otherGlobals <- otherGlobals
        accumulator$funcArgs <- list()
        accumulator$ready <- TRUE
    }
    ## Queue one deferred call by recording its argument list.
    accumulator$addCall <- function(funcArgs)
    {
        if (!accumulator$ready)
        {
            ## fixed typo in the original message ("beein")
            stop("init should have been called after results were collected, or at the beginning before any calls")
        }
        accumulator$funcArgs <- util$appendToList(accumulator$funcArgs, funcArgs)
    }
    ## Execute every queued call and return the list of results.
    accumulator$runAll <- function()
    {
        ## seq_along is empty-safe (1:length gave c(1, 0) for zero calls);
        ## this also makes the old dead browser() guard unnecessary.
        inds <- seq_along(accumulator$funcArgs)
        out <- bsub$lapply.wrapper(inds, FUN = accumulator$.funcWrapper,
                                   batchSize = batchSize, mc.cores = mc.cores)
        print("ranall")
        accumulator$ready <- FALSE
        return(out)
    }
    ## flag consumed by callers: results are in-memory objects, not files
    accumulator$outputs.files <- FALSE
    return(accumulator)
}
##TODO move to parallel env
##Portable lapply/mclapply front end.  FUN must take an index (or index
##vector) as its first argument and fetch the relevant slice of whatever
##the additional ... arguments are.  With mc.cores == 1 this is a plain
##lapply; otherwise the indexes are split into batches of batchSize and
##each batch is run through mclapply -- mclapply does not garbage-collect
##well over very long inputs, so batching bounds its memory use.
bsub$lapply.wrapper <- function(inds, FUN, batchSize = 10*mc.cores, mc.cores, ...)
{
    if (mc.cores == 1)
    {
        return(lapply(X = inds, FUN = FUN, ...))
    }
    ## util helper returns a list of index vectors, each of length <= batchSize
    indexGroups <- util$getIndexGroupsForInds(inds, batchSize)
    ## preallocate; seq_along is safe if there happen to be zero batches
    results <- vector("list", length(indexGroups))
    for (i in seq_along(indexGroups))
    {
        indsGroup <- indexGroups[[i]]
        results[[i]] <- mclapply(X = indsGroup, FUN = FUN, mc.cores = mc.cores, ...)
    }
    ## Flatten the per-batch lists so callers never see the batching.
    do.call(c, results)
}
##Submit a list of commands, batchSize commands per bsub job, each prefixed
##by bsubCommand.  If sleepCheck is non-null, blocks until all submitted
##jobs have left the LSF queue, polling every sleepCheck seconds (sleeps
##between polls, so it is cheap enough to run on a head node).  When
##bsuboutdir is given, each job's log is scanned afterwards for LSF's
##"Successfully completed." marker and failures are reported back.
##Returns list(failingcommands=..., failingjobs=...) so callers can resubmit.
bsub$submitCommands <- function(bsubCommand, quantCommands, sleepCheck=NULL, bsuboutdir = NULL, batchSize = 1)
{
    len <- length(quantCommands)
    indexGroups <- util$getIndexGroupsForLen(len, batchSize)
    start.time <- bsub$get.start()
    ## map from job name to the command(s) run inside that job
    submitted.jobs <- list()
    ## seq_along is empty-safe, unlike 1:length(indexGroups)
    for (i in seq_along(indexGroups))
    {
        indsGroup <- indexGroups[[i]]
        quantCommandSet <- quantCommands[unlist(indsGroup)]
        jobname <- paste0(start.time, ".", i)
        submitted.jobs[[jobname]] <- quantCommandSet
        bsub$run.single.bsub(bsubCommand, jobname, quantCommandSet, bsuboutdir)
    }
    if (!is.null(sleepCheck))
    {
        bsub$block.on.bsub(names(submitted.jobs), sleepCheck)
    }
    failingcommands <- c()
    failingjobs <- c()
    if (!is.null(bsuboutdir))
    {
        for (jobname in names(submitted.jobs))
        {
            outfile <- bsub$getOutLogFile(bsuboutdir, jobname)
            ## a missing log means the job never ran (or died very early)
            if (!file.exists(outfile))
            {
                print("job failed!")
                print(submitted.jobs[[jobname]])
                failingcommands <- c(failingcommands, submitted.jobs[[jobname]])
                failingjobs <- c(failingjobs, jobname)
                next
            }
            ## grep -l prints the file name iff the success marker is present
            grepcom <- paste("grep -l ", "'Successfully completed.'", outfile)
            out <- try(system(grepcom, intern = TRUE))
            if (inherits(out, "try-error"))
            {
                print(out)
                browser()  # NOTE(review): debug stop left from the original
            } else {
                ## no output from grep -l => marker absent => job failed.
                ## (Was length(out != outfile)==0, equivalent but confusing.)
                if (length(out) == 0)
                {
                    failingcommands <- c(failingcommands, submitted.jobs[[jobname]])
                    failingjobs <- c(failingjobs, jobname)
                }
            }
        }
    }
    return(list(failingcommands = failingcommands, failingjobs = failingjobs))
}
##Timestamp string used to build unique job/run names; the spaces in the
##default date-time format are replaced with underscores so the value is
##safe to use in file names and shell commands.
bsub$get.start <- function()
{
    stamp <- format(Sys.time())
    gsub(" ", "_", stamp, fixed = TRUE)
}
##Path of the LSF stdout log file for the given job name.
bsub$getOutLogFile <- function(outputlocaldir,jobname)
{
    logname <- paste0(jobname, ".bsub.out")
    fp(outputlocaldir, logname)
}
##Path of the LSF stderr log file for the given job name.
bsub$getErrorLogFile <- function(outputlocaldir,jobname)
{
    logname <- paste0(jobname, ".bsub.err")
    fp(outputlocaldir, logname)
}
##Submit one bsub job named `jobname` that runs the given commands
##sequentially inside a single shell invocation.
bsub$run.single.bsub <- function(bsubCommand, jobname, quantcommandset, outputlocaldir=NULL)
{
    bsubCommand <- paste0(bsubCommand, " -J ", jobname)
    if (!is.null(outputlocaldir))
    {
        dir.create(outputlocaldir, showWarnings = FALSE, recursive = TRUE)
        ## -oo/-eo overwrite any logs left over from a previous run
        bsubCommand <- paste(bsubCommand, "-oo ", bsub$getOutLogFile(outputlocaldir, jobname))
        bsubCommand <- paste(bsubCommand, "-eo ", bsub$getErrorLogFile(outputlocaldir, jobname))
    }
    ## the command set is ";"-joined and quoted so LSF runs it as one job
    fullcommand <- paste(bsubCommand,
                         " \" ",
                         paste(quantcommandset, collapse="; "),
                         " \" ")
    cat(fullcommand)
    system(fullcommand)
}
##Block until none of the submitted job names appear in the bjobs queue,
##polling every sleepCheck seconds.  Sleeps between polls, so it is cheap
##enough to run on a cluster head node.
bsub$block.on.bsub <- function(submitted.jobs, sleepCheck)
{
    while (TRUE)  # TRUE, not the reassignable shortcut T
    {
        Sys.sleep(sleepCheck)
        a <- try(system("bjobs -w", intern = TRUE))
        ## bjobs prints nothing once the queue is empty
        if (length(a) == 0)
        {
            break
        }
        ## Locate the JOB_NAME column from the header row, then collect the
        ## names of all still-queued jobs.
        tokens <- strsplit(a[1], "\\s+")[[1]]
        colind <- which(tokens == "JOB_NAME")
        jobids <- strsplit(a[2:length(a)], "\\s+")
        jobids <- unlist(lapply(jobids, "[", colind))
        if (length(intersect(jobids, submitted.jobs)) == 0)
        {
            break
        }
    }
}
##Input files live in a separate directory from output files: inputs are
##all written serially up front, while outputs are written concurrently by
##many nodes, and mixing reads and writes in one directory has caused
##problems.
bsub$getDefaultInputFile <- function(outdir, i)
{
    indir <- fp(outdir, "in")
    dir.create(indir, showWarnings = FALSE, recursive = TRUE)
    fp(indir, paste0("in_", i))
}
##Default location of the i-th per-call output file (see the note on
##bsub$getDefaultInputFile for why inputs and outputs are kept apart).
bsub$getDefaultOutputFile <- function(outdir, i)
{
    outsubdir <- fp(outdir, "out")
    dir.create(outsubdir, showWarnings = FALSE, recursive = TRUE)
    fp(outsubdir, paste0("out_", i))
}
##Build the default bsub submission command for the killdevil cluster:
##all slots on a single host (-R 'span[hosts=1]'), with the given process
##count, memory limit (GB) and queue.
bsub$get.default.killdevil.bsub <- function(numProcessPerNode, memoryLimit.GB, queue)
{
    ## The original assigned the string to an unused local, which made the
    ## return value invisible; return the command string directly.
    paste0("bsub -R 'span[hosts=1]' -n ", numProcessPerNode, " -M ", memoryLimit.GB, " -q ", queue)
}
| /src/bsub.R | permissive | danoreper/ovx2016 | R | false | false | 10,988 | r | library(parallel)
bsub = new.env(hash=T)
unlink(".RData")
##Factory for a "cluster accumulator": an environment of closures that
##collects deferred function calls, serializes each call's arguments to
##disk, and submits one "R CMD BATCH clusterScript" command per call
##(grouped batchSize commands per bsub job) to an LSF cluster.
##runAll() returns the paths of the per-call output .RData files; use
##bsub$getOutputs to load them.
##NOTE(review): fp, prop, tic and toc are globals defined elsewhere in the
##project (fp is presumably a file.path alias) -- confirm before reuse.
bsub$get.bsub.accumulator <- function(funcFile, bsubCommand,
batchSize=1,
baseOut = fp(prop$tmpdir),
getInFile=bsub$getDefaultInputFile,
getOutFile=bsub$getDefaultOutputFile,
clusterScript = "./bsubScript.R")
{
##seconds between queue polls while waiting for submitted jobs to finish
sleepCheck=30
accumulator = new.env(hash=T)
##init() must be called before addCall(); runAll() flips this back to FALSE
accumulator$ready = F
##Reset the accumulator: create a fresh scratch directory and serialize the
##worker function plus any shared "otherGlobals" arguments into it so the
##cluster-side script can load them.
##NOTE(review): outdir, funcFileForObj and otherGlobalsFile are assigned
##with <<- but have no binding in the factory frame, so they escape to the
##global environment; addCall()/runAll() read them back from there --
##confirm this cross-talk is intended before refactoring.
accumulator$init <- function(func, otherGlobals=list())
{
accumulator$quantCommands <<- c()
## try(unlink(outdir, recursive = T), showWarnings = F)
outdir <<- fp(baseOut, paste0(tempdir(), bsub$get.start()))
dir.create(outdir, showWarnings = F, recursive=T)
accumulator$ready <<- T
funcFileForObj <<- fp(outdir, "func.RData")
unlink(funcFileForObj)
save(func, file = funcFileForObj)
otherGlobalsFile <<- fp(outdir, "otherGlobals.RData")
unlink(otherGlobalsFile)
save(otherGlobals, file = otherGlobalsFile)
}
##Queue one deferred call: write its argument list (plus the bookkeeping
##paths the cluster script needs) to a numbered input .RData file and
##record the matching R CMD BATCH command for later submission.
accumulator$addCall <- function(funcArgs)
{
if(!accumulator$ready)
{
stop("init should have beein called after results were collected, or at the beginning before any calls")
}
## srcfile = attr(attr(func, "srcref"), "srcfile")$filename
## print(srcfile)
commandRoot = paste0("R CMD BATCH --no-save --no-restore '--args ")
##next slot: one input/output file pair per queued call
i = length(accumulator$quantCommands)+1
inFile = getInFile(outdir, i)
outFile = getOutFile(outdir, i)
tic()
## funcArgs = eval(parse(text="funcArgs"))
##outFile and the serialized-function paths travel inside the .RData file
save(file = inFile, list = c("outFile", "funcArgs", "funcFile", "prop", "funcFileForObj", "otherGlobalsFile"))
toc()
##the input file path is handed to the cluster script via R's -I args slot
command = paste0(commandRoot, " -I",inFile, "' ",clusterScript)
print(command)
accumulator$quantCommands <<- c(accumulator$quantCommands, command)
toc()
}
##Submit every queued command, resubmitting failed ones until the failure
##count stops shrinking, then return the expected per-call output paths.
##The loadFiles argument is currently unused.
accumulator$runAll <- function(loadFiles=F)
{
logdir = fp(outdir, "outfiles")
prevFailingCount = Inf
failing = bsub$submitCommands(bsubCommand, accumulator$quantCommands, sleepCheck, logdir, batchSize=batchSize)$failingcommands
while(length(failing)>0 & length(failing)<prevFailingCount)
{
print(paste0("failures:", length(failing)))
prevFailingCount = length(failing)
failing = bsub$submitCommands(bsubCommand, failing, sleepCheck, logdir, batchSize=batchSize)$failingcommands
}
outfiles = getOutFile(outdir, 1:length(accumulator$quantCommands))
accumulator$ready <<- F
return(outfiles)
}
##flag consumed by callers: results are file paths, not in-memory objects
accumulator$outputs.files = T
return(accumulator)
}
##Given a vector of output file names from runAll(), loads each stored
##result object.  Returns a list the same length as outfiles; entries whose
##file could not be read are NA.  Safe for an empty input vector.
bsub$getOutputs <- function(outfiles)
{
    outputs <- vector("list", length(outfiles))
    ## seq_along is empty-safe, unlike the previous 1:length(outfiles),
    ## which iterated over c(1, 0) when outfiles was empty.
    for (i in seq_along(outfiles))
    {
        outFile <- outfiles[i]
        output <- try(bsub$getOutput(outFile))
        ## inherits() is the robust test for a try() failure; comparing
        ## class(x) == "..." breaks when the class vector has length > 1.
        if (inherits(output, "try-error"))
        {
            print(paste0("failing outfile: ", outFile))
            outputs[[i]] <- NA
        } else {
            outputs[[i]] <- output
        }
    }
    return(outputs)
}
## Read one worker output file and return the result object it contains.
bsub$getOutput <- function(outfile)
{
## Load into a private environment rather than the function frame, so the
## .RData contents cannot accidentally mask local variables or fail silently
## if the expected name is absent.
e = new.env(parent = emptyenv())
load(outfile, envir = e)
## "clusterOut" is the name of the variable as defined in bsubScript.
return(e$clusterOut)
}
## Build an in-process (multicore) accumulator with the same interface as the
## cluster accumulator: init() registers the function and shared globals,
## addCall() queues one argument set, runAll() evaluates every queued call
## with mclapply via bsub$lapply.wrapper and returns the list of results.
## Because `accumulator` is an environment, $<- assignments mutate it in
## place; the mixture of <<- and = below is therefore equivalent in effect.
bsub$get.mc.accumulator <- function(batchSize=100*mc.cores, mc.cores)
{
accumulator = new.env(hash=T)
accumulator$ready = F
## Evaluate queued call number `ind`: merge its stored arguments with the
## shared "otherGlobals" arguments and invoke the registered function.
accumulator$.funcWrapper <- function(ind)
{
## print("calling func wrapper")
## out = list()
## for(i in inds)
## {
argz = accumulator$funcArgs[[ind]]
argz = c(argz, accumulator$otherGlobals)
out = do.call(accumulator$func, argz)
## }
return(out)
}
## Register the function to run and arguments shared by every call; resets
## any previously queued calls.
accumulator$init <- function(func, otherGlobals = list())
{
accumulator$func<<-func
accumulator$otherGlobals = otherGlobals
accumulator$funcArgs <<- list()
accumulator$ready = T
}
## Queue one argument set for later evaluation by runAll().
accumulator$addCall <- function(funcArgs)
{
if(!accumulator$ready)
{
stop("init should have beein called after results were collected, or at the beginning before any calls")
}
## i = length(accumulator$funcArgs)+1
accumulator$funcArgs <<- util$appendToList(accumulator$funcArgs, funcArgs)
}
## Evaluate every queued call (possibly in parallel) and return the results.
accumulator$runAll <- function()
{
## NOTE(review): when no calls were queued, 1:length(...) yields c(1, 0);
## the guard below looks like a leftover debugging hook -- its condition
## (length 1 AND class "logical") cannot match an integer sequence.
inds = 1:length(accumulator$funcArgs)
if(length(inds)==1 & class(inds)=="logical")
{
browser()
}
out = bsub$lapply.wrapper(inds, FUN = accumulator$.funcWrapper, batchSize = batchSize, mc.cores = mc.cores)
print("ranall")
accumulator$ready = F
return(out)
}
## Distinguishes this accumulator (returns objects) from the cluster one
## (returns output file paths).
accumulator$outputs.files = F
return(accumulator)
}
##TODO move to parallel env
##FUN must take as its first argument a vector of indexes
##and grab the relevant portion of whatever the additional arguments are
## Apply FUN to every element of `inds`, sequentially when mc.cores == 1 and
## otherwise in parallel via mclapply, processed in batches of `batchSize`.
## Batching works around mclapply's poor garbage collection on long inputs.
## Returns one flat list of results in input order.
bsub$lapply.wrapper <- function(inds, FUN, batchSize = 10*mc.cores, mc.cores, ...)
{
## Single-core fallback: nothing to parallelise.
if (mc.cores == 1) {
return(lapply(X = inds, FUN = FUN, ...))
}
## util$getIndexGroupsForInds splits `inds` into chunks of length batchSize.
indexGroups = util$getIndexGroupsForInds(inds, batchSize)
## One mclapply run per batch; each produces a list of per-index results.
batchResults = lapply(indexGroups, function(group) {
mclapply(X = group, FUN = FUN, mc.cores = mc.cores, ...)
})
## Flatten the list-of-batches so callers never see the batching detail.
do.call(c, batchResults)
}
##submit a list of commands prefixed by the bsubCommand, and if sleepcheck is not null,
##blocks untill all commands complete, checking at an interval of sleepCheck seconds to
##see if this is so. Sleeps rest of the time, so probably ok to run on the cluster
## Submit `quantCommands` to the cluster in jobs of `batchSize` commands,
## each prefixed by `bsubCommand`. When sleepCheck is non-NULL, blocks until
## every submitted job has left the queue (polling every sleepCheck seconds).
## When bsuboutdir is non-NULL, each job's LSF log is inspected afterwards:
## a missing log or a log without "Successfully completed." marks that job's
## commands as failing. Returns list(failingcommands=..., failingjobs=...).
bsub$submitCommands <- function(bsubCommand, quantCommands, sleepCheck=NULL, bsuboutdir = NULL, batchSize = 1)
{
len = length(quantCommands)
indexGroups = util$getIndexGroupsForLen(len, batchSize)
## Timestamp shared by every job name from this submission round.
start.time = bsub$get.start()
## a map from jobname, to the command or commands that are called by that job
submitted.jobs = list()
## seq_along() rather than 1:length() -- safe for an empty command list.
for(i in seq_along(indexGroups))
{
indsGroup = indexGroups[[i]]
quantCommandSet = quantCommands[unlist(indsGroup)]
jobname = paste0(start.time, ".", i)
submitted.jobs[[jobname]] = quantCommandSet
bsub$run.single.bsub(bsubCommand, jobname, quantCommandSet,bsuboutdir)
}
if(!is.null(sleepCheck))
{
bsub$block.on.bsub(names(submitted.jobs), sleepCheck)
}
failingcommands = c()
failingjobs = c()
if(!is.null(bsuboutdir))
{
for (jobname in names(submitted.jobs))
{
outfile = bsub$getOutLogFile(bsuboutdir,jobname)
## No log file at all => the job never ran (or LSF lost it).
if(!file.exists(outfile))
{
print("job failed!")
print(submitted.jobs[[jobname]])
failingcommands = c(failingcommands, submitted.jobs[[jobname]])
failingjobs = c(failingjobs, jobname)
next
}
## grep -l prints the file name iff the success marker is present.
grepcom = paste("grep -l ", "'Successfully completed.'", outfile)
out = try(system(grepcom, intern=T))
## inherits() is the robust way to detect a try() failure.
if(inherits(out, "try-error"))
{
print(out)
## Debugging hook kept from the original; only triggers when the
## grep invocation itself errors out.
browser()
} else {
## Fix: the original tested length(out != outfile) == 0, which only
## worked by accident (the comparison recycles to length(out)).
## An empty result simply means the marker was not found.
if(length(out) == 0)
{
failingcommands = c(failingcommands, submitted.jobs[[jobname]])
failingjobs = c(failingjobs, jobname)
}
}
}
}
return(list(failingcommands = failingcommands, failingjobs = failingjobs))
}
## Timestamp used to build unique job names: the current time with every
## space replaced by an underscore (e.g. "2020-01-01_12:00:00").
bsub$get.start <- function()
{
gsub(" ", "_", format(Sys.time()), fixed = TRUE)
}
## Path of the LSF stdout log ("<jobname>.bsub.out") for a given job.
bsub$getOutLogFile <- function(outputlocaldir,jobname)
{
logname = paste0(jobname, ".bsub.out")
fp(outputlocaldir, logname)
}
## Path of the LSF stderr log ("<jobname>.bsub.err") for a given job.
bsub$getErrorLogFile <- function(outputlocaldir,jobname)
{
logname = paste0(jobname, ".bsub.err")
fp(outputlocaldir, logname)
}
## Submit one LSF job running `quantcommandset` (joined with ";") under the
## given job name. When outputlocaldir is supplied, stdout/stderr logs are
## redirected there with -oo/-eo so submitCommands() can check success later.
## The exact spacing of the assembled shell string is load-bearing; do not
## reformat the paste() calls casually.
bsub$run.single.bsub <- function(bsubCommand, jobname, quantcommandset, outputlocaldir=NULL)
{
##
## -J names the job so block.on.bsub() can find it in bjobs output.
bsubCommand = paste0(bsubCommand, " -J ", jobname)
if(!is.null(outputlocaldir))
{
dir.create(outputlocaldir, showWarnings=F, recursive =T)
## -oo / -eo overwrite (rather than append to) any previous log.
bsubCommand = paste(bsubCommand, "-oo ", bsub$getOutLogFile(outputlocaldir, jobname))
bsubCommand = paste(bsubCommand, "-eo ", bsub$getErrorLogFile(outputlocaldir, jobname))
}
## The whole command set is passed to bsub as one double-quoted string.
fullcommand = paste(bsubCommand,
" \" ",
paste(quantcommandset, collapse="; "),
" \" ")
cat(fullcommand)
system(fullcommand)
}
## Block until none of `submitted.jobs` (a vector of job names) appears in
## the LSF queue any more, polling "bjobs -w" every sleepCheck seconds.
## Sleeps between polls, so it is cheap enough to run on the cluster itself.
bsub$block.on.bsub <- function(submitted.jobs, sleepCheck)
{
while(T)
{
Sys.sleep(sleepCheck)
a = try(system("bjobs -w", intern=T))
##
## Empty output => the queue is empty, so our jobs are certainly done.
if(length(a)==0)
{
break;
}
## First line of bjobs output is the header; locate the JOB_NAME column
## by position rather than assuming a fixed layout.
tokens = strsplit(a[1], "\\s+")[[1]]
colind = which(tokens == "JOB_NAME")
jobids = strsplit(a[2:length(a)], "\\s+")
jobids = unlist(lapply(jobids, "[", colind))
## None of our job names still listed => all finished (or vanished).
if(length(intersect(jobids, submitted.jobs))==0)
{
break;
}
}
}
##We want to make sure the input files are in a separate directory from where the output files are being generated; there may be some problems with multiple nodes reading from and writing to the same directory. The input files are generated serially before any output files are generated
## Path for the i-th worker input file. Inputs live in their own "in"
## subdirectory (created on demand) so that readers and writers of a run
## never share a directory.
bsub$getDefaultInputFile <- function(outdir, i)
{
indir = fp(outdir, "in")
dir.create(indir, showWarnings = F, recursive = T)
fp(indir, paste0("in_", i))
}
## Path for the i-th worker output file, kept in a dedicated "out"
## subdirectory (created on demand), separate from the inputs.
bsub$getDefaultOutputFile <- function(outdir, i)
{
outsubdir = fp(outdir, "out")
dir.create(outsubdir, showWarnings = F, recursive = T)
fp(outsubdir, paste0("out_", i))
}
## Build the default bsub prefix for the killdevil cluster: all processes on
## a single host (-R 'span[hosts=1]' -n), with a memory limit (-M, in GB per
## this code's convention) and a target queue (-q).
bsub$get.default.killdevil.bsub <- function(numProcessPerNode, memoryLimit.GB, queue)
{
## Fix: the original body ended in an assignment, which returns its value
## only invisibly; return the command string explicitly.
command = paste0("bsub -R 'span[hosts=1]' -n ", numProcessPerNode, " -M ", memoryLimit.GB ," -q ", queue)
return(command)
}
# =============================================================================
################################# Start of Code =====================================
# Script setup: clear the workspace, move to the project directory, and load
# the packages used below.
# NOTE(review): rm(list = ls()) and a machine-specific setwd() are fragile in
# shared scripts; install.packages() on every run re-installs unconditionally.
# Consider guarding installs with requireNamespace() -- left unchanged here.
rm(list = ls())
getwd()
setwd("G:/Georgia Tech/Analytical Models/Assignments")
install.packages("data.table")
install.packages("lubridate")
install.packages("dplyr")
install.packages("weatherData")
require(data.table)
require(lubridate)
require(dplyr)
require(ggplot2)
require(weatherData)
################################### Q3 ============================================
######################### Get Data and manipulate =================================
# Look up the Atlanta, GA station code (weatherData helper).
ATL_station_code = getStationCode("Atlanta",region="GA")
## Download daily weather summaries for Atlanta airport (KATL) covering
## 1 July through 31 October of the given year.
fetchJul2OctoberData <- function (year) {
getSummarizedWeather(station_id = "KATL",
start_date = paste0(year, "-07-1"),
end_date = paste0(year, "-10-31"))
}
# Fetch Jul-Oct data for every year 1996-2015 and stack into one data frame.
years = seq(1996,2015)
all_data <- Map(fetchJul2OctoberData, years)
final_data <- Reduce(rbind, all_data)
#CHecking for NAs.
sum(is.na(final_data$Mean_TemperatureF))
sum(is.na(final_data$Min_TemperatureF))
sum(is.na(final_data$Max_TemperatureF))
#Only one NA in mean temp. - entering value for that
# NOTE(review): the hard-coded row index 535 is fragile -- it silently breaks
# if the download ever returns a different number of rows; verify.
final_data$Mean_TemperatureF[535] = (final_data$Max_TemperatureF[535] + final_data$Min_TemperatureF[535])/2
############################ Cusum Analysis ========================================
#We should do our analysis with Mean temp. as Min and Max temperatures can be inflated/
#deflated for a particular day but question says use the high temperature.
#Picking the critical value
#For the critical value of birthdays in class, we picked the middle value.
#Here, we are picking a critical value across all the years. It might have made sense
#to pick a separate critical value for each year due to "global warming" etc. but this
#will also work.
# month() comes from lubridate.
final_data$Month = month(final_data$Date)
#The average temperature for all the months can be taken as the critical value,
#going below which we can say that summer for Atlanta has ended. This is a good
#choice as so far we don't know when summer ends. Could be it ends in December.
#We cannot suppose anything.
###################Question says use Daily High temperature instead of the Mean.
# CUSUM reference level: overall mean of the daily high temperatures.
critical = mean(final_data$Max_TemperatureF)
#Considering that the temperature can drop for a few days due to rain or other factors,
#Setting a threshold of >10 seems right. We will try different values of threshold.
#CUSUM change detection: walks through `data` accumulating deviations from
#critical_value (downward shifts when change == "down", upward otherwise) and
#returns the 1-based index of the first observation whose accumulated
#statistic S_agg exceeds `threshold`.
#Returns NULL when no change point is detected (the original fell off the end
#of the loop and returned NULL implicitly; made explicit here).
cusum_func <- function(data, critical_value, threshold, change = "down"){
#data is a vector and the other two are numerical values.
S_agg = 0
#seq_along() is safe for empty input, and doubles as the returned index.
for (index in seq_along(data)){
#Per-observation deviation; only the sign convention differs between
#the "down" and "up" directions.
if(change == "down"){
S = critical_value - data[index]
} else {
S = data[index] - critical_value
}
#Accumulate, never letting the statistic fall below zero.
S_agg = max(0, S + S_agg)
if(S_agg > threshold){
return(index)
}
}
#No change point found.
return(NULL)
}
#Same accumulation as cusum_func(), but returns the full trajectory of the
#CUSUM statistic (one value per observation) for plotting.
#`threshold` is accepted for interface compatibility but does not affect the
#returned trajectory.
cusum_func_graph <- function(data, critical_value, threshold, change = "down"){
#data is a vector and the other two are numerical values.
S_agg = 0
#Preallocate the output instead of growing it with append() inside the
#loop (growing a vector element-by-element is quadratic); this also makes
#the empty-input result numeric(0) rather than logical(0).
store_agg = numeric(length(data))
for (index in seq_along(data)){
if(change == "down"){
S = critical_value - data[index]
} else {
S = data[index] - critical_value
}
S_agg = max(0, S + S_agg)
store_agg[index] = S_agg
}
return(store_agg)
}
#Breaking up the data into years
# year() / year<- come from lubridate.
final_data$Year = year(final_data$Date)
# NOTE(review): `increment` is assigned but never used below.
increment = 0
cnt = 0
date_vector = vector()
# For each year, run the downward CUSUM on daily highs to find the first day
# summer can be declared over, and collect that date.
for( i in seq(1996, 2015, 1)){
subset_data = final_data[final_data$Year == i, ]
#A threshold of 35 makes sense as there should be a drop of 5 degrees for each day
#of the week, on an average. Accounting for some hot days in between, a total
#drop of 35 means that has changed.
ind = cusum_func(subset_data$Max_TemperatureF, critical_value = critical, threshold = 35, "down")
print(paste("Summer end date for the year", i, "is", subset_data$Date[ind]))
cnt = cnt + 1
#Making a vector of all the dates
date_vector = append(date_vector, subset_data$Date[ind])
#As we want to find the date when summer ends, we will have to take the average
#of all the dates over the years. To take the average, bringing everything to
#1 year:
year(date_vector[cnt]) = 1970
}
#Taking the mean
final_date = mean(date_vector)
print(final_date)
#The date we have is September 23.
#Thus we conclude that summer in Atlanta ends on 23rd September on an average.
#PLotting the change for 2015
# subset_data still holds the last loop iteration (2015), as intended here.
S_t = data.frame(Cusum = cusum_func_graph(subset_data$Max_TemperatureF, critical_value = critical, threshold = 35, "down"), Index = subset_data$Date)
plt = ggplot(S_t, aes(x = Index, y = Cusum)) + geom_point() + geom_line()
plt + geom_hline(yintercept = 35)
#################################### Q4 ===========================================
# Monthly average of daily highs, per year, using dplyr grouping.
monthly_data = final_data[,c("Year", "Max_TemperatureF", "Month")]
monthly_data1 = group_by(monthly_data, Year, Month)
#Summarizing on the average max temperature for a month for that year
agg_dataset = summarize(monthly_data1,
Max_TemperatureF = mean(Max_TemperatureF))
#First of all we will only consider the summer months in our analysis.
#Using the answer from the previous question, we will exclude October.
agg_dataset = agg_dataset[!agg_dataset$Month == 10, ]
#We can do two kinds of analysis - see the change over 20 years for each month separtely
#OR take an average temperature for the three summer months and see the change in the
#average.
#################################### Average Analysis =============================
# One mean summer high per year (Jul-Sep).
Yearly_av = agg_dataset %>%
group_by(Year) %>%
summarize(Max_Temp_Mean = mean(Max_TemperatureF))
#Again, we take the critical as the average of all the years.
crit_value = mean(Yearly_av$Max_Temp_Mean)
#The threshold should not be very high this time as we are looking at Climate change
#over the years.
# Upward CUSUM over the yearly means; the returned row is the change year.
Year_of_change = Yearly_av[cusum_func(Yearly_av$Max_Temp_Mean, crit_value, 8, "up"),]
print(Year_of_change)
#We have chosen the threshold of 8 considering that a change of 2 degrees for
#4 years consecutive would be an indication of climate change. Thus, equivalently,
#when the cumsum goes above 8, we call it a change.
#Thus, we conclude that the Atlanta summer has gotten warmer over the years and after
#aggregation over the years, this change can be definitively seen by 2012.
#Plot the same
S_t = data.frame(Cusum = cusum_func_graph(Yearly_av$Max_Temp_Mean, crit_value, 8, "up"), Index = Yearly_av$Year)
plt = ggplot(S_t, aes(x = Index, y = Cusum)) + geom_point() + geom_line()
plt + geom_hline(yintercept = 8)
############################# Per Month Analysis ===============================
#We can do a similar analysis per month - July, Aug, Sep - and then look at the
#median of the results.
agg_july = agg_dataset[agg_dataset$Month == 7,]
agg_aug = agg_dataset[agg_dataset$Month == 8,]
agg_sep = agg_dataset[agg_dataset$Month == 9,]
#July analysis
# Same recipe per month: mean as the reference, upward CUSUM, threshold 8.
crit_july = mean(agg_july$Max_TemperatureF)
changeYear_July = agg_july[cusum_func(agg_july$Max_TemperatureF, crit_july, 8, "up"),]
print(changeYear_July)
#August analysis
crit_aug = mean(agg_aug$Max_TemperatureF)
changeYear_Aug = agg_aug[cusum_func(agg_aug$Max_TemperatureF, crit_aug, 8, "up"),]
print(changeYear_Aug)
#September analysis
crit_sep = mean(agg_sep$Max_TemperatureF)
changeYear_Sep = agg_sep[cusum_func(agg_sep$Max_TemperatureF, crit_sep, 8, "up"),]
print(changeYear_Sep)
# Median of the three per-month change years is the reported answer.
Final_changeYear = median(c(changeYear_Sep$Year, changeYear_Aug$Year, changeYear_July$Year))
print(Final_changeYear)
#Thus both analysis give year of climate change as 2012. | /HW4_CUSUM.R | no_license | aten2001/Analytical-Models-Assignments | R | false | false | 7,805 | r | ################################# Start of Code =====================================
# NOTE(review): this section begins a byte-for-byte duplicate of the analysis
# above (likely a dataset-extraction artifact); comments below mirror it.
# Script setup: clear workspace, set working directory, load packages.
rm(list = ls())
getwd()
setwd("G:/Georgia Tech/Analytical Models/Assignments")
install.packages("data.table")
install.packages("lubridate")
install.packages("dplyr")
install.packages("weatherData")
require(data.table)
require(lubridate)
require(dplyr)
require(ggplot2)
require(weatherData)
################################### Q3 ============================================
######################### Get Data and manipulate =================================
# Look up the Atlanta, GA station code (weatherData helper).
ATL_station_code = getStationCode("Atlanta",region="GA")
## Download daily weather summaries for Atlanta airport (KATL) covering
## 1 July through 31 October of the given year.
fetchJul2OctoberData <- function (year) {
getSummarizedWeather(station_id = "KATL",
start_date = paste0(year, "-07-1"),
end_date = paste0(year, "-10-31"))
}
# Fetch Jul-Oct data for every year 1996-2015 and stack into one data frame.
years = seq(1996,2015)
all_data <- Map(fetchJul2OctoberData, years)
final_data <- Reduce(rbind, all_data)
#CHecking for NAs.
sum(is.na(final_data$Mean_TemperatureF))
sum(is.na(final_data$Min_TemperatureF))
sum(is.na(final_data$Max_TemperatureF))
#Only one NA in mean temp. - entering value for that
# NOTE(review): hard-coded row index 535 is fragile; verify against the data.
final_data$Mean_TemperatureF[535] = (final_data$Max_TemperatureF[535] + final_data$Min_TemperatureF[535])/2
############################ Cusum Analysis ========================================
#We should do our analysis with Mean temp. as Min and Max temperatures can be inflated/
#deflated for a particular day but question says use the high temperature.
#Picking the critical value
#For the critical value of birthdays in class, we picked the middle value.
#Here, we are picking a critical value across all the years. It might have made sense
#to pick a separate critical value for each year due to "global warming" etc. but this
#will also work.
final_data$Month = month(final_data$Date)
#The average temperature for all the months can be taken as the critical value,
#going below which we can say that summer for Atlanta has ended. This is a good
#choice as so far we don't know when summer ends. Could be it ends in December.
#We cannot suppose anything.
###################Question says use Daily High temperature instead of the Mean.
# CUSUM reference level: overall mean of the daily high temperatures.
critical = mean(final_data$Max_TemperatureF)
#Considering that the temperature can drop for a few days due to rain or other factors,
#Setting a threshold of >10 seems right. We will try different values of threshold.
#CUSUM change detection: walks through `data` accumulating deviations from
#critical_value (downward shifts when change == "down", upward otherwise) and
#returns the 1-based index of the first observation whose accumulated
#statistic S_agg exceeds `threshold`.
#Returns NULL when no change point is detected (the original fell off the end
#of the loop and returned NULL implicitly; made explicit here).
cusum_func <- function(data, critical_value, threshold, change = "down"){
#data is a vector and the other two are numerical values.
S_agg = 0
#seq_along() is safe for empty input, and doubles as the returned index.
for (index in seq_along(data)){
#Per-observation deviation; only the sign convention differs between
#the "down" and "up" directions.
if(change == "down"){
S = critical_value - data[index]
} else {
S = data[index] - critical_value
}
#Accumulate, never letting the statistic fall below zero.
S_agg = max(0, S + S_agg)
if(S_agg > threshold){
return(index)
}
}
#No change point found.
return(NULL)
}
#Same accumulation as cusum_func(), but returns the full trajectory of the
#CUSUM statistic (one value per observation) for plotting.
#`threshold` is accepted for interface compatibility but does not affect the
#returned trajectory.
cusum_func_graph <- function(data, critical_value, threshold, change = "down"){
#data is a vector and the other two are numerical values.
S_agg = 0
#Preallocate the output instead of growing it with append() inside the
#loop (growing a vector element-by-element is quadratic).
store_agg = numeric(length(data))
for (index in seq_along(data)){
if(change == "down"){
S = critical_value - data[index]
} else {
S = data[index] - critical_value
}
S_agg = max(0, S + S_agg)
store_agg[index] = S_agg
}
return(store_agg)
}
#Breaking up the data into years
final_data$Year = year(final_data$Date)
# NOTE(review): `increment` is assigned but never used below.
increment = 0
cnt = 0
date_vector = vector()
# Per-year downward CUSUM on daily highs to date the end of summer.
for( i in seq(1996, 2015, 1)){
subset_data = final_data[final_data$Year == i, ]
#A threshold of 35 makes sense as there should be a drop of 5 degrees for each day
#of the week, on an average. Accounting for some hot days in between, a total
#drop of 35 means that has changed.
ind = cusum_func(subset_data$Max_TemperatureF, critical_value = critical, threshold = 35, "down")
print(paste("Summer end date for the year", i, "is", subset_data$Date[ind]))
cnt = cnt + 1
#Making a vector of all the dates
date_vector = append(date_vector, subset_data$Date[ind])
#As we want to find the date when summer ends, we will have to take the average
#of all the dates over the years. To take the average, bringing everything to
#1 year:
year(date_vector[cnt]) = 1970
}
#Taking the mean
final_date = mean(date_vector)
print(final_date)
#The date we have is September 23.
#Thus we conclude that summer in Atlanta ends on 23rd September on an average.
#PLotting the change for 2015
# subset_data still holds the last loop iteration (2015), as intended here.
S_t = data.frame(Cusum = cusum_func_graph(subset_data$Max_TemperatureF, critical_value = critical, threshold = 35, "down"), Index = subset_data$Date)
plt = ggplot(S_t, aes(x = Index, y = Cusum)) + geom_point() + geom_line()
plt + geom_hline(yintercept = 35)
#################################### Q4 ===========================================
monthly_data = final_data[,c("Year", "Max_TemperatureF", "Month")]
monthly_data1 = group_by(monthly_data, Year, Month)
#Summarizing on the average max temperature for a month for that year
agg_dataset = summarize(monthly_data1,
Max_TemperatureF = mean(Max_TemperatureF))
#First of all we will only consider the summer months in our analysis.
#Using the answer from the previous question, we will exclude October.
agg_dataset = agg_dataset[!agg_dataset$Month == 10, ]
#We can do two kinds of analysis - see the change over 20 years for each month separtely
#OR take an average temperature for the three summer months and see the change in the
#average.
#################################### Average Analysis =============================
# One mean summer high per year (Jul-Sep).
Yearly_av = agg_dataset %>%
group_by(Year) %>%
summarize(Max_Temp_Mean = mean(Max_TemperatureF))
#Again, we take the critical as the average of all the years.
crit_value = mean(Yearly_av$Max_Temp_Mean)
#The threshold should not be very high this time as we are looking at Climate change
#over the years.
Year_of_change = Yearly_av[cusum_func(Yearly_av$Max_Temp_Mean, crit_value, 8, "up"),]
print(Year_of_change)
#We have chosen the threshold of 8 considering that a change of 2 degrees for
#4 years consecutive would be an indication of climate change. Thus, equivalently,
#when the cumsum goes above 8, we call it a change.
#Thus, we conclude that the Atlanta summer has gotten warmer over the years and after
#aggregation over the years, this change can be definitively seen by 2012.
#Plot the same
S_t = data.frame(Cusum = cusum_func_graph(Yearly_av$Max_Temp_Mean, crit_value, 8, "up"), Index = Yearly_av$Year)
plt = ggplot(S_t, aes(x = Index, y = Cusum)) + geom_point() + geom_line()
plt + geom_hline(yintercept = 8)
############################# Per Month Analysis ===============================
#We can do a similar analysis per month - July, Aug, Sep - and then look at the
#median of the results.
agg_july = agg_dataset[agg_dataset$Month == 7,]
agg_aug = agg_dataset[agg_dataset$Month == 8,]
agg_sep = agg_dataset[agg_dataset$Month == 9,]
#July analysis
crit_july = mean(agg_july$Max_TemperatureF)
changeYear_July = agg_july[cusum_func(agg_july$Max_TemperatureF, crit_july, 8, "up"),]
print(changeYear_July)
#August analysis
crit_aug = mean(agg_aug$Max_TemperatureF)
changeYear_Aug = agg_aug[cusum_func(agg_aug$Max_TemperatureF, crit_aug, 8, "up"),]
print(changeYear_Aug)
#September analysis
crit_sep = mean(agg_sep$Max_TemperatureF)
changeYear_Sep = agg_sep[cusum_func(agg_sep$Max_TemperatureF, crit_sep, 8, "up"),]
print(changeYear_Sep)
# Median of the three per-month change years is the reported answer.
Final_changeYear = median(c(changeYear_Sep$Year, changeYear_Aug$Year, changeYear_July$Year))
print(Final_changeYear)
#Thus both analysis give year of climate change as 2012. |
###############################################################################
# #
# WIFI | TRAINING RF MODEL FOR LONGITUDE | VERSION 3.0 | by ELSE #
# #
# Sample & subset data with only WAPS as predictors, train model & predict #
# #
###############################################################################
# take a representable sample from the training dataset in order to train a model
# taking a sample will save time while running the first model(s)
# use only WAP columns to predict: LONGITUDE
# libraries & data----
# NOTE(review): class, readr, corrplot and plotly do not appear to be used
# anywhere in this script -- confirm before pruning.
library("caret")
library("dplyr")
library("tidyverse")
library("class")
library("readr")
library("corrplot")
library("plotly")
# load the preprocessed dataframes
trainingData <- readRDS(file = "data/trainingDataProc(V7).rds")
validationData <- readRDS(file = "data/validationDataProc(V7).rds")
trainingData$FLOOR <- as.factor(trainingData$FLOOR)
# partitioning data
# Fixed seed for a reproducible split; p = .05 keeps only 5% of the rows as
# the training sample (deliberately small to speed up the first models).
set.seed(123)
indexTrain <- createDataPartition(y = trainingData$LONGITUDE, p = .05, list = FALSE)
setTraining <- trainingData[indexTrain,]
setTest <- trainingData[-indexTrain,]
# I want to predict FLOOR only on the dBm measured by the WAPs,
# therefore I remove other columns
setTraining <- select(setTraining, -BUILDINGID, -SPACEID, -RELATIVEPOSITION,
-USERID, -PHONEID, -TIMESTAMP, -FLOOR, -LATITUDE)
# set cross validation parameters ----
# default search = random, change it to grid search if searching with Manual Grid
# NOTE(review): trainControl() has no `preProc` argument -- this partially
# matches `preProcOptions`, which expects a list of options, not method
# names. Pre-processing methods are normally given to train(preProcess=...);
# verify the centering/scaling is actually applied.
CrossValidation <- trainControl(method = "repeatedcv", number = 10, repeats = 1,
preProc = c("center", "scale", "range"), verboseIter = TRUE)
# check the models available in caret package by using names(getModelInfo())
# set the training parameters of the model ----
modelKNN <- train(LONGITUDE~., data = setTraining, method = "knn", trControl = CrossValidation)
# check the metrics ----
modelKNN
#see variable importance
varImp(modelKNN)
# make predictions with the model and predict the LONGITUDE of from the TRAININGDATA ----
predLONGITUDE_KNN <- predict(modelKNN, setTest)
#create a new column with predicted data
setTest$predLONGITUDE_KNN <- predLONGITUDE_KNN
# as.numeric() coercions below are defensive; regression predictions are
# normally numeric already.
setTest$LONGITUDE <- as.numeric(setTest$LONGITUDE)
setTest$predLONGITUDE_KNN <- as.numeric(setTest$predLONGITUDE_KNN)
# check the metrics postResample() for regression and confusionMatrix() for classification ---
postResample(setTest$predLONGITUDE_KNN, setTest$LONGITUDE)
# make predictions with the model and predict the LONGITUDE of from the validationData ----
# Note: predLONGITUDE_KNN is reused/overwritten here for the validation set.
predLONGITUDE_KNN <- predict(modelKNN, validationData)
#create a new column with predicted data
validationData$predLONGITUDE_KNN <- predLONGITUDE_KNN
validationData$LONGITUDE <- as.numeric(validationData$LONGITUDE)
validationData$predLONGITUDE_KNN <- as.numeric(validationData$predLONGITUDE_KNN)
# check the metrics postResample() for regression and confusionMatrix() for classification ---
postResample(validationData$predLONGITUDE_KNN, validationData$LONGITUDE)
# add column with errors to the dataframe
# Signed prediction error in meters (prediction minus truth).
validationData <- mutate(validationData, errorsLONGITUDE = predLONGITUDE_KNN - LONGITUDE)
# turn the errors back into factors to produce an easy to read plot
plot(validationData$errorsLONGITUDE,
main = "LONGITUDE predictions",
xlab = "meters",
ylab = "count")
# subset the errors
# NOTE(review): both filters are one-sided on the SIGNED error -- large
# negative errors are excluded from "wrong" and included in "right".
# If magnitude is intended, abs(errorsLONGITUDE) should be used; verify.
wrongLONGITUDE <-validationData %>%
filter(errorsLONGITUDE >= 100)
rightLONGITUDE <-validationData %>%
filter(errorsLONGITUDE <= 8)
# what do the errors have in common?
wrongLONGITUDE[,521:531]
# Highlight points whose |error| exceeds 100 m, split by floor.
ggplot(validationData, aes(x=LONGITUDE, y=LATITUDE), colour = "black")+
geom_jitter()+
geom_jitter(aes(colour = (errorsLONGITUDE > 100 | errorsLONGITUDE < -100)))+
theme_classic() +
facet_wrap(~FLOOR) +
labs(title="Errors LONGITUDE > 100 meters",
subtitle = "Divided by FLOOR")
# Highlight points whose |error| is below 8 m, split by floor.
# Fix: the original used (errorsLONGITUDE < 8 | errorsLONGITUDE > -8), which
# is TRUE for every value, so the colour mapping was constant; an AND is
# required to express |error| < 8.
ggplot(validationData, aes(x=LONGITUDE, y=LATITUDE), colour = "black")+
geom_jitter()+
geom_jitter(aes(colour = (errorsLONGITUDE < 8 & errorsLONGITUDE > -8)))+
theme_classic() +
facet_wrap(~FLOOR) +
labs(title="Errors LONGITUDE < 8 meters",
subtitle = "Divided by FLOOR")
#Move the info to the front
# Reorder columns so the last 11 (metadata/prediction columns) come first.
wrongLONGITUDE_Gathered <- wrongLONGITUDE[ , c((ncol(wrongLONGITUDE)-10):(ncol(wrongLONGITUDE)), 1:(ncol(wrongLONGITUDE)-11))]
rightLONGITUDE_Gathered <- rightLONGITUDE[ , c((ncol(rightLONGITUDE)-10):(ncol(rightLONGITUDE)), 1:(ncol(rightLONGITUDE)-11))]
# gather the data
# Reshape wide WAP columns into long (WAP, DBM) pairs. gather() is
# superseded by pivot_longer() in newer tidyr, but kept as-is here.
wrongLONGITUDE_Gathered <- gather(wrongLONGITUDE_Gathered, WAP, DBM, 12:ncol(wrongLONGITUDE_Gathered))
rightLONGITUDE_Gathered <- gather(rightLONGITUDE_Gathered, WAP, DBM, 12:ncol(rightLONGITUDE_Gathered))
# write CSV to understand which WAPS are making the errors
write.csv(wrongLONGITUDE_Gathered, file = "data/wrongLONGITUDE_Gathered.csv")
write.csv(rightLONGITUDE_Gathered, file = "data/rightLONGITUDE_Gathered.csv")
# save the errors for later
# Fix: the original saved `wrongFLOOR`, an object never created in this
# script (copy-paste from the FLOOR model script) that would raise
# "object 'wrongFLOOR' not found"; save the LONGITUDE error subset instead,
# under a filename matching its content.
saveRDS(wrongLONGITUDE, file = "data/errorsLONGITUDE-training.rds")
# combine the predicted results and the corresponding errors in a tibble or datafrme ---
# NOTE(review): .rows = 1111 hard-codes the validation-set size -- verify it
# matches nrow(validationData).
resultsLONGITUDE <- tibble(.rows = 1111)
# add LONGITUDE and its prediction to the tibble ----
resultsLONGITUDE$predLONGITUDE_KNN <- predLONGITUDE_KNN
resultsLONGITUDE$LONGITUDE <- validationData$LONGITUDE
# mutate the errors and add them to the tibble
resultsLONGITUDE <- mutate(resultsLONGITUDE, errorsLONGITUDE = predLONGITUDE_KNN - LONGITUDE)
# (the original recomputed errorsLONGITUDE by hand immediately after the
# mutate() above; the second assignment produced identical values and has
# been removed)
# store as RDS
saveRDS(resultsLONGITUDE, file = "resultsLONGITUDE(V7).rds")
###############################################################################
# #
# WIFI | TRAINING RF MODEL FOR LONGITUDE | VERSION 3.0 | by ELSE #
# #
# Sample & subset data with only WAPS as predictors, train model & predict #
# #
###############################################################################
# NOTE(review): from here the script duplicates the analysis above
# byte-for-byte (likely a dataset-extraction artifact).
# take a representable sample from the training dataset in order to train a model
# taking a sample will save time while running the first model(s)
# use only WAP columns to predict: LONGITUDE
# libraries & data----
library("caret")
library("dplyr")
library("tidyverse")
library("class")
library("readr")
library("corrplot")
library("plotly")
# load the preprocessed dataframes
trainingData <- readRDS(file = "data/trainingDataProc(V7).rds")
validationData <- readRDS(file = "data/validationDataProc(V7).rds")
trainingData$FLOOR <- as.factor(trainingData$FLOOR)
# partitioning data
# Fixed seed for a reproducible split; p = .05 keeps 5% of rows for training.
set.seed(123)
indexTrain <- createDataPartition(y = trainingData$LONGITUDE, p = .05, list = FALSE)
setTraining <- trainingData[indexTrain,]
setTest <- trainingData[-indexTrain,]
# I want to predict FLOOR only on the dBm measured by the WAPs,
# therefore I remove other columns
setTraining <- select(setTraining, -BUILDINGID, -SPACEID, -RELATIVEPOSITION,
-USERID, -PHONEID, -TIMESTAMP, -FLOOR, -LATITUDE)
# set cross validation parameters ----
# default search = random, change it to grid search if searching with Manual Grid
# NOTE(review): trainControl() has no `preProc` argument -- this partially
# matches `preProcOptions` (which expects a list); pre-processing methods
# are normally passed to train(preProcess=...). Verify.
CrossValidation <- trainControl(method = "repeatedcv", number = 10, repeats = 1,
preProc = c("center", "scale", "range"), verboseIter = TRUE)
# check the models available in caret package by using names(getModelInfo())
# set the training parameters of the model ----
modelKNN <- train(LONGITUDE~., data = setTraining, method = "knn", trControl = CrossValidation)
# check the metrics ----
modelKNN
#see variable importance
varImp(modelKNN)
# make predictions with the model and predict the LONGITUDE of from the TRAININGDATA ----
predLONGITUDE_KNN <- predict(modelKNN, setTest)
#create a new column with predicted data
setTest$predLONGITUDE_KNN <- predLONGITUDE_KNN
setTest$LONGITUDE <- as.numeric(setTest$LONGITUDE)
setTest$predLONGITUDE_KNN <- as.numeric(setTest$predLONGITUDE_KNN)
# check the metrics postResample() for regression and confusionMatrix() for classification ---
postResample(setTest$predLONGITUDE_KNN, setTest$LONGITUDE)
# make predictions with the model and predict the LONGITUDE of from the validationData ----
# Note: predLONGITUDE_KNN is overwritten here for the validation set.
predLONGITUDE_KNN <- predict(modelKNN, validationData)
#create a new column with predicted data
validationData$predLONGITUDE_KNN <- predLONGITUDE_KNN
validationData$LONGITUDE <- as.numeric(validationData$LONGITUDE)
validationData$predLONGITUDE_KNN <- as.numeric(validationData$predLONGITUDE_KNN)
# check the metrics postResample() for regression and confusionMatrix() for classification ---
postResample(validationData$predLONGITUDE_KNN, validationData$LONGITUDE)
# add column with errors to the dataframe
# Signed prediction error in meters (prediction minus truth).
validationData <- mutate(validationData, errorsLONGITUDE = predLONGITUDE_KNN - LONGITUDE)
# turn the errors back into factors to produce an easy to read plot
plot(validationData$errorsLONGITUDE,
main = "LONGITUDE predictions",
xlab = "meters",
ylab = "count")
# subset the errors
# NOTE(review): both filters are one-sided on the SIGNED error; if magnitude
# is intended, abs(errorsLONGITUDE) should be used. Verify.
wrongLONGITUDE <-validationData %>%
filter(errorsLONGITUDE >= 100)
rightLONGITUDE <-validationData %>%
filter(errorsLONGITUDE <= 8)
# what do the errors have in common?
wrongLONGITUDE[,521:531]
# Highlight points whose |error| exceeds 100 m, split by floor.
ggplot(validationData, aes(x=LONGITUDE, y=LATITUDE), colour = "black")+
geom_jitter()+
geom_jitter(aes(colour = (errorsLONGITUDE > 100 | errorsLONGITUDE < -100)))+
theme_classic() +
facet_wrap(~FLOOR) +
labs(title="Errors LONGITUDE > 100 meters",
subtitle = "Divided by FLOOR")
# Highlight points whose |error| is below 8 m, split by floor.
# Fix: the original condition (< 8 | > -8) is TRUE for every value, making
# the colour mapping constant; an AND expresses |error| < 8.
ggplot(validationData, aes(x=LONGITUDE, y=LATITUDE), colour = "black")+
geom_jitter()+
geom_jitter(aes(colour = (errorsLONGITUDE < 8 & errorsLONGITUDE > -8)))+
theme_classic() +
facet_wrap(~FLOOR) +
labs(title="Errors LONGITUDE < 8 meters",
subtitle = "Divided by FLOOR")
#Move the info to the front
wrongLONGITUDE_Gathered <- wrongLONGITUDE[ , c((ncol(wrongLONGITUDE)-10):(ncol(wrongLONGITUDE)), 1:(ncol(wrongLONGITUDE)-11))]
rightLONGITUDE_Gathered <- rightLONGITUDE[ , c((ncol(rightLONGITUDE)-10):(ncol(rightLONGITUDE)), 1:(ncol(rightLONGITUDE)-11))]
# gather the data
wrongLONGITUDE_Gathered <- gather(wrongLONGITUDE_Gathered, WAP, DBM, 12:ncol(wrongLONGITUDE_Gathered))
rightLONGITUDE_Gathered <- gather(rightLONGITUDE_Gathered, WAP, DBM, 12:ncol(rightLONGITUDE_Gathered))
# write CSV to understand which WAPS are making the errors
write.csv(wrongLONGITUDE_Gathered, file = "data/wrongLONGITUDE_Gathered.csv")
write.csv(rightLONGITUDE_Gathered, file = "data/rightLONGITUDE_Gathered.csv")
# save the errors for later
saveRDS(wrongFLOOR, file = "data/errorsFLOOR-training.rds")
# combine the predicted results and the corresponding errors in a tibble or datafrme ---
resultsLONGITUDE <- tibble(.rows = 1111)
# add LONGITUDE and its prediction to the tibble ----
resultsLONGITUDE$predLONGITUDE_KNN <- predLONGITUDE_KNN
resultsLONGITUDE$LONGITUDE <- validationData$LONGITUDE
# mutate the errors and add them to the tibble
resultsLONGITUDE <- mutate(resultsLONGITUDE, errorsLONGITUDE = predLONGITUDE_KNN - LONGITUDE)
resultsLONGITUDE$errorsLONGITUDE <- resultsLONGITUDE$predLONGITUDE_KNN - resultsLONGITUDE$LONGITUDE
# store as RDS
saveRDS(resultsLONGITUDE, file = "resultsLONGITUDE(V7).rds")
|
#' Plot the Lasso path
#'
#' @description Plot the whole lasso path run by BTdecayLasso() with given lambda and path = TRUE
#' @param x Object with class "swlasso"
#' @param ... Further arguments passed to or from other methods
#' @return A ggplot object with one line per team tracing its ability
#'   estimate across the penalty path.
#' @export
#' @import ggplot2
plot.swlasso <- function(x, ...) {
  # Number of team abilities; the last row of ability.path is excluded
  # (presumably a non-team parameter -- confirm in the BTdecayLasso docs).
  n <- nrow(x$ability.path) - 1
  n_steps <- length(x$likelihood.path)
  # Build one data.frame per penalty step and bind them once, instead of
  # growing df1 with rbind() inside the loop (which copies the whole
  # frame on every iteration).
  step_frames <- lapply(seq_len(n_steps), function(i) {
    data.frame(ability = x$ability.path[1:n, i],
               team    = seq_len(n),
               penalty = x$penalty.path[i])
  })
  df1 <- do.call(rbind, step_frames)
  # Dummy bindings to silence R CMD check NOTEs about aes() variables.
  penalty <- ability <- team <- NULL
  ggplot2::ggplot(df1, aes(x = penalty, y = ability, color = team)) +
    geom_line(aes(group = team))
}
#' Plot the Lasso path
#'
#' @description Plot the whole lasso path run by BTdecayLasso() with lambda = NULL and path = TRUE
#' @usage
#' ##S3 method for class "wlasso"
#' @param x Object with class "wlasso"
#' @param ... Further arguments pass to or from other methods
#' @return A ggplot object with one line per team tracing its ability
#'   estimate across the penalty path.
#' @export
#' @import ggplot2
plot.wlasso <- function(x, ...) {
  # Number of team abilities; the last row of ability.path is excluded
  # (presumably a non-team parameter -- TODO confirm in BTdecayLasso docs).
  n <- nrow(x$ability.path) - 1
  # Seed the long-format frame with the first penalty step ...
  df1 <- data.frame(ability = x$ability.path[1:n, 1], team = seq(1, n, 1), penalty = x$penalty.path[1])
  # ... then append one block per remaining step: column (i + 1) of
  # ability.path paired with penalty.path[i + 1].
  for (i in 1:(length(x$likelihood.path) - 1)) {
    df1 <- rbind(df1, data.frame(ability = x$ability.path[1:n, (i + 1)], team = seq(1, n, 1), penalty = x$penalty.path[i + 1]))
  }
  # Dummy bindings to silence R CMD check NOTEs about aes() variables.
  penalty <- ability <- team <- NULL
  ggplot2::ggplot(df1, aes(x = penalty, y = ability, color = team)) + geom_line(aes(group = team))
} | /R/plot.R | no_license | cran/BTdecayLasso | R | false | false | 1,630 | r | #' Plot the Lasso path
#'
#' @description Plot the whole lasso path run by BTdecayLasso() with given lambda and path = TRUE
#' @usage
#' ##S3 method for class "swlasso"
#' @param x Object with class "swlasso"
#' @param ... Further arguments pass to or from other methods
#' @export
#' @import ggplot2
plot.swlasso <- function(x, ...) {
  # Number of team abilities; the last row of ability.path is excluded
  # (presumably a non-team parameter -- confirm in the BTdecayLasso docs).
  n <- nrow(x$ability.path) - 1
  # One data.frame per penalty step, bound once at the end instead of
  # growing df1 with rbind() inside the loop (quadratic copying).
  step_frames <- lapply(seq_len(length(x$likelihood.path)), function(i) {
    data.frame(ability = x$ability.path[1:n, i],
               team    = seq_len(n),
               penalty = x$penalty.path[i])
  })
  df1 <- do.call(rbind, step_frames)
  # Dummy bindings to silence R CMD check NOTEs about aes() variables.
  penalty <- ability <- team <- NULL
  ggplot2::ggplot(df1, aes(x = penalty, y = ability, color = team)) +
    geom_line(aes(group = team))
}
#' Plot the Lasso path
#'
#' @description Plot the whole lasso path run by BTdecayLasso() with lambda = NULL and path = TRUE
#' @param x Object with class "wlasso"
#' @param ... Further arguments passed to or from other methods
#' @return A ggplot object with one line per team tracing its ability
#'   estimate across the penalty path.
#' @export
#' @import ggplot2
plot.wlasso <- function(x, ...) {
  # Number of team abilities; the last row of ability.path is excluded
  # (presumably a non-team parameter -- confirm in the BTdecayLasso docs).
  n <- nrow(x$ability.path) - 1
  n_steps <- length(x$likelihood.path)
  # One data.frame per penalty step, bound in a single rbind() instead of
  # growing df1 inside the loop (which copies the frame on every pass).
  step_frames <- lapply(seq_len(n_steps), function(i) {
    data.frame(ability = x$ability.path[1:n, i],
               team    = seq_len(n),
               penalty = x$penalty.path[i])
  })
  df1 <- do.call(rbind, step_frames)
  # Dummy bindings to silence R CMD check NOTEs about aes() variables.
  penalty <- ability <- team <- NULL
  ggplot2::ggplot(df1, aes(x = penalty, y = ability, color = team)) +
    geom_line(aes(group = team))
}
# Step 0 - Set up working environment and load packages ------------------------
# helper function to get packages
# credit Drew Conway, "Machine Learning for Hackers" (O'Reilly 2012)
# https://github.com/johnmyleswhite/ML_for_Hackers/blob/master/package_installer.R
# set list of packages
pckgs <- c("readr", "dplyr", "magrittr", "readxl", "tidyr", "lubridate",
           "stringr", "leaflet", "networkD3", "ggplot2")
# install packages if they're not installed
# (require() is used only as an "is it installed?" probe here; the real
# loading is done with library() below)
for(p in pckgs) {
  if(!suppressWarnings(require(p, character.only = TRUE, quietly = TRUE))) {
    cat(paste(p, "missing, will attempt to install\n"))
    install.packages(p, dependencies = TRUE, type = "source")
  }
  else {
    cat(paste(p, "installed OK\n"))
  }
}
print("### All required packages installed ###")
# load necessary packages
library(readr)
library(dplyr)
library(magrittr)
library(readxl)
library(tidyr)
library(lubridate)
library(stringr)
# SET THE FILE PATH TO WHERE YOU HAVE SAVED THE DATA, E.G.
# C:/USERS/JIM/DESKTOP/oyster_all_raw_20160125.csv
oyster_data_path <- "./data/oyster_all_raw_20160125.csv"
# finding and setting your working directory --------------------------
getwd()
# (placeholder path for the tutorial -- replace with your own directory)
setwd("/path/to/directory")
# Step 1 - read in the data ----------------------------------------------------
oyster <- read_csv(oyster_data_path)
colnames(oyster) <- tolower(colnames(oyster))
# Step 2 - selection examples --------------------------------------------------
# Select columns with names
oyster %>% select(date, journey.action, charge)
# Select columns with positions (e.g. column 1, 2, and 3; 5 and 7)
oyster %>% select(1:3, 5, 7)
# "Negative selection" with names
oyster %>% select(-journey.action, -charge)
# "Negative selection" with numbers
oyster %>% select(-c(4, 6, 7))
# Step 3 - filtering examples --------------------------------------------------
# Numeric conditions
oyster %>% filter(charge != 0)
# Text conditions
oyster %>% filter(note != "")
# Multiple conditions, with assignment
whoops <- oyster %>% filter(balance < 0) # filtering with assignment
noteworthy <- oyster %>% filter(note != "" & charge >= 2) # multiple conditions
# Step 4 - grouping and summarising --------------------------------------------
# Compute a single summary
oyster %>% summarise(avg_charge = mean(charge, na.rm = TRUE)) # average charge
# Compute multiple summaries
oyster %>% summarise(avg_charge = mean(charge, na.rm = TRUE), # average charge
                     sd_charge = sd(charge, na.rm = TRUE)) # charge std. deviation
# Aggregate and summarise
oyster %>%
  group_by(journey.action) %>%
  summarise(avg_cost = mean(charge, na.rm = TRUE))
# Summarisation chain to answer question 1
oyster_summary <- oyster %>%
  group_by(journey.action) %>%
  summarise(journeys = n()) %>%
  ungroup() %>%
  arrange(-journeys) %>%
  head(5)
# Step 5 - Removing duff data --------------------------------------------------
# A quick example of slice - selecting rows based on numbers
oyster %>% slice(1:10)
# Set up the pattern to search for
badRecords <- "Topped-up|Season ticket|Unspecified location"
# Search for those patterns
records <- grep(badRecords, oyster$journey.action)
# Check what grep does:
records
# Use slice to cut out the bad records (note that this "updates" the oyster object)
# Guard against the no-matches case: slice(-integer(0)) would return an
# *empty* data frame rather than leaving oyster unchanged.
if (length(records) > 0) {
  oyster <- oyster %>% slice(-records)
}
# Step 6 - Adding fields -------------------------------------------------------
# Set up a new field with a constant value
oyster %>% mutate(newField = 4)
# Set up new field(s) from existing fields
oyster %>% mutate(cost_plus_bal = charge + balance, # add charge to balance
                  cost_plus_bal_clean = sum(charge, balance, na.rm = TRUE)) # clean up
# Set up new fields with conditional logic
oyster %>% mutate(no_cost = ifelse(charge == 0 | is.na(charge), 1, 0))
# Add variables to update the data
oyster <- oyster %>%
  mutate(start.time.clean = paste0(start.time, ":00"), # Create a start time field
         end.time.clean = paste0(end.time, ":00")) # Create a end time field
# Split up existing fields in to new ones
oyster <- oyster %>%
  separate(col = journey.action,
           into = c("from", "to"),
           sep = " to ",
           remove = FALSE)
# Step 7 - working with dates --------------------------------------------------
# Turn text that looks like a date in to an actual date
oyster <- oyster %>% mutate(date.clean = dmy(date))
# Add some text date-times
oyster <- oyster %>%
  mutate(start.datetime = paste(date, start.time.clean, sep = " "),
         end.datetime = paste(date, end.time.clean, sep = " "))
# And then turn them in to actual datetimes (note mutate also updates fields)
oyster <- oyster %>%
  mutate(start.datetime = dmy_hms(start.datetime),
         end.datetime = dmy_hms(end.datetime))
# Step 8 - Date manipulation --------------- -----------------------------------
# Row indices of journeys that *started* in the small hours (00-02)
afterMidnightSrt <- grep("00|01|02", substring(oyster$start.time,1,2))
# Row indices of journeys that *ended* in the small hours (00-02)
afterMidnightEnd <- grep("00|01|02", substring(oyster$end.time,1,2))
# Journeys that ended after midnight but started before it: the set
# difference of the two index vectors.  (The previous elementwise
# comparison `afterMidnightEnd[!(afterMidnightEnd == afterMidnightSrt)]`
# silently recycles when the vectors have different lengths and compares
# by position, not by membership.)
afterMidnight <- setdiff(afterMidnightEnd, afterMidnightSrt)
# Use lubridate to add a day:
oyster[afterMidnight, "end.datetime"] <- oyster[afterMidnight, "end.datetime"] + days(1)
# Final transformations - add a journey time and a day of the week for each journey
oyster <- oyster %>%
  mutate(journey.time = difftime(end.datetime,
                                 start.datetime, units = "mins"),
         journey.weekday = wday(date.clean,
                                label = TRUE,
                                abbr = FALSE))
# Step 9 - answering more detailed questions -----------------------------------
# Longest journey
oyster %>%
  filter(journey.time == max(oyster$journey.time, na.rm = TRUE)) %>%
  select(journey.action, journey.time, date)
# Average journey time by day
oyster %>%
  group_by(journey.weekday) %>%
  summarise(avg_time = floor(mean(journey.time, na.rm = TRUE)))
# Average journeys per day
oyster %>%
  group_by(date.clean, journey.weekday) %>%
  summarise(journeys = n()) %>%
  group_by(journey.weekday) %>%
  summarise(avg_journeys = mean(journeys))
| /r/02_r101_session_2.R | no_license | Jim89/r101 | R | false | false | 6,502 | r | # Step 0 - Set up working environment and load packages ------------------------
# Step 0 - Set up working environment and load packages ------------------------
# helper function to get packages
# credit Drew Conway, "Machine Learning for Hackers" (O'Reilly 2012)
# https://github.com/johnmyleswhite/ML_for_Hackers/blob/master/package_installer.R
# set list of packages
pckgs <- c("readr", "dplyr", "magrittr", "readxl", "tidyr", "lubridate",
           "stringr", "leaflet", "networkD3", "ggplot2")
# install packages if they're not installed
# (require() is used only as an "is it installed?" probe here; the real
# loading is done with library() below)
for(p in pckgs) {
  if(!suppressWarnings(require(p, character.only = TRUE, quietly = TRUE))) {
    cat(paste(p, "missing, will attempt to install\n"))
    install.packages(p, dependencies = TRUE, type = "source")
  }
  else {
    cat(paste(p, "installed OK\n"))
  }
}
print("### All required packages installed ###")
# load necessary packages
library(readr)
library(dplyr)
library(magrittr)
library(readxl)
library(tidyr)
library(lubridate)
library(stringr)
# SET THE FILE PATH TO WHERE YOU HAVE SAVED THE DATA, E.G.
# C:/USERS/JIM/DESKTOP/oyster_all_raw_20160125.csv
oyster_data_path <- "./data/oyster_all_raw_20160125.csv"
# finding and setting your working directory --------------------------
getwd()
# (placeholder path for the tutorial -- replace with your own directory)
setwd("/path/to/directory")
# Step 1 - read in the data ----------------------------------------------------
oyster <- read_csv(oyster_data_path)
colnames(oyster) <- tolower(colnames(oyster))
# Step 2 - selection examples --------------------------------------------------
# Select columns with names
oyster %>% select(date, journey.action, charge)
# Select columns with positions (e.g. column 1, 2, and 3; 5 and 7)
oyster %>% select(1:3, 5, 7)
# "Negative selection" with names
oyster %>% select(-journey.action, -charge)
# "Negative selection" with numbers
oyster %>% select(-c(4, 6, 7))
# Step 3 - filtering examples --------------------------------------------------
# Numeric conditions
oyster %>% filter(charge != 0)
# Text conditions
oyster %>% filter(note != "")
# Multiple conditions, with assignment
whoops <- oyster %>% filter(balance < 0) # filtering with assignment
noteworthy <- oyster %>% filter(note != "" & charge >= 2) # multiple conditions
# Step 4 - grouping and summarising --------------------------------------------
# Compute a single summary
oyster %>% summarise(avg_charge = mean(charge, na.rm = TRUE)) # average charge
# Compute multiple summaries
oyster %>% summarise(avg_charge = mean(charge, na.rm = TRUE), # average charge
                     sd_charge = sd(charge, na.rm = TRUE)) # charge std. deviation
# Aggregate and summarise
oyster %>%
  group_by(journey.action) %>%
  summarise(avg_cost = mean(charge, na.rm = TRUE))
# Summarisation chain to answer question 1
oyster_summary <- oyster %>%
  group_by(journey.action) %>%
  summarise(journeys = n()) %>%
  ungroup() %>%
  arrange(-journeys) %>%
  head(5)
# Step 5 - Removing duff data --------------------------------------------------
# A quick example of slice - selecting rows based on numbers
oyster %>% slice(1:10)
# Set up the pattern to search for
badRecords <- "Topped-up|Season ticket|Unspecified location"
# Search for those patterns
records <- grep(badRecords, oyster$journey.action)
# Check what grep does:
records
# Use slice to cut out the bad records (note that this "updates" the oyster object)
# Guard against the no-matches case: slice(-integer(0)) would return an
# *empty* data frame rather than leaving oyster unchanged.
if (length(records) > 0) {
  oyster <- oyster %>% slice(-records)
}
# Step 6 - Adding fields -------------------------------------------------------
# Set up a new field with a constant value
oyster %>% mutate(newField = 4)
# Set up new field(s) from existing fields
oyster %>% mutate(cost_plus_bal = charge + balance, # add charge to balance
                  cost_plus_bal_clean = sum(charge, balance, na.rm = TRUE)) # clean up
# Set up new fields with conditional logic
oyster %>% mutate(no_cost = ifelse(charge == 0 | is.na(charge), 1, 0))
# Add variables to update the data
oyster <- oyster %>%
  mutate(start.time.clean = paste0(start.time, ":00"), # Create a start time field
         end.time.clean = paste0(end.time, ":00")) # Create a end time field
# Split up existing fields in to new ones
oyster <- oyster %>%
  separate(col = journey.action,
           into = c("from", "to"),
           sep = " to ",
           remove = FALSE)
# Step 7 - working with dates --------------------------------------------------
# Turn text that looks like a date in to an actual date
oyster <- oyster %>% mutate(date.clean = dmy(date))
# Add some text date-times
oyster <- oyster %>%
  mutate(start.datetime = paste(date, start.time.clean, sep = " "),
         end.datetime = paste(date, end.time.clean, sep = " "))
# And then turn them in to actual datetimes (note mutate also updates fields)
oyster <- oyster %>%
  mutate(start.datetime = dmy_hms(start.datetime),
         end.datetime = dmy_hms(end.datetime))
# Step 8 - Date manipulation --------------- -----------------------------------
# Row indices of journeys that *started* in the small hours (00-02)
afterMidnightSrt <- grep("00|01|02", substring(oyster$start.time,1,2))
# Row indices of journeys that *ended* in the small hours (00-02)
afterMidnightEnd <- grep("00|01|02", substring(oyster$end.time,1,2))
# Journeys that ended after midnight but started before it: the set
# difference of the two index vectors.  (The previous elementwise
# comparison `afterMidnightEnd[!(afterMidnightEnd == afterMidnightSrt)]`
# silently recycles when the vectors have different lengths and compares
# by position, not by membership.)
afterMidnight <- setdiff(afterMidnightEnd, afterMidnightSrt)
# Use lubridate to add a day:
oyster[afterMidnight, "end.datetime"] <- oyster[afterMidnight, "end.datetime"] + days(1)
# Final transformations - add a journey time and a day of the week for each journey
oyster <- oyster %>%
  mutate(journey.time = difftime(end.datetime,
                                 start.datetime, units = "mins"),
         journey.weekday = wday(date.clean,
                                label = TRUE,
                                abbr = FALSE))
# Step 9 - answering more detailed questions -----------------------------------
# Longest journey
oyster %>%
  filter(journey.time == max(oyster$journey.time, na.rm = TRUE)) %>%
  select(journey.action, journey.time, date)
# Average journey time by day
oyster %>%
  group_by(journey.weekday) %>%
  summarise(avg_time = floor(mean(journey.time, na.rm = TRUE)))
# Average journeys per day
oyster %>%
  group_by(date.clean, journey.weekday) %>%
  summarise(journeys = n()) %>%
  group_by(journey.weekday) %>%
  summarise(avg_journeys = mean(journeys))
`designdist` <-
    function (x, method = "(A+B-2*J)/(A+B)",
              terms = c("binary", "quadratic", "minimum"),
              abcd = FALSE, alphagamma = FALSE, name)
{
    ## Build a dissimilarity index from a user-supplied formula string.
    ## `method` is evaluated with eval(parse()) against the local
    ## variables defined below (A, B, J, N, P and -- when requested --
    ## a, b, c, d, alpha, gamma, delta).  Those names are part of the
    ## user-facing contract and must not be renamed or reordered.
    terms <- match.arg(terms)
    if ((abcd || alphagamma) && terms != "binary")
        warning("Perhaps terms should be 'binary' with 'abcd' or 'alphagamma'?")
    x <- as.matrix(x)
    N <- nrow(x)   # number of rows (sites)
    P <- ncol(x)   # number of columns (species)
    if (terms == "binary")
        x <- ifelse(x > 0, 1, 0)
    ## The cross-product matrix holds the pairwise shared quantities;
    ## its diagonal holds the per-row totals used for A and B below.
    if (terms == "binary" || terms == "quadratic")
        x <- tcrossprod(x)
    if (terms == "minimum") {
        ## Pairwise sums of minima via the identity
        ## sum(pmin(u, v)) = (sum(u) + sum(v) - sum(|u - v|)) / 2
        r <- rowSums(x)
        x <- dist(x, "manhattan")
        x <- (outer(r, r, "+") - as.matrix(x))/2
    }
    d <- diag(x)
    ## A, B: per-row totals of the two members of each pair;
    ## J: the shared (joint) quantity of the pair.
    A <- as.dist(outer(rep(1, N), d))
    B <- as.dist(outer(d, rep(1, N)))
    J <- as.dist(x)
    ## 2x2 contingency table notation
    if (abcd) {
        a <- J
        b <- A - J
        c <- B - J
        d <- P - A - B + J
    }
    ## beta diversity notation
    if (alphagamma) {
        alpha <- (A + B)/2
        gamma <- A + B - J
        delta <- abs(A - B)/2
    }
    ## Evaluate the user's formula.  NB: eval(parse()) will run arbitrary
    ## code; this is accepted by design of the function's interface.
    dis <- eval(parse(text = method))
    ## Copy the dist attributes (Size, Labels, ...) from J onto the result.
    attributes(dis) <- attributes(J)
    attr(dis, "call") <- match.call()
    if (missing(name))
        attr(dis, "method") <- paste(terms, method)
    else attr(dis, "method") <- name
    dis
}
| /R/designdist.R | no_license | Eric-1986/vegan | R | false | false | 1,310 | r | `designdist` <-
    function (x, method = "(A+B-2*J)/(A+B)",
              terms = c("binary", "quadratic", "minimum"),
              abcd = FALSE, alphagamma = FALSE, name)
{
    ## Build a dissimilarity index from a user-supplied formula string.
    ## `method` is evaluated with eval(parse()) against the local
    ## variables defined below (A, B, J, N, P and -- when requested --
    ## a, b, c, d, alpha, gamma, delta).  Those names are part of the
    ## user-facing contract and must not be renamed or reordered.
    terms <- match.arg(terms)
    if ((abcd || alphagamma) && terms != "binary")
        warning("Perhaps terms should be 'binary' with 'abcd' or 'alphagamma'?")
    x <- as.matrix(x)
    N <- nrow(x)   # number of rows (sites)
    P <- ncol(x)   # number of columns (species)
    if (terms == "binary")
        x <- ifelse(x > 0, 1, 0)
    ## The cross-product matrix holds the pairwise shared quantities;
    ## its diagonal holds the per-row totals used for A and B below.
    if (terms == "binary" || terms == "quadratic")
        x <- tcrossprod(x)
    if (terms == "minimum") {
        ## Pairwise sums of minima via the identity
        ## sum(pmin(u, v)) = (sum(u) + sum(v) - sum(|u - v|)) / 2
        r <- rowSums(x)
        x <- dist(x, "manhattan")
        x <- (outer(r, r, "+") - as.matrix(x))/2
    }
    d <- diag(x)
    ## A, B: per-row totals of the two members of each pair;
    ## J: the shared (joint) quantity of the pair.
    A <- as.dist(outer(rep(1, N), d))
    B <- as.dist(outer(d, rep(1, N)))
    J <- as.dist(x)
    ## 2x2 contingency table notation
    if (abcd) {
        a <- J
        b <- A - J
        c <- B - J
        d <- P - A - B + J
    }
    ## beta diversity notation
    if (alphagamma) {
        alpha <- (A + B)/2
        gamma <- A + B - J
        delta <- abs(A - B)/2
    }
    ## Evaluate the user's formula.  NB: eval(parse()) will run arbitrary
    ## code; this is accepted by design of the function's interface.
    dis <- eval(parse(text = method))
    ## Copy the dist attributes (Size, Labels, ...) from J onto the result.
    attributes(dis) <- attributes(J)
    attr(dis, "call") <- match.call()
    if (missing(name))
        attr(dis, "method") <- paste(terms, method)
    else attr(dis, "method") <- name
    dis
}
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is auto-generated; lasting documentation fixes
# should be made in the roxygen comments of the C++ sources and the file
# regenerated with Rcpp::compileAttributes().
#' Estimates the BKMR-DLM for a multiple time-varying predictor.
#'
#' @param yz a matrix that is cbind(y,Z) where Z is a matrix of covariates that does not include an intercept and y is the vector of outcomes.
#' @param Xlist a list of matrices each for a single exposure in time order.
#' @param b1 first parameter for prior on tau^{-2} in the text.
#' @param a1 first parameter for prior on sigma^{-2}.
#' @param a2 second parameter for prior on sigma^{-2}.
#' @param kappa scale parameter, rho/kappa~chisq(1).
#' @param n_inner number of MCMC iterations to run in the inner loop. This is equivalent to the thinning number. n_outer*n_inner iterations will be run and n_outer iterations will be saved.
#' @param n_outer number of MCMC iterations in the outer loop. The output for each is saved.
#' @param n_burn number of MCMC iterations to be discarded as burn-in.
#' @return The object produced by the compiled '_regimes_bkmrdlm_multi'
#'   routine (saved MCMC output; structure defined in the package's C++ source).
#' @export
bkmrdlm_multi <- function(yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn) {
    .Call('_regimes_bkmrdlm_multi', PACKAGE = 'regimes', yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn)
}
#' Estimates the BKMR-DLM for a multiple time-varying predictor.
#'
#' @param yz a matrix that is cbind(y,Z) where Z is a matrix of covariates that does not include an intercept and y is the vector of outcomes.
#' @param Xlist a list of matrices each for a single exposure in time order.
#' @param b1 first parameter for prior on tau^{-2} in the text.
#' @param a1 first parameter for prior on sigma^{-2}.
#' @param a2 second parameter for prior on sigma^{-2}.
#' @param kappa scale parameter, rho/kappa~chisq(1).
#' @param n_inner number of MCMC iterations to run in the inner loop. This is equivalent to the thinning number. n_outer*n_inner iterations will be run and n_outer iterations will be saved.
#' @param n_outer number of MCMC iterations in the outer loop. The output for each is saved.
#' @param n_burn number of MCMC iterations to be discarded as burn-in.
#' @param d the degree of polynomial for a polynomial kernel.
#' @return The object produced by the compiled
#'   '_regimes_bkmrdlm_multi_polynomial' routine (saved MCMC output;
#'   structure defined in the package's C++ source).
#' @export
bkmrdlm_multi_polynomial <- function(yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn, d) {
    .Call('_regimes_bkmrdlm_multi_polynomial', PACKAGE = 'regimes', yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn, d)
}
#' Estimates the BKMR-DLM for a multiple time-varying predictor.
#'
#' @param yz a matrix that is cbind(y,Z) where Z is a matrix of covariates that does not include an intercept and y is the vector of outcomes.
#' @param Xlist a list of matrices each for a single exposure in time order.
#' @param b1 first parameter for prior on tau^{-2} in the text.
#' @param a1 first parameter for prior on sigma^{-2}.
#' @param a2 second parameter for prior on sigma^{-2}.
#' @param kappa scale parameter, rho/kappa~chisq(1).
#' @param n_inner number of MCMC iterations to run in the inner loop. This is equivalent to the thinning number. n_outer*n_inner iterations will be run and n_outer iterations will be saved.
#' @param n_outer number of MCMC iterations in the outer loop. The output for each is saved.
#' @param n_burn number of MCMC iterations to be discarded as burn-in.
#' @return The object produced by the compiled
#'   '_regimes_bkmrdlm_multi_shrink' routine (saved MCMC output;
#'   structure defined in the package's C++ source).
#' @export
bkmrdlm_multi_shrink <- function(yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn) {
    .Call('_regimes_bkmrdlm_multi_shrink', PACKAGE = 'regimes', yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn)
}
#' Estimates the BKMR-DLM for a multiple time-varying predictor.
#'
#' @param yz a matrix that is cbind(y,Z) where Z is a matrix of covariates that does not include an intercept and y is the vector of outcomes.
#' @param Xlist a list of matrices each for a single exposure in time order.
#' @param b1 first parameter for prior on tau^{-2} in the text.
#' @param a1 first parameter for prior on sigma^{-2}.
#' @param a2 second parameter for prior on sigma^{-2}.
#' @param kappa scale parameter, rho/kappa~chisq(1).
#' @param n_inner number of MCMC iterations to run in the inner loop. This is equivalent to the thinning number. n_outer*n_inner iterations will be run and n_outer iterations will be saved.
#' @param n_outer number of MCMC iterations in the outer loop. The output for each is saved.
#' @param n_burn number of MCMC iterations to be discarded as burn-in.
#' @param d the degree of polynomial for a polynomial kernel.
#' @return The object produced by the compiled
#'   '_regimes_bkmrdlm_multi_shrink_polynomial' routine (saved MCMC output;
#'   structure defined in the package's C++ source).
#' @export
bkmrdlm_multi_shrink_polynomial <- function(yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn, d) {
    .Call('_regimes_bkmrdlm_multi_shrink_polynomial', PACKAGE = 'regimes', yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn, d)
}
| /R/RcppExports.R | no_license | niehs-prime/regimes | R | false | false | 4,663 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is auto-generated; lasting documentation fixes
# should be made in the roxygen comments of the C++ sources and the file
# regenerated with Rcpp::compileAttributes().
#' Estimates the BKMR-DLM for a multiple time-varying predictor.
#'
#' @param yz a matrix that is cbind(y,Z) where Z is a matrix of covariates that does not include an intercept and y is the vector of outcomes.
#' @param Xlist a list of matrices each for a single exposure in time order.
#' @param b1 first parameter for prior on tau^{-2} in the text.
#' @param a1 first parameter for prior on sigma^{-2}.
#' @param a2 second parameter for prior on sigma^{-2}.
#' @param kappa scale parameter, rho/kappa~chisq(1).
#' @param n_inner number of MCMC iterations to run in the inner loop. This is equivalent to the thinning number. n_outer*n_inner iterations will be run and n_outer iterations will be saved.
#' @param n_outer number of MCMC iterations in the outer loop. The output for each is saved.
#' @param n_burn number of MCMC iterations to be discarded as burn-in.
#' @return The object produced by the compiled '_regimes_bkmrdlm_multi'
#'   routine (saved MCMC output; structure defined in the package's C++ source).
#' @export
bkmrdlm_multi <- function(yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn) {
    .Call('_regimes_bkmrdlm_multi', PACKAGE = 'regimes', yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn)
}
#' Estimates the BKMR-DLM for a multiple time-varying predictor.
#'
#' @param yz a matrix that is cbind(y,Z) where Z is a matrix of covariates that does not include an intercept and y is the vector of outcomes.
#' @param Xlist a list of matrices each for a single exposure in time order.
#' @param b1 first parameter for prior on tau^{-2} in the text.
#' @param a1 first parameter for prior on sigma^{-2}.
#' @param a2 second parameter for prior on sigma^{-2}.
#' @param kappa scale parameter, rho/kappa~chisq(1).
#' @param n_inner number of MCMC iterations to run in the inner loop. This is equivalent to the thinning number. n_outer*n_inner iterations will be run and n_outer iterations will be saved.
#' @param n_outer number of MCMC iterations in the outer loop. The output for each is saved.
#' @param n_burn number of MCMC iterations to be discarded as burn-in.
#' @param d the degree of polynomial for a polynomial kernel.
#' @return The object produced by the compiled
#'   '_regimes_bkmrdlm_multi_polynomial' routine (saved MCMC output;
#'   structure defined in the package's C++ source).
#' @export
bkmrdlm_multi_polynomial <- function(yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn, d) {
    .Call('_regimes_bkmrdlm_multi_polynomial', PACKAGE = 'regimes', yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn, d)
}
#' Estimates the BKMR-DLM for a multiple time-varying predictor.
#'
#' @param yz a matrix that is cbind(y,Z) where Z is a matrix of covariates that does not include an intercept and y is the vector of outcomes.
#' @param Xlist a list of matrices each for a single exposure in time order.
#' @param b1 first parameter for prior on tau^{-2} in the text.
#' @param a1 first parameter for prior on sigma^{-2}.
#' @param a2 second parameter for prior on sigma^{-2}.
#' @param kappa scale parameter, rho/kappa~chisq(1).
#' @param n_inner number of MCMC iterations to run in the inner loop. This is equivalent to the thinning number. n_outer*n_inner iterations will be run and n_outer iterations will be saved.
#' @param n_outer number of MCMC iterations in the outer loop. The output for each is saved.
#' @param n_burn number of MCMC iterations to be discarded as burn-in.
#' @return The object produced by the compiled
#'   '_regimes_bkmrdlm_multi_shrink' routine (saved MCMC output;
#'   structure defined in the package's C++ source).
#' @export
bkmrdlm_multi_shrink <- function(yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn) {
    .Call('_regimes_bkmrdlm_multi_shrink', PACKAGE = 'regimes', yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn)
}
#' Estimates the BKMR-DLM for a multiple time-varying predictor.
#'
#' @param yz a matrix that is cbind(y,Z) where Z is a matrix of covariates that does not include an intercept and y is the vector of outcomes.
#' @param Xlist a list of matrices each for a single exposure in time order.
#' @param b1 first parameter for prior on tau^{-2} in the text.
#' @param a1 first parameter for prior on sigma^{-2}.
#' @param a2 second parameter for prior on sigma^{-2}.
#' @param kappa scale parameter, rho/kappa~chisq(1).
#' @param n_inner number of MCMC iterations to run in the inner loop. This is equivalent to the thinning number. n_outer*n_inner iterations will be run and n_outer iterations will be saved.
#' @param n_outer number of MCMC iterations in the outer loop. The output for each is saved.
#' @param n_burn number of MCMC iterations to be discarded as burn-in.
#' @param d the degree of polynomial for a polynomial kernel.
#' @return The object produced by the compiled
#'   '_regimes_bkmrdlm_multi_shrink_polynomial' routine (saved MCMC output;
#'   structure defined in the package's C++ source).
#' @export
bkmrdlm_multi_shrink_polynomial <- function(yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn, d) {
    .Call('_regimes_bkmrdlm_multi_shrink_polynomial', PACKAGE = 'regimes', yz, Xlist, b1, a1, a2, kappa, n_inner, n_outer, n_burn, d)
}
|
#' Render Scene
#'
#' Takes the scene description and renders an image, either to the device or to a filename.
#'
#' @param scene Tibble of object locations and properties.
#' @param width Default `400`. Width of the render, in pixels.
#' @param height Default `400`. Height of the render, in pixels.
#' @param fov Default `20`. Field of view, in degrees. If this is zero, the camera will use an orthographic projection. The size of the plane
#' used to create the orthographic projection is given in argument `ortho_dimensions`.
#' @param samples Default `100`. The maximum number of samples for each pixel. If this is a length-2
#' vector and the `sample_method` is `stratified`, this will control the number of strata in each dimension.
#' The total number of samples in this case will be the product of the two numbers.
#' @param min_variance Default `0.00005`. Minimum acceptable variance for a block of pixels for the
#' adaptive sampler. Smaller numbers give higher quality images, at the expense of longer rendering times.
#' If this is set to zero, the adaptive sampler will be turned off and the renderer
#' will use the maximum number of samples everywhere.
#' @param min_adaptive_size Default `8`. Width of the minimum block size in the adaptive sampler.
#' @param sample_method Default `random`. The type of sampling method used to generate
#' random numbers. The other option is `stratified`, which can improve the render quality (at the cost
#' of increased time allocating the random samples).
#' @param max_depth Default `50`. Maximum number of bounces a ray can make in a scene.
#' @param roulette_active_depth Default `10`. Number of ray bounces until a ray can stop bouncing via
#' Russian roulette.
#' @param ambient_light Default `FALSE`, unless there are no emitting objects in the scene.
#' If `TRUE`, the background will be a gradient varying from `backgroundhigh` directly up (+y) to
#' `backgroundlow` directly down (-y).
#' @param lookfrom Default `c(0,1,10)`. Location of the camera.
#' @param lookat Default `c(0,0,0)`. Location where the camera is pointed.
#' @param camera_up Default `c(0,1,0)`. Vector indicating the "up" position of the camera.
#' @param aperture Default `0.1`. Aperture of the camera. Smaller numbers will increase depth of field, causing
#' less blurring in areas not in focus.
#' @param clamp_value Default `Inf`. If a bright light or a reflective material is in the scene, occasionally
#' there will be bright spots that will not go away even with a large number of samples. These
#' can be removed (at the cost of slightly darkening the image) by setting this to a small number greater than 1.
#' @param filename Default `NULL`. If present, the renderer will write to the filename instead
#' of the current device.
#' @param backgroundhigh Default `#80b4ff`. The "high" color in the background gradient. Can be either
#' a hexadecimal code, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param backgroundlow Default `#ffffff`. The "low" color in the background gradient. Can be either
#' a hexadecimal code, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param shutteropen Default `0`. Time at which the shutter is open. Only affects moving objects.
#' @param shutterclose Default `1`. Time at which the shutter is open. Only affects moving objects.
#' @param focal_distance Default `NULL`, automatically set to the `lookfrom-lookat` distance unless
#' otherwise specified.
#' @param ortho_dimensions Default `c(1,1)`. Width and height of the orthographic camera. Will only be used if `fov = 0`.
#' @param tonemap Default `gamma`. Choose the tone mapping function,
#' Default `gamma` solely adjusts for gamma and clamps values greater than 1 to 1.
#' `reinhold` scales values by their individual color channels `color/(1+color)` and then performs the
#' gamma adjustment. `uncharted` uses the mapping developed for Uncharted 2 by John Hable. `hbd` uses an
#' optimized formula by Jim Hejl and Richard Burgess-Dawson. Note: If set to anything other than `gamma`,
#' objects with material `light()` may not be anti-aliased. If `raw`, the raw array of HDR values will be
#' returned, rather than an image or a plot.
#' @param bloom Default `TRUE`. Set to `FALSE` to get the raw, pathtraced image. Otherwise,
#' this performs a convolution of the HDR image of the scene with a sharp, long-tailed
#' exponential kernel, which does not visibly affect dimly lit pixels, but does result in emitted light
#' slightly bleeding into adjacent pixels. This provides an antialiasing effect for lights, even when
#' tonemapping the image. Pass in a matrix to specify the convolution kernel manually, or a positive number
#' to control the intensity of the bloom (higher number = more bloom).
#' @param environment_light Default `NULL`. An image to be used for the background for rays that escape
#' the scene. Supports both HDR (`.hdr`) and low-dynamic range (`.png`, `.jpg`) images.
#' @param rotate_env Default `0`. The number of degrees to rotate the environment map around the scene.
#' @param intensity_env Default `1`. The amount to increase the intensity of the environment lighting. Useful
#' if using a LDR (JPEG or PNG) image as an environment map.
#' @param debug_channel Default `none`. If `depth`, function will return a depth map of rays into the scene
#' instead of an image. If `normals`, function will return an image of scene normals, mapped from 0 to 1.
#' If `uv`, function will return an image of the uv coords. If `variance`, function will return an image
#' showing the number of samples needed to take for each block to converge. If `dpdu` or `dpdv`, function will return
#' an image showing the differential `u` and `v` coordinates. If `color`, function will return the raw albedo
#' values (with white for `metal` and `dielectric` materials).
#' @param return_raw_array Default `FALSE`. If `TRUE`, function will return raw array with RGB intensity
#' information.
#' @param parallel Default `TRUE`. If `TRUE`, it will use all available cores to render the image
#' (or the number specified in `options("cores")` if that option is not `NULL`).
#' @param progress Default `TRUE` if interactive session, `FALSE` otherwise.
#' @param verbose Default `FALSE`. Prints information and timing information about scene
#' construction and raytracing progress.
#' @export
#' @importFrom grDevices col2rgb
#' @return Raytraced plot to current device, or an image saved to a file.
#'
#' @examples
#' #Generate a large checkered sphere as the ground
#' \donttest{
#' scene = generate_ground(depth=-0.5, material = diffuse(color="white", checkercolor="darkgreen"))
#' render_scene(scene,parallel=TRUE,samples=500)
#' }
#'
#' #Add a sphere to the center
#' \donttest{
#' scene = scene %>%
#' add_object(sphere(x=0,y=0,z=0,radius=0.5,material = diffuse(color=c(1,0,1))))
#' render_scene(scene,fov=20,parallel=TRUE,samples=500)
#' }
#'
#' #Add a marbled cube
#' \donttest{
#' scene = scene %>%
#' add_object(cube(x=1.1,y=0,z=0,material = diffuse(noise=3)))
#' render_scene(scene,fov=20,parallel=TRUE,samples=500)
#' }
#'
#' #Add a metallic gold sphere, using stratified sampling for a higher quality render
#' \donttest{
#' scene = scene %>%
#' add_object(sphere(x=-1.1,y=0,z=0,radius=0.5,material = metal(color="gold",fuzz=0.1)))
#' render_scene(scene,fov=20,parallel=TRUE,samples=500, sample_method = "stratified")
#' }
#'
#' #Lower the number of samples to render more quickly (here, we also use only one core).
#' \donttest{
#' render_scene(scene, samples=4)
#' }
#'
#' #Add a floating R plot using the iris dataset as a png onto a floating 2D rectangle
#'
#' \donttest{
#' tempfileplot = tempfile()
#' png(filename=tempfileplot,height=400,width=800)
#' plot(iris$Petal.Length,iris$Sepal.Width,col=iris$Species,pch=18,cex=4)
#' dev.off()
#'
#' image_array = aperm(png::readPNG(tempfileplot),c(2,1,3))
#' scene = scene %>%
#' add_object(xy_rect(x=0,y=1.1,z=0,xwidth=2,angle = c(0,180,0),
#' material = diffuse(image_texture = image_array)))
#' render_scene(scene,fov=20,parallel=TRUE,samples=500)
#' }
#'
#' #Move the camera
#' \donttest{
#' render_scene(scene,lookfrom = c(7,1.5,10),lookat = c(0,0.5,0),fov=15,parallel=TRUE)
#' }
#'
#' #Change the background gradient to a night time ambiance
#' \donttest{
#' render_scene(scene,lookfrom = c(7,1.5,10),lookat = c(0,0.5,0),fov=15,
#' backgroundhigh = "#282375", backgroundlow = "#7e77ea", parallel=TRUE,
#' samples=500)
#' }
#'
#'#Increase the aperture to blur objects that are further from the focal plane.
#' \donttest{
#' render_scene(scene,lookfrom = c(7,1.5,10),lookat = c(0,0.5,0),fov=15,
#' aperture = 0.5,parallel=TRUE,samples=500)
#' }
#'
#'#Spin the camera around the scene, decreasing the number of samples to render faster. To make
#'#an animation, specify a filename in `render_scene` for each frame and use the `av` package
#'#or ffmpeg to combine them all into a movie.
#'
#'t=1:30
#'xpos = 10 * sin(t*12*pi/180+pi/2)
#'zpos = 10 * cos(t*12*pi/180+pi/2)
#'\donttest{
#'#Save old par() settings
#'old.par = par(no.readonly = TRUE)
#'on.exit(par(old.par))
#'par(mfrow=c(5,6))
#'for(i in 1:30) {
#' render_scene(scene, samples=16,
#' lookfrom = c(xpos[i],1.5,zpos[i]),lookat = c(0,0.5,0), parallel=TRUE)
#'}
#'}
render_scene = function(scene, width = 400, height = 400, fov = 20,
                        samples = 100, min_variance = 0.00005, min_adaptive_size = 8,
                        sample_method = "random",
                        max_depth = 50, roulette_active_depth = 10,
                        ambient_light = FALSE,
                        lookfrom = c(0,1,10), lookat = c(0,0,0), camera_up = c(0,1,0),
                        aperture = 0.1, clamp_value = Inf,
                        filename = NULL, backgroundhigh = "#80b4ff",backgroundlow = "#ffffff",
                        shutteropen = 0.0, shutterclose = 1.0, focal_distance=NULL, ortho_dimensions = c(1,1),
                        tonemap ="gamma", bloom = TRUE, parallel=TRUE,
                        environment_light = NULL, rotate_env = 0, intensity_env = 1,
                        debug_channel = "none", return_raw_array = FALSE,
                        progress = interactive(), verbose = FALSE) {
  #Start the scene-construction timer for the verbose report printed below
  if(verbose) {
    currenttime = proc.time()
    cat("Building Scene: ")
  }
  #Check if Cornell Box scene and set camera if user did not:
  #(generate_cornell() tags the scene with attr "cornell"; only arguments the
  #user left at their defaults are overridden, and a message lists what was set)
  if(!is.null(attr(scene,"cornell"))) {
    corn_message = "Setting default values for Cornell box: "
    missing_corn = FALSE
    if(missing(lookfrom)) {
      lookfrom = c(278, 278, -800)
      corn_message = paste0(corn_message, "lookfrom `c(278,278,-800)` ")
      missing_corn = TRUE
    }
    if(missing(lookat)) {
      lookat = c(278, 278, 0)
      corn_message = paste0(corn_message, "lookat `c(278,278,0)` ")
      missing_corn = TRUE
    }
    if(missing(fov)) {
      fov=40
      corn_message = paste0(corn_message, "fov `40` ")
      missing_corn = TRUE
    }
    if(fov == 0 && missing(ortho_dimensions)) {
      ortho_dimensions = c(580,580)
      corn_message = paste0(corn_message, "ortho_dimensions `c(580, 580)` ")
      missing_corn = TRUE
    }
    corn_message = paste0(corn_message,".")
    if(missing_corn) {
      message(corn_message)
    }
  }
  #Reject a camera_up that is exactly parallel to the view direction: the
  #expression below is the cross product (via index permutation), which is all
  #zeros iff the two vectors are collinear, making the camera basis degenerate.
  lookvec = (lookat - lookfrom)
  i1 = c(2,3,1)
  i2 = c(3,1,2)
  if(all(lookvec[i1]*camera_up[i2] - lookvec[i2]*camera_up[i1] == 0)) {
    stop("camera_up value c(", paste(camera_up, collapse=","), ") is aligned exactly with camera vector (lookat - lookfrom). Choose a different value for camera_up.")
  }
  #Convert hex codes/color names to numeric RGB triplets
  backgroundhigh = convert_color(backgroundhigh)
  backgroundlow = convert_color(backgroundlow)
  #Gather per-object positions and radii from the scene tibble
  position_list = list()
  position_list$xvec = scene$x
  position_list$yvec = scene$y
  position_list$zvec = scene$z
  rvec = scene$radius
  #Map shape and material-type names to the integer codes expected by the
  #C++ renderer (render_scene_rcpp)
  shapevec = unlist(lapply(tolower(scene$shape),switch,
                     "sphere" = 1,"xy_rect" = 2, "xz_rect" = 3,"yz_rect" = 4,"box" = 5, "triangle" = 6,
                     "obj" = 7, "objcolor" = 8, "disk" = 9, "cylinder" = 10, "ellipsoid" = 11,
                     "objvertexcolor" = 12, "cone" = 13, "curve" = 14, "csg_object" = 15))
  typevec = unlist(lapply(tolower(scene$type),switch,
                    "diffuse" = 1,"metal" = 2,"dielectric" = 3,
                    "oren-nayar" = 4, "light" = 5, "microfacet" = 6,
                    "glossy" = 7, "spotlight" = 8))
  sigmavec = unlist(scene$sigma)
  assertthat::assert_that(tonemap %in% c("gamma","reinhold","uncharted", "hbd", "raw"))
  toneval = switch(tonemap, "gamma" = 1,"reinhold" = 2,"uncharted" = 3,"hbd" = 4, "raw" = 5)
  #An object is "moving" if any velocity component is non-zero
  movingvec = purrr::map_lgl(scene$velocity,.f = ~any(.x != 0))
  proplist = scene$properties
  vel_list = scene$velocity
  checkeredlist = scene$checkercolor
  checkeredbool = purrr::map_lgl(checkeredlist,.f = ~all(!is.na(.x)))
  #glossy
  glossyinfo = scene$glossyinfo
  #gradient handler
  gradient_info = list()
  gradient_info$gradient_colors = scene$gradient_color
  gradient_info$isgradient = purrr::map_lgl(gradient_info$gradient_colors,.f = ~all(!is.na(.x)))
  gradient_info$gradient_trans = scene$gradient_transpose
  gradient_info$is_world_gradient = scene$world_gradient
  gradient_info$gradient_control_points = scene$gradient_point_info
  gradient_info$type = unlist(lapply(tolower(scene$gradient_type),switch,
                              "hsv" = TRUE, "rgb" = FALSE, FALSE))
  #noise handler
  noisebool = purrr::map_lgl(scene$noise, .f = ~.x > 0)
  noisevec = scene$noise
  noisephasevec = scene$noisephase * pi/180  #degrees -> radians
  noiseintvec = scene$noiseintensity
  noisecolorlist = scene$noisecolor
  #rotation handler
  rot_angle_list = scene$angle
  #fog handler
  fog_bool = scene$fog
  fog_vec = scene$fogdensity
  #flip handler
  flip_vec = scene$flipped
  #light handler
  light_prop_vec = scene$lightintensity
  #If the scene has no emitters (no light/spotlight materials) and the user
  #specified neither ambient_light nor environment_light, fall back to the
  #ambient gradient background so the render is not completely black.
  if(!any(typevec == 5) && !any(typevec == 8) && missing(ambient_light) && missing(environment_light)) {
    ambient_light = TRUE
  }
  #texture handler
  #In-memory texture arrays are written out to temporary PNGs (the C++ side
  #reads textures from file); character entries are treated as file paths and
  #validated here.
  image_array_list = scene$image
  image_tex_bool = purrr::map_lgl(image_array_list,.f = ~is.array(.x))
  image_filename_bool = purrr::map_lgl(image_array_list,.f = ~is.character(.x))
  temp_file_names = purrr::map_chr(image_tex_bool,.f = ~ifelse(.x, tempfile(fileext = ".png"),""))
  for(i in 1:length(image_array_list)) {
    if(image_tex_bool[i]) {
      if(dim(image_array_list[[i]])[3] == 4) {
        #RGBA input: drop the alpha channel before writing
        png::writePNG(fliplr(aperm(image_array_list[[i]][,,1:3],c(2,1,3))),temp_file_names[i])
      } else if(dim(image_array_list[[i]])[3] == 3){
        png::writePNG(fliplr(aperm(image_array_list[[i]],c(2,1,3))),temp_file_names[i])
      }
    }
    if(image_filename_bool[i]) {
      if(any(!file.exists(path.expand(image_array_list[[i]])) & nchar(image_array_list[[i]]) > 0)) {
        stop(paste0("Cannot find the following texture file:\n",
                    paste(image_array_list[[i]], collapse="\n")))
      }
      temp_file_names[i] = path.expand(image_array_list[[i]])
    }
  }
  image_tex_bool = image_tex_bool | image_filename_bool
  image_repeat = scene$image_repeat
  #alpha texture handler
  #Same strategy as above, but alpha maps are stored one level deeper
  #([[i]][[1]]) and a 2D matrix or the alpha channel of an RGBA image is
  #broadcast into all three channels of the written PNG.
  alpha_array_list = scene$alphaimage
  alpha_tex_bool = purrr::map_lgl(alpha_array_list,.f = ~is.array(.x[[1]]))
  alpha_filename_bool = purrr::map_lgl(alpha_array_list,.f = ~is.character(.x[[1]]))
  alpha_temp_file_names = purrr::map_chr(alpha_tex_bool, .f = (function(.x) tempfile(fileext = ".png")))
  for(i in 1:length(alpha_array_list)) {
    if(alpha_tex_bool[i]) {
      if(length(dim(alpha_array_list[[i]][[1]])) == 2) {
        png::writePNG(fliplr(t(alpha_array_list[[i]][[1]])), alpha_temp_file_names[i])
      } else if(dim(alpha_array_list[[i]][[1]])[3] == 4) {
        alpha_array_list[[i]][[1]][,,1] = alpha_array_list[[i]][[1]][,,4]
        alpha_array_list[[i]][[1]][,,2] = alpha_array_list[[i]][[1]][,,4]
        alpha_array_list[[i]][[1]][,,3] = alpha_array_list[[i]][[1]][,,4]
        png::writePNG(fliplr(aperm(alpha_array_list[[i]][[1]][,,1:3],c(2,1,3))), alpha_temp_file_names[i])
      } else if(dim(alpha_array_list[[i]][[1]])[3] == 3) {
        png::writePNG(fliplr(aperm(alpha_array_list[[i]][[1]],c(2,1,3))), alpha_temp_file_names[i])
      } else {
        stop("alpha texture dims: c(", paste(dim(alpha_array_list[[i]][[1]]),collapse=", "), ") not valid for texture.")
      }
    }
    if(alpha_filename_bool[i]) {
      if(any(!file.exists(path.expand(alpha_array_list[[i]][[1]])) & nchar(alpha_array_list[[i]][[1]]) > 0)) {
        stop(paste0("Cannot find the following texture file:\n",
                    paste(alpha_array_list[[i]][[1]], collapse="\n")))
      }
      temp_array = png::readPNG(alpha_array_list[[i]][[1]])
      #If the file's alpha channel carries information, copy it into RGB
      if(dim(temp_array)[3] == 4 && any(temp_array[,,4] != 1)) {
        temp_array[,,1] = temp_array[,,4]
        temp_array[,,2] = temp_array[,,4]
        temp_array[,,3] = temp_array[,,4]
      }
      png::writePNG(temp_array,alpha_temp_file_names[i])
    }
  }
  alpha_tex_bool = alpha_tex_bool | alpha_filename_bool
  alphalist = list()
  alphalist$alpha_temp_file_names = alpha_temp_file_names
  alphalist$alpha_tex_bool = alpha_tex_bool
  #bump texture handler
  #2D bump matrices are expanded to 3 identical channels before writing
  bump_array_list = scene$bump_texture
  bump_tex_bool = purrr::map_lgl(bump_array_list,.f = ~is.array(.x[[1]]))
  bump_filename_bool = purrr::map_lgl(bump_array_list,.f = ~is.character(.x[[1]]))
  bump_temp_file_names = purrr::map_chr(bump_tex_bool,.f = ~ifelse(.x, tempfile(fileext = ".png"),""))
  for(i in 1:length(bump_array_list)) {
    if(bump_tex_bool[i]) {
      bump_dims = dim(bump_array_list[[i]][[1]])
      if(length(bump_dims) == 2) {
        temp_array = array(0, dim = c(bump_dims,3))
        temp_array[,,1] = bump_array_list[[i]][[1]]
        temp_array[,,2] = bump_array_list[[i]][[1]]
        temp_array[,,3] = bump_array_list[[i]][[1]]
        bump_dims = c(bump_dims,3)
      } else {
        temp_array = bump_array_list[[i]][[1]]
      }
      if(bump_dims[3] == 4) {
        png::writePNG(fliplr(aperm(temp_array[,,1:3],c(2,1,3))),bump_temp_file_names[i])
      } else if(bump_dims[3] == 3){
        png::writePNG(fliplr(aperm(temp_array,c(2,1,3))),bump_temp_file_names[i])
      }
    }
    if(bump_filename_bool[i]) {
      if(any(!file.exists(path.expand(bump_array_list[[i]][[1]])) & nchar(bump_array_list[[i]][[1]]) > 0)) {
        stop(paste0("Cannot find the following texture file:\n",
                    paste(bump_array_list[[i]][[1]], collapse="\n")))
      }
      bump_temp_file_names[i] = path.expand(bump_array_list[[i]][[1]])
    }
  }
  bump_tex_bool = bump_tex_bool | bump_filename_bool
  bump_intensity = scene$bump_intensity
  alphalist$bump_temp_file_names = bump_temp_file_names
  alphalist$bump_tex_bool = bump_tex_bool
  alphalist$bump_intensity = bump_intensity
  #movement handler
  #A zero-length exposure window disables motion blur entirely
  if(shutteropen == shutterclose) {
    movingvec = rep(FALSE,length(movingvec))
  }
  #implicit sampling handler
  implicit_vec = scene$implicit_sample
  #order rotation handler
  order_rotation_list = scene$order_rotation
  #group handler
  group_bool = purrr::map_lgl(scene$pivot_point,.f = ~all(!is.na(.x)))
  group_pivot = scene$pivot_point
  group_angle = scene$group_angle
  group_order_rotation = scene$group_order_rotation
  group_translate = scene$group_translate
  group_scale = scene$group_scale
  #triangle normal handler
  #Shape code 6 == triangle; normals are only present if properties are non-NA
  tri_normal_bools = purrr::map2_lgl(shapevec,proplist,.f = ~.x == 6 && all(!is.na(.y)))
  tri_color_vert = scene$tricolorinfo
  is_tri_color = purrr::map_lgl(tri_color_vert,.f = ~all(!is.na(.x)))
  #obj handler
  fileinfovec = scene$fileinfo
  fileinfovec[is.na(fileinfovec)] = ""
  objfilenamevec = purrr::map_chr(fileinfovec, path.expand)
  if(any(!file.exists(objfilenamevec) & nchar(objfilenamevec) > 0)) {
    stop(paste0("Cannot find the following .obj files:\n",
                paste(objfilenamevec[!file.exists(objfilenamevec) & nchar(objfilenamevec) > 0],
                      collapse="\n")
                ))
  }
  objbasedirvec = purrr::map_chr(objfilenamevec, dirname)
  #bg image handler
  #A missing environment file only warns and disables the background image;
  #a directory path is a hard error.
  if(!is.null(environment_light)) {
    hasbackground = TRUE
    backgroundstring = path.expand(environment_light)
    if(!file.exists(environment_light)) {
      hasbackground = FALSE
      warning("file '", environment_light, "' cannot be found, not using background image.")
    }
    if(dir.exists(environment_light)) {
      stop("environment_light argument '", environment_light, "' is a directory, not a file.")
    }
  } else {
    hasbackground = FALSE
    backgroundstring = ""
  }
  #scale handler
  scale_factor = scene$scale_factor
  #Sanity checks: every per-object vector must have the same length, and the
  #camera vectors must be 3D.
  assertthat::assert_that(all(c(length(position_list$xvec),length(position_list$yvec),length(position_list$zvec),length(rvec),length(typevec),length(proplist)) == length(position_list$xvec)))
  assertthat::assert_that(all(!is.null(typevec)))
  assertthat::assert_that(length(lookfrom) == 3)
  assertthat::assert_that(length(lookat) == 3)
  #Default focal plane: the distance from the camera to the lookat point
  if(is.null(focal_distance)) {
    focal_distance = sqrt(sum((lookfrom-lookat)^2))
  }
  #Core count: honor options("cores") if set, otherwise use all detected cores;
  #parallel = FALSE forces single-threaded rendering.
  if(!is.null(options("cores")[[1]])) {
    numbercores = options("cores")[[1]]
  } else {
    numbercores = parallel::detectCores()
  }
  if(!parallel) {
    numbercores = 1
  }
  #A numeric debug_channel is interpreted as a light direction vector
  #(debug mode 9); otherwise map the channel name to its integer code.
  if(!is.numeric(debug_channel)) {
    debug_channel = unlist(lapply(tolower(debug_channel),switch,
                           "none" = 0,"depth" = 1,"normals" = 2, "uv" = 3, "bvh" = 4,
                           "variance" = 5, "normal" = 2, "dpdu" = 6, "dpdv" = 7, "color" = 8,
                           0))
    light_direction = c(0,1,0)
  } else {
    light_direction = debug_channel
    debug_channel = 9
  }
  if(debug_channel == 4) {
    message("rayrender must be compiled with option DEBUGBVH for this debug option to work")
  }
  if(fov == 0) {
    #Orthographic camera requires explicit plane dimensions
    assertthat::assert_that(length(ortho_dimensions) == 2)
  }
  if(verbose) {
    buildingtime = proc.time() - currenttime
    cat(sprintf("%0.3f seconds \n",buildingtime[3]))
  }
  sample_method = unlist(lapply(tolower(sample_method),switch,
                         "random" = 0,"stratified" = 1, 0))
  #Bundle all camera parameters for the C++ renderer. A length-2 `samples`
  #gives the stratification dimensions directly; otherwise strata default to
  #min(floor(sqrt(samples)), 8) per dimension.
  camera_info = list()
  strat_dim = c()
  if(length(samples) == 2) {
    strat_dim = samples
    samples = samples[1]*samples[2]
  } else {
    strat_dim = rep(min(floor(sqrt(samples)),8),2)
  }
  camera_info$nx = width
  camera_info$ny = height
  camera_info$ns = samples
  camera_info$fov = fov
  camera_info$lookfrom = lookfrom
  camera_info$lookat = lookat
  camera_info$aperture = aperture
  camera_info$camera_up = camera_up
  camera_info$shutteropen = shutteropen
  camera_info$shutterclose = shutterclose
  camera_info$ortho_dimensions = ortho_dimensions
  camera_info$focal_distance = focal_distance
  camera_info$max_depth = max_depth
  camera_info$roulette_active_depth = roulette_active_depth
  camera_info$sample_method = sample_method
  camera_info$stratified_dim = strat_dim
  camera_info$light_direction = light_direction
  assertthat::assert_that(max_depth > 0)
  assertthat::assert_that(roulette_active_depth > 0)
  #Spotlight handler
  if(any(typevec == 8)) {
    if(any(shapevec[typevec == 8] > 4)) {
      stop("spotlights are only supported for spheres and rects")
    }
    for(i in 1:length(proplist)) {
      if(typevec[i] == 8) {
        #Convert the spotlight's focus point (properties 4:6) from world
        #coordinates to coordinates relative to the light's position
        proplist[[i]][4:6] = proplist[[i]][4:6] - c(position_list$xvec[i],position_list$yvec[i],position_list$zvec[i])
      }
    }
  }
  #Material ID handler; these must show up in increasing order. Note, this will
  #cause problems if `match` is ever changed to return doubles when matching in
  #long vectors as has happened with `which` recently.
  material_id = scene$material_id
  material_id = as.integer(match(material_id, unique(material_id)) - 1L)
  material_id_bool = !is.na(scene$material_id)
  if(min_adaptive_size < 1) {
    warning("min_adaptive_size cannot be less than one: setting to one")
    min_adaptive_size = 1
  }
  if(min_variance < 0) {
    stop("min_variance cannot be less than zero")
  }
  #CSG handler
  csg_list = scene$csg_object
  csg_info = list()
  csg_info$csg = csg_list
  #Hand everything to the C++ pathtracer; returns a list of R/G/B matrices
  rgb_mat = render_scene_rcpp(camera_info = camera_info, ambient_light = ambient_light,
                              type = typevec, shape = shapevec, radius = rvec,
                              position_list = position_list,
                              properties = proplist, velocity = vel_list, moving = movingvec,
                              n = length(typevec),
                              bghigh = backgroundhigh, bglow = backgroundlow,
                              ischeckered = checkeredbool, checkercolors = checkeredlist,
                              gradient_info = gradient_info,
                              noise=noisevec,isnoise=noisebool,noisephase=noisephasevec,
                              noiseintensity=noiseintvec, noisecolorlist = noisecolorlist,
                              angle = rot_angle_list, isimage = image_tex_bool, filelocation = temp_file_names,
                              alphalist = alphalist,
                              lightintensity = light_prop_vec,isflipped = flip_vec,
                              isvolume=fog_bool, voldensity = fog_vec,
                              implicit_sample = implicit_vec, order_rotation_list = order_rotation_list, clampval = clamp_value,
                              isgrouped = group_bool, group_pivot=group_pivot, group_translate = group_translate,
                              group_angle = group_angle, group_order_rotation = group_order_rotation, group_scale = group_scale,
                              tri_normal_bools = tri_normal_bools, is_tri_color = is_tri_color, tri_color_vert= tri_color_vert,
                              fileinfo = objfilenamevec, filebasedir = objbasedirvec,
                              progress_bar = progress, numbercores = numbercores,
                              hasbackground = hasbackground, background = backgroundstring, scale_list = scale_factor,
                              sigmavec = sigmavec, rotate_env = rotate_env, intensity_env = intensity_env,
                              verbose = verbose, debug_channel = debug_channel,
                              shared_id_mat=material_id, is_shared_mat=material_id_bool,
                              min_variance = min_variance, min_adaptive_size = min_adaptive_size,
                              glossyinfo = glossyinfo, image_repeat = image_repeat, csg_info = csg_info)
  #Assemble the returned channel matrices into an image-oriented HxWx3 array
  full_array = array(0,c(ncol(rgb_mat$r),nrow(rgb_mat$r),3))
  full_array[,,1] = flipud(t(rgb_mat$r))
  full_array[,,2] = flipud(t(rgb_mat$g))
  full_array[,,3] = flipud(t(rgb_mat$b))
  #Depth debug output: normalize the depth map to [0,1] for display, but
  #return the raw (unnormalized) array invisibly.
  if(debug_channel == 1) {
    returnmat = full_array[,,1]
    returnmat[is.infinite(returnmat)] = NA
    if(is.null(filename)) {
      if(!return_raw_array) {
        plot_map((full_array-min(full_array,na.rm=TRUE))/(max(full_array,na.rm=TRUE) - min(full_array,na.rm=TRUE)))
      }
      return(invisible(full_array))
    } else {
      save_png((full_array-min(full_array,na.rm=TRUE))/(max(full_array,na.rm=TRUE) - min(full_array,na.rm=TRUE)),
               filename)
      return(invisible(full_array))
    }
  } else if (debug_channel %in% c(2,3,4,5)) {
    #Other debug channels (normals/uv/bvh/variance) bypass bloom and tonemapping
    if(is.null(filename)) {
      if(!return_raw_array) {
        if(debug_channel == 4) {
          plot_map(full_array/(max(full_array,na.rm=TRUE)))
        } else {
          plot_map(full_array)
        }
      }
      return(invisible(full_array))
    } else {
      save_png(full_array,filename)
      return(invisible(full_array))
    }
  }
  #Bloom: convolve the HDR image with a 2D exponential kernel. A numeric
  #`bloom` scales the kernel falloff; a user-supplied kernel matrix is padded
  #to odd dimensions so it has a well-defined center.
  if(!is.matrix(bloom)) {
    if(is.numeric(bloom) && length(bloom) == 1) {
      kernel = rayimage::generate_2d_exponential(0.1,11,3*1/bloom)
      full_array = rayimage::render_convolution(image = full_array, kernel = kernel, min_value = 1, preview=FALSE)
    } else {
      if(bloom) {
        kernel = rayimage::generate_2d_exponential(0.1,11,3)
        full_array = rayimage::render_convolution(image = full_array, kernel = kernel, min_value = 1, preview=FALSE)
      }
    }
  } else {
    kernel = bloom
    if(ncol(kernel) %% 2 == 0) {
      newkernel = matrix(0, ncol = ncol(kernel) + 1, nrow = nrow(kernel))
      newkernel[,1:ncol(kernel)] = kernel
      kernel = newkernel
    }
    if(nrow(kernel) %% 2 == 0) {
      newkernel = matrix(0, ncol = ncol(kernel), nrow = nrow(kernel) + 1)
      newkernel[1:nrow(kernel),] = kernel
      kernel = newkernel
    }
    full_array = rayimage::render_convolution(image = full_array, kernel = kernel, min_value = 1, preview=FALSE)
  }
  #Tone map HDR values to a displayable range (toneval 5 == "raw": return the
  #HDR array untouched)
  tonemapped_channels = tonemap_image(height,width,full_array[,,1],full_array[,,2],full_array[,,3],toneval)
  full_array = array(0,c(nrow(tonemapped_channels$r),ncol(tonemapped_channels$r),3))
  full_array[,,1] = tonemapped_channels$r
  full_array[,,2] = tonemapped_channels$g
  full_array[,,3] = tonemapped_channels$b
  if(toneval == 5) {
    return(full_array)
  }
  #Replace NAs with 0 and clamp to [0,1] before plotting/saving
  array_from_mat = array(full_array,dim=c(nrow(full_array),ncol(full_array),3))
  if(any(is.na(array_from_mat ))) {
    array_from_mat[is.na(array_from_mat)] = 0
  }
  if(any(array_from_mat > 1 | array_from_mat < 0,na.rm = TRUE)) {
    array_from_mat[array_from_mat > 1] = 1
    array_from_mat[array_from_mat < 0] = 0
  }
  #Plot to the current device unless a filename was given; always return the
  #final array invisibly so it can be captured.
  if(is.null(filename)) {
    if(!return_raw_array) {
      plot_map(array_from_mat)
    }
  } else {
    save_png(array_from_mat,filename)
  }
  return(invisible(array_from_mat))
}
| /R/render_scene.R | no_license | salma-rodriguez/rayrender | R | false | false | 29,583 | r | #' Render Scene
#'
#' Takes the scene description and renders an image, either to the device or to a filename.
#'
#' @param scene Tibble of object locations and properties.
#' @param width Default `400`. Width of the render, in pixels.
#' @param height Default `400`. Height of the render, in pixels.
#' @param fov Default `20`. Field of view, in degrees. If this is zero, the camera will use an orthographic projection. The size of the plane
#' used to create the orthographic projection is given in argument `ortho_dimensions`.
#' @param samples Default `100`. The maximum number of samples for each pixel. If this is a length-2
#' vector and the `sample_method` is `stratified`, this will control the number of strata in each dimension.
#' The total number of samples in this case will be the product of the two numbers.
#' @param min_variance Default `0.00005`. Minimum acceptable variance for a block of pixels for the
#' adaptive sampler. Smaller numbers give higher quality images, at the expense of longer rendering times.
#' If this is set to zero, the adaptive sampler will be turned off and the renderer
#' will use the maximum number of samples everywhere.
#' @param min_adaptive_size Default `8`. Width of the minimum block size in the adaptive sampler.
#' @param sample_method Default `random`. The type of sampling method used to generate
#' random numbers. The other option is `stratified`, which can improve the render quality (at the cost
#' of increased time allocating the random samples).
#' @param max_depth Default `50`. Maximum number of bounces a ray can make in a scene.
#' @param roulette_active_depth Default `10`. Number of ray bounces until a ray can stop bouncing via
#' Russian roulette.
#' @param ambient_light Default `FALSE`, unless there are no emitting objects in the scene.
#' If `TRUE`, the background will be a gradient varying from `backgroundhigh` directly up (+y) to
#' `backgroundlow` directly down (-y).
#' @param lookfrom Default `c(0,1,10)`. Location of the camera.
#' @param lookat Default `c(0,0,0)`. Location where the camera is pointed.
#' @param camera_up Default `c(0,1,0)`. Vector indicating the "up" position of the camera.
#' @param aperture Default `0.1`. Aperture of the camera. Smaller numbers will increase depth of field, causing
#' less blurring in areas not in focus.
#' @param clamp_value Default `Inf`. If a bright light or a reflective material is in the scene, occasionally
#' there will be bright spots that will not go away even with a large number of samples. These
#' can be removed (at the cost of slightly darkening the image) by setting this to a small number greater than 1.
#' @param filename Default `NULL`. If present, the renderer will write to the filename instead
#' of the current device.
#' @param backgroundhigh Default `#80b4ff`. The "high" color in the background gradient. Can be either
#' a hexadecimal code, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param backgroundlow Default `#ffffff`. The "low" color in the background gradient. Can be either
#' a hexadecimal code, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param shutteropen Default `0`. Time at which the shutter is open. Only affects moving objects.
#' @param shutterclose Default `1`. Time at which the shutter is closed. Only affects moving objects.
#' @param focal_distance Default `NULL`, automatically set to the `lookfrom-lookat` distance unless
#' otherwise specified.
#' @param ortho_dimensions Default `c(1,1)`. Width and height of the orthographic camera. Will only be used if `fov = 0`.
#' @param tonemap Default `gamma`. Choose the tone mapping function,
#' Default `gamma` solely adjusts for gamma and clamps values greater than 1 to 1.
#' `reinhold` scales values by their individual color channels `color/(1+color)` and then performs the
#' gamma adjustment. `uncharted` uses the mapping developed for Uncharted 2 by John Hable. `hbd` uses an
#' optimized formula by Jim Hejl and Richard Burgess-Dawson. Note: If set to anything other than `gamma`,
#' objects with material `light()` may not be anti-aliased. If `raw`, the raw array of HDR values will be
#' returned, rather than an image or a plot.
#' @param bloom Default `TRUE`. Set to `FALSE` to get the raw, pathtraced image. Otherwise,
#' this performs a convolution of the HDR image of the scene with a sharp, long-tailed
#' exponential kernel, which does not visibly affect dimly lit pixels, but does result in emitted light
#' slightly bleeding into adjacent pixels. This provides an antialiasing effect for lights, even when
#' tonemapping the image. Pass in a matrix to specify the convolution kernel manually, or a positive number
#' to control the intensity of the bloom (higher number = more bloom).
#' @param environment_light Default `NULL`. An image to be used for the background for rays that escape
#' the scene. Supports both HDR (`.hdr`) and low-dynamic range (`.png`, `.jpg`) images.
#' @param rotate_env Default `0`. The number of degrees to rotate the environment map around the scene.
#' @param intensity_env Default `1`. The amount to increase the intensity of the environment lighting. Useful
#' if using a LDR (JPEG or PNG) image as an environment map.
#' @param debug_channel Default `none`. If `depth`, function will return a depth map of rays into the scene
#' instead of an image. If `normals`, function will return an image of scene normals, mapped from 0 to 1.
#' If `uv`, function will return an image of the uv coords. If `variance`, function will return an image
#' showing the number of samples needed to take for each block to converge. If `dpdu` or `dpdv`, function will return
#' an image showing the differential `u` and `v` coordinates. If `color`, function will return the raw albedo
#' values (with white for `metal` and `dielectric` materials).
#' @param return_raw_array Default `FALSE`. If `TRUE`, function will return raw array with RGB intensity
#' information.
#' @param parallel Default `TRUE`. If `TRUE`, it will use all available cores to render the image
#' (or the number specified in `options("cores")` if that option is not `NULL`).
#' @param progress Default `TRUE` if interactive session, `FALSE` otherwise.
#' @param verbose Default `FALSE`. Prints information and timing information about scene
#' construction and raytracing progress.
#' @export
#' @importFrom grDevices col2rgb
#' @return Raytraced plot to current device, or an image saved to a file.
#'
#' @examples
#' #Generate a large checkered sphere as the ground
#' \donttest{
#' scene = generate_ground(depth=-0.5, material = diffuse(color="white", checkercolor="darkgreen"))
#' render_scene(scene,parallel=TRUE,samples=500)
#' }
#'
#' #Add a sphere to the center
#' \donttest{
#' scene = scene %>%
#' add_object(sphere(x=0,y=0,z=0,radius=0.5,material = diffuse(color=c(1,0,1))))
#' render_scene(scene,fov=20,parallel=TRUE,samples=500)
#' }
#'
#' #Add a marbled cube
#' \donttest{
#' scene = scene %>%
#' add_object(cube(x=1.1,y=0,z=0,material = diffuse(noise=3)))
#' render_scene(scene,fov=20,parallel=TRUE,samples=500)
#' }
#'
#' #Add a metallic gold sphere, using stratified sampling for a higher quality render
#' \donttest{
#' scene = scene %>%
#' add_object(sphere(x=-1.1,y=0,z=0,radius=0.5,material = metal(color="gold",fuzz=0.1)))
#' render_scene(scene,fov=20,parallel=TRUE,samples=500, sample_method = "stratified")
#' }
#'
#' #Lower the number of samples to render more quickly (here, we also use only one core).
#' \donttest{
#' render_scene(scene, samples=4)
#' }
#'
#' #Add a floating R plot using the iris dataset as a png onto a floating 2D rectangle
#'
#' \donttest{
#' tempfileplot = tempfile()
#' png(filename=tempfileplot,height=400,width=800)
#' plot(iris$Petal.Length,iris$Sepal.Width,col=iris$Species,pch=18,cex=4)
#' dev.off()
#'
#' image_array = aperm(png::readPNG(tempfileplot),c(2,1,3))
#' scene = scene %>%
#' add_object(xy_rect(x=0,y=1.1,z=0,xwidth=2,angle = c(0,180,0),
#' material = diffuse(image_texture = image_array)))
#' render_scene(scene,fov=20,parallel=TRUE,samples=500)
#' }
#'
#' #Move the camera
#' \donttest{
#' render_scene(scene,lookfrom = c(7,1.5,10),lookat = c(0,0.5,0),fov=15,parallel=TRUE)
#' }
#'
#' #Change the background gradient to a night time ambiance
#' \donttest{
#' render_scene(scene,lookfrom = c(7,1.5,10),lookat = c(0,0.5,0),fov=15,
#' backgroundhigh = "#282375", backgroundlow = "#7e77ea", parallel=TRUE,
#' samples=500)
#' }
#'
#'#Increase the aperture to blur objects that are further from the focal plane.
#' \donttest{
#' render_scene(scene,lookfrom = c(7,1.5,10),lookat = c(0,0.5,0),fov=15,
#' aperture = 0.5,parallel=TRUE,samples=500)
#' }
#'
#'#Spin the camera around the scene, decreasing the number of samples to render faster. To make
#'#an animation, specify a filename in `render_scene` for each frame and use the `av` package
#'#or ffmpeg to combine them all into a movie.
#'
#'t=1:30
#'xpos = 10 * sin(t*12*pi/180+pi/2)
#'zpos = 10 * cos(t*12*pi/180+pi/2)
#'\donttest{
#'#Save old par() settings
#'old.par = par(no.readonly = TRUE)
#'on.exit(par(old.par))
#'par(mfrow=c(5,6))
#'for(i in 1:30) {
#' render_scene(scene, samples=16,
#' lookfrom = c(xpos[i],1.5,zpos[i]),lookat = c(0,0.5,0), parallel=TRUE)
#'}
#'}
render_scene = function(scene, width = 400, height = 400, fov = 20,
                        samples = 100, min_variance = 0.00005, min_adaptive_size = 8,
                        sample_method = "random",
                        max_depth = 50, roulette_active_depth = 10,
                        ambient_light = FALSE,
                        lookfrom = c(0,1,10), lookat = c(0,0,0), camera_up = c(0,1,0),
                        aperture = 0.1, clamp_value = Inf,
                        filename = NULL, backgroundhigh = "#80b4ff",backgroundlow = "#ffffff",
                        shutteropen = 0.0, shutterclose = 1.0, focal_distance=NULL, ortho_dimensions = c(1,1),
                        tonemap ="gamma", bloom = TRUE, parallel=TRUE,
                        environment_light = NULL, rotate_env = 0, intensity_env = 1,
                        debug_channel = "none", return_raw_array = FALSE,
                        progress = interactive(), verbose = FALSE) {
  #Main entry point: unpacks the scene data frame into the flat vectors/lists the
  #C++ renderer (render_scene_rcpp) expects, renders, then post-processes
  #(debug channels, bloom convolution, tonemapping) and plots/saves the result.
  #Returns (invisibly) the RGB array of the rendered image.
  if(verbose) {
    currenttime = proc.time()
    cat("Building Scene: ")
  }
  #Check if Cornell Box scene and set camera if user did not:
  if(!is.null(attr(scene,"cornell"))) {
    corn_message = "Setting default values for Cornell box: "
    missing_corn = FALSE
    if(missing(lookfrom)) {
      lookfrom = c(278, 278, -800)
      corn_message = paste0(corn_message, "lookfrom `c(278,278,-800)` ")
      missing_corn = TRUE
    }
    if(missing(lookat)) {
      lookat = c(278, 278, 0)
      corn_message = paste0(corn_message, "lookat `c(278,278,0)` ")
      missing_corn = TRUE
    }
    if(missing(fov)) {
      fov=40
      corn_message = paste0(corn_message, "fov `40` ")
      missing_corn = TRUE
    }
    if(fov == 0 && missing(ortho_dimensions)) {
      ortho_dimensions = c(580,580)
      corn_message = paste0(corn_message, "ortho_dimensions `c(580, 580)` ")
      missing_corn = TRUE
    }
    corn_message = paste0(corn_message,".")
    if(missing_corn) {
      message(corn_message)
    }
  }
  #Cross product of the view vector and camera_up: if all components are zero the
  #two are parallel and the camera basis is degenerate, so stop early.
  lookvec = (lookat - lookfrom)
  i1 = c(2,3,1)
  i2 = c(3,1,2)
  if(all(lookvec[i1]*camera_up[i2] - lookvec[i2]*camera_up[i1] == 0)) {
    stop("camera_up value c(", paste(camera_up, collapse=","), ") is aligned exactly with camera vector (lookat - lookfrom). Choose a different value for camera_up.")
  }
  backgroundhigh = convert_color(backgroundhigh)
  backgroundlow = convert_color(backgroundlow)
  position_list = list()
  position_list$xvec = scene$x
  position_list$yvec = scene$y
  position_list$zvec = scene$z
  rvec = scene$radius
  #Map shape/material names to the integer codes used on the C++ side.
  shapevec = unlist(lapply(tolower(scene$shape),switch,
                           "sphere" = 1,"xy_rect" = 2, "xz_rect" = 3,"yz_rect" = 4,"box" = 5, "triangle" = 6,
                           "obj" = 7, "objcolor" = 8, "disk" = 9, "cylinder" = 10, "ellipsoid" = 11,
                           "objvertexcolor" = 12, "cone" = 13, "curve" = 14, "csg_object" = 15))
  typevec = unlist(lapply(tolower(scene$type),switch,
                          "diffuse" = 1,"metal" = 2,"dielectric" = 3,
                          "oren-nayar" = 4, "light" = 5, "microfacet" = 6,
                          "glossy" = 7, "spotlight" = 8))
  sigmavec = unlist(scene$sigma)
  assertthat::assert_that(tonemap %in% c("gamma","reinhold","uncharted", "hbd", "raw"))
  toneval = switch(tonemap, "gamma" = 1,"reinhold" = 2,"uncharted" = 3,"hbd" = 4, "raw" = 5)
  movingvec = purrr::map_lgl(scene$velocity,.f = ~any(.x != 0))
  proplist = scene$properties
  vel_list = scene$velocity
  checkeredlist = scene$checkercolor
  checkeredbool = purrr::map_lgl(checkeredlist,.f = ~all(!is.na(.x)))
  #glossy
  glossyinfo = scene$glossyinfo
  #gradient handler
  gradient_info = list()
  gradient_info$gradient_colors = scene$gradient_color
  gradient_info$isgradient = purrr::map_lgl(gradient_info$gradient_colors,.f = ~all(!is.na(.x)))
  gradient_info$gradient_trans = scene$gradient_transpose
  gradient_info$is_world_gradient = scene$world_gradient
  gradient_info$gradient_control_points = scene$gradient_point_info
  gradient_info$type = unlist(lapply(tolower(scene$gradient_type),switch,
                                     "hsv" = TRUE, "rgb" = FALSE, FALSE))
  #noise handler
  noisebool = purrr::map_lgl(scene$noise, .f = ~.x > 0)
  noisevec = scene$noise
  noisephasevec = scene$noisephase * pi/180
  noiseintvec = scene$noiseintensity
  noisecolorlist = scene$noisecolor
  #rotation handler
  rot_angle_list = scene$angle
  #fog handler
  fog_bool = scene$fog
  fog_vec = scene$fogdensity
  #flip handler
  flip_vec = scene$flipped
  #light handler
  light_prop_vec = scene$lightintensity
  #If the scene has no light/spotlight objects and the user specified neither
  #ambient_light nor environment_light, fall back to ambient lighting so the
  #render is not completely black.
  if(!any(typevec == 5) && !any(typevec == 8) && missing(ambient_light) && missing(environment_light)) {
    ambient_light = TRUE
  }
  #texture handler: in-memory arrays are written out as temporary PNGs so the
  #C++ side can read everything from disk through one path.
  image_array_list = scene$image
  image_tex_bool = purrr::map_lgl(image_array_list,.f = ~is.array(.x))
  image_filename_bool = purrr::map_lgl(image_array_list,.f = ~is.character(.x))
  temp_file_names = purrr::map_chr(image_tex_bool,.f = ~ifelse(.x, tempfile(fileext = ".png"),""))
  for(i in seq_along(image_array_list)) {
    if(image_tex_bool[i]) {
      if(dim(image_array_list[[i]])[3] == 4) {
        #Alpha channel is dropped here; alpha textures are handled separately below.
        png::writePNG(fliplr(aperm(image_array_list[[i]][,,1:3],c(2,1,3))),temp_file_names[i])
      } else if(dim(image_array_list[[i]])[3] == 3){
        png::writePNG(fliplr(aperm(image_array_list[[i]],c(2,1,3))),temp_file_names[i])
      }
    }
    if(image_filename_bool[i]) {
      if(any(!file.exists(path.expand(image_array_list[[i]])) & nchar(image_array_list[[i]]) > 0)) {
        stop(paste0("Cannot find the following texture file:\n",
                    paste(image_array_list[[i]], collapse="\n")))
      }
      temp_file_names[i] = path.expand(image_array_list[[i]])
    }
  }
  image_tex_bool = image_tex_bool | image_filename_bool
  image_repeat = scene$image_repeat
  #alpha texture handler
  alpha_array_list = scene$alphaimage
  alpha_tex_bool = purrr::map_lgl(alpha_array_list,.f = ~is.array(.x[[1]]))
  alpha_filename_bool = purrr::map_lgl(alpha_array_list,.f = ~is.character(.x[[1]]))
  #NOTE: unlike the image/bump handlers, a temp filename is generated for every
  #entry here (not just the ones with a texture).
  alpha_temp_file_names = purrr::map_chr(alpha_tex_bool, .f = (function(.x) tempfile(fileext = ".png")))
  for(i in seq_along(alpha_array_list)) {
    if(alpha_tex_bool[i]) {
      if(length(dim(alpha_array_list[[i]][[1]])) == 2) {
        png::writePNG(fliplr(t(alpha_array_list[[i]][[1]])), alpha_temp_file_names[i])
      } else if(dim(alpha_array_list[[i]][[1]])[3] == 4) {
        #Broadcast the alpha channel into the RGB channels before writing.
        alpha_array_list[[i]][[1]][,,1] = alpha_array_list[[i]][[1]][,,4]
        alpha_array_list[[i]][[1]][,,2] = alpha_array_list[[i]][[1]][,,4]
        alpha_array_list[[i]][[1]][,,3] = alpha_array_list[[i]][[1]][,,4]
        png::writePNG(fliplr(aperm(alpha_array_list[[i]][[1]][,,1:3],c(2,1,3))), alpha_temp_file_names[i])
      } else if(dim(alpha_array_list[[i]][[1]])[3] == 3) {
        png::writePNG(fliplr(aperm(alpha_array_list[[i]][[1]],c(2,1,3))), alpha_temp_file_names[i])
      } else {
        stop("alpha texture dims: c(", paste(dim(alpha_array_list[[i]][[1]]),collapse=", "), ") not valid for texture.")
      }
    }
    if(alpha_filename_bool[i]) {
      if(any(!file.exists(path.expand(alpha_array_list[[i]][[1]])) & nchar(alpha_array_list[[i]][[1]]) > 0)) {
        stop(paste0("Cannot find the following texture file:\n",
                    paste(alpha_array_list[[i]][[1]], collapse="\n")))
      }
      temp_array = png::readPNG(alpha_array_list[[i]][[1]])
      #NOTE(review): assumes the PNG read here is 3D; a grayscale (2D) file would
      #make dim(temp_array)[3] NA -- confirm inputs are always RGB/RGBA.
      if(dim(temp_array)[3] == 4 && any(temp_array[,,4] != 1)) {
        temp_array[,,1] = temp_array[,,4]
        temp_array[,,2] = temp_array[,,4]
        temp_array[,,3] = temp_array[,,4]
      }
      png::writePNG(temp_array,alpha_temp_file_names[i])
    }
  }
  alpha_tex_bool = alpha_tex_bool | alpha_filename_bool
  alphalist = list()
  alphalist$alpha_temp_file_names = alpha_temp_file_names
  alphalist$alpha_tex_bool = alpha_tex_bool
  #bump texture handler
  bump_array_list = scene$bump_texture
  bump_tex_bool = purrr::map_lgl(bump_array_list,.f = ~is.array(.x[[1]]))
  bump_filename_bool = purrr::map_lgl(bump_array_list,.f = ~is.character(.x[[1]]))
  bump_temp_file_names = purrr::map_chr(bump_tex_bool,.f = ~ifelse(.x, tempfile(fileext = ".png"),""))
  for(i in seq_along(bump_array_list)) {
    if(bump_tex_bool[i]) {
      bump_dims = dim(bump_array_list[[i]][[1]])
      if(length(bump_dims) == 2) {
        #Grayscale bump map: replicate into 3 channels so it can be written as RGB.
        temp_array = array(0, dim = c(bump_dims,3))
        temp_array[,,1] = bump_array_list[[i]][[1]]
        temp_array[,,2] = bump_array_list[[i]][[1]]
        temp_array[,,3] = bump_array_list[[i]][[1]]
        bump_dims = c(bump_dims,3)
      } else {
        temp_array = bump_array_list[[i]][[1]]
      }
      if(bump_dims[3] == 4) {
        png::writePNG(fliplr(aperm(temp_array[,,1:3],c(2,1,3))),bump_temp_file_names[i])
      } else if(bump_dims[3] == 3){
        png::writePNG(fliplr(aperm(temp_array,c(2,1,3))),bump_temp_file_names[i])
      }
    }
    if(bump_filename_bool[i]) {
      if(any(!file.exists(path.expand(bump_array_list[[i]][[1]])) & nchar(bump_array_list[[i]][[1]]) > 0)) {
        stop(paste0("Cannot find the following texture file:\n",
                    paste(bump_array_list[[i]][[1]], collapse="\n")))
      }
      bump_temp_file_names[i] = path.expand(bump_array_list[[i]][[1]])
    }
  }
  bump_tex_bool = bump_tex_bool | bump_filename_bool
  bump_intensity = scene$bump_intensity
  alphalist$bump_temp_file_names = bump_temp_file_names
  alphalist$bump_tex_bool = bump_tex_bool
  alphalist$bump_intensity = bump_intensity
  #movement handler: a zero-length shutter interval means no motion blur.
  if(shutteropen == shutterclose) {
    movingvec = rep(FALSE,length(movingvec))
  }
  #implicit sampling handler
  implicit_vec = scene$implicit_sample
  #order rotation handler
  order_rotation_list = scene$order_rotation
  #group handler
  group_bool = purrr::map_lgl(scene$pivot_point,.f = ~all(!is.na(.x)))
  group_pivot = scene$pivot_point
  group_angle = scene$group_angle
  group_order_rotation = scene$group_order_rotation
  group_translate = scene$group_translate
  group_scale = scene$group_scale
  #triangle normal handler
  tri_normal_bools = purrr::map2_lgl(shapevec,proplist,.f = ~.x == 6 && all(!is.na(.y)))
  tri_color_vert = scene$tricolorinfo
  is_tri_color = purrr::map_lgl(tri_color_vert,.f = ~all(!is.na(.x)))
  #obj handler
  fileinfovec = scene$fileinfo
  fileinfovec[is.na(fileinfovec)] = ""
  objfilenamevec = purrr::map_chr(fileinfovec, path.expand)
  if(any(!file.exists(objfilenamevec) & nchar(objfilenamevec) > 0)) {
    stop(paste0("Cannot find the following .obj files:\n",
                paste(objfilenamevec[!file.exists(objfilenamevec) & nchar(objfilenamevec) > 0],
                      collapse="\n")
    ))
  }
  objbasedirvec = purrr::map_chr(objfilenamevec, dirname)
  #bg image handler
  if(!is.null(environment_light)) {
    hasbackground = TRUE
    backgroundstring = path.expand(environment_light)
    if(!file.exists(environment_light)) {
      hasbackground = FALSE
      warning("file '", environment_light, "' cannot be found, not using background image.")
    }
    if(dir.exists(environment_light)) {
      stop("environment_light argument '", environment_light, "' is a directory, not a file.")
    }
  } else {
    hasbackground = FALSE
    backgroundstring = ""
  }
  #scale handler
  scale_factor = scene$scale_factor
  #All per-object vectors must be the same length as the position vectors.
  assertthat::assert_that(all(c(length(position_list$xvec),length(position_list$yvec),length(position_list$zvec),length(rvec),length(typevec),length(proplist)) == length(position_list$xvec)))
  assertthat::assert_that(all(!is.null(typevec)))
  assertthat::assert_that(length(lookfrom) == 3)
  assertthat::assert_that(length(lookat) == 3)
  if(is.null(focal_distance)) {
    #Default: focus on the lookat point.
    focal_distance = sqrt(sum((lookfrom-lookat)^2))
  }
  if(!is.null(options("cores")[[1]])) {
    numbercores = options("cores")[[1]]
  } else {
    numbercores = parallel::detectCores()
  }
  if(!parallel) {
    numbercores = 1
  }
  if(!is.numeric(debug_channel)) {
    debug_channel = unlist(lapply(tolower(debug_channel),switch,
                                  "none" = 0,"depth" = 1,"normals" = 2, "uv" = 3, "bvh" = 4,
                                  "variance" = 5, "normal" = 2, "dpdu" = 6, "dpdv" = 7, "color" = 8,
                                  0))
    light_direction = c(0,1,0)
  } else {
    #A numeric debug_channel is interpreted as a light direction vector.
    light_direction = debug_channel
    debug_channel = 9
  }
  if(debug_channel == 4) {
    message("rayrender must be compiled with option DEBUGBVH for this debug option to work")
  }
  if(fov == 0) {
    #fov == 0 selects the orthographic camera, which needs explicit dimensions.
    assertthat::assert_that(length(ortho_dimensions) == 2)
  }
  if(verbose) {
    buildingtime = proc.time() - currenttime
    cat(sprintf("%0.3f seconds \n",buildingtime[3]))
  }
  sample_method = unlist(lapply(tolower(sample_method),switch,
                                "random" = 0,"stratified" = 1, 0))
  camera_info = list()
  strat_dim = c()
  if(length(samples) == 2) {
    #Two values: interpret as the stratified sampling grid dimensions.
    strat_dim = samples
    samples = samples[1]*samples[2]
  } else {
    strat_dim = rep(min(floor(sqrt(samples)),8),2)
  }
  camera_info$nx = width
  camera_info$ny = height
  camera_info$ns = samples
  camera_info$fov = fov
  camera_info$lookfrom = lookfrom
  camera_info$lookat = lookat
  camera_info$aperture = aperture
  camera_info$camera_up = camera_up
  camera_info$shutteropen = shutteropen
  camera_info$shutterclose = shutterclose
  camera_info$ortho_dimensions = ortho_dimensions
  camera_info$focal_distance = focal_distance
  camera_info$max_depth = max_depth
  camera_info$roulette_active_depth = roulette_active_depth
  camera_info$sample_method = sample_method
  camera_info$stratified_dim = strat_dim
  camera_info$light_direction = light_direction
  assertthat::assert_that(max_depth > 0)
  assertthat::assert_that(roulette_active_depth > 0)
  #Spotlight handler
  if(any(typevec == 8)) {
    if(any(shapevec[typevec == 8] > 4)) {
      stop("spotlights are only supported for spheres and rects")
    }
    for(i in seq_along(proplist)) {
      if(typevec[i] == 8) {
        #Convert the spotlight target point to be relative to the light position.
        proplist[[i]][4:6] = proplist[[i]][4:6] - c(position_list$xvec[i],position_list$yvec[i],position_list$zvec[i])
      }
    }
  }
  #Material ID handler; these must show up in increasing order. Note, this will
  #cause problems if `match` is ever changed to return doubles when matching in
  #long vectors as has happened with `which` recently.
  material_id = scene$material_id
  material_id = as.integer(match(material_id, unique(material_id)) - 1L)
  material_id_bool = !is.na(scene$material_id)
  if(min_adaptive_size < 1) {
    warning("min_adaptive_size cannot be less than one: setting to one")
    min_adaptive_size = 1
  }
  if(min_variance < 0) {
    stop("min_variance cannot be less than zero")
  }
  #CSG handler
  csg_list = scene$csg_object
  csg_info = list()
  csg_info$csg = csg_list
  #Hand everything to the C++ pathtracer; returns one matrix per color channel.
  rgb_mat = render_scene_rcpp(camera_info = camera_info, ambient_light = ambient_light,
                              type = typevec, shape = shapevec, radius = rvec,
                              position_list = position_list,
                              properties = proplist, velocity = vel_list, moving = movingvec,
                              n = length(typevec),
                              bghigh = backgroundhigh, bglow = backgroundlow,
                              ischeckered = checkeredbool, checkercolors = checkeredlist,
                              gradient_info = gradient_info,
                              noise=noisevec,isnoise=noisebool,noisephase=noisephasevec,
                              noiseintensity=noiseintvec, noisecolorlist = noisecolorlist,
                              angle = rot_angle_list, isimage = image_tex_bool, filelocation = temp_file_names,
                              alphalist = alphalist,
                              lightintensity = light_prop_vec,isflipped = flip_vec,
                              isvolume=fog_bool, voldensity = fog_vec,
                              implicit_sample = implicit_vec, order_rotation_list = order_rotation_list, clampval = clamp_value,
                              isgrouped = group_bool, group_pivot=group_pivot, group_translate = group_translate,
                              group_angle = group_angle, group_order_rotation = group_order_rotation, group_scale = group_scale,
                              tri_normal_bools = tri_normal_bools, is_tri_color = is_tri_color, tri_color_vert= tri_color_vert,
                              fileinfo = objfilenamevec, filebasedir = objbasedirvec,
                              progress_bar = progress, numbercores = numbercores,
                              hasbackground = hasbackground, background = backgroundstring, scale_list = scale_factor,
                              sigmavec = sigmavec, rotate_env = rotate_env, intensity_env = intensity_env,
                              verbose = verbose, debug_channel = debug_channel,
                              shared_id_mat=material_id, is_shared_mat=material_id_bool,
                              min_variance = min_variance, min_adaptive_size = min_adaptive_size,
                              glossyinfo = glossyinfo, image_repeat = image_repeat, csg_info = csg_info)
  #Re-orient channel matrices into an image array (flip vertical, transpose).
  full_array = array(0,c(ncol(rgb_mat$r),nrow(rgb_mat$r),3))
  full_array[,,1] = flipud(t(rgb_mat$r))
  full_array[,,2] = flipud(t(rgb_mat$g))
  full_array[,,3] = flipud(t(rgb_mat$b))
  if(debug_channel == 1) {
    #Depth debug channel: normalize to [0,1] before display/save.
    returnmat = full_array[,,1]
    returnmat[is.infinite(returnmat)] = NA
    if(is.null(filename)) {
      if(!return_raw_array) {
        plot_map((full_array-min(full_array,na.rm=TRUE))/(max(full_array,na.rm=TRUE) - min(full_array,na.rm=TRUE)))
      }
      return(invisible(full_array))
    } else {
      save_png((full_array-min(full_array,na.rm=TRUE))/(max(full_array,na.rm=TRUE) - min(full_array,na.rm=TRUE)),
               filename)
      return(invisible(full_array))
    }
  } else if (debug_channel %in% c(2,3,4,5)) {
    #Other debug channels are displayed/saved without tonemapping or bloom.
    if(is.null(filename)) {
      if(!return_raw_array) {
        if(debug_channel == 4) {
          plot_map(full_array/(max(full_array,na.rm=TRUE)))
        } else {
          plot_map(full_array)
        }
      }
      return(invisible(full_array))
    } else {
      save_png(full_array,filename)
      return(invisible(full_array))
    }
  }
  #Bloom: TRUE uses a default kernel, a number scales the kernel falloff, and a
  #matrix is used directly as a custom convolution kernel.
  if(!is.matrix(bloom)) {
    if(is.numeric(bloom) && length(bloom) == 1) {
      kernel = rayimage::generate_2d_exponential(0.1,11,3*1/bloom)
      full_array = rayimage::render_convolution(image = full_array, kernel = kernel, min_value = 1, preview=FALSE)
    } else {
      if(bloom) {
        kernel = rayimage::generate_2d_exponential(0.1,11,3)
        full_array = rayimage::render_convolution(image = full_array, kernel = kernel, min_value = 1, preview=FALSE)
      }
    }
  } else {
    kernel = bloom
    #Convolution kernels must have odd dimensions; pad even ones with zeros.
    if(ncol(kernel) %% 2 == 0) {
      newkernel = matrix(0, ncol = ncol(kernel) + 1, nrow = nrow(kernel))
      newkernel[,1:ncol(kernel)] = kernel
      kernel = newkernel
    }
    if(nrow(kernel) %% 2 == 0) {
      newkernel = matrix(0, ncol = ncol(kernel), nrow = nrow(kernel) + 1)
      newkernel[1:nrow(kernel),] = kernel
      kernel = newkernel
    }
    full_array = rayimage::render_convolution(image = full_array, kernel = kernel, min_value = 1, preview=FALSE)
  }
  tonemapped_channels = tonemap_image(height,width,full_array[,,1],full_array[,,2],full_array[,,3],toneval)
  full_array = array(0,c(nrow(tonemapped_channels$r),ncol(tonemapped_channels$r),3))
  full_array[,,1] = tonemapped_channels$r
  full_array[,,2] = tonemapped_channels$g
  full_array[,,3] = tonemapped_channels$b
  if(toneval == 5) {
    #"raw" tonemap: return the untouched HDR values without clamping/plotting.
    return(full_array)
  }
  array_from_mat = array(full_array,dim=c(nrow(full_array),ncol(full_array),3))
  if(any(is.na(array_from_mat ))) {
    array_from_mat[is.na(array_from_mat)] = 0
  }
  #Clamp to the displayable [0,1] range.
  if(any(array_from_mat > 1 | array_from_mat < 0,na.rm = TRUE)) {
    array_from_mat[array_from_mat > 1] = 1
    array_from_mat[array_from_mat < 0] = 0
  }
  if(is.null(filename)) {
    if(!return_raw_array) {
      plot_map(array_from_mat)
    }
  } else {
    save_png(array_from_mat,filename)
  }
  return(invisible(array_from_mat))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mxnet_generated.R
\name{mx.symbol.log10}
\alias{mx.symbol.log10}
\title{log10:Returns element-wise Base-10 logarithmic value of the input.}
\usage{
mx.symbol.log10(...)
}
\arguments{
\item{data}{NDArray-or-Symbol
The input array.}
\item{name}{string, optional
Name of the resulting symbol.}
}
\value{
out The result mx.symbol
}
\description{
``10**log10(x) = x``
}
\details{
The storage type of ``log10`` output is always dense
Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L945
}
| /Rsite/source/api/man/mx.symbol.log10.Rd | no_license | mli/new-docs | R | false | true | 576 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mxnet_generated.R
\name{mx.symbol.log10}
\alias{mx.symbol.log10}
\title{log10:Returns element-wise Base-10 logarithmic value of the input.}
\usage{
mx.symbol.log10(...)
}
\arguments{
\item{data}{NDArray-or-Symbol
The input array.}
\item{name}{string, optional
Name of the resulting symbol.}
}
\value{
out The result mx.symbol
}
\description{
``10**log10(x) = x``
}
\details{
The storage type of ``log10`` output is always dense
Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L945
}
|
# Vector
# A sequence of elements of the same type
x <- c(1,3,5,6,8)
m = 2:20
# Arithmetic operators are vectorized: each one applies element-wise to x
x+2
x/2
x*2
x-2
# mean
mean(x)
# median
median(x)
# sum
sum(x)
# standard deviation
sd(x)
# plotting boxplots of both vectors
boxplot(x)
boxplot(m)
| /Curso em portugues da Udemy/01 funções básicas.R | no_license | diegofsousa/LearningMachineLearningWithR | R | false | false | 213 | r | # Vetor
# Sequencia de elementos do mesmo tipo
x <- c(1,3,5,6,8)
m = 2:20
x+2
x/2
x*2
x-2
# média
mean(x)
# mediana
median(x)
# soma
sum(x)
# desvio padrão
sd(x)
# plotando boxplot
boxplot(x)
boxplot(m)
|
#!/usr/local/bin/Rscript
# Build Cytoscape input files from WGCNA topological-overlap (TOM) data:
#  1) the top 32 genes of the turquoise module, and
#  2) regulation-of-transcription / TF genes drawn from the top of every module.
require("WGCNA")
options(stringsAsFactors = FALSE)
load("results/E.RData")
load("results/WCGNA_part1.RData")
load("results/results.RData")
load("data/TOM.RData")

# Extract one module's rows, ordered by module-eigengene correlation (highest
# first), with duplicate gene symbols removed.
top_module <- function(res, mod) {
  m <- res[which(res[, "ModuleName"] == mod), ]
  m <- m[order(m[, paste0("ME", mod)], decreasing = TRUE), ]
  m[!duplicated(m[, "symbol"]), ]
}

### take genes in the turquoise module, get their TOM matrix components and then send to cytoscape
tur <- top_module(results, "turquoise")
# label TOM matrix with gene symbols
symbol <- results[, "symbol"]
rownames(TOM) <- symbol
colnames(TOM) <- symbol
# get gene symbols in turquoise module, remove NAs and Riken genes and then pull
# out relevant columns of TOM matrix
tur.symbol <- tur[, "symbol"]
tur.symbol <- tur.symbol[which(tur.symbol != "NA")]
tur.sym <- tur.symbol[grep(".*Rik$", tur.symbol, invert = TRUE)]
TOM.tur <- TOM[tur.sym, tur.sym]
# raise TOM to the soft-thresholding power (7) to emphasise strong connections
TOM.tur <- TOM.tur^7
TOM.32 <- TOM.tur[1:32, 1:32]
cyt <- exportNetworkToCytoscape(
  TOM.32,
  edgeFile = "Turquoise32-CytoscapeInput-edges-TOM.txt",
  nodeFile = "Turquoise32-CytoscapeInput-nodes-TOM.txt",
  weighted = TRUE,
  threshold = 0.029
)

############### take top genes in each module and make a cytoscape network out of them...
# Drop Riken genes and missing symbols before the GO queries.
results <- results[grep(".*Rik$", results[, "symbol"], invert = TRUE), ]
results <- results[which(results[, "symbol"] != "NA"), ]
ens.id <- results[, "EnsemblID"]
library(biomaRt)
ensmart <- useMart("ensembl", dataset = "mmusculus_gene_ensembl")
# GO:0045449 = regulation of transcription; GO:0003700 = TF activity.
results.reg <- getBM(filters = "ensembl_gene_id", values = ens.id, attributes = c("ensembl_gene_id", "go_biological_process_id"), mart = ensmart)
results.reg <- unique(results.reg[results.reg[, 2] == "GO:0045449", "ensembl_gene_id"])
results.tf <- getBM(filters = "ensembl_gene_id", values = ens.id, attributes = c("ensembl_gene_id", "go_molecular_function_id"), mart = ensmart)
results.tf <- unique(results.tf[results.tf[, 2] == "GO:0003700", "ensembl_gene_id"])
results.go <- c(results.reg, results.tf)
results.go <- results.go[order(results.go, decreasing = FALSE)]
results.go <- results.go[!duplicated(results.go)]
symbols.go <- results[which(results[, "EnsemblID"] %in% results.go), "symbol"]

# Per-module tables ordered by eigengene correlation (same module order as the
# original blue/tur/pink/black/yellow/green stanzas).
modules <- c("blue", "turquoise", "pink", "black", "yellow", "green")
top.tables <- lapply(modules, function(mod) top_module(results, mod))
names(top.tables) <- modules
# Hub gene (highest eigengene correlation) of each module.
eigengene <- unname(vapply(top.tables, function(m) m[1, "symbol"], character(1)))
## take top x genes from each module (1:100, not 20 as an earlier comment said)
top <- 1:100
sym <- unlist(lapply(top.tables, function(m) m[top, "symbol"]), use.names = FALSE)
# Keep only the GO-annotated genes, then always include the module hub genes.
sym.go <- sym[which(sym %in% symbols.go)]
sym.go <- c(sym.go, eigengene)
## get genes out of TOM matrix and send to cytoscape
TOM.7 <- TOM^7
TOM.sym.go <- TOM.7[sym.go, sym.go]
# (removed a stray trailing `gg` token that would error at run time)
| /scripts/plot_turquoise | no_license | Bongomountainthesis/wgcna | R | false | false | 3,650 | #!/usr/local/bin/Rscript
require("WGCNA")
options(stringsAsFactors = FALSE)
load("results/E.RData")
load("results/WCGNA_part1.RData")
load("results/results.RData")
load("data/TOM.RData")
###take genes in the turquoise module, get their TOM matrix components and then send to cytoscape
tur <- results[which(results[,"ModuleName"]=="turquoise"),]
tur <- tur[order(tur[,"MEturquoise"],decreasing=TRUE),]
tur <- tur[!duplicated(tur[,"symbol"]),]
#label TOM matrix with gene symbols
symbol <- results[,"symbol"]
rownames(TOM) <- symbol
colnames(TOM) <- symbol
#get gene symbols in turquoise module, remove NAs and Riken genes and then pull out relevant columns of TOM matrix
tur.symbol <- tur[,"symbol"]
tur.symbol <- tur.symbol[which(tur.symbol!="NA")]
tur.sym <- tur.symbol[grep(".*Rik$",tur.symbol,invert=TRUE)]
TOM.tur <- TOM[tur.sym,tur.sym]
TOM.tur <- TOM.tur^7
TOM.32 <- TOM.tur[1:32,1:32]
cyt = exportNetworkToCytoscape(
TOM.32,
edgeFile = "Turquoise32-CytoscapeInput-edges-TOM.txt",
nodeFile = "Turquoise32-CytoscapeInput-nodes-TOM.txt",
weighted = TRUE,
threshold = 0.029,
)
###############take top 20 genes in each module and make a cytoscape network out of them...
results <- results[grep(".*Rik$",results[,"symbol"],invert=TRUE),]
results <- results[which(results[,"symbol"]!="NA"),]
ens.id <- results[,"EnsemblID"]
library(biomaRt)
ensmart<- useMart("ensembl",dataset="mmusculus_gene_ensembl")
results.reg<- getBM(filters="ensembl_gene_id", values=ens.id, attributes=c("ensembl_gene_id", "go_biological_process_id"), mart=ensmart)
results.reg<- unique(results.reg[results.reg[,2]=="GO:0045449","ensembl_gene_id"])
results.tf<- getBM(filters="ensembl_gene_id", values=ens.id, attributes=c("ensembl_gene_id", "go_molecular_function_id"), mart=ensmart)
results.tf <- unique(results.tf[results.tf[,2]=="GO:0003700","ensembl_gene_id"])
results.go <- c(results.reg, results.tf)
results.go <- results.go[order(results.go,decreasing=FALSE)]
results.go <- results.go[!duplicated(results.go)]
symbols.go <- results[which(results[,"EnsemblID"] %in% results.go),"symbol"]
blue <- results[which(results[,"ModuleName"]=="blue"),]
blue <- blue[order(blue[,"MEblue"],decreasing=TRUE),]
blue <- blue[!duplicated(blue[,"symbol"]),]
tur <- results[which(results[,"ModuleName"]=="turquoise"),]
tur <- tur[order(tur[,"MEturquoise"],decreasing=TRUE),]
tur <- tur[!duplicated(tur[,"symbol"]),]
pink <- results[which(results[,"ModuleName"]=="pink"),]
pink <- pink[order(pink[,"MEpink"],decreasing=TRUE),]
pink <- pink[!duplicated(pink[,"symbol"]),]
black <- results[which(results[,"ModuleName"]=="black"),]
black <- black[order(black[,"MEblack"],decreasing=TRUE),]
black <- black[!duplicated(black[,"symbol"]),]
yellow <- results[which(results[,"ModuleName"]=="yellow"),]
yellow <- yellow[order(yellow[,"MEyellow"],decreasing=TRUE),]
yellow <- yellow[!duplicated(yellow[,"symbol"]),]
green <- results[which(results[,"ModuleName"]=="green"),]
green <- green[order(green[,"MEgreen"],decreasing=TRUE),]
green <- green[!duplicated(green[,"symbol"]),]
eigengene <- c(blue[1,"symbol"],tur[1,"symbol"],pink[1,"symbol"],black[1,"symbol"],yellow[1,"symbol"],green[1,"symbol"])
##take top x genes from each module
top <- 1:100
blue <- blue[top,"symbol"]
tur <- tur[top,"symbol"]
pink <- pink[top,"symbol"]
black <- black[top,"symbol"]
yellow <- yellow[top,"symbol"]
green <- green[top,"symbol"]
sym <- c(blue,tur,pink,black,yellow,green)
sym.go <- sym[which(sym %in% symbols.go)]
sym.go <- c(sym.go,eigengene)
##get genes out of TOM matrix and send to cytoscape
TOM.7 <- TOM^7
TOM.sym.go <- TOM.7[sym.go,sym.go]
gg
| |
# Driver script: loads simulation output and child-utterance data, prepares it
# for analysis, then sources the model/plot scripts and prints model summaries.
library(ggplot2)
library(lme4)
library(grid)
library(gridExtra)
library(stringr)
library(jtools)
library(lattice)
library(plotrix)

# Plot layout settings shared by the sourced plotting scripts
basic.theme <- theme(
  panel.background = element_rect(
    fill = "transparent", colour = NA),
  panel.grid.major = element_line(colour = "grey95"),
  panel.grid.minor = element_blank(),
  plot.background = element_rect(
    fill = "transparent", colour = NA),
  legend.background = element_rect(
    fill = "transparent"),
  legend.text = element_text(size = 24),
  legend.title = element_text(size = 30),
  legend.key.height = unit(2, "lines"),
  legend.key = element_rect(colour = NA, fill = NA),
  axis.text.x = element_text(size = 30, angle = 45, hjust = 1),
  axis.title.x = element_text(size = 30),
  axis.text.y = element_text(size = 28),
  axis.title.y = element_text(size = 32),
  strip.text = element_text(size = 30),
  panel.spacing = unit(2, "lines"))

# Set this source file's directory as the working directory
# NOTE: If you want the following command to work, use Source in Rstudio rather than Run
here <- dirname(parent.frame(2)$ofile)
setwd(here)

# Global variables
rawdata.local.path <- "data/main/local/"
rawdata.cumulative.path <- "data/main/cumulative/"
childuttdata.path <- "data/main/childutt/"
suppl.rawdata.local.path <- "data/suppl/local/"
suppl.rawdata.cumulative.path <- "data/suppl/cumulative/"
plot.path <- "plots/"
print.model.output <- "Y"

# Convert the "Y_M" age strings to numeric years. The "6" -> "5" substitution
# turns 6 months into half a year (e.g. "2_6" -> 2.5); note it replaces every
# "6" in the string, so it assumes ages never contain a 6 elsewhere.
convert_age <- function(age) {
  age <- gsub("_", ".", age)
  age <- gsub("6", "5", age)
  as.numeric(age)
}

# Read every simulation CSV matching `pattern` under `path`, stack them, and
# prepare the combined table for analysis (fresh utterance numbers, numeric
# ages, relevant columns only).
read_sim_data <- function(path, pattern) {
  filenames <- list.files(path = path, pattern = pattern)
  data.list <- lapply(paste0(path, filenames), read.csv,
                      na.strings = c("NaN", "Nan"), stringsAsFactors = FALSE)
  data <- do.call(rbind, data.list)
  # overwrite utterance number to avoid double numbers across input files
  data$num <- seq_len(nrow(data))
  data$age <- convert_age(data$age)
  subset(data, select = c(2:13))
}

# Read in local simulation data
local.data <- read_sim_data(rawdata.local.path, "*productiontask-modified.csv")

# Read in cumulative simulation data
cumu.data <- read_sim_data(rawdata.cumulative.path, "*productiontask-modified.csv")

# Read in child utterances from input data; filenames look like
# "child<ID>_age<Y_M>.txt", which is parsed into the child and age columns.
filenames <- list.files(path = childuttdata.path, pattern = "*.txt")
childutt.list <- vector("list", length(filenames))
for (i in seq_along(filenames)) {
  file <- filenames[i]
  temp.data <- read.delim(paste0(childuttdata.path, file))
  colnames(temp.data) <- c("utterance")
  temp.data$child <- unlist(strsplit(unlist(strsplit(file, "_age"))[1], "child"))[2]
  temp.data$age <- unlist(strsplit(unlist(strsplit(file, "_age"))[2], ".txt"))
  childutt.list[[i]] <- temp.data
}
childutt.data <- do.call(rbind, childutt.list)
# Prepare data for analyses
childutt.data$age <- convert_age(childutt.data$age)
# number of word boundaries (spaces) per utterance
childutt.data$numwords <- str_count(childutt.data$utterance, " ")

## Read in local sample data for suppl materials
suppl.local.data <- read_sim_data(suppl.rawdata.local.path,
                                  "*productiontask_keep_all-modified.csv")

## Read in cumulative sample data for suppl materials
suppl.cumu.data <- read_sim_data(suppl.rawdata.cumulative.path,
                                 "*productiontask_keep_all-modified.csv")

# Run models and generate plots (these scripts define the model_* objects
# summarised below)
source("1-UncorrectedAccuracy.R")
source("2-CorrectedAccuracy.R")
source("3-UnseenWords.R")
source("4-ChilduttAnalysis.R")
source("5-SupplMaterials.R")

# Print model output if requested in the global variables
if (print.model.output == "Y") {
  # Uncorrected accuracy
  print ("##### Uncorrected accuracy: Local #####")
  print(summary(model_local_uncorrected))
  print ("##### Uncorrected accuracy: Cumulative #####")
  print(summary(model_cumu_uncorrected))
  print ("##### Uncorrected accuracy: Local (original Mc & C) #####")
  print(summary(model_local_uncorrected_suppl))
  print ("##### Uncorrected accuracy: Cumulative (original Mc & C) #####")
  print(summary(model_cumu_uncorrected_suppl))
  # Corrected accuracy
  print ("##### Corrected accuracy: Local #####")
  print(summary(model_local_corrected))
  print ("##### Corrected accuracy: Cumulative #####")
  print(summary(model_cumu_corrected))
  print ("##### Corrected accuracy: Local (original Mc & C) #####")
  print(summary(model_local_corrected_suppl))
  print ("##### Corrected accuracy: Cumulative (original Mc & C) #####")
  print(summary(model_cumu_corrected_suppl))
  # Unseen words
  print ("##### Unseen words: Local #####")
  print(summary(model_local_unseenwords))
  print ("##### Unseen words: Cumulative #####")
  print(summary(model_cumu_unseenwords))
}
| /analysis/0main.R | no_license | marisacasillas/CBL-Roete | R | false | false | 6,515 | r | library(ggplot2)
library(lme4)
library(grid)
library(gridExtra)
library(stringr)
library(jtools)
library(lattice)
library(plotrix)
#Plot layout settings
basic.theme <- theme(
panel.background = element_rect(
fill = "transparent",colour = NA),
panel.grid.major = element_line(colour = "grey95"),
panel.grid.minor = element_blank(),
plot.background = element_rect(
fill = "transparent",colour = NA),
legend.background = element_rect(
fill="transparent"),
legend.text = element_text(size=24),
legend.title = element_text(size=30),
legend.key.height = unit(2, "lines"),
legend.key = element_rect(colour = NA, fill = NA),
axis.text.x = element_text(size=30, angle=45, hjust=1),
axis.title.x = element_text(size=30),
axis.text.y = element_text(size=28),
axis.title.y = element_text(size=32),
strip.text = element_text(size=30),
panel.spacing = unit(2, "lines"))
# Set this source file's directory as the working directory
# NOTE: If you want the following command to work, use Source in Rstudio rather than Run
here <- dirname(parent.frame(2)$ofile)
setwd(here)
# Global variables
rawdata.local.path <- "data/main/local/"
rawdata.cumulative.path <- "data/main/cumulative/"
childuttdata.path <- "data/main/childutt/"
suppl.rawdata.local.path <- "data/suppl/local/"
suppl.rawdata.cumulative.path <- "data/suppl/cumulative/"
plot.path <- "plots/"
print.model.output <- "Y"
# Read in local simulation data
filenames <- list.files(path = rawdata.local.path, pattern="*productiontask-modified.csv")
local.data.list <- lapply(paste0(rawdata.local.path,filenames),na.strings=c("NaN","Nan"), stringsAsFactors = FALSE,read.csv)
local.data <- do.call(rbind, local.data.list)
# Prepare data for analyses
local.data$num = 1:nrow(local.data) #overwrite utterance number to avoid double numbers
local.data$age <- gsub("_", ".", local.data$age) #converting age variable to numeric values and months into years
local.data$age <- gsub("6", "5", local.data$age)
local.data$age <- as.numeric(local.data$age)
local.data <- subset(local.data, select = c(2:13))
# Read in cumulative simulation data
filenames <- list.files(path = rawdata.cumulative.path, pattern="*productiontask-modified.csv")
cumu.data.list <- lapply(paste0(rawdata.cumulative.path,filenames),na.strings=c("NaN","Nan"), stringsAsFactors = FALSE,read.csv)
cumu.data <- do.call(rbind, cumu.data.list)
# Prepare data for analyses
cumu.data$num = 1:nrow(cumu.data) #overwrite utterance number to avoid double numbers
cumu.data$age <- gsub("_", ".", cumu.data$age) #converting age variable to numeric values and months into years
cumu.data$age <- gsub("6", "5", cumu.data$age)
cumu.data$age <- as.numeric(cumu.data$age)
cumu.data <- subset(cumu.data, select = c(2:13))
# Read in child utterances from input data
filenames <- list.files(path=childuttdata.path, pattern="*.txt")
childutt.data <- NULL
for (file in filenames){
temp.data <- read.delim(paste0(childuttdata.path, file))
colnames(temp.data) <- c("utterance")
temp.data$child <- unlist(strsplit(unlist(strsplit(file, "_age"))[1],"child"))[2]
temp.data$age <- unlist(strsplit(unlist(strsplit(file, "_age"))[2],".txt"))
childutt.data <- rbind(childutt.data,temp.data)
}
# Prepare data for analyses
childutt.data$age <- gsub("_", ".", childutt.data$age) #converting age variable to numeric values and months into years
childutt.data$age <- gsub("6", "5", childutt.data$age)
childutt.data$age <- as.numeric(childutt.data$age)
childutt.data$numwords <- childutt.data$numwords <- str_count(childutt.data$utterance," ")
## Read in local sample data for suppl materials
filenames <- list.files(path = suppl.rawdata.local.path,pattern="*productiontask_keep_all-modified.csv")
suppl.local.data.list <- lapply(paste0(suppl.rawdata.local.path,filenames),na.strings=c("NaN","Nan"), stringsAsFactors = FALSE,read.csv)
suppl.local.data <- do.call(rbind, suppl.local.data.list)
# Prepare data for analyses
suppl.local.data$num = 1:nrow(suppl.local.data) # overwrite utterance number to avoid double numbers
suppl.local.data$age <- gsub("_", ".", suppl.local.data$age) #converting age variable to numeric values and months into years
suppl.local.data$age <- gsub("6", "5", suppl.local.data$age)
suppl.local.data$age <- as.numeric(suppl.local.data$age)
suppl.local.data <- subset(suppl.local.data, select = c(2:13))
## Read in cumulative sample data for suppl materials
filenames <- list.files(path = suppl.rawdata.cumulative.path,pattern="*productiontask_keep_all-modified.csv")
suppl.cumu.data.list <- lapply(paste0(suppl.rawdata.cumulative.path,filenames),na.strings=c("NaN","Nan"), stringsAsFactors = FALSE,read.csv)
suppl.cumu.data <- do.call(rbind, suppl.cumu.data.list)
# Prepare data for analyses
suppl.cumu.data$num = 1:nrow(suppl.cumu.data) # overwrite utterance number to avoid double numbers
suppl.cumu.data$age <- gsub("_", ".", suppl.cumu.data$age) #converting age variable to numeric values and months into years
suppl.cumu.data$age <- gsub("6", "5", suppl.cumu.data$age)
suppl.cumu.data$age <- as.numeric(suppl.cumu.data$age)
suppl.cumu.data <- subset(suppl.cumu.data, select = c(2:13))
# Run models and generate plots
source("1-UncorrectedAccuracy.R")
source("2-CorrectedAccuracy.R")
source("3-UnseenWords.R")
source("4-ChilduttAnalysis.R")
source("5-SupplMaterials.R")
# Print model output if requested in the global variables
if (print.model.output == "Y") {
# Uncorrected accuracy
print ("##### Uncorrected accuracy: Local #####")
print(summary(model_local_uncorrected))
print ("##### Uncorrected accuracy: Cumulative #####")
print(summary(model_cumu_uncorrected))
print ("##### Uncorrected accuracy: Local (original Mc & C) #####")
print(summary(model_local_uncorrected_suppl))
print ("##### Uncorrected accuracy: Cumulative (original Mc & C) #####")
print(summary(model_cumu_uncorrected_suppl))
# Corrected accuracy
print ("##### Corrected accuracy: Local #####")
print(summary(model_local_corrected))
print ("##### Corrected accuracy: Cumulative #####")
print(summary(model_cumu_corrected))
print ("##### Corrected accuracy: Local (original Mc & C) #####")
print(summary(model_local_corrected_suppl))
print ("##### Corrected accuracy: Cumulative (original Mc & C) #####")
print(summary(model_cumu_corrected_suppl))
# Unseen words
print ("##### Unseen words: Local #####")
print(summary(model_local_unseenwords))
print ("##### Unseen words: Cumulative #####")
print(summary(model_cumu_unseenwords))
}
|
#' Run a built dockerfile locally, accessed through the 8787 port
#' Assumes your built image is named after your dockerhub username
#'
#' @param dockerhub_username username for dockerhub
#' @param project_name built image name
#'
#' @return Opens url with container running; invisibly returns the status of
#'   the `docker run` launch.
#' @export
#'
#' @examples run_local_dockerfile('my_username', 'my_project')
run_local_dockerfile <- function(dockerhub_username, project_name) {
  # Launch the container without blocking: with the default wait = TRUE the
  # call would not return until the container stops, so browseURL() below
  # would only open the IDE *after* the session had already ended.
  status <- system(paste0('docker run -v $(pwd):/home/rstudio/ -p 8787:8787 -e DISABLE_AUTH=true ',
                          dockerhub_username, '/', project_name),
                   wait = FALSE)
  # Give the RStudio server a moment to come up before pointing the browser
  # at it; some browsers require a full URL including the scheme.
  Sys.sleep(2)
  utils::browseURL('http://localhost:8787')
  invisible(status)
}
| /R/run_local_dockerfile.R | permissive | smwindecker/dockertools | R | false | false | 603 | r | #' Run a built dockerfile locally, accessed through the 8787 port
#' Assumes your built image is named after your dockerhub username
#'
#' @param dockerhub_username username for dockerhub
#' @param project_name built image name
#'
#' @return Opens url with container running
#' @export
#'
#' @examples run_local_dockerfile('my_username', 'my_project')
# Launch the <dockerhub_username>/<project_name> image with the current
# working directory mounted at /home/rstudio and RStudio exposed on 8787.
run_local_dockerfile <- function (dockerhub_username, project_name) {
  # NOTE(review): system() blocks by default (wait = TRUE), so the browser is
  # only opened once the container exits -- consider wait = FALSE.
  system(paste0('docker run -v $(pwd):/home/rstudio/ -p 8787:8787 -e DISABLE_AUTH=true ',
                dockerhub_username, '/', project_name))
  # Open the local RStudio server in the default browser.
  browseURL('localhost:8787')
}
|
## app.R ##
library(shinydashboard)
# Dashboard UI: two sidebar entries whose tabName values map to the two
# tabItems in the body.
ui <- dashboardPage(
  dashboardHeader(title = '镇铭的osu毕设'),
  dashboardSidebar(
    sidebarMenu(
      menuItem('Dashboard',tabName='dashboard',icon=icon('dashboard')),
      menuItem('Widgets',tabName = 'widgets',icon=icon('th'))
    )
  ),
  dashboardBody(
    tabItems(
      tabItem(
        tabName='dashboard',
        h2("第一个页面"),
        # Row 1: histogram output driven by the "slider" input (see server).
        fluidRow(
          box(plotOutput("plot1", height = 250)),
          box(
            title = "Controls",
            sliderInput("slider", "Number of observations:", 1, 100, 50)
          )
        ),
        # NOTE(review): "plot2" and "select" have no matching server-side
        # code -- only output$plot1 is rendered, so this row stays empty.
        fluidRow(
          box(plotOutput("plot2", height = 250)),
          box(
            title = "Controls",
            sliderInput("select", "Number of observations:", 1, 100, 50)
          )
        )
      ),
      # Second page: placeholder heading only.
      tabItem(
        tabName='widgets',
        h2("第二个页面")
      )
    )
    # Boxes need to be put in a row (or column)
  )
)
# Server: renders a histogram of the first `input$slider` values from a
# fixed standard-normal sample of size 500.
server <- function(input, output) {
  # Fixed seed -> identical sample in every session.
  set.seed(122)
  histdata <- rnorm(500)
  output$plot1 <- renderPlot({
    # The slider selects how many of the 500 values to display.
    data <- histdata[seq_len(input$slider)]
    hist(data)
  })
}
shinyApp(ui, server) | /shiny.R | no_license | git874997967/graduate | R | false | false | 1,160 | r | ## app.R ##
library(shinydashboard)
# Dashboard UI: sidebar tabName values must match the tabItems below.
ui <- dashboardPage(
  dashboardHeader(title = '镇铭的osu毕设'),
  dashboardSidebar(
    sidebarMenu(
      menuItem('Dashboard',tabName='dashboard',icon=icon('dashboard')),
      menuItem('Widgets',tabName = 'widgets',icon=icon('th'))
    )
  ),
  dashboardBody(
    tabItems(
      tabItem(
        tabName='dashboard',
        h2("第一个页面"),
        fluidRow(
          box(plotOutput("plot1", height = 250)),
          box(
            title = "Controls",
            sliderInput("slider", "Number of observations:", 1, 100, 50)
          )
        ),
        # NOTE(review): "plot2"/"select" have no server-side counterpart;
        # the second row renders empty.
        fluidRow(
          box(plotOutput("plot2", height = 250)),
          box(
            title = "Controls",
            sliderInput("select", "Number of observations:", 1, 100, 50)
          )
        )
      ),
      tabItem(
        tabName='widgets',
        h2("第二个页面")
      )
    )
    # Boxes need to be put in a row (or column)
  )
)
# Server: histogram of the first input$slider values of a seeded sample.
server <- function(input, output) {
  set.seed(122)
  histdata <- rnorm(500)
  output$plot1 <- renderPlot({
    data <- histdata[seq_len(input$slider)]
    hist(data)
  })
}
shinyApp(ui, server) |
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 10954
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 10954
c
c Input Parameter (command line, file):
c input filename QBFLIB/Biere/tipfixpoint/vis.coherence^3.E-f2.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3727
c no.of clauses 10954
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 10954
c
c QBFLIB/Biere/tipfixpoint/vis.coherence^3.E-f2.qdimacs 3727 10954 E1 [] 0 35 3692 10954 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Biere/tipfixpoint/vis.coherence^3.E-f2/vis.coherence^3.E-f2.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 643 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 10954
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 10954
c
c Input Parameter (command line, file):
c input filename QBFLIB/Biere/tipfixpoint/vis.coherence^3.E-f2.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3727
c no.of clauses 10954
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 10954
c
c QBFLIB/Biere/tipfixpoint/vis.coherence^3.E-f2.qdimacs 3727 10954 E1 [] 0 35 3692 10954 NONE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setAxis.R
\name{setAxis}
\alias{setAxis}
\title{Set up an Axis}
\usage{
setAxis(data, axis.range, axis.log, axis.rev, axis.labels, ...)
}
\arguments{
\item{data}{the coordinates for the particular axis}
\item{axis.range}{set axis range.}
\item{axis.log}{logical, if \code{TRUE}, then log transform the axis.}
\item{axis.rev}{logical, if \code{TRUE}, then reverse the axis direction.}
\item{axis.labels}{set axis labels.}
\item{\dots}{additional arguments to the "pretty" functions.}
}
\value{
Information about the axis
}
\description{
Sets up axis information (support function).
}
\seealso{
\code{\link{linearPretty}}, \code{\link{logPretty}}
}
\keyword{dplot}
| /man/setAxis.Rd | permissive | ldecicco-USGS/smwrGraphs | R | false | true | 746 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setAxis.R
\name{setAxis}
\alias{setAxis}
\title{Set up an Axis}
\usage{
setAxis(data, axis.range, axis.log, axis.rev, axis.labels, ...)
}
\arguments{
\item{data}{the coordinates for the particular axis}
\item{axis.range}{set axis range.}
\item{axis.log}{logical, if \code{TRUE}, then log transform the axis.}
\item{axis.rev}{logical, if \code{TRUE}, then reverse the axis direction.}
\item{axis.labels}{set axis labels.}
\item{\dots}{additional arguments to the "pretty" functions.}
}
\value{
Information about the axis
}
\description{
Sets up axis information (support function).
}
\seealso{
\code{\link{linearPretty}}, \code{\link{logPretty}}
}
\keyword{dplot}
|
#' Save Colorized and Juxtaposed Images
#'
#' \code{clsave} saves images that have been colorized using \code{colorize} or
#' juxtaposed with \code{juxtapose}.
#'
#' @param response a response object of a \code{colorize} function call.
#' @param destfile a character string or vector with the names where the
#'   images are saved; the default \code{""} lets the save helpers choose.
#'
#' @return Besides saving, the function returns the response object invisibly.
#'
#' @examples
#' \dontrun{
#' # Save colorized images
#' res <- colorize(img = "https://upload.wikimedia.org/wikipedia/commons/9/9e/Breadfruit.jpg")
#' clsave(res, destfile = "colorized_version.jpg")
#' }
#' @export
#' @importFrom dplyr filter
#' @importFrom stringr str_detect str_remove_all str_replace_all
#' @importFrom purrr walk2
clsave <- function(response, destfile = "") {
  # Drop rows without a usable response.
  response <- check_response(response)
  n <- nrow(response)
  idx <- seq_len(n)
  # Expand the default sentinel to one empty name per row.  identical() keeps
  # this safe when the caller supplies a vector of file names: the previous
  # `if (destfile == "")` errors on R >= 4.2 for length > 1 input.
  if (identical(destfile, "")) destfile <- rep("", n)
  # Two response layouts: 2 columns = colorized images from URL,
  # 4 columns = juxtaposed images.
  if (ncol(response) == 2) {
    purrr::pwalk(list(response$response, destfile, idx), save_col_wh)
  }
  if (ncol(response) == 4) {
    purrr::pwalk(list(response$jp_type, response$jp, destfile, idx), save_jp_wh)
  }
  # Return the (filtered) response invisibly so the call pipes cleanly.
  invisible(response)
}
| /R/clsave.R | no_license | zumbov2/colorizer | R | false | false | 1,376 | r | #' Save Colorized and Juxtaposed Images
#'
#' \code{clsave} saves images that have been colorized using \code{colorize} or
#' juxtaposed with \code{juxtapose}.
#'
#' @param response a response object of a \code{colorize} function call.
#' @param destfile a character string or vector with the name where the images are saved.
#'
#' @return Besides saving, the function returns the response object invisibly.
#'
#' @examples
#' \dontrun{
#' # Save colorized images
#' res <- colorize(img = "https://upload.wikimedia.org/wikipedia/commons/9/9e/Breadfruit.jpg")
#' clsave(res, destfile = "colorized_version.jpg")
#' }
#' @export
#' @importFrom dplyr filter
#' @importFrom stringr str_detect str_remove_all str_replace_all
#' @importFrom purrr walk2
# Save images from a colorize()/juxtapose() response object; returns the
# response invisibly after writing the files.
clsave <- function(response, destfile = "") {
  # Remove Non-Responses
  response <- check_response(response)
  # Save Colorized Images from URL (2-column response layout)
  if (ncol(response) == 2) {
    i <- c(1:nrow(response))
    # NOTE(review): destfile is documented as possibly a vector, but
    # `if (destfile == "")` errors for length > 1 input on R >= 4.2 --
    # consider identical(destfile, "").
    if (destfile == "") destfile <- rep("", nrow(response))
    purrr::pwalk(list(response$response, destfile, i), save_col_wh)
  }
  # Save Juxtaposed Images (4-column response layout)
  if (ncol(response) == 4) {
    i <- c(1:nrow(response))
    if (destfile == "") destfile <- rep("", nrow(response))
    purrr::pwalk(list(response$jp_type, response$jp, destfile, i), save_jp_wh)
  }
  # Return response
  return(invisible(response))
}
|
######################################################
### Fit the regression model with testing data     ###
######################################################
### Author: Chengliang Tang
### Project 3
XGBtest <- function(modelList, dat_test){
  # Apply the 12 fitted xgboost models (4 quantile columns x 3 colour
  # channels) to the processed test features.
  # Input:
  #  - modelList: list of 12 fitted models (each wrapped in a length-1 list)
  #  - dat_test: 3-d feature array, one slice per channel
  # Output: numeric vector of stacked predictions.
  library("xgboost")
  n_obs <- dim(dat_test)[1]
  predArr <- array(NA, c(n_obs, 4, 3))
  for (channel in 1:3) {
    # All four column-models of one channel share the same feature matrix.
    featMat <- dat_test[, , channel]
    for (col in 1:4) {
      # Models are stored column-major: index = (channel - 1) * 4 + col.
      model_idx <- (channel - 1) * 4 + col
      fit_train <- modelList[[model_idx]]
      predArr[, col, channel] <- predict(fit_train[[1]], newdata = featMat)
    }
  }
  return(as.numeric(predArr))
}
| /lib/test_xgboost.R | no_license | Levichasedream/xgboost1 | R | false | false | 878 | r | ######################################################
### Fit the regression model with testing data ###
######################################################
### Author: Chengliang Tang
### Project 3
# Predict with the 12 fitted xgboost models (4 columns x 3 channels) on the
# test feature array and return the stacked predictions as a numeric vector.
XGBtest <- function(modelList, dat_test){
  ### Fit the classification model with testing data
  ### Input: 
  ###  - the fitted classification model list using training data
  ###  - processed features from testing images
  ### Output: training model specification
  ### load libraries
  library("xgboost")
  # One prediction slot per observation x 4 columns x 3 channels.
  predArr <- array(NA, c(dim(dat_test)[1], 4, 3))
  for (i in 1:12){
    fit_train <- modelList[[i]]
    ### calculate column and channel
    # Models are stored column-major: i = (c2 - 1) * 4 + c1.
    c1 <- (i-1) %% 4 + 1
    c2 <- (i-c1) %/% 4 + 1
    # Feature matrix depends only on the channel slice.
    featMat <- dat_test[, , c2]
    ### make predictions
    predArr[, c1, c2] <- predict(fit_train[[1]], newdata=featMat)
  }
  return(as.numeric(predArr))
}
|
#' get_ecocrop
#'
#' get new ecocrop entry for a crop
#'
#' data scraped from FAO website 2017, see scraping script in data-raw/21_ExtractEcoCropSheets.R
#'
#' @param cropname an ecocrop cropname, matched case-insensitively against the
#'   first common name in the COMNAME field
#' @param field a field to select from the ecocrop database; if NULL the whole
#'   one-row record is returned
#' @param ecocrop_object whether to return results as an ecocrop object default FALSE
#'
#' @import dplyr stringr
#'
#' @export
#'
#'
#' @examples
#' potato <- get_ecocrop('potato')
#' get_ecocrop('maize','phmin')
#' #comparing new & old versions of database
#' cropname <- 'maize'
#' library(dismo)
#' cropold <- dismo::getCrop(cropname)
#' cropnew <- get_ecocrop(cropname)
get_ecocrop <- function(cropname,
                        field = NULL,
                        ecocrop_object = FALSE) {

  # Load the packaged table into this function's environment rather than the
  # caller's global workspace (also avoids the R CMD check NOTE that a bare
  # data("df_ecocrop") call triggers).
  utils::data("df_ecocrop", envir = environment())

  #TODO vectorise to work on a vector of crops

  # Match cropname, case-insensitively, as the first common name in COMNAME
  # (i.e. anchored at the start of the field and followed by a comma).
  out <- dplyr::filter(df_ecocrop,
                       str_detect(COMNAME,
                                  regex(paste0("^", cropname, ","),
                                        ignore_case = TRUE)))

  if (nrow(out) == 0) stop('crop ', cropname, ' not found, check df_ecocrop$NAME')

  # Optionally repackage the climatic limits as a dismo ECOCROPcrop object,
  # e.g. for use within ecocrop_a_raster().
  if (ecocrop_object)
  {
    crop <- new('ECOCROPcrop')
    crop@GMIN  <- as.numeric(out[, 'GMIN'])
    crop@GMAX  <- as.numeric(out[, 'GMAX'])
    crop@KTMP  <- as.numeric(out[, 'KTMP'])
    crop@TMIN  <- as.numeric(out[, 'TMIN'])
    crop@TOPMN <- as.numeric(out[, 'TOPMN'])
    crop@TOPMX <- as.numeric(out[, 'TOPMX'])
    crop@TMAX  <- as.numeric(out[, 'TMAX'])
    crop@RMIN  <- as.numeric(out[, 'RMIN'])
    crop@ROPMN <- as.numeric(out[, 'ROPMN'])
    crop@ROPMX <- as.numeric(out[, 'ROPMX'])
    crop@RMAX  <- as.numeric(out[, 'RMAX'])
    # If no killing temperature is recorded, default it to 0 -- this is what
    # dismo::ecocrop does.
    if (is.na(crop@KTMP)) crop@KTMP <- 0
    return(crop)
  }

  # Select just a single field if one is specified.
  if (!is.null(field))
  {
    field_name <- str_to_upper(field)
    # Fail early with a helpful message when the requested field is not a
    # column of the database (resolves the old TODO).
    if (!all(field_name %in% names(out)))
      stop('field ', paste(field, collapse = ', '),
           ' not found, check names(df_ecocrop)')
    # all_of() avoids dplyr's "external vector in select()" ambiguity.
    out <- dplyr::select(out, all_of(field_name))
    out <- out[[1]] # return a single value rather than a data frame
    # Return factors as plain character.
    if (is.factor(out)) out <- as.character(out)
  }
  return(out)
}
# ph functions below, replaced by generic versions above
# #get_phmin('maize')
# get_phmin <- function(cropname) {
#
# ph <- get_ecocrop(cropname)$PHMIN
#
# # to protect against numeric(0)
# if (length(ph)==0) ph <- NA
#
# return(ph)
# }
#
#
# get_phmax <- function(cropname) {
#
# ph <- get_ecocrop(cropname)$PHMAX
#
# # to protect against numeric(0)
# if (length(ph)==0) ph <- NA
#
# return(ph)
# }
#
# get_phopmin <- function(cropname) {
#
# ph <- get_ecocrop(cropname)$PHOPMN
#
# # to protect against numeric(0)
# if (length(ph)==0) ph <- NA
#
# return(ph)
# }
#
#
# get_phopmax <- function(cropname) {
#
# ph <- get_ecocrop(cropname)$PHOPMX
#
# # to protect against numeric(0)
# if (length(ph)==0) ph <- NA
#
# return(ph)
# }
| /R/get_ecocrop.r | permissive | KuldeepSJadon/climcropr | R | false | false | 3,346 | r | #' get_ecocrop
#'
#' get new ecocrop entry for a crop
#'
#' data scraped from FAO website 2017, see scraping script in data-raw/21_ExtractEcoCropSheets.R
#'
#' @param cropname an ecocrop cropname
#' @param field a field to select from the ecocrop database
#' @param ecocrop_object whether to return results as an ecocrop object default FALSE
#'
#' @import dplyr stringr
#'
#' @export
#'
#'
#' @examples
#' potato <- get_ecocrop('potato')
#' get_ecocrop('maize','phmin')
#' #comparing new & old versions of database
#' cropname <- 'maize'
#' library(dismo)
#' cropold <- dismo::getCrop(cropname)
#' cropnew <- get_ecocrop(cropname)
# Look up one crop's record in the packaged df_ecocrop table by its first
# common name; optionally return a single field or a dismo ECOCROPcrop object.
get_ecocrop <- function(cropname,
                        field = NULL,
                        ecocrop_object = FALSE) {
  # NOTE(review): data() here loads df_ecocrop into the caller's global
  # environment; utils::data("df_ecocrop", envir = environment()) is cleaner.
  data("df_ecocrop")
  #TODO add some warning about if field not present
  #TODO vectorise to work on a vector of crops
  # checking if the cropname appears as the first word in the COMNAME field
  #to test outside function
  #which(str_detect(df_ecocrop$COMNAME, regex(paste0("^",cropname,","), ignore_case = TRUE)))
  #case insensitive
  out <- dplyr::filter( df_ecocrop, str_detect(COMNAME, regex(paste0("^",cropname,","), ignore_case = TRUE)))
  if (nrow(out)==0) stop('crop ',cropname,' not found, check df_ecocrop$NAME')
  # do I want to offer option to return as an ecocrop object ?
  # e.g. to use within ecocrop_a_raster ??
  if (ecocrop_object)
  {
    #dismo - I would prefer not to be reliant on it
    crop <- new('ECOCROPcrop')
    # Copy the growth-cycle, temperature and rainfall limits into the slots.
    crop@GMIN <- as.numeric(out[,'GMIN'])
    crop@GMAX <- as.numeric(out[,'GMAX'])
    crop@KTMP <- as.numeric(out[,'KTMP'])
    crop@TMIN <- as.numeric(out[,'TMIN'])
    crop@TOPMN <- as.numeric(out[,'TOPMN'])
    crop@TOPMX <- as.numeric(out[,'TOPMX'])
    crop@TMAX <- as.numeric(out[,'TMAX'])
    crop@RMIN <- as.numeric(out[,'RMIN'])
    crop@ROPMN <- as.numeric(out[,'ROPMN'])
    crop@ROPMX <- as.numeric(out[,'ROPMX'])
    crop@RMAX <- as.numeric(out[,'RMAX'])
    #if no kill temp set it to 0
    #this is what dismo::ecocrop does
    if (is.na(crop@KTMP)) crop@KTMP <- 0
    return(crop)
  }
  #select just a single field if one is specified
  if (!is.null(field))
  {
    out <- dplyr::select(out, str_to_upper(field))
    #i could put something here to allow multiple fields to be returned
    #by only doing coversions below if a single field
    #if (length(field)==1)
    out <- out[[1]] #to return a single value rather than a dataframe
    #return factors as character
    if (is.factor(out)) out <- as.character(out)
  }
  return(out)
}
# ph functions below, replaced by generic versions above
# #get_phmin('maize')
# get_phmin <- function(cropname) {
#
# ph <- get_ecocrop(cropname)$PHMIN
#
# # to protect against numeric(0)
# if (length(ph)==0) ph <- NA
#
# return(ph)
# }
#
#
# get_phmax <- function(cropname) {
#
# ph <- get_ecocrop(cropname)$PHMAX
#
# # to protect against numeric(0)
# if (length(ph)==0) ph <- NA
#
# return(ph)
# }
#
# get_phopmin <- function(cropname) {
#
# ph <- get_ecocrop(cropname)$PHOPMN
#
# # to protect against numeric(0)
# if (length(ph)==0) ph <- NA
#
# return(ph)
# }
#
#
# get_phopmax <- function(cropname) {
#
# ph <- get_ecocrop(cropname)$PHOPMX
#
# # to protect against numeric(0)
# if (length(ph)==0) ph <- NA
#
# return(ph)
# }
|
# Packages used throughout the crime time-series analysis.
library(tidyverse)
library(ggplot2)
library(trend)
library(timetk)
library(lubridate)
library(astsa)
library(forecast)
library(tseries)
library(TSA)
library(rmarkdown)
library(fpp2)
library(readxl)
# Stray token commented out: a bare `sar` aborted the sourced script with
# "object 'sar' not found".
# sar
# One-time installs belong in an interactive session, not a sourced script
# (they re-ran on every execution); kept here for reference only.
# install.packages("TSA")
# install.packages("testcorr")
# install.packages("rmarkdown")
# install.packages('fpp2')
#Read data from excel sheet
crimes <- read_xlsx('data/crime.xlsx',
                    sheet = 'crimes')
#names columns by creating vector of names
colnames(crimes) <- c('date', 'crime', 'desc')
#read in population worksheet
pop <- read_xlsx('data/pop.xlsx',
                 sheet = 'pop')
#name column names of population data frame
colnames(pop) <- c('month', 'year', 'pop')
#new data frame to combine with crime occurrence to make rate.
# Monthly incident counts per crime type, restricted to years before 2021.
crimes_trans <- crimes %>%
  mutate(month = month(date),
         year = year(date)) %>%
  filter(year < 2021) %>%
  group_by(month, year, crime) %>%
  summarize(count = n())
#merge dataframes to reflect crime rate per occurrence
# Join monthly population, convert counts to per-capita rates, rebuild a
# first-of-month Date column, then spread to one column per crime type.
# NOTE(review): summarize() above leaves the result grouped by month, year.
crime_pop <- left_join(crimes_trans,
                       pop,
                       by = c('month', 'year')) %>%
  mutate(rate = count/pop,
         date = date(paste0(year,'-',month,'-1'))) %>%
  select(month, year, date, crime, rate) %>%
  pivot_wider(names_from = 'crime',
              values_from = c('rate'))
# Parameterized rate plot: change crime_name to plot any crime_pop column.
crime_name <- 'ASSAULT'
p <- crime_pop %>%
  ggplot() +
  geom_line(aes(x = date, y = get(crime_name))) +
  ggtitle(label = paste0('Crime Rate for ', crime_name),
          subtitle = 'From 2011 to 2020') +
  ylab(crime_name) +
  xlab('Date') +
  theme(plot.title = element_text(hjust = 0.5),
        plot.subtitle = element_text(hjust = 0.5)) +
  geom_smooth(aes(x = date, y = get(crime_name)))
# NOTE(review): ggplotly() needs the plotly package, which is not among the
# libraries loaded at the top of this script -- confirm it is attached.
ggplotly(p)
# Monthly time-series objects (January 2011 start, frequency 12) for each
# crime-rate column of crime_pop; assault and battery are combined.
theft_ts <- ts(crime_pop$THEFT,
               start = c(2011, 1),
               frequency = 12)
homicide_ts <- ts(crime_pop$HOMICIDE, start = c(2011, 1),
                  frequency = 12)
narcotics_ts <- ts(crime_pop$NARCOTICS, start = c(2011, 1),
                   frequency = 12)
sexual_assault_ts <- ts(crime_pop$`SEXUAL ASSAULT`, start = c(2011, 1),
                        frequency = 12)
assault_battery_ts <- ts(crime_pop$ASSAULT + crime_pop$BATTERY, start = c(2011, 1),
                         frequency = 12)
# First-difference series.
# NOTE(review): re-wrapping diff() output in ts() resets start/frequency to
# the defaults (start 1, frequency 1), discarding the monthly seasonality
# attribute -- confirm that is intended before using frequency-aware methods.
first_theft <- ts(diff(theft_ts))
first_homicide <- ts(diff(homicide_ts))
first_sexual_assault <- ts(diff(sexual_assault_ts))
first_narcotics <- ts(diff(narcotics_ts))
first_battery_assault <- ts(diff(assault_battery_ts))
# Second-difference series.  (The homicide call previously spelled the
# argument `difference =`, silently relying on R's partial argument
# matching of `differences =`; spelled out here -- behavior unchanged.)
second_theft <- ts(diff(theft_ts, differences = 2))
second_homicide <- ts(diff(homicide_ts, differences = 2))
second_sexual_assault <- ts(diff(sexual_assault_ts, differences = 2))
second_narcotics <- ts(diff(narcotics_ts, differences = 2))
second_battery_assault <- ts(diff(assault_battery_ts, differences = 2))
# Bartels rank test (trend package) for randomness of the series.
bartels.test(sexual_assault_ts)
#Plots made using GGPlot
# Theft rate with a loess smoother overlaid.
theftplot <- crime_pop %>%
  ggplot() +
  geom_line(aes(x = date, y = theft_ts)) +
  ggtitle(label = paste0('Crime Rate for Theft'),
          subtitle = 'From 2011 to 2020') +
  ylab('Theft Rate per Capita') +
  xlab('Date') +
  theme(plot.title = element_text(hjust = 0.5),
        plot.subtitle = element_text(hjust = 0.5)) +
  geom_smooth(aes(x = date, y = theft_ts))
ggplotly(theftplot)
# Homicide rate with a loess smoother overlaid.
# (Fixed y-axis label typo: 'Homice' -> 'Homicide'.)
homicideplot <- crime_pop %>%
  ggplot() +
  geom_line(aes(x = date, y = homicide_ts)) +
  ggtitle(label = paste0('Crime Rate for Homicide'),
          subtitle = 'From 2011 to 2020') +
  ylab('Homicide Rate per Capita') +
  xlab('Date') +
  theme(plot.title = element_text(hjust = 0.5),
        plot.subtitle = element_text(hjust = 0.5)) +
  geom_smooth(aes(x = date, y = homicide_ts))
# Sexual assault rate with a loess smoother overlaid.
sexualplot <- crime_pop %>%
  ggplot() +
  geom_line(aes(x = date, y = sexual_assault_ts)) +
  ggtitle(label = paste0('Crime Rate for Sexual Assault'),
          subtitle = 'From 2011 to 2020') +
  ylab('Sexual Assault Rate per Capita') +
  xlab('Date') +
  theme(plot.title = element_text(hjust = 0.5),
        plot.subtitle = element_text(hjust = 0.5)) +
  geom_smooth(aes(x = date, y = sexual_assault_ts))
# Narcotics rate with a loess smoother overlaid.
narcoticsplot <- crime_pop %>%
  ggplot() +
  geom_line(aes(x = date, y = narcotics_ts)) +
  ggtitle(label = paste0('Crime Rate for Narcotics'),
          subtitle = 'From 2011 to 2020') +
  ylab('Narcotics Rate per Capita') +
  xlab('Date') +
  theme(plot.title = element_text(hjust = 0.5),
        plot.subtitle = element_text(hjust = 0.5)) +
  geom_smooth(aes(x = date, y = narcotics_ts))
# Combined assault/battery rate with a loess smoother overlaid.
batt_ass_plot <- crime_pop %>%
  ggplot() +
  geom_line(aes(x = date, y = assault_battery_ts)) +
  ggtitle(label = paste0('Crime Rate for Assault/Battery'),
          subtitle = 'From 2011 to 2020') +
  ylab('Assault/Battery Rate per Capita') +
  xlab('Date') +
  theme(plot.title = element_text(hjust = 0.5),
        plot.subtitle = element_text(hjust = 0.5)) +
  geom_smooth(aes(x = date, y = assault_battery_ts))
# Interactive versions of each plot (requires plotly -- see note above).
ggplotly(sexualplot)
ggplotly(theftplot)
ggplotly(narcoticsplot)
ggplotly(batt_ass_plot)
ggplotly(homicideplot)
#Raw data plots
ts.plot(homicide_ts)
ts.plot(theft_ts)
ts.plot(narcotics_ts)
ts.plot(sexual_assault_ts)
ts.plot(assault_battery_ts)
# First-difference theft plot.  diff() drops the first observation, so the
# differenced series has one fewer value than crime_pop$date; aligning it
# with date[-1] (the second month onward) keeps the aesthetics the same
# length -- the previous full date vector made ggplot error on mismatched
# lengths.
diff_theft <- crime_pop %>%
  ggplot() +
  geom_line(aes(x = date[-1], y = first_theft)) +
  ggtitle(label = paste0('Crime Rate for First Difference of Theft'),
          subtitle = 'From 2011 to 2020') +
  ylab('Theft Rate per Capita') +
  xlab('Date') +
  theme(plot.title = element_text(hjust = 0.5),
        plot.subtitle = element_text(hjust = 0.5)) +
  geom_smooth(aes(x = date[-1], y = first_theft))
#Narcotics EDA
# For each series: level/difference plots, required differencing order
# (ndiffs), ACF/PACF, auto.arima order search, ADF stationarity tests,
# and classical decomposition.
ts.plot(narcotics_ts, main = 'Narcotics Raw Data Time Series',
        ylab= 'Narcotics Rate per Capita')
ts.plot(diff(narcotics_ts), main = 'First Difference of Narcotics
        Time Series', ylab= 'Narcotics Rate per Capita')
ndiffs(narcotics_ts)
acf2(narcotics_ts, main = 'Narcotics Raw Data ACF/PACF Plots')
auto.arima(narcotics_ts)
acf2(first_narcotics, main = 'First Difference Narcotics
     ACF/PACF Plots')
adf.test(narcotics_ts)
adf.test(first_narcotics)
plot(decompose(narcotics_ts))
plot(decompose(diff(narcotics_ts)))
#Theft EDA
ts.plot(theft_ts, main = 'Theft Raw Data Time Series',
        ylab= 'Theft Rate per Capita')
ts.plot(diff(theft_ts), main = 'First Difference of Theft Time Series',
        ylab= 'Theft Rate per Capita')
ndiffs(theft_ts)
acf2(theft_ts, main = 'Theft Raw Data ACF/PACF Plots')
auto.arima(theft_ts)
acf2(first_theft, main = 'First Difference Theft ACF/PACF Plots')
adf.test(theft_ts)
adf.test(first_theft)
plot(decompose(theft_ts))
plot(decompose(diff(theft_ts)))
#Homicide EDA
ts.plot(homicide_ts, main = 'Homicide
        Raw Data Time Series',
        ylab= 'Homicide Rate per Capita')
ts.plot(diff(homicide_ts), main = 'First Difference of
        Homicide Time Series',
        ylab= 'Homicide Rate per Capita')
ndiffs(homicide_ts)
acf2(homicide_ts, main = 'Homicide
     Raw Data ACF/PACF Plots')
acf2(diff(homicide_ts), main = 'First Difference of
     Homicide ACF/PACF Plots')
auto.arima(homicide_ts)
adf.test(homicide_ts)
adf.test(diff(homicide_ts))
plot(decompose(homicide_ts))
plot(decompose(diff(homicide_ts)))
# Stray token commented out: a bare `McLeod` (likely the start of an
# unfinished McLeod-Li test call) aborted the script with
# "object 'McLeod' not found".
# McLeod
#Sexual Assault EDA
# Same battery of diagnostics as above for the remaining two series.
ts.plot(sexual_assault_ts, main = 'Sexual Assault
        Raw Data Time Series',
        ylab= 'Sexual Assault Rate per Capita')
ts.plot(diff(sexual_assault_ts), main = 'First Difference of
        Sexual Assault Time Series',
        ylab= 'Sexual Assault Rate per Capita')
ndiffs(sexual_assault_ts)
acf2(sexual_assault_ts, main = 'Sexual Assault
     Raw Data ACF/PACF Plots')
acf2(diff(sexual_assault_ts), main = 'First Difference of
     Sexual Assault ACF/PACF Plots')
auto.arima(sexual_assault_ts)
adf.test(sexual_assault_ts)
adf.test(diff(sexual_assault_ts))
plot(decompose(sexual_assault_ts))
plot(decompose(diff(sexual_assault_ts)))
#Assault/Battery EDA
ts.plot(assault_battery_ts, main = 'Assault/Battery
        Raw Data Time Series',
        ylab= 'Assault/Battery Rate per Capita')
ts.plot(diff(assault_battery_ts), main = 'First Difference of
        Assault/Battery Time Series',
        ylab= 'Assault/Battery Rate per Capita')
ndiffs(assault_battery_ts)
acf2(assault_battery_ts, main = 'Assault/Battery
     Raw Data ACF/PACF Plots')
acf2(diff(assault_battery_ts), main = 'First Difference of
     Assault/Battery ACF/PACF Plots')
auto.arima(assault_battery_ts)
adf.test(assault_battery_ts)
adf.test(diff(assault_battery_ts))
plot(decompose(assault_battery_ts))
plot(decompose(diff(assault_battery_ts)))
| /script.R | no_license | nolafatazz/crime_ts | R | false | false | 8,599 | r | library(tidyverse)
library(ggplot2)
library(trend)
library(timetk)
library(lubridate)
library(astsa)
library(forecast)
library(tseries)
library(TSA)
library(rmarkdown)
library(fpp2)
library(readxl)
# Stray token commented out: a bare `sar` aborted the sourced script with
# "object 'sar' not found".
# sar
# One-time installs belong in an interactive session, not a sourced script;
# kept here for reference only.
# install.packages("TSA")
# install.packages("testcorr")
# install.packages("rmarkdown")
# install.packages('fpp2')
#Read data from excel sheet
crimes <- read_xlsx('data/crime.xlsx',
                    sheet = 'crimes')
#names columns by creating vector of names
colnames(crimes) <- c('date', 'crime', 'desc')
#read in population worksheet
pop <- read_xlsx('data/pop.xlsx',
                 sheet = 'pop')
#name column names of population data frame
colnames(pop) <- c('month', 'year', 'pop')
#new data frame to combine with crime occurrence to make rate.
# Monthly incident counts per crime type, restricted to years before 2021.
crimes_trans <- crimes %>%
  mutate(month = month(date),
         year = year(date)) %>%
  filter(year < 2021) %>%
  group_by(month, year, crime) %>%
  summarize(count = n())
#merge dataframes to reflect crime rate per occurrence
# Join monthly population, convert counts to per-capita rates, rebuild a
# first-of-month Date column, then spread to one column per crime type.
crime_pop <- left_join(crimes_trans,
                       pop,
                       by = c('month', 'year')) %>%
  mutate(rate = count/pop,
         date = date(paste0(year,'-',month,'-1'))) %>%
  select(month, year, date, crime, rate) %>%
  pivot_wider(names_from = 'crime',
              values_from = c('rate'))
# Interactive rate plot for a single crime type selected by name;
# get(crime_name) looks the column up dynamically in crime_pop.
crime_name <- 'ASSAULT'
p <- crime_pop %>%
ggplot() +
geom_line(aes(x = date, y = get(crime_name))) +
ggtitle(label = paste0('Crime Rate for ', crime_name),
subtitle = 'From 2011 to 2020') +
ylab(crime_name) +
xlab('Date') +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5)) +
geom_smooth(aes(x = date, y = get(crime_name)))
# NOTE(review): ggplotly() requires the plotly package to be attached.
ggplotly(p)
#Make TS objects of each of the crime type.
# Monthly series (frequency = 12) starting January 2011, built from the
# wide-format per-capita rate columns of crime_pop.
theft_ts <- ts(crime_pop$THEFT,
start = c(2011, 1),
frequency = 12)
homicide_ts<- ts(crime_pop$HOMICIDE, start = c(2011, 1),
frequency = 12)
narcotics_ts <- ts(crime_pop$NARCOTICS, start = c(2011, 1),
frequency = 12)
sexual_assault_ts <- ts(crime_pop$`SEXUAL ASSAULT`, start = c(2011, 1),
frequency = 12)
# Assault and battery rates are combined into a single series.
assault_battery_ts <- ts(crime_pop$ASSAULT +crime_pop$BATTERY, start = c(2011, 1),
frequency = 12)
#First difference objects of crimes.
# diff() on a ts already returns a ts with the original frequency and an
# adjusted start; the previous ts(diff(...)) wrapper discarded those time
# attributes (resetting start = 1, frequency = 1), so it was dropped.
first_theft <- diff(theft_ts)
first_homicide <- diff(homicide_ts)
first_sexual_assault <- diff(sexual_assault_ts)
first_narcotics <- diff(narcotics_ts)
first_battery_assault <- diff(assault_battery_ts)
#Second difference objects of crimes.
# `differences` spelled out in full (one call relied on partial argument
# matching via `difference = 2`).
second_theft <- diff(theft_ts, differences = 2)
second_homicide <- diff(homicide_ts, differences = 2)
second_sexual_assault <- diff(sexual_assault_ts, differences = 2)
second_narcotics <- diff(narcotics_ts, differences = 2)
second_battery_assault <- diff(assault_battery_ts, differences = 2)
# Bartels rank test (trend package) for randomness of the raw series.
bartels.test(sexual_assault_ts)
#Plots made using GGPlot
# One rate-over-time plot per crime type, each with a loess smoother.
# The ts objects are used directly as the y aesthetic; they have the
# same length as crime_pop$date.
theftplot <- crime_pop %>%
ggplot() +
geom_line(aes(x = date, y = theft_ts)) +
ggtitle(label = paste0('Crime Rate for Theft'),
subtitle = 'From 2011 to 2020') +
ylab('Theft Rate per Capita') +
xlab('Date') +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5)) +
geom_smooth(aes(x = date, y = theft_ts))
ggplotly(theftplot)
homicideplot <- crime_pop %>%
ggplot() +
geom_line(aes(x = date, y = homicide_ts)) +
ggtitle(label = paste0('Crime Rate for Homicide'),
subtitle = 'From 2011 to 2020') +
ylab('Homice Rate per Capita') +
xlab('Date') +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5)) +
geom_smooth(aes(x = date, y = homicide_ts))
sexualplot <- crime_pop %>%
ggplot() +
geom_line(aes(x = date, y = sexual_assault_ts)) +
ggtitle(label = paste0('Crime Rate for Sexual Assault'),
subtitle = 'From 2011 to 2020') +
ylab('Sexual Assault Rate per Capita') +
xlab('Date') +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5)) +
geom_smooth(aes(x = date, y = sexual_assault_ts))
narcoticsplot <- crime_pop %>%
ggplot() +
geom_line(aes(x = date, y = narcotics_ts)) +
ggtitle(label = paste0('Crime Rate for Narcotics'),
subtitle = 'From 2011 to 2020') +
ylab('Narcotics Rate per Capita') +
xlab('Date') +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5)) +
geom_smooth(aes(x = date, y = narcotics_ts))
# Combined assault/battery rate plot, same layout as the other types.
batt_ass_plot <- crime_pop %>%
ggplot() +
geom_line(aes(x = date, y = assault_battery_ts)) +
ggtitle(label = paste0('Crime Rate for Assault/Battery'),
subtitle = 'From 2011 to 2020') +
ylab('Assault/Battery Rate per Capita') +
xlab('Date') +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5)) +
geom_smooth(aes(x = date, y = assault_battery_ts))
# Render each stored ggplot interactively (requires plotly).
ggplotly(sexualplot)
ggplotly(theftplot)
ggplotly(narcoticsplot)
ggplotly(batt_ass_plot)
ggplotly(homicideplot)
#Raw data plots
# Quick base-graphics views of each unsmoothed series.
ts.plot(homicide_ts)
ts.plot(theft_ts)
ts.plot(narcotics_ts)
ts.plot(sexual_assault_ts)
ts.plot(assault_battery_ts)
# First-difference plot for theft. diff() drops the first observation,
# so first_theft is one element shorter than crime_pop$date; align the
# x aesthetic by dropping the first date. (Previously both aesthetics
# used the full-length `date`, which makes ggplot fail with an
# aesthetic-length mismatch when the plot is printed.)
diff_theft <- crime_pop %>%
ggplot() +
geom_line(aes(x = date[-1], y = first_theft)) +
ggtitle(label = paste0('Crime Rate for First Difference of Theft'),
subtitle = 'From 2011 to 2020') +
ylab('Theft Rate per Capita') +
xlab('Date') +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5)) +
geom_smooth(aes(x = date[-1], y = first_theft))
#Narcotics EDA
# Standard EDA sequence: raw and first-differenced plots, difference
# order estimate, ACF/PACF, automatic ARIMA, ADF tests, decomposition.
ts.plot(narcotics_ts, main = 'Narcotics Raw Data Time Series',
ylab= 'Narcotics Rate per Capita')
ts.plot(diff(narcotics_ts), main = 'First Difference of Narcotics
Time Series', ylab= 'Narcotics Rate per Capita')
ndiffs(narcotics_ts)
acf2(narcotics_ts, main = 'Narcotics Raw Data ACF/PACF Plots')
auto.arima(narcotics_ts)
acf2(first_narcotics, main = 'First Difference Narcotics
ACF/PACF Plots')
adf.test(narcotics_ts)
adf.test(first_narcotics)
plot(decompose(narcotics_ts))
plot(decompose(diff(narcotics_ts)))
#Theft EDA
# Same sequence for the theft series.
ts.plot(theft_ts, main = 'Theft Raw Data Time Series',
ylab= 'Theft Rate per Capita')
ts.plot(diff(theft_ts), main = 'First Difference of Theft Time Series',
ylab= 'Theft Rate per Capita')
ndiffs(theft_ts)
acf2(theft_ts, main = 'Theft Raw Data ACF/PACF Plots')
auto.arima(theft_ts)
acf2(first_theft, main = 'First Difference Theft ACF/PACF Plots')
adf.test(theft_ts)
adf.test(first_theft)
plot(decompose(theft_ts))
plot(decompose(diff(theft_ts)))
#Homicide EDA
# Standard EDA sequence: raw and first-differenced plots, difference
# order estimate, ACF/PACF, automatic ARIMA, ADF tests, decomposition.
ts.plot(homicide_ts, main = 'Homicide
Raw Data Time Series',
ylab= 'Homicide Rate per Capita')
ts.plot(diff(homicide_ts), main = 'First Difference of
Homicide Time Series',
ylab= 'Homicide Rate per Capita')
ndiffs(homicide_ts)
acf2(homicide_ts, main = 'Homicide
Raw Data ACF/PACF Plots')
acf2(diff(homicide_ts), main = 'First Difference of
Homicide ACF/PACF Plots')
auto.arima(homicide_ts)
adf.test(homicide_ts)
adf.test(diff(homicide_ts))
plot(decompose(homicide_ts))
plot(decompose(diff(homicide_ts)))
# Stray top-level token `McLeod` commented out: evaluating it raised an
# "object not found" error and aborted the script. Presumably a
# note-to-self about the McLeod-Li test (TSA::McLeod.Li.test) --
# TODO confirm.
# McLeod
#Sexual Assault EDA
# Standard EDA sequence: raw and first-differenced plots, difference
# order estimate, ACF/PACF, automatic ARIMA, ADF tests, decomposition.
ts.plot(sexual_assault_ts, main = 'Sexual Assault
Raw Data Time Series',
ylab= 'Sexual Assault Rate per Capita')
ts.plot(diff(sexual_assault_ts), main = 'First Difference of
Sexual Assault Time Series',
ylab= 'Sexual Assault Rate per Capita')
ndiffs(sexual_assault_ts)
acf2(sexual_assault_ts, main = 'Sexual Assault
Raw Data ACF/PACF Plots')
acf2(diff(sexual_assault_ts), main = 'First Difference of
Sexual Assault ACF/PACF Plots')
auto.arima(sexual_assault_ts)
adf.test(sexual_assault_ts)
adf.test(diff(sexual_assault_ts))
plot(decompose(sexual_assault_ts))
plot(decompose(diff(sexual_assault_ts)))
#Assault/Battery EDA
# Same sequence for the combined assault/battery series.
ts.plot(assault_battery_ts, main = 'Assault/Battery
Raw Data Time Series',
ylab= 'Assault/Battery Rate per Capita')
ts.plot(diff(assault_battery_ts), main = 'First Difference of
Assault/Battery Time Series',
ylab= 'Assault/Battery Rate per Capita')
ndiffs(assault_battery_ts)
acf2(assault_battery_ts, main = 'Assault/Battery
Raw Data ACF/PACF Plots')
acf2(diff(assault_battery_ts), main = 'First Difference of
Assault/Battery ACF/PACF Plots')
auto.arima(assault_battery_ts)
adf.test(assault_battery_ts)
adf.test(diff(assault_battery_ts))
plot(decompose(assault_battery_ts))
plot(decompose(diff(assault_battery_ts)))
|
# Build and print a monthly readmission z-score trend plot for OPPE
# review.
#
# NOTE(review): the `data` argument is accepted but never used -- the
# body reads the global `readmit_tbl` instead. Confirm whether the
# function should operate on `data`.
# NOTE(review): collapse_by() presumably comes from tibbletime and ymd()
# from lubridate -- confirm both are attached by the caller.
#
# Returns NA when readmit_tbl has fewer than 10 rows; otherwise prints
# the ggplot (the value of print(plt)).
oppe_readmit_zscore_plt <- function(data){
# Readmit Plots for OPPE
# Guard: require at least 10 rows before plotting.
# (R parses `!x >= 10` as `!(x >= 10)`, i.e. nrow < 10.)
if(!nrow(readmit_tbl) >= 10){
return(NA)
} else {
# Readmit Trends - Expected, Actual, CMI, SOI ----
# Make tbl
# One summary row per discharge month: total discharges, readmit rate,
# benchmark rate, excess, mean severity of illness, case-mix index,
# and mean z-score.
readmit_trend_tbl <- readmit_tbl %>%
mutate(dsch_date = ymd(dsch_date)) %>%
collapse_by("monthly") %>%
select(
dsch_date
, pt_count
, readmit_count
, readmit_rate_bench
, severity_of_illness
, drg_cost_weight
, z_minus_score
) %>%
group_by(dsch_date, add = T) %>%
summarize(
Total_Discharges = sum(pt_count, na.rm = TRUE)
, rr = round((sum(readmit_count, na.rm = TRUE) / Total_Discharges), 2)
, perf = round(mean(readmit_rate_bench, na.rm = TRUE), 2)
, Excess = (rr - perf)
, mean_soi = round(mean(severity_of_illness, na.rm = TRUE), 2)
, cmi = round(mean(drg_cost_weight, na.rm = TRUE), 2)
, z_score = round(mean(z_minus_score, na.rm = TRUE), 2)
) %>%
ungroup()
# Z-Score ----
# Monthly mean z-score with a linear trend (dashed black) and a zero
# reference line (dashed green).
plt <- readmit_trend_tbl %>%
ggplot(
mapping = aes(
x = dsch_date
, y = z_score
)
) +
# Z-Score
geom_point(size = 2) +
geom_line() +
labs(
x = "Discharge Month"
, y = "Z-Score"
, title = "Readmit Rate Z-Score"
) +
# linear trend z-score
geom_smooth(
method = "lm"
, se = F
, color = "black"
, linetype = "dashed"
) +
geom_hline(
yintercept = 0
, color = "green"
, size = 1
, linetype = "dashed"
) +
scale_y_continuous(labels = scales::number_format(accuracy = 0.1)) +
theme_tq()
print(plt)
}
} | /R/Functions/oppe_readmit_zscore_plt.R | no_license | spsanderson/bmhmc-sql | R | false | false | 2,192 | r | oppe_readmit_zscore_plt <- function(data){
# Readmit Plots for OPPE
# Guard: require at least 10 rows before plotting. The original
# `!nrow(readmit_tbl) >= 10` parses in R as `!(nrow(...) >= 10)`,
# i.e. nrow < 10; written explicitly here.
# NOTE(review): the enclosing function's `data` argument is never used;
# the global `readmit_tbl` is read instead -- confirm intent.
if (nrow(readmit_tbl) < 10) {
return(NA)
} else {
# Readmit Trends - Expected, Actual, CMI, SOI ----
# One summary row per discharge month: total discharges, readmit rate,
# benchmark rate, excess, mean severity of illness, case-mix index,
# and mean z-score.
# NOTE(review): collapse_by() presumably comes from tibbletime --
# confirm it is attached by the caller.
readmit_trend_tbl <- readmit_tbl %>%
mutate(dsch_date = ymd(dsch_date)) %>%
collapse_by("monthly") %>%
select(
dsch_date
, pt_count
, readmit_count
, readmit_rate_bench
, severity_of_illness
, drg_cost_weight
, z_minus_score
) %>%
group_by(dsch_date, add = TRUE) %>%  # TRUE, not T (T is reassignable)
summarize(
Total_Discharges = sum(pt_count, na.rm = TRUE)
, rr = round((sum(readmit_count, na.rm = TRUE) / Total_Discharges), 2)
, perf = round(mean(readmit_rate_bench, na.rm = TRUE), 2)
, Excess = (rr - perf)
, mean_soi = round(mean(severity_of_illness, na.rm = TRUE), 2)
, cmi = round(mean(drg_cost_weight, na.rm = TRUE), 2)
, z_score = round(mean(z_minus_score, na.rm = TRUE), 2)
) %>%
ungroup()
# Z-Score ----
# Monthly mean z-score with a linear trend (dashed black) and a zero
# reference line (dashed green).
plt <- readmit_trend_tbl %>%
ggplot(
mapping = aes(
x = dsch_date
, y = z_score
)
) +
# Z-Score points and connecting line
geom_point(size = 2) +
geom_line() +
labs(
x = "Discharge Month"
, y = "Z-Score"
, title = "Readmit Rate Z-Score"
) +
# linear trend of the z-score
geom_smooth(
method = "lm"
, se = FALSE  # FALSE, not F (F is reassignable)
, color = "black"
, linetype = "dashed"
) +
geom_hline(
yintercept = 0
, color = "green"
, size = 1
, linetype = "dashed"
) +
scale_y_continuous(labels = scales::number_format(accuracy = 0.1)) +
theme_tq()
print(plt)
}
}
\name{NEWS}
\title{vegan News}
\encoding{UTF-8}
\section{Changes in version 2.5-0}{
\subsection{GENERAL}{
\itemize{
\item This is a major new release with changes all over the
package: Nearly 40\% of program files were changed from the
previous release. Please report regressions and other issues in
\href{https://github.com/vegandevs/vegan/issues/}{https://github.com/vegandevs/vegan/issues/}.
\item Compiled code is used much more extensively, and most
compiled functions use \code{.Call} interface. This gives smaller
memory footprint and is also faster. In wall clock time, the
greatest gains are in permutation tests for constrained ordination
methods (\code{anova.cca}) and binary null models
(\code{nullmodel}).
\item Constrained ordination functions (\code{cca}, \code{rda},
\code{dbrda}, \code{capscale}) are completely rewritten and share
most of their code. This makes them more consistent with each
other and more robust. The internal structure changed in
constrained ordination objects, and scripts may fail if they try
to access the result object directly. There never was a guarantee
for unchanged internal structure, and such scripts should be
changed and they should use the provided support functions to
access the result object (see documentation of \code{cca.object}
and github issue
\href{https://github.com/vegandevs/vegan/issues/262}{#262}). Some
support and analysis functions may no longer work with result
objects created in previous \pkg{vegan} versions. You should use
\code{update(old.result.object)} to fix these old result
objects. See github issues
\href{https://github.com/vegandevs/vegan/issues/218}{#218},
\href{https://github.com/vegandevs/vegan/issues/227}{#227}.
\item \pkg{vegan} includes some tests that are run when checking
the package installation. See github issues
\href{https://github.com/vegandevs/vegan/issues/181}{#181},
\href{https://github.com/vegandevs/vegan/issues/271}{#271}.
\item The informative messages (warnings, notes and error
messages) are cleaned and unified which also makes possible to
provide translations.
} %itemize
} % general
\subsection{NEW FUNCTIONS}{
\itemize{
\item \code{avgdist}: new function to find averaged
dissimilarities from several random rarefactions of
communities. Code by Geoffrey Hannigan. See github issues
\href{https://github.com/vegandevs/vegan/issues/242}{#242},
\href{https://github.com/vegandevs/vegan/issues/243}{#243},
\href{https://github.com/vegandevs/vegan/issues/246}{#246}.
\item \code{chaodist}: new function that is similar to
\code{designdist}, but uses Chao terms that are supposed to take
into account the effects of unseen species (Chao et al.,
\emph{Ecology Letters} \bold{8,} 148-159; 2005). Earlier we had
Jaccard-type Chao dissimilarity in \code{vegdist}, but the new
code allows defining any kind of Chao dissimilarity.
\item New functions to find influence statistics of constrained
ordination objects: \code{hatvalues}, \code{sigma},
\code{rstandard}, \code{rstudent}, \code{cooks.distance},
\code{SSD}, \code{vcov}, \code{df.residual}. Some of these could
be earlier found via \code{as.mlm} function which is
deprecated. See github issue
\href{https://github.com/vegandevs/vegan/issues/234}{#234}.
\item \code{boxplot} was added for \code{permustats} results to
display the (standardized) effect sizes.
\item \code{sppscores}: new function to add or replace species
scores in distance-based ordination such as \code{dbrda},
\code{capscale} and \code{metaMDS}. Earlier \code{dbrda} did not
have species scores, and species scores in \code{capscale} and
\code{metaMDS} were based on raw input data which may not be
consistent with the used dissimilarity measure. See github issue
\href{https://github.com/vegandevs/vegan/issues/254}{#254}.
\item \code{cutreeord}: new function that is similar to
\code{stats::cutree}, but numbers the cluster in the order they
appear in the dendrogram (left to right) instead of labelling them
in the order they appeared in the data.
\item \code{sipoo.map}: a new data set of locations and sizes of
the islands in the Sipoo archipelago bird data set \code{sipoo}.
} %itemize
} % new functions
\subsection{NEW FEATURES IN CONSTRAINED ORDINATION}{
\itemize{
\item The inertia of Correspondence Analysis (\code{cca}) is
called \dQuote{scaled Chi-square} instead of using a name of a
little known statistic.
\item Regression scores for constraints can be extracted and
plotted for constrained ordination methods. See github issue
\href{https://github.com/vegandevs/vegan/issues/226}{#226}.
\item Full model (\code{model = "full"}) is again enabled in
permutations tests for constrained ordination results in
\code{anova.cca} and \code{permutest.cca}.
\item \code{permutest.cca} gained a new option \code{by = "onedf"} to
perform tests by sequential one degree-of-freedom contrasts of
factors. This option is not (yet) enabled in \code{anova.cca}.
\item The permutation tests are more robust, and most scoping issues
should have been fixed.
\item Permutation tests use compiled C code and they are much
faster. See github issue
\href{https://github.com/vegandevs/vegan/issues/211}{#211}.
\item \code{permutest} printed layout is similar to \code{anova.cca}.
\item \code{eigenvals} gained a new argument \code{model} to
select either constrained or unconstrained scores. The old
argument \code{constrained} is deprecated. See github issue
\href{https://github.com/vegandevs/vegan/issues/207}{#207}.
\item Adjusted \eqn{R^2}{R-squared} is not calculated for
results of partial ordination, because it is unclear how this
should be done (function \code{RsquareAdj}).
\item \code{ordiresids} can display standardized and studentized
residuals.
\item Function to construct \code{model.frame} and
\code{model.matrix} for constrained ordination are more robust
and fail in fewer cases.
\item \code{goodness} and \code{inertcomp} for constrained
ordination result object no longer has an option to find
distances: only explained variation is available.
\item \code{inertcomp} gained argument \code{unity}. This will
give \dQuote{local contributions to beta-diversity} (LCBD) and
\dQuote{species contribution to beta-diversity} (SCBD) of Legendre
& De \enc{Cáceres}{Caceres} (\emph{Ecology Letters} \bold{16,}
951-963; 2012).
\item \code{goodness} is disabled for \code{capscale}.
\item \code{prc} gained argument \code{const} for general
scaling of results similarly as in \code{rda}.
\item \code{prc} uses regression scores for Canoco-compatibility.
} %itemae
} % constrained ordination
\subsection{NEW FEATURES IN NULL MODEL COMMUNITIES}{
\itemize{
\item The C code for swap-based binary null models was made more
efficient, and the models are all faster. Many of these
models selected a \eqn{2 \times 2}{2x2} submatrix, and for this
they generated four random numbers (two rows, two columns). Now we
skip selecting third or fourth random number if it is obvious that
the matrix cannot be swapped. Since most of time was used in
generating random numbers in these functions, and most candidates
were rejected, this speeds up functions. However, this also means
that random number sequences change from previous \pkg{vegan}
versions, and old binary model results cannot be replicated
exactly. See github issues
\href{https://github.com/vegandevs/vegan/issues/197}{#197},
\href{https://github.com/vegandevs/vegan/issues/255}{#255} for
details and timing.
\item Ecological null models (\code{nullmodel}, \code{simulate},
\code{make.commsim}, \code{oecosimu}) gained new null model
\code{"greedyqswap"} which can radically speed up quasi-swap
models with minimal risk of introducing bias.
\item Backtracking is written in C and it is much faster. However,
backtracking models are biased, and they are provided only
because they are classic legacy models.
} %itemize
} % nullmodel
\subsection{NEW FEATURES IN OTHER FUNCTIONS}{
\itemize{
\item \code{adonis2} gained a column of \eqn{R^2}{R-squared}
similarly as old \code{adonis}.
\item Great part of \R{} code for \code{decorana} is written in C
which makes it faster and reduces the memory footprint.
\item \code{metaMDS} results gained new \code{points} and
\code{text} methods.
\item \code{ordiplot} and other ordination \code{plot} functions
can be chained with their \code{points} and \code{text}
functions allowing the use of \pkg{magrittr} pipes. The
\code{points} and \code{text} functions gained argument to draw
arrows allowing their use in drawing biplots or adding vectors of
environmental variables with \code{ordiplot}. Since many
ordination \code{plot} methods return an invisible
\code{"ordiplot"} object, these \code{points} and \code{text}
methods also work with them. See github issue
\href{https://github.com/vegandevs/vegan/issues/257}{#257}.
\item Lattice graphics (\code{ordixyplot}) for ordination can
add polygons that enclose all points in the panel and
complete data.
\item \code{ordicluster} gained option to suppress drawing in
plots so that it can be more easily embedded in other functions
for calculations.
\item \code{as.rad} returns the index of included taxa as an
attribute.
\item Random rarefaction (function \code{rrarefy}) uses compiled
C code and is much faster.
\item \code{plot} of \code{specaccum} can draw short
horizontal bars to vertical error bars. See StackOverflow
question
\href{https://stackoverflow.com/questions/45378751}{45378751}.
\item \code{decostand} gained new standardization methods
\code{rank} and \code{rrank} which replace abundance values by
their ranks or relative ranks. See github issue
\href{https://github.com/vegandevs/vegan/issues/225}{#225}.
\item Clark dissimilarity was added to \code{vegdist} (this cannot
be calculated with \code{designdist}).
\item \code{designdist} evaluates minimum terms in compiled code,
and the function is faster than \code{vegdist} also for
dissimilarities using minimum terms. Although \code{designdist} is
usually faster than \code{vegdist}, it is numerically less stable,
in particular with large data sets.
\item \code{swan} passes \code{type} argument to \code{beals}.
\item \code{tabasco} can use traditional cover scale values from
function \code{coverscale}. Function \code{coverscale} can return
scaled values as integers for numerical analysis instead of
returning characters.
\item \code{varpart} can partition \eqn{\chi^2}{Chi-squared}
inertia of correspondence analysis with new argument
\code{chisquare}. The adjusted \eqn{R^2}{R-squared} is based on
permutation tests, and the replicate analysis will have random
variation.
} % itemize
} % new features
\subsection{BUG FIXES}{
\itemize{
\item Very long \code{Condition()} statements (> 500 characters)
failed in partial constrained ordination models (\code{cca},
\code{rda}, \code{dbrda}, \code{capscale}). The problem was
detected in StackOverflow question
\href{https://stackoverflow.com/questions/49249816}{49249816}.
\item Labels were not adjusted when arrows were rescaled in
\code{envfit} plots. See StackOverflow question
\href{https://stackoverflow.com/questions/49259747}{49259747}.
} % itemize
} % bug fixes
\subsection{DEPRECATED AND DEFUNCT}{
\itemize{
\item \code{as.mlm} function for constrained correspondence
analysis is deprecated in favour of new functions that directly
give the influence statistics. See github issue
\href{https://github.com/vegandevs/vegan/issues/234}{#234}.
\item \code{commsimulator} is now defunct: use \code{simulate}
for \code{nullmodel} objects.
\item \pkg{ade4} \code{cca} objects are no longer handled in
\pkg{vegan}: \pkg{ade4} has had no \code{cca} since version
1.7-8 (August 9, 2017).
} %itemize
} % deprecated & defunct
} % 2.5-0
\section{Changes in version 2.4-6}{
\subsection{INSTALLATION AND BUILDING}{
\itemize{
\item CRAN packages are no longer allowed to use FORTRAN input,
but \code{read.cep} function used FORTRAN format to read legacy
CEP and Canoco files. To avoid NOTEs and WARNINGs, the function
was re-written in \R. The new \code{read.cep} is less powerful and
more fragile, and can only read data in \dQuote{condensed} format,
and it can fail in several cases that were successful with the old
code. The old FORTRAN-based function is still available in CRAN
package
\href{https://CRAN.R-project.org/package=cepreader}{cepreader}.
See github issue
\href{https://github.com/vegandevs/vegan/issues/263}{#263}. The
\pkg{cepreader} package is developed in
\href{https://github.com/vegandevs/cepreader}{https://github.com/vegandevs/cepreader}.
} %itemize
} % general
\subsection{BUG FIXES}{
\itemize{
\item Some functions for rarefaction (\code{rrarefy}), species
abundance distribution (\code{preston}) and species pool
(\code{estimateR}) need exact integer data, but the test allowed
small fuzz. The functions worked correctly with original data, but
if data were transformed and then back-transformed, they would
pass the integer test with fuzz and give wrong results. For
instance, \code{sqrt(3)^2} would pass the test as 3, but was
interpreted strictly as integer 2. See github issue
\href{https://github.com/vegandevs/vegan/issues/259}{#259}.
} % itemize
} % bugs
\subsection{NEW FEATURES}{
\itemize{
\item \code{ordiresids} uses now weighted residuals for
\code{cca} results.
} %itemize
} % features
} % 2.4-6
\section{Changes in version 2.4-5}{
\subsection{BUG FIXES}{
\itemize{
\item Several \dQuote{Swap & Shuffle} null models generated wrong
number of initial matrices. Usually they generated too many, which
was not dangerous, but it was slow. However, random sequences will
change with this fix.
\item Lattice graphics for ordination (\code{ordixyplot} and
friends) colour the arrows by \code{groups} instead of randomly
mixed colours.
\item Information on constant or mirrored permutations was
omitted when reporting permutation tests (e.g., in \code{anova}
for constrained ordination).
} % itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{ordistep} has improved interpretation of
\code{scope}: if the lower scope is missing, the formula of the
starting solution is taken as the lower scope instead of using
an empty model. See Stackoverflow question
\href{https://stackoverflow.com/questions/46985029/}{46985029}.
\item \code{fitspecaccum} gained new support functions \code{nobs}
and \code{logLik} which allow better co-operation with other
packages and functions. See GitHub issue
\href{https://github.com/vegandevs/vegan/issues/250}{#250}.
\item The \dQuote{backtracking} null model for community
simulation is faster. However, \dQuote{backtracking} is a biased
legacy model that should not be used except in comparative
studies.
} %itemize
} % new features
} % 2.4-5
\section{Changes in version 2.4-4}{
\subsection{INSTALLATION AND BUILDING}{
\itemize{
\item \code{orditkplot} should no longer give warnings in CRAN
tests.
} %itemize
} % installatin and building
\subsection{BUG FIXES}{
\itemize{
\item \code{anova(..., by = "axis")} for constrained ordination
(\code{cca}, \code{rda}, \code{dbrda}) ignored partial terms in
\code{Condition()}.
\item \code{inertcomp} and \code{summary.cca} failed if the
constrained component was defined, but explained nothing and had
zero rank. See StackOverflow:
\href{https://stackoverflow.com/questions/43683699/}{R - Error
message in doing RDA analysis - vegan package}.
\item Labels are no longer cropped in the \code{meandist} plots.
} % itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item The significance tests for the axes of constrained
ordination use now forward testing strategy. More extensive
analysis indicated that the previous marginal tests were biased.
This is in conflict with Legendre, Oksanen & ter Braak,
\emph{Methods Ecol Evol} \strong{2,} 269--277 (2011) who regarded
marginal tests as unbiased.
\item Canberra distance in \code{vegdist} can now handle negative
input entries similarly as latest versions of \R.
} %itemize
} % new features
} % v2.4-4
\section{Changes in version 2.4-3}{
\subsection{INSTALLATION AND BUILDING}{
\itemize{
\item \pkg{vegan} registers native \bold{C} and \bold{Fortran}
routines. This avoids warnings in model checking, and may also
give a small gain in speed.
\item Future versions of \pkg{vegan} will deprecate and remove
elements \code{pCCA$Fit}, \code{CCA$Xbar}, and \code{CA$Xbar}
from \code{cca} result objects. This release provides a new
function \code{ordiYbar} which is able to construct these
elements both from the current and future releases. Scripts and
functions directly accessing these elements should switch to
\code{ordiYbar} for smooth transition.
} % itemize
} % installation
\subsection{BUG FIXES}{
\itemize{
\item \code{as.mlm} methods for constrained ordination include
zero intercept to give the correct residual degrees of freedom for
derived statistics.
\item \code{biplot} method for \code{rda} passes
\code{correlation} argument to the scaling algorithm.
\item Biplot scores were wrongly centred in \code{cca} which
caused a small error in their values.
\item Weighting and centring were corrected in \code{intersetcor}
and \code{spenvcor}. The fix can make a small difference when
analysing \code{cca} results.
Partial models were not correctly handled in \code{intersetcor}.
\item \code{envfit} and \code{ordisurf} functions failed when
applied to species scores.
\item Non-standard variable names can be used within
\code{Condition()} in partial ordination. Partial models are used
internally within several functions, and a problem was reported by
Albin Meyer (Univ Lorraine, Metz, France) in \code{ordiR2step}
when using a variable name that contained a hyphen (which was
wrongly interpreted as a minus sign in partial ordination).
\item \code{ordispider} did not pass graphical arguments when
used to show the difference of LC and WA scores in constrained
ordination.
\item \code{ordiR2step} uses only \code{forward} selection to
avoid several problems in model evaluation.
\item \code{tolerance} function could return \code{NaN} in some
cases when it should have returned \eqn{0}. Partial models were
not correctly analysed. Misleading (non-zero) tolerances were
sometimes given for species that occurred only once or sampling
units that had only one species.
} %itemize
} % bug fixes
} % 2.4-3
\section{Changes in version 2.4-2}{
\subsection{BUG FIXES}{
\itemize{
\item Permutation tests (\code{permutests}, \code{anova}) for the
first axis failed in constrained distance-based ordination
(\code{dbrda}, \code{capscale}). Now \code{capscale} will also
throw away negative eigenvalues when first eigenvalues are
tested. All permutation tests for the first axis are now
faster. The problem was reported by Cleo Tebby and the fixes are
discussed in GitHub issue
\href{https://github.com/vegandevs/vegan/issues/198}{#198} and
pull request
\href{https://github.com/vegandevs/vegan/pull/199}{#199}.
\item Some support functions for \code{dbrda} or \code{capscale}
gave results or some of their components in wrong scale. Fixes in
\code{stressplot}, \code{simulate}, \code{predict} and
\code{fitted} functions.
\item \code{intersetcor} did not use correct weighting for
\code{cca} and the results were slightly off.
\item \code{anova} and \code{permutest} failed when
\code{betadisper} was fitted with argument
\code{bias.adjust = TRUE}. Fixes Github issue
\href{https://github.com/vegandevs/vegan/issues/219}{#219}
reported by Ross Cunning, O'ahu, Hawaii.
\item \code{ordicluster} should return invisibly only the
coordinates of internal points (where clusters or points are
joined), but last rows contained coordinates of external points
(ordination scores of points).
\item The \code{cca} method of \code{tolerance} was returning
incorrect values for all but the second axis for sample
heterogeneities and species tolerances. See issue
\href{https://github.com/vegandevs/vegan/issues/216}{#216} for
details.
} %itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item Biplot scores are scaled similarly as site scores in
constrained ordination methods \code{cca}, \code{rda},
\code{capscale} and \code{dbrda}. Earlier they were unscaled (or
more technically, had equal scaling on all axes).
\item \code{tabasco} adds argument to \code{scale} the colours
by rows or columns in addition to the old equal scale over the
whole plot. New arguments \code{labRow} and \code{labCex} can be
used to change the column or row labels. Function also takes
care that only above-zero observations are coloured: earlier
tiny observed values were merged to zeros and were not distinct
in the plots.
\item Sequential null models are somewhat faster (up to
10\%). Non-sequential null models may be marginally faster. These
null models are generated by function \code{nullmodel} and also
used in \code{oecosimu}.
\item \code{vegdist} is much faster. It used to be clearly slower
than \code{stats::dist}, but now it is nearly equally fast for the
same dissimilarity measure.
\item Handling of \code{data=} in formula interface is more
robust, and messages on user errors are improved. This fixes
points raised in Github issue
\href{https://github.com/vegandevs/vegan/issues/200}{#200}.
\item The families and orders in \code{dune.taxon} were updated to
APG IV (\emph{Bot J Linnean Soc} \strong{181,} 1--20; 2016) and a
corresponding classification for higher levels (Chase & Reveal,
\emph{Bot J Linnean Soc} \strong{161,} 122-127; 2009).
} %itemize
} % features
} % 2.4-2
\section{Changes in version 2.4-1}{
\subsection{INSTALLATION}{
\itemize{
\item Fortran code was modernized to avoid warnings in latest
\R. The modernization should have no visible effect in
functions. Please report all suspect cases as
\href{https://github.com/vegandevs/vegan/issues/}{vegan issues}.
} %itemize
} % installation
\subsection{BUG FIXES}{
\itemize{
\item Several support functions for ordination methods failed if
the solution had only one ordination axis, for instance, if
there was only one constraining variable in CCA, RDA and
friends. This concerned \code{goodness} for constrained
ordination, \code{inertcomp}, \code{fitted} for
\code{capscale}, \code{stressplot} for RDA, CCA (GitHub issue
\href{https://github.com/vegandevs/vegan/issues/189}{#189}).
\item \code{goodness} for CCA & friends ignored \code{choices}
argument (GitHub issue
\href{https://github.com/vegandevs/vegan/issues/190}{#190}).
\item \code{goodness} function did not consider negative
eigenvalues of db-RDA (function \code{dbrda}).
\item Function \code{meandist} failed in some cases when one of
the groups had only one observation.
\item \code{linestack} could not handle expressions in
\code{labels}. This regression is discussed in GitHub issue
\href{https://github.com/vegandevs/vegan/issues/195}{#195}.
\item Nestedness measures \code{nestedbetajac} and
\code{nestedbetasor} expecting binary data did not cope with
quantitative input in evaluating Baselga's matrix-wide Jaccard
or Sørensen dissimilarity indices.
\item Function \code{as.mcmc} to cast \code{oecosimu} result to an
MCMC object (\pkg{coda} package) failed if there was only one
chain.
} % itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{diversity} function returns now \code{NA} if the
observation had \code{NA} values instead of returning
\code{0}. The function also checks the input and refuses to
handle data with negative values. GitHub issue
\href{https://github.com/vegandevs/vegan/issues/187}{#187}.
\item \code{rarefy} function will work more robustly in marginal
case when the user asks for only one individual which can only
be one species with zero variance.
\item Several functions are more robust if their factor arguments
contain missing values (\code{NA}): \code{betadisper},
\code{adipart}, \code{multipart}, \code{hiersimu}, \code{envfit}
and constrained ordination methods \code{cca}, \code{rda},
\code{capscale} and \code{dbrda}. GitHub issues
\href{https://github.com/vegandevs/vegan/issues/192}{#192} and
\href{https://github.com/vegandevs/vegan/issues/193}{#193}.
} % itemize
} % new features
} % 2.4-1
\section{Changes in version 2.4-0}{
\subsection{DISTANCE-BASED ANALYSIS}{
\itemize{
\item Distance-based methods were redesigned and made
consistent for ordination (\code{capscale}, new \code{dbrda}),
permutational ANOVA (\code{adonis}, new \code{adonis2}),
multivariate dispersion (\code{betadisper}) and variation
partitioning (\code{varpart}). These methods can produce
negative eigenvalues with several popular semimetric
dissimilarity indices, and they were not handled similarly by
all functions. Now all functions are designed after McArdle &
Anderson (\emph{Ecology} 82, 290--297; 2001).
\item \code{dbrda} is a new function for distance-based
Redundancy Analysis following McArdle & Anderson
(\emph{Ecology} 82, 290--297; 2001). With metric
dissimilarities, the function is equivalent to old
\code{capscale}, but negative eigenvalues of semimetric indices
are handled differently. In \code{dbrda} the dissimilarities
are decomposed directly into conditions, constraints and
residuals with their negative eigenvalues, and any of the
components can have imaginary dimensions. Function is mostly
compatible with \code{capscale} and other constrained
ordination methods, but full compatibility cannot be achieved
(see issue
\href{https://github.com/vegandevs/vegan/issues/140}{#140} in
Github). The function is based on the code by Pierre Legendre.
\item The old \code{capscale} function for constrained
ordination is still based only on real components, but the
total inertia of the components is assessed similarly as in
\code{dbrda}.
The significance tests will differ from the previous version,
but function \code{oldCapscale} will cast the \code{capscale}
result to a similar form as previously.
\item \code{adonis2} is a new function for permutational ANOVA
of dissimilarities. It is based on the same algorithm as the
\code{dbrda}. The function can perform overall tests of all
independent variables as well as sequential and marginal tests
of each term. The old \code{adonis} is still available, but it
can only perform sequential tests. With same settings,
\code{adonis} and \code{adonis2} give identical results (but
see Github issue
\href{https://github.com/vegandevs/vegan/issues/156}{#156} for
differences).
\item Function \code{varpart} can partition dissimilarities
using the same algorithm as \code{dbrda}.
\item Argument \code{sqrt.dist} takes square roots of
dissimilarities and these can change many popular semimetric
indices to metric distances in \code{capscale}, \code{dbrda},
\code{wcmdscale}, \code{adonis2}, \code{varpart} and
\code{betadisper} (issue
\href{https://github.com/vegandevs/vegan/issues/179}{#179} in
Github).
\item Lingoes and Cailliez adjustments change any dissimilarity
into metric distance in \code{capscale}, \code{dbrda},
\code{adonis2}, \code{varpart}, \code{betadisper} and
\code{wcmdscale}. Earlier we had only Cailliez adjustment in
\code{capscale} (issue
\href{https://github.com/vegandevs/vegan/issues/179}{#179} in
Github).
\item \code{RsquareAdj} works with \code{capscale} and
\code{dbrda} and this allows using \code{ordiR2step} in model
building.
} % itemize
} % distance-based
\subsection{BUG FIXES}{
\itemize{
\item \code{specaccum}: \code{plot} failed if line type
(\code{lty}) was given. Reported by Lila Nath Sharma (Univ
Bergen, Norway)
} %itemize
} %bug fixes
\subsection{NEW FUNCTIONS}{
\itemize{
\item \code{ordibar} is a new function to draw crosses of
standard deviations or standard errors in ordination diagrams
instead of corresponding ellipses.
\item Several \code{permustats} results can be combined with a
new \code{c()} function.
\item New function \code{smbind} binds together null models by
row, column or replication. If sequential models are bound
together, they can be treated as parallel chains in subsequent
analysis (e.g., after \code{as.mcmc}). See issue
\href{https://github.com/vegandevs/vegan/issues/164}{#164} in
Github.
} %itemize
} % new functions
\subsection{NEW FEATURES}{
\itemize{
\item Null model analysis was upgraded:
New \code{"curveball"} algorithm provides a fast null model with
fixed row and column sums for binary matrices after Strona et
al. (\emph{Nature Commun.} 5: 4114; 2014).
The \code{"quasiswap"} algorithm gained argument \code{thin}
which can reduce the bias of null models.
\code{"backtracking"} is now much faster, but it is still very
slow, and provided mainly to allow comparison against better and
faster methods.
Compiled code can now be interrupted in null model simulations.
\item \code{designdist} can now use beta diversity notation
(\code{gamma}, \code{alpha}) for easier definition of beta
diversity indices.
\item \code{metaMDS} has new iteration strategy: Argument
\code{try} gives the minimum number of random starts, and
        \code{trymax} the maximum number. Earlier we only had
\code{try} which gave the maximum number, but now we run at
least \code{try} times. This reduces the risk of being trapped
in a local optimum (issue
\href{https://github.com/vegandevs/vegan/issues/154}{#154} in
Github).
If there were no convergent solutions, \code{metaMDS} will now
tabulate stopping criteria (if \code{trace = TRUE}). This can
help in deciding if any of the criteria should be made more
stringent or the number of iterations increased. The
documentation for \code{monoMDS} and \code{metaMDS} give more
detailed information on convergence criteria.
\item The \code{summary} of \code{permustats} prints now
\emph{P}-values, and the test direction (\code{alternative}) can
be changed.
The \code{qqmath} function of \code{permustats} can now plot
standardized statistics. This is a partial solution to issue
\href{https://github.com/vegandevs/vegan/issues/172}{#172} in
Github.
\item \code{MDSrotate} can rotate ordination to show maximum
separation of factor levels (classes) using linear discriminant
analysis (\code{lda} in \pkg{MASS} package).
\item \code{adipart}, \code{hiersimu} and \code{multipart}
expose argument \code{method} to specify the null model.
\item \code{RsquareAdj} works with \code{cca} and this allows
using \code{ordiR2step} in model building. The code was
developed by Dan McGlinn (issue
\href{https://github.com/vegandevs/vegan/issues/161}{#161} in
Github). However, \code{cca} still cannot be used in
\code{varpart}.
\item \code{ordiellipse} and \code{ordihull} allow setting
colours, line types and other graphical parameters.
The alpha channel can now be given also as a real number in 0 \dots 1
in addition to integer 0 \dots 255.
\item \code{ordiellipse} can now draw ellipsoid hulls that
enclose points in a group.
\item \code{ordicluster}, \code{ordisegments}, \code{ordispider}
and \code{lines} and \code{plot} functions for \code{isomap} and
\code{spantree} can use a mixture of colours of connected
points. Their behaviour is similar as in analogous functions in
        the \pkg{vegan3d} package.
\item \code{plot} of \code{betadisper} is more configurable. See
issues
\href{https://github.com/vegandevs/vegan/issues/128}{#128} and
\href{https://github.com/vegandevs/vegan/issues/166}{#166} in
Github for details.
\item \code{text} and \code{points} methods for
\code{orditkplot} respect stored graphical parameters.
\item Environmental data for the Barro Colorado Island forest
plots gained new variables from Harms et al. (\emph{J. Ecol.} 89,
947--959; 2001). Issue
\href{https://github.com/vegandevs/vegan/issues/178}{#178} in
Github.
} %itemize
} % features
\subsection{DEPRECATED AND DEFUNCT}{
\itemize{
\item Function \code{metaMDSrotate} was removed and replaced
with \code{MDSrotate}.
\item \code{density} and \code{densityplot} methods for
various \pkg{vegan} objects were deprecated and replaced with
\code{density} and \code{densityplot} for \code{permustats}.
Function \code{permustats} can extract the permutation and
simulation results of \pkg{vegan} result objects.
} %itemize
} % deprecated & defunct
} % v2.4-0
\section{Changes in version 2.3-5}{
\subsection{BUG FIXES}{
\itemize{
\item \code{eigenvals} fails with \code{prcomp} results in
\R-devel. The next version of \code{prcomp} will have an
argument to limit the number of eigenvalues shown
(\code{rank.}), and this breaks \code{eigenvals} in \pkg{vegan}.
\item \code{calibrate} failed for \code{cca} and friends if
\code{rank} was given.
} % itemise
} % bug fixes
} % v2.3-5
\section{Changes in version 2.3-4}{
\subsection{BUG FIXES}{
\itemize{
\item \code{betadiver} index \code{19} had wrong sign in one of
its terms.
\item \code{linestack} failed when the \code{labels} were given,
but the input scores had no names. Reported by Jeff Wood (ANU,
Canberra, ACT).
} %itemize
} % bug fixes
\subsection{DEPRECATED}{
\itemize{
\item \code{vegandocs} is deprecated. Current \R{} provides better
tools for seeing extra documentation (\code{news()} and
\code{browseVignettes()}).
} %itemize
} %deprecated
\subsection{VIGNETTES}{
\itemize{
\item All vignettes are built with standard \R{} tools and can be
browsed with \code{browseVignettes}. \code{FAQ-vegan} and
\code{partitioning} were only accessible with \code{vegandocs}
function.
} %itemize
} %vignettes
\subsection{BUILDING}{
\itemize{
\item Dependence on external software \code{texi2dvi} was
removed. Version 6.1 of \code{texi2dvi} was incompatible with \R{}
and prevented building \pkg{vegan}. The \code{FAQ-vegan} that was
earlier built with \code{texi2dvi} uses now \pkg{knitr}. Because
of this, \pkg{vegan} is now dependent on \R-3.0.0. Fixes issue
\href{https://github.com/vegandevs/vegan/issues/158}{#158} in
Github.
} %itemize
} % building
} % v2.3-4
\section{Changes in version 2.3-3}{
\subsection{BUG FIXES}{
\itemize{
\item \code{metaMDS} and \code{monoMDS} could fail if input
dissimilarities were huge: in the reported case they were of
magnitude 1E85. Fixes issue
\href{https://github.com/vegandevs/vegan/issues/152}{#152} in
Github.
\item Permutations failed if they were defined as \pkg{permute}
control structures in \code{estaccum}, \code{ordiareatest},
\code{renyiaccum} and \code{tsallisaccum}. Reported by Dan
Gafta (Cluj-Napoca) for \code{renyiaccum}.
\item \code{rarefy} gave false warnings if input was a vector
or a single sampling unit.
\item Some extrapolated richness indices in \code{specpool}
needed the number of doubletons (= number of species occurring
in two sampling units), and these failed when only one sampling
unit was supplied. The extrapolated richness cannot be
estimated from a single sampling unit, but now such cases are
handled smoothly instead of failing: observed non-extrapolated
richness with zero standard error will be reported. The issue
was reported in
\href{http://stackoverflow.com/questions/34027496/error-message-when-using-specpool-in-vegan-package}{StackOverflow}.
} %itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{treedist} and \code{treedive} refuse to handle
      trees with reversals, i.e., higher levels are more homogeneous
than lower levels. Function \code{treeheight} will estimate
their total height with absolute values of branch
lengths. Function \code{treedive} refuses to handle trees with
negative branch heights indicating negative
dissimilarities. Function \code{treedive} is faster.
\item \code{gdispweight} works when input data are in a matrix
instead of a data frame.
\item Input dissimilarities supplied in symmetric matrices or
data frames are more robustly recognized by \code{anosim},
\code{bioenv} and \code{mrpp}.
} %itemize
} %new features
} %v2.3-3
\section{Changes in version 2.3-2}{
\subsection{BUG FIXES}{
\itemize{
\item Printing details of a gridded permutation design would fail
when the grid was at the within-plot level.
\item \code{ordicluster} joined the branches at wrong coordinates
in some cases.
\item \code{ordiellipse} ignored weights when calculating standard
errors (\code{kind = "se"}). This influenced plots of \code{cca},
and also influenced \code{ordiareatest}.
} % itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{adonis} and \code{capscale} functions recognize
symmetric square matrices as dissimilarities. Formerly
dissimilarities had to be given as \code{"dist"} objects such as
produced by \code{dist} or \code{vegdist} functions, and data
frames and matrices were regarded as observations x variables
data which could confuse users (e.g., issue
\href{https://github.com/vegandevs/vegan/issues/147}{#147}).
\item \code{mso} accepts \code{"dist"} objects for the distances
among locations as an alternative to coordinates of locations.
\item \code{text}, \code{points} and \code{lines} functions for
\code{procrustes} analysis gained new argument \code{truemean}
which allows adding \code{procrustes} items to the plots of
original analysis.
\item \code{rrarefy} returns observed non-rarefied communities
(with a warning) when users request subsamples that are larger
than the observed community instead of failing. Function
\code{drarefy} has been similar and returned sampling
probabilities of 1, but now it also issues a warning. Fixes issue
\href{https://github.com/vegandevs/vegan/issues/144}{#144} in
Github.
} % itemize
} % new features
} %v2.3-2
\section{Changes in version 2.3-1}{
\subsection{BUG FIXES}{
\itemize{
\item Permutation tests did not always correctly recognize ties
with the observed statistic and this could result in too low
\eqn{P}-values. This would happen in particular when all predictor
variables were factors (classes). The changes concern functions
\code{adonis}, \code{anosim}, \code{anova} and \code{permutest}
functions for \code{cca}, \code{rda} and \code{capscale},
\code{permutest} for \code{betadisper}, \code{envfit},
\code{mantel} and \code{mantel.partial}, \code{mrpp}, \code{mso},
\code{oecosimu}, \code{ordiareatest}, \code{protest} and
\code{simper}. This also fixes issues
\href{https://github.com/vegandevs/vegan/issues/120}{#120} and
\href{https://github.com/vegandevs/vegan/issues/132}{#132} in
GitHub.
\item Automated model building in constrained ordination
(\code{cca}, \code{rda}, \code{capscale}) with \code{step},
\code{ordistep} and \code{ordiR2step} could fail if there were
aliased candidate variables, or constraints that were completely
explained by other variables already in the model. This was a
regression introduced in \pkg{vegan} 2.2-0.
\item Constrained ordination methods \code{cca}, \code{rda} and
\code{capscale} treat character variables as factors in analysis,
but did not return their centroids for plotting.
\item Recovery of original data in \code{metaMDS} when computing
WA scores for species would fail if the expression supplied to
argument \code{comm} was long & got deparsed to multiple
strings. \code{metaMDSdist} now returns the (possibly modified)
data frame of community data \code{comm} as attribute
\code{"comm"} of the returned \code{dist} object. \code{metaMDS}
now uses this to compute the WA species scores for the NMDS. In
addition, the deparsed expression for \code{comm} is now robust to
long expressions. Reported by Richard Telford.
\item \code{metaMDS} and \code{monoMDS} rejected dissimilarities
with missing values.
\item Function \code{rarecurve} did not check its input and this
could cause confusing error messages. Now function checks that
input data are integers that can be interpreted as counts on
individuals and all sampling units have some species. Unchecked
bad inputs were the reason for problems reported in
\href{http://stackoverflow.com/questions/30856909/error-while-using-rarecurve-in-r}{Stackoverflow}.
}
} % bug fixes
\subsection{NEW FEATURES AND FUNCTIONS}{
\itemize{
\item Scaling of ordination axes in \code{cca}, \code{rda} and
\code{capscale} can now be expressed with descriptive strings
\code{"none"}, \code{"sites"}, \code{"species"} or
\code{"symmetric"} to tell which kind of scores should be scaled by
eigenvalues. These can be further modified with arguments
\code{hill} in \code{cca} and \code{correlation} in \code{rda}. The
old numeric scaling can still be used.
\item The permutation data can be extracted from \code{anova}
results of constrained ordination (\code{cca}, \code{rda},
\code{capscale}) and further analysed with \code{permustats}
function.
\item New data set \code{BCI.env} of site information for the Barro
Colorado Island tree community data. Most useful variables are the
UTM coordinates of sample plots. Other variables are constant or
nearly constant and of little use in normal analysis.
}
} % new features and functions
}
\section{Changes in version 2.3-0}{
\subsection{BUG FIXES}{
\itemize{
\item Constrained ordination functions \code{cca}, \code{rda} and
\code{capscale} are now more robust. Scoping of data set names and
variable names is much improved. This should fix numerous
long-standing problems, for instance those reported by Benedicte
Bachelot (in email) and Richard Telford (in Twitter), as well as
issues \href{https://github.com/vegandevs/vegan/issues/16}{#16}
and \href{https://github.com/vegandevs/vegan/issues/100}{#100} in
GitHub.
\item Ordination functions \code{cca} and \code{rda} silently
accepted dissimilarities as input although their analysis makes
no sense with these methods. Dissimilarities should be analysed
with distance-based redundancy analysis (\code{capscale}).
\item The variance of the conditional component was over-estimated
in \code{goodness} of \code{rda} results, and results were wrong
for partial RDA. The problems were reported in an
\href{https://stat.ethz.ch/pipermail/r-sig-ecology/2015-March/004936.html}{R-sig-ecology}
message by Christoph von Redwitz.
}
} % bug fixes
\subsection{WINDOWS}{
\itemize{
\item \code{orditkplot} did not add file type identifier to saved
graphics in Windows although that is required. The problem only
concerned Windows OS.
}
} % windows
\subsection{NEW FEATURES AND FUNCTIONS}{
\itemize{
\item \code{goodness} function for constrained ordination
(\code{cca}, \code{rda}, \code{capscale}) was redesigned. Function
gained argument \code{addprevious} to add the variation explained
by previous ordination components to axes when \code{statistic =
"explained"}. With this option, \code{model = "CCA"} will include
the variation explained by partialled-out conditions, and
\code{model = "CA"} will include the accumulated variation
explained by conditions and constraints. The former behaviour was
\code{addprevious = TRUE} for \code{model = "CCA"}, and
\code{addprevious = FALSE} for \code{model = "CA"}. The argument
will have no effect when \code{statistic = "distance"}, but this
will always show the residual distance after all previous
components. Formerly it displayed the residual distance only for
the currently analysed model.
\item Functions \code{ordiArrowMul} and \code{ordiArrowTextXY} are
exported and can be used in normal interactive sessions. These
      functions are used to scale a bunch of arrows to fit ordination
graphics, and formerly they were internal functions used within
other \pkg{vegan} functions.
\item \code{orditkplot} can export graphics in SVG format. SVG is
a vector graphics format which can be edited with several external
programs, such as Illustrator and Inkscape.
\item Rarefaction curve (\code{rarecurve}) and species
accumulation models (\code{specaccum}, \code{fitspecaccum})
gained new functions to estimate the slope of curve at given
location. Originally this was based on a response to an
\href{https://stat.ethz.ch/pipermail/r-sig-ecology/2015-May/005038.html}{R-SIG-ecology}
query. For rarefaction curves, the function is \code{rareslope},
and for species accumulation models it is \code{specslope}.
The functions are based on analytic equations, and can also be
evaluated at interpolated non-integer values. In
\code{specaccum} models the functions can be only evaluated for
analytic models \code{"exact"}, \code{"rarefaction"} and
\code{"coleman"}. With \code{"random"} and \code{"collector"}
methods you can only use finite differences
(\code{diff(fitted(<result.object>))}). Analytic functions for
slope are used for all non-linear regression models known to
\code{fitspecaccum}.
\item Species accumulation models (\code{specaccum}) and
      non-linear regression models for species accumulation
(\code{fitspecaccum}) work more consistently with weights. In
all cases, the models are defined using the number of sites as
independent variable, which with weights means that observations
can be non-integer numbers of virtual sites. The \code{predict}
models also use the number of sites with \code{newdata},
and for analytic models they can estimate the expected values
for non-integer number of sites, and for non-analytic randomized
or collector models they can interpolate on non-integer values.
\item \code{fitspecaccum} gained support functions \code{AIC}
and \code{deviance}.
\item The \code{varpart} plots of four-component models were
redesigned following Legendre, Borcard & Roberts \emph{Ecology}
93, 1234--1240 (2012), and they use now four ellipses instead of
three circles and two rectangles. The components are now labelled
in plots, and the circles and ellipses can be easily filled with
transparent background colour.
}
} % new features
} % v2.3-0
\section{Changes in version 2.2-1}{
\subsection{GENERAL}{
\itemize{
\item This is a maintenance release to avoid warning messages
caused by changes in CRAN repository. The namespace usage is also
more stringent to avoid warnings and notes in development versions
of \R.
}
}% end general
\subsection{INSTALLATION}{
\itemize{
\item \pkg{vegan} can be installed and loaded without \pkg{tcltk}
package. The \pkg{tcltk} package is needed in \code{orditkplot}
function for interactive editing of ordination graphics.
}
} % installation
\subsection{BUG FIXES}{
\itemize{
\item \code{ordisurf} failed if \pkg{gam} package was loaded due
to namespace issues: some support functions of \pkg{gam} were used
instead of \pkg{mgcv} functions.
\item \code{tolerance} function failed for unconstrained
correspondence analysis.
}
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{estimateR} uses a more exact variance formula for
bias-corrected Chao estimate of extrapolated number of
species. The new formula may be unpublished, but it was derived
following the guidelines of Chiu, Wang, Walther & Chao,
\emph{Biometrics} 70, 671--682 (2014),
\href{http://onlinelibrary.wiley.com/doi/10.1111/biom.12200/suppinfo}{online
supplementary material}.
\item Diversity accumulation functions \code{specaccum},
\code{renyiaccum}, \code{tsallisaccum}, \code{poolaccum} and
\code{estaccumR} use now \pkg{permute} package for permutations
of the order of sampling sites. Normally these functions only
need simple random permutation of sites, but restricted
permutation of the \pkg{permute} package and user-supplied
permutation matrices can be used.
\item \code{estaccumR} function can use parallel processing.
\item \code{linestack} accepts now expressions as labels. This
allows using mathematical symbols and formula given as
mathematical expressions.
}
} % new features
} % v2.2-1
\section{Changes in version 2.2-0}{
\subsection{GENERAL}{
\itemize{
\item Several \pkg{vegan} functions can now use parallel
processing for slow and repeating calculations. All these
functions have argument \code{parallel}. The argument can be an
integer giving the number of parallel processes. In unix-alikes
(Mac OS, Linux) this will launch \code{"multicore"} processing
      and in Windows it will set up \code{"snow"} clusters as described
in the documentation of the \pkg{parallel} package. If \code{option}
\code{"mc.cores"} is set to an integer > 1, this will be used to
automatically start parallel processing. Finally, the argument
can also be a previously set up \code{"snow"} cluster which will
be used both in Windows and in unix-alikes. \pkg{Vegan} vignette
on Design decision explains the implementation (use
      \code{vegandocs("decission")}), and \pkg{parallel} package has more
extensive documentation on parallel processing in \R.
      The following functions use parallel processing in analysing
permutation statistics: \code{adonis}, \code{anosim},
\code{anova.cca} (and \code{permutest.cca}), \code{mantel} (and
\code{mantel.partial}), \code{mrpp}, \code{ordiareatest},
\code{permutest.betadisper} and \code{simper}. In addition,
\code{bioenv} can compare several candidate sets of models in
      parallel, \code{metaMDS} can launch several random starts in
parallel, and \code{oecosimu} can evaluate test statistics for
several null models in parallel.
\item All permutation tests are based on the \pkg{permute} package
which offers strong tools for restricted permutation. All these
functions have argument \code{permutations}. The default usage of
simple non-restricted permutations is achieved by giving a single
integer number. Restricted permutations can be defined using the
\code{how} function of the \pkg{permute} package. Finally, the
argument can be a permutation matrix where rows define
permutations. It is possible to use external or user constructed
permutations.
See \code{help(permutations)} for a brief introduction on
permutations in \pkg{vegan}, and \pkg{permute} package for the
      full documentation. The vignette of the \pkg{permute} package can
be read from \pkg{vegan} with command
\code{vegandocs("permutations")}.
The following functions use the \pkg{permute} package:
\code{CCorA}, \code{adonis}, \code{anosim}, \code{anova.cca} (plus
associated \code{permutest.cca}, \code{add1.cca},
\code{drop1.cca}, \code{ordistep}, \code{ordiR2step}),
\code{envfit} (plus associated \code{factorfit} and
\code{vectorfit}), \code{mantel} (and \code{mantel.partial}),
\code{mrpp}, \code{mso}, \code{ordiareatest},
\code{permutest.betadisper}, \code{protest} and \code{simper}.
\item Community null model generation has been completely
redesigned and rewritten. The communities are constructed with
new \code{nullmodel} function and defined in a low level
\code{commsim} function. The actual null models are generated
with a \code{simulate} function that builds an array of null
models. The new null models include a wide array of quantitative
models in addition to the old binary models, and users can plug
in their own generating functions. The basic tool invoking and
analysing null models is \code{oecosimu}. The null models are
often used only for the analysis of nestedness, but the
implementation in \code{oecosimu} allows analysing any
statistic, and null models are better seen as an alternative to
permutation tests.
} %end itemize
} % end general
\subsection{INSTALLATION}{
\itemize{
\item \pkg{vegan} package dependencies and namespace imports
were adapted to changes in \R, and no more trigger warnings and
notes in package tests.
\item Three-dimensional ordination graphics using
\pkg{scatterplot3d} for static plots and \pkg{rgl} for dynamic
plots were removed from \pkg{vegan} and moved to a companion
package \pkg{vegan3d}. The package is available in CRAN.
} %end itemize
} % end installation
\subsection{NEW FUNCTIONS}{
\itemize{
\item Function \code{dispweight} implements dispersion weighting
of Clarke et al. (\emph{Marine Ecology Progress Series}, 320,
11--27). In addition, we implemented a new method for
generalized dispersion weighting \code{gdispweight}. Both
methods downweight species that are significantly
over-dispersed.
\item New \code{hclust} support functions \code{reorder},
\code{rev} and \code{scores}. Functions \code{reorder} and
\code{rev} are similar as these functions for \code{dendrogram}
objects in base \R. However, \code{reorder} can use (and defaults
to) weighted mean. In weighted mean the node average is always the
mean of member leaves, whereas the \code{dendrogram} uses always
unweighted means of joined branches.
\item Function \code{ordiareatest} supplements \code{ordihull} and
\code{ordiellipse} and provides a randomization test for the
one-sided alternative hypothesis that convex hulls or ellipses in
two-dimensional ordination space have smaller areas than with
randomized groups.
\item Function \code{permustats} extracts and inspects permutation
results with support functions \code{summary}, \code{density},
\code{densityplot}, \code{qqnorm} and \code{qqmath}. The
\code{density} and \code{qqnorm} are standard \R{} tools that only
work with one statistic, and \code{densityplot} and \code{qqmath}
are \pkg{lattice} graphics that work with univariate and
multivariate statistics. The results of following functions can be
extracted: \code{anosim}, \code{adonis}, \code{mantel} (and
\code{mantel.partial}), \code{mrpp}, \code{oecosimu},
      \code{permutest.cca} (but not the corresponding \code{anova}
methods), \code{permutest.betadisper}, and \code{protest}.
\item \code{stressplot} functions display the ordination distances
at given number of dimensions against original distances. The
      method functions are similar to \code{stressplot} for
\code{metaMDS}, and always use the inherent distances of each
ordination method. The functions are available for the results
\code{capscale}, \code{cca}, \code{princomp}, \code{prcomp},
\code{rda}, and \code{wcmdscale}.
} % end itemize
} % end new functions
\subsection{BUG FIXES}{
\itemize{
\item \code{cascadeKM} of only one group will be \code{NA} instead
of a random value.
\item \code{ordiellipse} can handle points exactly on a line,
including only two points (with a warning).
\item plotting \code{radfit} results for several species failed if
any of the communities had no species or had only one species.
\item \code{RsquareAdj} for \code{capscale} with negative
eigenvalues will now report \code{NA} instead of using biased
method of \code{rda} results.
\item \code{simper} failed when a group had only a single member.
}% end itemize
} % end bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{anova.cca} functions were re-written to use the
\pkg{permute} package. Old results may not be exactly
reproduced, and models with missing data may fail in several
cases. There is a new option of analysing a sequence of models
against each other.
\item \code{simulate} functions for \code{cca} and \code{rda}
can return several simulations in a \code{nullmodel} compatible
object. The functions can produce simulations with correlated
errors (also for \code{capscale}) in parametric simulation with
Gaussian error.
\item \code{bioenv} can use Manhattan, Gower and Mahalanobis
distances in addition to the default Euclidean. New helper
function \code{bioenvdist} can extract the dissimilarities
applied in best model or any other model.
\item \code{metaMDS(..., trace = 2)} will show convergence
information with the default \code{monoMDS} engine.
\item Function \code{MDSrotate} can rotate a \eqn{k}-dimensional
ordination to \eqn{k-1} variables. When these variables are
correlated (like usually is the case), the vectors can also be
correlated to previously rotated dimensions, but will be
uncorrelated to all later ones.
\item \pkg{vegan} 2.0-10 changed the weighted \code{nestednodf}
so that weighted analysis of binary data was equivalent to
binary analysis. However, this broke the equivalence to the
original method. Now the function has an argument \code{wbinary}
to select the method of analysis. The problem was reported and a
fix submitted by Vanderlei Debastiani (Universidade Federal do
Rio Grande do Sul, Brasil).
    \item \code{ordiellipse}, \code{ordihull} and \code{ordispider}
can handle missing values in \code{groups}.
\item \code{ordispider} can now use spatial medians instead of
means.
\item \code{rankindex} can use Manhattan, Gower and Mahalanobis
distance in addition to the default Euclidean.
\item User can set colours and line types in function
\code{rarecurve} for plotting rarefaction curves.
\item \code{spantree} gained a support function \code{as.hclust}
to change the minimum spanning tree into an \code{hclust} tree.
\item \code{fitspecaccum} can do weighted analysis. Gained
\code{lines} method.
\item Functions for extrapolated number of species or for the size
of species pool using Chao method were modified following Chiu et
al., \emph{Biometrics} 70, 671--682 (2014).
Incidence based \code{specpool} can now use (and defaults to)
small sample correction with number of sites as the sample
size. Function uses basic Chao extrapolation based on the ratio of
singletons and doubletons, but switches now to bias corrected Chao
extrapolation if there are no doubletons (species found
twice). The variance formula for bias corrected Chao was derived
following the supporting
\href{http://onlinelibrary.wiley.com/doi/10.1111/biom.12200/suppinfo}{online material}
and differs slightly from Chiu et al. (2014).
The \code{poolaccum} function was changed similarly, but the small
sample correction is used always.
The abundance based \code{estimateR} uses bias corrected Chao
extrapolation, but earlier it estimated its variance with classic
Chao model. Now we use the widespread
\href{http://viceroy.eeb.uconn.edu/EstimateS/EstimateSPages/EstSUsersGuide/EstimateSUsersGuide.htm#AppendixB}{approximate
equation} for variance.
With these changes these functions are more similar to
\href{http://viceroy.eeb.uconn.edu/EstimateS/EstimateSPages/EstSUsersGuide/EstimateSUsersGuide.htm#AppendixB}{EstimateS}.
\item \code{tabasco} uses now \code{reorder.hclust} for
\code{hclust} object for better ordering than previously when it
cast trees to \code{dendrogram} objects.
\item \code{treedive} and \code{treedist} default now to
\code{match.force = TRUE} and can be silenced with
\code{verbose = FALSE}.
\item \code{vegdist} gained Mahalanobis distance.
\item Nomenclature updated in plant community data with the help
of \pkg{Taxonstand} and \pkg{taxize} packages. The taxonomy of
the \code{dune} data was adapted to the same sources and APG
III. \code{varespec} and \code{dune} use 8-character names (4
from genus + 4 from species epithet). New data set on
phylogenetic distances for \code{dune} was extracted from Zanne
et al. (\emph{Nature} 506, 89--92; 2014).
\item User configurable plots for \code{rarecurve}.
} %end itemize
  } % end new features
\subsection{DEPRECATED AND DEFUNCT}{
\itemize{
\item \code{strata} are deprecated in permutations. It is still
accepted but will be phased out in next releases. Use \code{how}
of \pkg{permute} package.
\item \code{cca}, \code{rda} and \code{capscale} do not return
scores scaled by eigenvalues: use \code{scores} function to
extract scaled results.
\item \code{commsimulator} is deprecated. Replace
\code{commsimulator(x, method)} with
\code{simulate(nullmodel(x, method))}.
\item \code{density} and \code{densityplot} for permutation
results are deprecated: use \code{permustats} with its
\code{density} and \code{densityplot} method.
} %end itemize
} % end deprecated
} % end version 2.2-0
\section{Changes in version 2.0-10}{
\subsection{GENERAL}{
\itemize{
\item This version is adapted to the changes in \pkg{permute}
package version 0.8-0 and no more triggers NOTEs in package
checks. This release may be the last of the 2.0 series, and the
next \pkg{vegan} release is scheduled to be a major release with
newly designed \code{oecosimu} and community pattern simulation,
support for parallel processing, and full support of the
\pkg{permute} package. If you are interested in these
developments, you may try the development versions of
\pkg{vegan} in
\href{http://r-forge.r-project.org/projects/vegan/}{R-Forge} or
\href{https://github.com/jarioksa/vegan}{GitHub} and report the
problems and user experience to us. } } % end general
\subsection{BUG FIXES}{
\itemize{
\item \code{envfit} function assumed that all external variables
were either numeric or factors, and failed if they were, say,
character strings. Now only numeric variables are taken as
continuous vectors, and all other variables (character strings,
logical) are coerced to factors if possible. The function also
should work with degenerate data, like only one level of a
factor or a constant value of a continuous environmental
      variable. The ties were wrongly handled in assessing permutation
      \eqn{P}-values in \code{vectorfit}.
\item \code{nestednodf} with quantitative data was not
consistent with binary models, and the fill was wrongly
calculated with quantitative data.
\item \code{oecosimu} now correctly adapts displayed quantiles
of simulated values to the \code{alternative} test direction.
\item \code{renyiaccum} plotting failed if only one level of
diversity \code{scale} was used.
}
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item The Kempton and Taylor algorithm was found unreliable in
\code{fisherfit} and \code{fisher.alpha}, and now the estimation
of Fisher \eqn{\alpha}{alpha} is only based on the number of
species and the number of individuals. The estimation of
standard errors and profile confidence intervals also had to be
scrapped.
\item \code{renyiaccum}, \code{specaccum} and
\code{tsallisaccum} functions gained \code{subset} argument.
\item \code{renyiaccum} can now add a \code{collector} curve to
to the analysis. The collector curve is the diversity
accumulation in the order of the sampling units. With an
interesting ordering or sampling units this allows comparing
actual species accumulations with the expected randomized
accumulation.
\item \code{specaccum} can now perform weighted accumulation
using the sampling effort as weights.
}
} % new features
} % end 2.0-10
\section{Changes in version 2.0-9}{
\itemize{
\item This version is released due to changes in programming
interface and testing procedures in \R{} 3.0.2. If you are using an
older version of \R, there is no need to upgrade \pkg{vegan}. There
are no new features nor bug fixes. The only user-visible changes
are in documentation and in output messages and formatting. Because
of \R{} changes, this version is dependent on \R{} version 2.14.0
or newer and on \pkg{lattice} package.
}
}
\section{Changes in version 2.0-8}{
\subsection{GENERAL}{
\itemize{
\item This is a maintenance release that fixes some issues
      raised by changes in \R{} toolset for processing vignettes. At
      the same time we also fix some typographic issues in the vignettes.
}
} % general
\subsection{NEW FEATURES}{
\itemize{
\item \code{ordisurf} gained new arguments for more flexible
definition of fitted models to better utilize the
\pkg{mgcv}\code{::gam} function.
The linewidth of contours can
now be set with the argument \code{lwd}.
\item Labels to arrows are positioned in a better way in
\code{plot} functions for the results of \code{envfit},
\code{cca}, \code{rda} and \code{capscale}. The labels should no
longer overlap the arrow tips.
\item The setting test direction is clearer in \code{oecosimu}.
\item \code{ordipointlabel} gained a \code{plot} method that can
be used to replot the saved result.
}
} % new features
}
\section{Changes in version 2.0-7}{
\subsection{NEW FUNCTIONS}{
\itemize{
\item \code{tabasco()} is a new function for graphical display
of community data matrix. Technically it is an interface to \R
\code{heatmap}, but its use is closer to \pkg{vegan} function
\code{vegemite}. The function can reorder the community data
matrix similarly as \code{vegemite}, for instance, by ordination
results. Unlike \code{heatmap}, it only displays dendrograms if
supplied by the user, and it defaults to re-order the
dendrograms by correspondence analysis. Species are ordered to
      match site ordering or as determined by the user.
}
} % new functions
\subsection{BUG FIXES}{
\itemize{
\item Function \code{fitspecaccum(..., model = "asymp")} fitted
logistic model instead of asymptotic model (or the same as
\code{model = "logis"}).
\item \code{nestedtemp()} failed with very sparse data (fill
\eqn{< 0.38}\%).
}
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item The \code{plot} function for constrained ordination
results (\code{cca}, \code{rda}, \code{capscale}) gained
argument \code{axis.bp} (defaults \code{TRUE}) which can be used
      to suppress the axis scale for biplot arrows.
\item Number of iterations in nonmetric multidimensional scaling
(NMDS) can be set with keyword \code{maxit} (defaults
\code{200}) in \code{metaMDS}.
}
} % new features
\subsection{DEPRECATED}{
\itemize{
\item The result objects of \code{cca}, \code{rda} and
\code{capscale} will no longer have scores \code{u.eig},
\code{v.eig} and \code{wa.eig} in the future versions of
\pkg{vegan}. This change does not influence normal usage,
because \pkg{vegan} functions do not need these items. However,
external scripts and packages may need changes in the future
versions of \pkg{vegan}.
}
} % deprecated
} % vegan 2.0-7
\section{Changes in version 2.0-6}{
\subsection{BUG FIXES}{
\itemize{
\item The species scores were scaled wrongly in
\code{capscale()}. They were scaled correctly only when Euclidean
distances were used, but usually \code{capscale()} is used with
non-Euclidean distances. Most graphics will change and should be
redone. The change of scaling mainly influences the spread of
species scores with respect to the site scores.
\item Function \code{clamtest()} failed to set the minimum
abundance threshold in some cases. In addition, the output was
wrong when some of the possible species groups were missing. Both
problems were reported by Richard Telford (Bergen, Norway).
\item Plotting an object fitted by \code{envfit()} would fail if
\code{p.max} was used and there were unused levels for one or
more factors. The unused levels could result from deletion of
observations with missing values or simply as the result of
supplying a subset of a larger data set to \code{envfit()}.
\item \code{multipart()} printed wrong information about the
analysis type (but did the analysis correctly). Reported by
Valerie Coudrain.
\item \code{oecosimu()} failed if its \code{nestedfun} returned a
data frame. A more fundamental fix will be in \pkg{vegan} 2.2-0,
where the structure of the \code{oecosimu()} result will change.
\item The plot of two-dimensional \code{procrustes()} solutions
      often drew original axes in a wrong angle. The problem was
reported by Elizabeth Ottesen (MIT).
\item Function \code{treedive()} for functional or phylogenetic
diversity did not correctly match the species names between the
community data and species tree when the tree contained species
that did not occur in the data. Related function
\code{treedist()} for phylogenetic distances did not try to match
the names at all.
}
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item The output of \code{capscale()} displays the value of the
additive constant when argument \code{add = TRUE} was used.
\item \code{fitted()} functions for \code{cca()}, \code{rda()} and
\code{capscale()} can now return conditioned (partial) component
of the response: Argument \code{model} gained a new alternative
\code{model = "pCCA"}.
\item \code{dispindmorisita()} output gained a new column for
Chi-squared based probabilities that the null hypothesis (random
distribution) is true.
\item \code{metaMDS()} and \code{monoMDS()} have new default
convergence criteria. Most importantly, scale factor of the
gradient (\code{sfgrmin}) is stricter. The former limit was too
slack with large data sets and iterations stopped early without
getting close to the solution. In addition, \code{scores()}
ignore now requests to dimensions beyond those calculated
instead of failing, and \code{scores()} for \code{metaMDS()}
results do not drop dimensions.
\item \code{msoplot()} gained \code{legend} argument for
positioning the legend.
\item Nestedness function \code{nestednodf()} gained a \code{plot}
method.
\item \code{ordiR2step()} gained new argument \code{R2scope}
(defaults \code{TRUE}) which can be used to turn off the criterion
of stopping when the adjusted \eqn{R^2}{R-squared} of the current
model exceeds that of the scope. This option allows model
building when the \code{scope} would be overdetermined (number of
predictors higher than number of observations).
\code{ordiR2step()} now handles partial redundancy analysis
(pRDA).
\item \code{orditorp()} gained argument \code{select} to select
the rows or columns of the results to display.
\item \code{protest()} prints the standardized residual statistic
\eqn{m_{12}^2}{squared m12} in addition to the squared Procrustes
correlation \eqn{R^2}{R-squared}. Both were calculated, but only
the latter was displayed.
Permutation tests are much faster in \code{protest()}. Instead
of calling repeatedly \code{procrustes()}, the goodness of fit
statistic is evaluated within the function.
\item \code{wcmdscale()} gained methods for \code{print},
\code{plot} etc. of the results. These methods are only used if
the full \code{wcmdscale} result is returned with, e.g., argument
\code{eig = TRUE}. The default is still to return only a matrix of
scores similarly as the standard \R function \code{cmdscale()},
and in that case the new methods are not used.
}
} % new features
} % end 2.0-6
\section{Changes in version 2.0-5}{
\subsection{BUG FIXES}{
\itemize{
\item \code{anova(<cca_object>, ...)} failed with
\code{by = "axis"} and \code{by = "term"}. The bug was reported by
Dr Sven Neulinger (Christian Albrecht University, Kiel, Germany).
\item \code{radlattice} did not honour argument \code{BIC = TRUE},
but always displayed AIC.
}
} % bug fixes
\subsection{NEW FUNCTIONS}{
\itemize{
\item Most \pkg{vegan} functions with permutation tests have now a
\code{density} method that can be used to find empirical
probability distributions of permutations. There is a new
\code{plot} method for these functions that displays both the
density and the observed statistic. The \code{density} function is
available for \code{adonis}, \code{anosim}, \code{mantel},
\code{mantel.partial}, \code{mrpp}, \code{permutest.cca} and
\code{procrustes}.
Function \code{adonis} can return several statistics, and it has
now a \code{densityplot} method (based on \pkg{lattice}).
Function \code{oecosimu} already had \code{density} and
\code{densityplot}, but they are now similar to other \pkg{vegan}
methods, and also work with \code{adipart}, \code{hiersimu} and
\code{multipart}.
\item \code{radfit} functions got a \code{predict} method that
also accepts arguments \code{newdata} and \code{total} for new
ranks and site totals for prediction. The functions can also
interpolate to non-integer \dQuote{ranks}, and in some models
also extrapolate.
}
} % new functions
\subsection{NEW FEATURES}{
\itemize{
\item Labels can now be set in the \code{plot} of \code{envfit}
results. The labels must be given in the same order that the
function uses internally, and new support function \code{labels}
can be used to display the default labels in their correct order.
\item Mantel tests (functions \code{mantel} and
\code{mantel.partial}) gained argument \code{na.rm} which can be
used to remove missing values. This options should be used with
care: Permutation tests can be biased if the missing values were
originally in matching or fixed positions.
\item \code{radfit} results can be consistently accessed with
the same methods whether they were a single model for a single
site, all models for a single site or all models for all sites
in the data. All functions now have methods \code{AIC},
\code{coef}, \code{deviance}, \code{logLik}, \code{fitted},
\code{predict} and \code{residuals}.
}
} % new features
\subsection{INSTALLATION AND BUILDING}{
\itemize{
\item Building of \pkg{vegan} vignettes failed with the latest
version of LaTeX (TeXLive 2012).
\item \R{} versions later than 2.15-1 (including development
version) report warnings and errors when installing and checking
\pkg{vegan}, and you must upgrade \pkg{vegan} to this version.
The warnings concern functions \code{cIndexKM} and
\code{betadisper}, and the error occurs in \code{betadisper}.
These errors and warnings were triggered by internal changes in
\R.
}
} % installation and building
} % version 2.0-5
\section{Changes in version 2.0-4}{
\subsection{BUG FIXES}{
\itemize{
\item \code{adipart} assumed constant gamma diversity in
simulations when assessing the \eqn{P}-value. This could give
biased results if the null model produces variable gamma
diversities and option \code{weights = "prop"} is used. The
default null model (\code{"r2dtable"}) and the default option
(\code{weights = "unif"}) were analysed correctly.
\item \code{anova(<prc-object>, by = "axis")} and other
\code{by} cases failed due to \file{NAMESPACE} issues.
\item \code{clamtest} wrongly used frequencies instead of the
counts when calculating sample coverage. No detectable
differences were produced when rerunning examples from Chazdon
et al. 2011 and \pkg{vegan} help page.
\item \code{envfit} failed with unused factor levels.
\item \code{predict} for \code{cca} results with
\code{type = "response"} or \code{type = "working"} failed with
\code{newdata} if the number of rows did not match with the
original data. Now the \code{newdata} is ignored if it has a
wrong number of rows. The number of rows must match because
the results in \code{cca} must be weighted by original row
totals. The problem did not concern \code{rda} or
\code{capscale} results which do not need row weights.
Reported by Glenn De'ath.
}
}% end bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item Functions for diversity partitioning (\code{adipart},
\code{hiersimu} and \code{multipart}) have now \code{formula}
and \code{default} methods. The \code{formula} method is
identical to the previous functions, but the \code{default}
method can take two matrices as input.
Functions \code{adipart} and \code{multipart} can be used for
fast and easy overall partitioning to alpha, beta and gamma
diversities by omitting the argument describing the hierarchy.
\item The method in \code{betadisper} is biased with small
sample sizes. The effects of the bias are strongest with
unequal sample sizes. A bias adjusted version was developed by
Adrian Stier and Ben Bolker, and can be invoked with argument
\code{bias.adjust} (defaults to \code{FALSE}).
\item \code{bioenv} accepts dissimilarities (or square matrices
that can be interpreted as dissimilarities) as an alternative to
community data. This allows using other dissimilarities than
those available in \code{vegdist}.
\item \code{plot} function for \code{envfit} results gained new
argument \code{bg} that can be used to set background colour for
plotted labels.
\item \code{msoplot} is more configurable, and allows, for
instance, setting y-axis limits.
\item Hulls and ellipses are now filled using semitransparent
colours in \code{ordihull} and \code{ordiellipse}, and the
user can set the degree of transparency with a new argument
\code{alpha}. The filled shapes are used when these functions
are called with argument \code{draw = "polygon"}. Function
\code{ordihull} puts labels (with argument \code{label = TRUE})
now in the real polygon centre.
\item \code{ordiplot3d} returns function \code{envfit.convert}
and the projected location of the \code{origin}. Together
these can be used to add \code{envfit} results to existing
\code{ordiplot3d} plots.
Equal aspect ratio cannot be set exactly in \code{ordiplot3d}
because underlying core routines do not allow this. Now
\code{ordiplot3d} sets equal axis ranges, and the documents
urge users to verify that the aspect ratio is reasonably equal
and the graph looks like a cube. If the problems cannot be
solved in the future, \code{ordiplot3d} may be removed from
next releases of \pkg{vegan}.
\item Function \code{ordipointlabel} gained argument to
\code{select} only some of the items for plotting. The
argument can be used only with one set of points.
}
} % end new features
}%end version 2.0-4
\section{Changes in version 2.0-3}{
\subsection{NEW FUNCTIONS}{
\itemize{
\item Added new nestedness functions \code{nestedbetasor} and
\code{nestedbetajac} that implement multiple-site dissimilarity
indices and their decomposition into turnover and nestedness
components following Baselga (\emph{Global Ecology and
Biogeography} 19, 134--143; 2010).
\item Added function \code{rarecurve} to draw rarefaction curves
for each row (sampling unit) of the input data, optionally with
lines showing rarefied species richness with given sample size
for each curve.
\item Added function \code{simper} that implements
\dQuote{similarity percentages} of Clarke (\emph{Australian
Journal of Ecology} 18, 117--143; 1993). The method compares
two or more groups and decomposes the average between-group
Bray-Curtis dissimilarity index to contributions by individual
species. The code was developed in
\href{https://github.com/jarioksa/vegan}{GitHub}
by Eduard Szöcs (Uni Landau, Germany).
}
} % end new functions
\subsection{BUG FIXES}{
\itemize{
\item \code{betadisper()} failed when the \code{groups} was a
factor with empty levels.
\item Some constrained ordination methods and their support
functions are more robust in border cases (completely aliased
      effects, saturated models, user requests for non-existing scores
etc). Concerns \code{capscale}, \code{ordistep}, \code{varpart},
\code{plot} function for constrained ordination, and
\code{anova(<cca.object>, by = "margin")}.
\item The \code{scores} function for \code{monoMDS} did not
honour \code{choices} argument and hence dimensions could not be
chosen in \code{plot}.
\item The default \code{scores} method failed if the number of
requested axes was higher than the ordination object had. This
was reported as an error in \code{ordiplot} in
\href{https://stat.ethz.ch/pipermail/r-sig-ecology/2012-February/002768.html}{R-sig-ecology} mailing list.
}
} % end bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{metaMDS} argument \code{noshare = 0} is now
regarded as a numeric threshold that always triggers extended
dissimilarities (\code{stepacross}), instead of being treated
as synonymous with \code{noshare = FALSE} which always
suppresses extended dissimilarities.
\item Nestedness discrepancy index \code{nesteddisc} gained a
new argument that allows user to set the number of iterations
in optimizing the index.
\item \code{oecosimu} displays the mean of simulations and
describes alternative hypothesis more clearly in the printed
output.
\item Implemented adjusted \eqn{R^2}{R-squared} for partial
RDA. For partial model \code{rda(Y ~ X1 + Condition(X2))} this
is the same as the component \code{[a] = X1|X2} in variance
partition in \code{varpart} and describes the marginal (unique)
effect of constraining term to adjusted \eqn{R^2}{R-squared}.
\item Added Cao dissimilarity (CYd) as a new dissimilarity
method in \code{vegdist} following Cao et al., \emph{Water
Envir Res} 69, 95--106 (1997). The index should be good for
data with high beta diversity and variable sampling
intensity. Thanks to consultation to Yong Cao (Univ Illinois,
USA).
}
} % end new features
} % end version 2.0-3
\section{Changes in version 2.0-2}{
\subsection{BUG FIXES}{
\itemize{
\item Function \code{capscale} failed if constrained component
had zero rank. This happened most likely in partial models when
the conditions aliased constraints. The problem was observed in
\code{anova(..., by ="margin")} which uses partial models to
      analyse the marginal effects, and was reported in an email
message to
\href{https://stat.ethz.ch/pipermail/r-help/2011-October/293077.html}{R-News
mailing list}.
\item \code{stressplot} and \code{goodness} sometimes failed when
\code{metaMDS} was based on \code{isoMDS} (\pkg{MASS} package)
because \code{metaMDSdist} did not use the same defaults for
step-across (extended) dissimilarities as
\code{metaMDS(..., engine = "isoMDS")}. The change of defaults can
also influence triggering of step-across in
\code{capscale(..., metaMDSdist = TRUE)}.
\item \code{adonis} contained a minor bug resulting from
incomplete implementation of a speed-up that did not affect the
results. In fixing this bug, a further bug was identified in
transposing the hat matrices. This second bug was only active
following fixing of the first bug. In fixing both bugs, a
speed-up in the internal f.test() function is fully
realised. Reported by Nicholas Lewin-Koh.
}
} % end bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{ordiarrows} and \code{ordisegments} gained argument
\code{order.by} that gives a variable to sort points within
\code{groups}. Earlier the points were assumed to be in order.
\item Function \code{ordispider} invisibly returns the
coordinates to which the points were connected. Typically these
are class centroids of each point, but for constrained ordination
with no \code{groups} they are the LC scores.
}
} %end new features
} %end version 2.0-2
\section{Changes in version 2.0-1}{
\subsection{NEW FUNCTIONS}{
\itemize{
\item \code{clamtest}: new function to classify species as
generalists and specialists in two distinct habitats (CLAM test of
Chazdon et al., \emph{Ecology} 92, 1332--1343; 2011). The test is
based on multinomial distribution of individuals in two habitat
types or sampling units, and it is applicable only to count data
with no over-dispersion.
\item \code{as.preston} gained \code{plot} and \code{lines}
methods, and \code{as.fisher} gained \code{plot} method (which
also can add items to existing plots). These are similar as
\code{plot} and \code{lines} for \code{prestonfit} and
\code{fisherfit}, but display only data without the fitted lines.
\item \code{raupcrick}: new function to implement Raup-Crick
dissimilarity as a probability of number of co-occurring species
with occurrence probabilities proportional to species
frequencies. \pkg{Vegan} has Raup-Crick index as a choice in
\code{vegdist}, but that uses equal sampling probabilities for
species and analytic equations. The new \code{raupcrick}
function uses simulation with \code{oecosimu}. The function
follows Chase et al. (2011) \emph{Ecosphere} 2:art24
[\href{http://www.esajournals.org/doi/abs/10.1890/ES10-00117.1}{doi:10.1890/ES10-00117.1}],
and was developed with the consultation of Brian Inouye.
}
} % end NEW FUNCTIONS
\subsection{BUG FIXES}{
\itemize{
\item Function \code{meandist} could scramble items and give
wrong results, especially when the \code{grouping} was
numerical. The problem was reported by Dr Miguel Alvarez
(Univ. Bonn).
\item \code{metaMDS} did not reset \code{tries} when a new model
was started with a \code{previous.best} solution from a different
model.
\item Function \code{permatswap} for community null models using
quantitative swap never swapped items in a \eqn{2 \times 2}{2 by 2}
submatrix if all cells were filled.
\item The result from \code{permutest.cca} could not be
\code{update}d because of a \file{NAMESPACE} issue.
\item \R 2.14.0 changed so that it does not accept using
\code{sd()} function for matrices (which was the behaviour at
least since \R 1.0-0), and several \pkg{vegan} functions were
changed to adapt to this change (\code{rda}, \code{capscale},
\code{simulate} methods for \code{rda}, \code{cca} and
\code{capscale}). The change in \R 2.14.0 does not influence the
results but you probably wish to upgrade \pkg{vegan} to avoid
annoying warnings.
}
} % end BUG FIXES
\subsection{ANALYSES}{
\itemize{
\item \code{nesteddisc} is slacker and hence faster when trying
to optimize the statistic for tied column frequencies. Tracing
showed that in most cases an improved ordering was found rather
early in tries, and the results are equally good in most cases.
}
} % end ANALYSES
} % end version 2.0-1
\section{Changes in version 2.0-0}{
\subsection{GENERAL}{
\itemize{
\item Peter Minchin joins the \pkg{vegan} team.
\item \pkg{vegan} implements standard \R \file{NAMESPACE}. In
general, \code{S3} methods are not exported which means that you
cannot directly use or see contents of functions like
\code{cca.default}, \code{plot.cca} or \code{anova.ccabyterm}. To
use these functions you should rely on \R delegation and simply
use \code{cca} and for its result objects use \code{plot} and
\code{anova} without suffix \code{.cca}. To see the contents of
the function you can use \code{:::}, such as
\code{vegan:::cca.default}. This change may break packages,
documents or scripts that rely on non-exported names.
\item \pkg{vegan} depends on the \pkg{permute} package. This
package provides powerful tools for restricted permutation
schemes. All \pkg{vegan} permutation will gradually move to use
\pkg{permute}, but currently only \code{betadisper} uses the new
feature.
}
} % end GENERAL
\subsection{NEW FUNCTIONS}{
\itemize{
\item \code{monoMDS}: a new function for non-metric
multidimensional scaling (NMDS). This function replaces
\code{MASS::isoMDS} as the default method in
\code{metaMDS}. Major advantages of \code{monoMDS} are that it
has \sQuote{weak} (\sQuote{primary}) tie treatment which means
that it can split tied observed dissimilarities. \sQuote{Weak}
tie treatment improves ordination of heterogeneous data sets,
because maximum dissimilarities of \eqn{1} can be split. In
addition to global NMDS, \code{monoMDS} can perform local and
hybrid NMDS and metric MDS. It can also handle missing and zero
dissimilarities. Moreover, \code{monoMDS} is faster than
previous alternatives. The function uses \code{Fortran} code
written by Peter Minchin.
\item \code{MDSrotate} a new function to replace
\code{metaMDSrotate}. This function can rotate both \code{metaMDS}
and \code{monoMDS} results so that the first axis is parallel to
an environmental vector.
\item \code{eventstar} finds the minimum of the evenness profile
on the Tsallis entropy, and uses this to find the corresponding
values of diversity, evenness and numbers equivalent following
Mendes et al. (\emph{Ecography} 31, 450-456; 2008). The code was
contributed by Eduardo Ribeira Cunha and Heloisa Beatriz Antoniazi
Evangelista and adapted to \pkg{vegan} by Peter Solymos.
\item \code{fitspecaccum} fits non-linear regression models to
the species accumulation results from \code{specaccum}. The
function can use new self-starting species accumulation models
in \pkg{vegan} or other self-starting non-linear regression
models in \R. The function can fit Arrhenius, Gleason, Gitay,
Lomolino (in \pkg{vegan}), asymptotic, Gompertz,
Michaelis-Menten, logistic and Weibull (in base \R) models. The
function has \code{plot} and \code{predict} methods.
\item Self-starting non-linear species accumulation models
\code{SSarrhenius}, \code{SSgleason}, \code{SSgitay} and
\code{SSlomolino}. These can be used with \code{fitspecaccum} or
directly in non-linear regression with \code{nls}. These functions
were implemented because they were found good for species-area
models by Dengler (\emph{J. Biogeogr.} 36, 728-744; 2009).
}
} % end NEW FUNCTIONS
\subsection{NEW FEATURES}{
\itemize{
\item \code{adonis}, \code{anosim}, \code{meandist} and
\code{mrpp} warn on negative dissimilarities, and
\code{betadisper} refuses to analyse them. All these functions
expect dissimilarities, and giving something else (like
correlations) probably is a user error.
\item \code{betadisper} uses restricted permutation of the
\pkg{permute} package.
\item \code{metaMDS} uses \code{monoMDS} as its default ordination
engine. Function gains new argument \code{engine} that can be used
to alternatively select \code{MASS::isoMDS}. The default is not
to use \code{stepacross} with \code{monoMDS} because its
\sQuote{weak} tie treatment can cope with tied maximum
dissimilarities of one. However, \code{stepacross} is the default
with \code{isoMDS} because it cannot handle adequately these tied
maximum dissimilarities.
\item \code{specaccum} gained \code{predict} method which uses
either linear or spline interpolation for data between observed
points. Extrapolation is possible with spline interpolation, but
may make little sense.
\item \code{specpool} can handle missing values or empty factor
levels in the grouping factor \code{pool}. Now also checks that
the length of the \code{pool} matches the number of
observations.
}
} % end NEW FEATURES
\subsection{DEPRECATED AND DEFUNCT}{
\itemize{
\item \code{metaMDSrotate} was replaced with \code{MDSrotate}
that can also handle the results of \code{monoMDS}.
\item \code{permuted.index2} and other \dQuote{new} permutation
code was removed in favour of the \pkg{permute} package. This code
was not intended for normal use, but packages depending on that
code in \pkg{vegan} should instead depend on \pkg{permute}.
}
} % end DEPRECATED
\subsection{ANALYSES}{
\itemize{
\item \code{treeheight} uses much snappier code. The results
should be unchanged.
}
} % end ANALYSES
}% end VERSION 2.0
\name{NEWS}
\title{vegan News}
\encoding{UTF-8}
\section{Changes in version 2.5-0}{
\subsection{GENERAL}{
\itemize{
\item This is a major new release with changes all over the
package: Nearly 40\% of program files were changed from the
previous release. Please report regressions and other issues in
\href{https://github.com/vegandevs/vegan/issues/}{https://github.com/vegandevs/vegan/issues/}.
\item Compiled code is used much more extensively, and most
compiled functions use \code{.Call} interface. This gives smaller
memory footprint and is also faster. In wall clock time, the
greatest gains are in permutation tests for constrained ordination
methods (\code{anova.cca}) and binary null models
(\code{nullmodel}).
\item Constrained ordination functions (\code{cca}, \code{rda},
\code{dbrda}, \code{capscale}) are completely rewritten and share
most of their code. This makes them more consistent with each
other and more robust. The internal structure changed in
constrained ordination objects, and scripts may fail if they try
to access the result object directly. There never was a guarantee
for unchanged internal structure, and such scripts should be
changed and they should use the provided support functions to
access the result object (see documentation of \code{cca.object}
and github issue
\href{https://github.com/vegandevs/vegan/issues/262}{#262}). Some
support and analysis functions may no longer work with result
objects created in previous \pkg{vegan} versions. You should use
\code{update(old.result.object)} to fix these old result
objects. See github issues
\href{https://github.com/vegandevs/vegan/issues/218}{#218},
\href{https://github.com/vegandevs/vegan/issues/227}{#227}.
\item \pkg{vegan} includes some tests that are run when checking
the package installation. See github issues
\href{https://github.com/vegandevs/vegan/issues/181}{#181},
\href{https://github.com/vegandevs/vegan/issues/271}{#271}.
\item The informative messages (warnings, notes and error
messages) are cleaned and unified which also makes possible to
provide translations.
} %itemize
} % general
\subsection{NEW FUNCTIONS}{
\itemize{
\item \code{avgdist}: new function to find averaged
dissimilarities from several random rarefactions of
communities. Code by Geoffrey Hannigan. See github issues
\href{https://github.com/vegandevs/vegan/issues/242}{#242},
\href{https://github.com/vegandevs/vegan/issues/243}{#243},
\href{https://github.com/vegandevs/vegan/issues/246}{#246}.
\item \code{chaodist}: new function that is similar to
\code{designdist}, but uses Chao terms that are supposed to take
into account the effects of unseen species (Chao et al.,
\emph{Ecology Letters} \bold{8,} 148-159; 2005). Earlier we had
Jaccard-type Chao dissimilarity in \code{vegdist}, but the new
code allows defining any kind of Chao dissimilarity.
\item New functions to find influence statistics of constrained
ordination objects: \code{hatvalues}, \code{sigma},
\code{rstandard}, \code{rstudent}, \code{cooks.distance},
\code{SSD}, \code{vcov}, \code{df.residual}. Some of these could
be earlier found via \code{as.mlm} function which is
deprecated. See github issue
\href{https://github.com/vegandevs/vegan/issues/234}{#234}.
\item \code{boxplot} was added for \code{permustats} results to
display the (standardized) effect sizes.
\item \code{sppscores}: new function to add or replace species
scores in distance-based ordination such as \code{dbrda},
\code{capscale} and \code{metaMDS}. Earlier \code{dbrda} did not
have species scores, and species scores in \code{capscale} and
\code{metaMDS} were based on raw input data which may not be
consistent with the used dissimilarity measure. See github issue
\href{https://github.com/vegandevs/vegan/issues/254}{#254}.
\item \code{cutreeord}: new function that is similar to
\code{stats::cutree}, but numbers the cluster in the order they
appear in the dendrogram (left to right) instead of labelling them
in the order they appeared in the data.
\item \code{sipoo.map}: a new data set of locations and sizes of
the islands in the Sipoo archipelago bird data set \code{sipoo}.
} %itemize
} % new functions
\subsection{NEW FEATURES IN CONSTRAINED ORDINATION}{
\itemize{
\item The inertia of Correspondence Analysis (\code{cca}) is
called \dQuote{scaled Chi-square} instead of using a name of a
little known statistic.
\item Regression scores for constraints can be extracted and
plotted for constrained ordination methods. See github issue
\href{https://github.com/vegandevs/vegan/issues/226}{#226}.
\item Full model (\code{model = "full"}) is again enabled in
permutations tests for constrained ordination results in
\code{anova.cca} and \code{permutest.cca}.
\item \code{permutest.cca} gained a new option \code{by = "onedf"} to
perform tests by sequential one degree-of-freedom contrasts of
factors. This option is not (yet) enabled in \code{anova.cca}.
\item The permutation tests are more robust, and most scoping issues
should have been fixed.
\item Permutation tests use compiled C code and they are much
faster. See github issue
\href{https://github.com/vegandevs/vegan/issues/211}{#211}.
\item \code{permutest} printed layout is similar to \code{anova.cca}.
\item \code{eigenvals} gained a new argument \code{model} to
select either constrained or unconstrained scores. The old
argument \code{constrained} is deprecated. See github issue
\href{https://github.com/vegandevs/vegan/issues/207}{#207}.
\item Adjusted \eqn{R^2}{R-squared} is not calculated for
results of partial ordination, because it is unclear how this
should be done (function \code{RsquareAdj}).
\item \code{ordiresids} can display standardized and studentized
residuals.
\item Function to construct \code{model.frame} and
\code{model.matrix} for constrained ordination are more robust
and fail in fewer cases.
\item \code{goodness} and \code{inertcomp} for constrained
ordination result object no longer has an option to find
distances: only explained variation is available.
\item \code{inertcomp} gained argument \code{unity}. This will
give \dQuote{local contributions to beta-diversity} (LCBD) and
\dQuote{species contribution to beta-diversity} (SCBD) of Legendre
& De \enc{Cáceres}{Caceres} (\emph{Ecology Letters} \bold{16,}
951-963; 2012).
\item \code{goodness} is disabled for \code{capscale}.
\item \code{prc} gained argument \code{const} for general
scaling of results similarly as in \code{rda}.
\item \code{prc} uses regression scores for Canoco-compatibility.
} %itemae
} % constrained ordination
\subsection{NEW FEATURES IN NULL MODEL COMMUNITIES}{
\itemize{
\item The C code for swap-based binary null models was made more
efficients, and the models are all faster. Many of these
models selected a \eqn{2 \times 2}{2x2} submatrix, and for this
they generated four random numbers (two rows, two columns). Now we
skip selecting third or fourth random number if it is obvious that
the matrix cannot be swapped. Since most of time was used in
generating random numbers in these functions, and most candidates
were rejected, this speeds up functions. However, this also means
that random number sequences change from previous \pkg{vegan}
versions, and old binary model results cannot be replicated
exactly. See github issues
\href{https://github.com/vegandevs/vegan/issues/197}{#197},
\href{https://github.com/vegandevs/vegan/issues/255}{#255} for
details and timing.
\item Ecological null models (\code{nullmodel}, \code{simulate},
\code{make.commsim}, \code{oecosimu}) gained new null model
\code{"greedyqswap"} which can radically speed up quasi-swap
models with minimal risk of introducing bias.
\item Backtracking is written in C and it is much faster. However,
backtracking models are biased, and they are provided only
because they are classic legacy models.
} %itemize
} % nullmodel
\subsection{NEW FEATURES IN OTHER FUNCTIONS}{
\itemize{
\item \code{adonis2} gained a column of \eqn{R^2}{R-squared}
similarly as old \code{adonis}.
\item Great part of \R{} code for \code{decorana} is written in C
which makes it faster and reduces the memory footprint.
\item \code{metaMDS} results gained new \code{points} and
\code{text} methods.
\item \code{ordiplot} and other ordination \code{plot} functions
can be chained with their \code{points} and \code{text}
functions allowing the use of \pkg{magrittr} pipes. The
\code{points} and \code{text} functions gained argument to draw
arrows allowing their use in drawing biplots or adding vectors of
environmental variables with \code{ordiplot}. Since many
ordination \code{plot} methods return an invisible
\code{"ordiplot"} object, these \code{points} and \code{text}
methods also work with them. See github issue
\href{https://github.com/vegandevs/vegan/issues/257}{#257}.
\item Lattice graphics (\code{ordixyplot}) for ordination can
add polygons that enclose all points in the panel and
complete data.
\item \code{ordicluster} gained option to suppress drawing in
plots so that it can be more easily embedded in other functions
for calculations.
\item \code{as.rad} returns the index of included taxa as an
attribute.
\item Random rarefaction (function \code{rrarefy}) uses compiled
C code and is much faster.
\item \code{plot} of \code{specaccum} can draw short
horizontal bars to vertical error bars. See StackOverflow
question
\href{https://stackoverflow.com/questions/45378751}{45378751}.
\item \code{decostand} gained new standardization methods
\code{rank} and \code{rrank} which replace abundance values by
their ranks or relative ranks. See github issue
\href{https://github.com/vegandevs/vegan/issues/225}{#225}.
\item Clark dissimilarity was added to \code{vegdist} (this cannot
be calculated with \code{designdist}).
\item \code{designdist} evaluates minimum terms in compiled code,
and the function is faster than \code{vegdist} also for
dissimilarities using minimum terms. Although \code{designdist} is
usually faster than \code{vegdist}, it is numerically less stable,
in particular with large data sets.
\item \code{swan} passes \code{type} argument to \code{beals}.
\item \code{tabasco} can use traditional cover scale values from
function \code{coverscale}. Function \code{coverscale} can return
scaled values as integers for numerical analysis instead of
returning characters.
\item \code{varpart} can partition \eqn{\chi^2}{Chi-squared}
inertia of correspondence analysis with new argument
\code{chisquare}. The adjusted \eqn{R^2}{R-squared} is based on
permutation tests, and the replicate analysis will have random
variation.
} % itemize
} % new features
\subsection{BUG FIXES}{
\itemize{
\item Very long \code{Condition()} statements (> 500 characters)
failed in partial constrained ordination models (\code{cca},
\code{rda}, \code{dbrda}, \code{capscale}). The problem was
detected in StackOverflow question
\href{https://stackoverflow.com/questions/49249816}{49249816}.
\item Labels were not adjusted when arrows were rescaled in
\code{envfit} plots. See StackOverflow question
\href{https://stackoverflow.com/questions/49259747}{49259747}.
} % itemize
} % bug fixes
\subsection{DEPRECATED AND DEFUNCT}{
\itemize{
\item \code{as.mlm} function for constrained correspondence
analysis is deprecated in favour of new functions that directly
give the influence statistics. See github issue
\href{https://github.com/vegandevs/vegan/issues/234}{#234}.
\item \code{commsimulator} is now defunct: use \code{simulate}
for \code{nullmodel} objects.
\item \pkg{ade4} \code{cca} objects are no longer handled in
\pkg{vegan}: \pkg{ade4} has had no \code{cca} since version
1.7-8 (August 9, 2017).
} %itemize
} % deprecated & defunct
} % 2.5-0
\section{Changes in version 2.4-6}{
\subsection{INSTALLATION AND BUILDING}{
\itemize{
\item CRAN packages are no longer allowed to use FORTRAN input,
but \code{read.cep} function used FORTRAN format to read legacy
CEP and Canoco files. To avoid NOTEs and WARNINGs, the function
was re-written in \R. The new \code{read.cep} is less powerful and
more fragile, and can only read data in \dQuote{condensed} format,
and it can fail in several cases that were successful with the old
code. The old FORTRAN-based function is still available in CRAN
package
\href{https://CRAN.R-project.org/package=cepreader}{cepreader}.
See github issue
\href{https://github.com/vegandevs/vegan/issues/263}{#263}. The
\pkg{cepreader} package is developed in
\href{https://github.com/vegandevs/cepreader}{https://github.com/vegandevs/cepreader}.
} %itemize
} % general
\subsection{BUG FIXES}{
\itemize{
\item Some functions for rarefaction (\code{rrarefy}), species
abundance distribution (\code{preston}) and species pool
(\code{estimateR}) need exact integer data, but the test allowed
small fuzz. The functions worked correctly with original data, but
if data were transformed and then back-transformed, they would
pass the integer test with fuzz and give wrong results. For
instance, \code{sqrt(3)^2} would pass the test as 3, but was
interpreted strictly as integer 2. See github issue
\href{https://github.com/vegandevs/vegan/issues/259}{#259}.
} % itemize
} % bugs
\subsection{NEW FEATURES}{
\itemize{
\item \code{ordiresids} uses now weighted residuals for
\code{cca} results.
} %itemize
} % features
} % 2.4-6
\section{Changes in version 2.4-5}{
\subsection{BUG FIXES}{
\itemize{
\item Several \dQuote{Swap & Shuffle} null models generated wrong
number of initial matrices. Usually they generated too many, which
was not dangerous, but it was slow. However, random sequences will
change with this fix.
\item Lattice graphics for ordination (\code{ordixyplot} and
friends) colour the arrows by \code{groups} instead of randomly
mixed colours.
\item Information on constant or mirrored permutations was
omitted when reporting permutation tests (e.g., in \code{anova}
for constrained ordination).
} % itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{ordistep} has improved interpretation of
\code{scope}: if the lower scope is missing, the formula of the
starting solution is taken as the lower scope instead of using
an empty model. See Stackoverflow question
\href{https://stackoverflow.com/questions/46985029/}{46985029}.
\item \code{fitspecaccum} gained new support functions \code{nobs}
and \code{logLik} which allow better co-operation with other
packages and functions. See GitHub issue
\href{https://github.com/vegandevs/vegan/issues/250}{#250}.
\item The \dQuote{backtracking} null model for community
simulation is faster. However, \dQuote{backtracking} is a biased
legacy model that should not be used except in comparative
studies.
} %itemize
} % new features
} % 2.4-5
\section{Changes in version 2.4-4}{
\subsection{INSTALLATION AND BUILDING}{
\itemize{
\item \code{orditkplot} should no longer give warnings in CRAN
tests.
} %itemize
} % installatin and building
\subsection{BUG FIXES}{
\itemize{
\item \code{anova(..., by = "axis")} for constrained ordination
(\code{cca}, \code{rda}, \code{dbrda}) ignored partial terms in
\code{Condition()}.
\item \code{inertcomp} and \code{summary.cca} failed if the
constrained component was defined, but explained nothing and had
zero rank. See StackOverflow:
\href{https://stackoverflow.com/questions/43683699/}{R - Error
message in doing RDA analysis - vegan package}.
\item Labels are no longer cropped in the \code{meandist} plots.
} % itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item The significance tests for the axes of constrained
ordination use now forward testing strategy. More extensive
analysis indicated that the previous marginal tests were biased.
This is in conflict with Legendre, Oksanen & ter Braak,
\emph{Methods Ecol Evol} \strong{2,} 269--277 (2011) who regarded
marginal tests as unbiased.
\item Canberra distance in \code{vegdist} can now handle negative
input entries similarly as latest versions of \R.
} %itemize
} % new features
} % v2.4-4
\section{Changes in version 2.4-3}{
\subsection{INSTALLATION AND BUILDING}{
\itemize{
\item \pkg{vegan} registers native \bold{C} and \bold{Fortran}
routines. This avoids warnings in model checking, and may also
give a small gain in speed.
\item Future versions of \pkg{vegan} will deprecate and remove
elements \code{pCCA$Fit}, \code{CCA$Xbar}, and \code{CA$Xbar}
from \code{cca} result objects. This release provides a new
function \code{ordiYbar} which is able to construct these
elements both from the current and future releases. Scripts and
functions directly accessing these elements should switch to
\code{ordiYbar} for smooth transition.
} % itemize
} % installation
\subsection{BUG FIXES}{
\itemize{
\item \code{as.mlm} methods for constrained ordination include
zero intercept to give the correct residual degrees of freedom for
derived statistics.
\item \code{biplot} method for \code{rda} passes
\code{correlation} argument to the scaling algorithm.
\item Biplot scores were wrongly centred in \code{cca} which
caused a small error in their values.
\item Weighting and centring were corrected in \code{intersetcor}
and \code{spenvcor}. The fix can make a small difference when
analysing \code{cca} results.
Partial models were not correctly handled in \code{intersetcor}.
\item \code{envfit} and \code{ordisurf} functions failed when
applied to species scores.
\item Non-standard variable names can be used within
\code{Condition()} in partial ordination. Partial models are used
internally within several functions, and a problem was reported by
Albin Meyer (Univ Lorraine, Metz, France) in \code{ordiR2step}
when using a variable name that contained a hyphen (which was
wrongly interpreted as a minus sign in partial ordination).
\item \code{ordispider} did not pass graphical arguments when
used to show the difference of LC and WA scores in constrained
ordination.
\item \code{ordiR2step} uses only \code{forward} selection to
avoid several problems in model evaluation.
\item \code{tolerance} function could return \code{NaN} in some
cases when it should have returned \eqn{0}. Partial models were
not correctly analysed. Misleading (non-zero) tolerances were
sometimes given for species that occurred only once or sampling
units that had only one species.
} %itemize
} % bug fixes
} % 2.4-3
\section{Changes in version 2.4-2}{
\subsection{BUG FIXES}{
\itemize{
\item Permutation tests (\code{permutests}, \code{anova}) for the
first axis failed in constrained distance-based ordination
(\code{dbrda}, \code{capscale}). Now \code{capscale} will also
throw away negative eigenvalues when first eigenvalues are
tested. All permutation tests for the first axis are now
faster. The problem was reported by Cleo Tebby and the fixes are
discussed in GitHub issue
\href{https://github.com/vegandevs/vegan/issues/198}{#198} and
pull request
\href{https://github.com/vegandevs/vegan/pull/199}{#199}.
\item Some support functions for \code{dbrda} or \code{capscale}
gave results or some of their components in wrong scale. Fixes in
\code{stressplot}, \code{simulate}, \code{predict} and
\code{fitted} functions.
\item \code{intersetcor} did not use correct weighting for
\code{cca} and the results were slightly off.
\item \code{anova} and \code{permutest} failed when
\code{betadisper} was fitted with argument
\code{bias.adjust = TRUE}. Fixes Github issue
\href{https://github.com/vegandevs/vegan/issues/219}{#219}
reported by Ross Cunning, O'ahu, Hawaii.
\item \code{ordicluster} should return invisibly only the
coordinates of internal points (where clusters or points are
joined), but last rows contained coordinates of external points
(ordination scores of points).
\item The \code{cca} method of \code{tolerance} was returning
incorrect values for all but the second axis for sample
heterogeneities and species tolerances. See issue
\href{https://github.com/vegandevs/vegan/issues/216}{#216} for
details.
} %itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item Biplot scores are scaled similarly as site scores in
constrained ordination methods \code{cca}, \code{rda},
\code{capscale} and \code{dbrda}. Earlier they were unscaled (or
more technically, had equal scaling on all axes).
\item \code{tabasco} adds argument to \code{scale} the colours
by rows or columns in addition to the old equal scale over the
whole plot. New arguments \code{labRow} and \code{labCex} can be
used to change the column or row labels. Function also takes
care that only above-zero observations are coloured: earlier
tiny observed values were merged to zeros and were not distinct
in the plots.
\item Sequential null models are somewhat faster (up to
10\%). Non-sequential null models may be marginally faster. These
null models are generated by function \code{nullmodel} and also
used in \code{oecosimu}.
\item \code{vegdist} is much faster. It used to be clearly slower
than \code{stats::dist}, but now it is nearly equally fast for the
same dissimilarity measure.
\item Handling of \code{data=} in formula interface is more
robust, and messages on user errors are improved. This fixes
points raised in Github issue
\href{https://github.com/vegandevs/vegan/issues/200}{#200}.
\item The families and orders in \code{dune.taxon} were updated to
APG IV (\emph{Bot J Linnean Soc} \strong{181,} 1--20; 2016) and a
corresponding classification for higher levels (Chase & Reveal,
\emph{Bot J Linnean Soc} \strong{161,} 122-127; 2009).
} %itemize
} % features
} % 2.4-2
\section{Changes in version 2.4-1}{
\subsection{INSTALLATION}{
\itemize{
\item Fortran code was modernized to avoid warnings in latest
\R. The modernization should have no visible effect in
functions. Please report all suspect cases as
\href{https://github.com/vegandevs/vegan/issues/}{vegan issues}.
} %itemize
} % installation
\subsection{BUG FIXES}{
\itemize{
\item Several support functions for ordination methods failed if
the solution had only one ordination axis, for instance, if
there was only one constraining variable in CCA, RDA and
friends. This concerned \code{goodness} for constrained
ordination, \code{inertcomp}, \code{fitted} for
\code{capscale}, \code{stressplot} for RDA, CCA (GitHub issue
\href{https://github.com/vegandevs/vegan/issues/189}{#189}).
\item \code{goodness} for CCA & friends ignored \code{choices}
argument (GitHub issue
\href{https://github.com/vegandevs/vegan/issues/190}{#190}).
\item \code{goodness} function did not consider negative
eigenvalues of db-RDA (function \code{dbrda}).
\item Function \code{meandist} failed in some cases when one of
the groups had only one observation.
\item \code{linestack} could not handle expressions in
\code{labels}. This regression is discussed in GitHub issue
\href{https://github.com/vegandevs/vegan/issues/195}{#195}.
\item Nestedness measures \code{nestedbetajac} and
\code{nestedbetasor} expecting binary data did not cope with
quantitative input in evaluating Baselga's matrix-wide Jaccard
or Sørensen dissimilarity indices.
\item Function \code{as.mcmc} to cast \code{oecosimu} result to an
MCMC object (\pkg{coda} package) failed if there was only one
chain.
} % itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{diversity} function returns now \code{NA} if the
observation had \code{NA} values instead of returning
\code{0}. The function also checks the input and refuses to
handle data with negative values. GitHub issue
\href{https://github.com/vegandevs/vegan/issues/187}{#187}.
\item \code{rarefy} function will work more robustly in marginal
case when the user asks for only one individual which can only
be one species with zero variance.
\item Several functions are more robust if their factor arguments
contain missing values (\code{NA}): \code{betadisper},
\code{adipart}, \code{multipart}, \code{hiersimu}, \code{envfit}
and constrained ordination methods \code{cca}, \code{rda},
\code{capscale} and \code{dbrda}. GitHub issues
\href{https://github.com/vegandevs/vegan/issues/192}{#192} and
\href{https://github.com/vegandevs/vegan/issues/193}{#193}.
} % itemize
} % new features
} % 2.4-1
\section{Changes in version 2.4-0}{
\subsection{DISTANCE-BASED ANALYSIS}{
\itemize{
\item Distance-based methods were redesigned and made
consistent for ordination (\code{capscale}, new \code{dbrda}),
permutational ANOVA (\code{adonis}, new \code{adonis2}),
multivariate dispersion (\code{betadisper}) and variation
partitioning (\code{varpart}). These methods can produce
negative eigenvalues with several popular semimetric
dissimilarity indices, and they were not handled similarly by
all functions. Now all functions are designed after McArdle &
Anderson (\emph{Ecology} 82, 290--297; 2001).
\item \code{dbrda} is a new function for distance-based
Redundancy Analysis following McArdle & Anderson
(\emph{Ecology} 82, 290--297; 2001). With metric
dissimilarities, the function is equivalent to old
\code{capscale}, but negative eigenvalues of semimetric indices
are handled differently. In \code{dbrda} the dissimilarities
are decomposed directly into conditions, constraints and
residuals with their negative eigenvalues, and any of the
components can have imaginary dimensions. Function is mostly
compatible with \code{capscale} and other constrained
ordination methods, but full compatibility cannot be achieved
(see issue
\href{https://github.com/vegandevs/vegan/issues/140}{#140} in
Github). The function is based on the code by Pierre Legendre.
\item The old \code{capscale} function for constrained
ordination is still based only on real components, but the
total inertia of the components is assessed similarly as in
\code{dbrda}.
The significance tests will differ from the previous version,
but function \code{oldCapscale} will cast the \code{capscale}
result to a similar form as previously.
\item \code{adonis2} is a new function for permutational ANOVA
of dissimilarities. It is based on the same algorithm as the
\code{dbrda}. The function can perform overall tests of all
independent variables as well as sequential and marginal tests
of each term. The old \code{adonis} is still available, but it
can only perform sequential tests. With same settings,
\code{adonis} and \code{adonis2} give identical results (but
see Github issue
\href{https://github.com/vegandevs/vegan/issues/156}{#156} for
differences).
\item Function \code{varpart} can partition dissimilarities
using the same algorithm as \code{dbrda}.
\item Argument \code{sqrt.dist} takes square roots of
dissimilarities and these can change many popular semimetric
indices to metric distances in \code{capscale}, \code{dbrda},
\code{wcmdscale}, \code{adonis2}, \code{varpart} and
\code{betadisper} (issue
\href{https://github.com/vegandevs/vegan/issues/179}{#179} in
Github).
\item Lingoes and Cailliez adjustments change any dissimilarity
into metric distance in \code{capscale}, \code{dbrda},
\code{adonis2}, \code{varpart}, \code{betadisper} and
\code{wcmdscale}. Earlier we had only Cailliez adjustment in
\code{capscale} (issue
\href{https://github.com/vegandevs/vegan/issues/179}{#179} in
Github).
\item \code{RsquareAdj} works with \code{capscale} and
\code{dbrda} and this allows using \code{ordiR2step} in model
building.
} % itemize
} % distance-based
\subsection{BUG FIXES}{
\itemize{
\item \code{specaccum}: \code{plot} failed if line type
(\code{lty}) was given. Reported by Lila Nath Sharma (Univ
Bergen, Norway)
} %itemize
} %bug fixes
\subsection{NEW FUNCTIONS}{
\itemize{
\item \code{ordibar} is a new function to draw crosses of
standard deviations or standard errors in ordination diagrams
instead of corresponding ellipses.
\item Several \code{permustats} results can be combined with a
new \code{c()} function.
\item New function \code{smbind} binds together null models by
row, column or replication. If sequential models are bound
together, they can be treated as parallel chains in subsequent
analysis (e.g., after \code{as.mcmc}). See issue
\href{https://github.com/vegandevs/vegan/issues/164}{#164} in
Github.
} %itemize
} % new functions
\subsection{NEW FEATURES}{
\itemize{
\item Null model analysis was upgraded:
New \code{"curveball"} algorithm provides a fast null model with
fixed row and column sums for binary matrices after Strona et
al. (\emph{Nature Commun.} 5: 4114; 2014).
The \code{"quasiswap"} algorithm gained argument \code{thin}
which can reduce the bias of null models.
\code{"backtracking"} is now much faster, but it is still very
slow, and provided mainly to allow comparison against better and
faster methods.
Compiled code can now be interrupted in null model simulations.
\item \code{designdist} can now use beta diversity notation
(\code{gamma}, \code{alpha}) for easier definition of beta
diversity indices.
\item \code{metaMDS} has new iteration strategy: Argument
\code{try} gives the minimum number of random starts, and
\code{trymax} the maximum number. Earlier we only had
\code{try} which gave the maximum number, but now we run at
least \code{try} times. This reduces the risk of being trapped
in a local optimum (issue
\href{https://github.com/vegandevs/vegan/issues/154}{#154} in
Github).
If there were no convergent solutions, \code{metaMDS} will now
tabulate stopping criteria (if \code{trace = TRUE}). This can
help in deciding if any of the criteria should be made more
stringent or the number of iterations increased. The
documentation for \code{monoMDS} and \code{metaMDS} give more
detailed information on convergence criteria.
\item The \code{summary} of \code{permustats} prints now
\emph{P}-values, and the test direction (\code{alternative}) can
be changed.
The \code{qqmath} function of \code{permustats} can now plot
standardized statistics. This is a partial solution to issue
\href{https://github.com/vegandevs/vegan/issues/172}{#172} in
Github.
\item \code{MDSrotate} can rotate ordination to show maximum
separation of factor levels (classes) using linear discriminant
analysis (\code{lda} in \pkg{MASS} package).
\item \code{adipart}, \code{hiersimu} and \code{multipart}
expose argument \code{method} to specify the null model.
\item \code{RsquareAdj} works with \code{cca} and this allows
using \code{ordiR2step} in model building. The code was
developed by Dan McGlinn (issue
\href{https://github.com/vegandevs/vegan/issues/161}{#161} in
Github). However, \code{cca} still cannot be used in
\code{varpart}.
\item \code{ordiellipse} and \code{ordihull} allow setting
colours, line types and other graphical parameters.
The alpha channel can now be given also as a real number in 0 \dots 1
in addition to integer 0 \dots 255.
\item \code{ordiellipse} can now draw ellipsoid hulls that
enclose points in a group.
\item \code{ordicluster}, \code{ordisegments}, \code{ordispider}
and \code{lines} and \code{plot} functions for \code{isomap} and
\code{spantree} can use a mixture of colours of connected
      points. Their behaviour is similar to that in analogous functions
      in the \pkg{vegan3d} package.
\item \code{plot} of \code{betadisper} is more configurable. See
issues
\href{https://github.com/vegandevs/vegan/issues/128}{#128} and
\href{https://github.com/vegandevs/vegan/issues/166}{#166} in
Github for details.
\item \code{text} and \code{points} methods for
\code{orditkplot} respect stored graphical parameters.
\item Environmental data for the Barro Colorado Island forest
plots gained new variables from Harms et al. (\emph{J. Ecol.} 89,
947--959; 2001). Issue
\href{https://github.com/vegandevs/vegan/issues/178}{#178} in
Github.
} %itemize
} % features
\subsection{DEPRECATED AND DEFUNCT}{
\itemize{
\item Function \code{metaMDSrotate} was removed and replaced
with \code{MDSrotate}.
\item \code{density} and \code{densityplot} methods for
various \pkg{vegan} objects were deprecated and replaced with
\code{density} and \code{densityplot} for \code{permustats}.
Function \code{permustats} can extract the permutation and
simulation results of \pkg{vegan} result objects.
} %itemize
} % deprecated & defunct
} % v2.4-0
\section{Changes in version 2.3-5}{
\subsection{BUG FIXES}{
\itemize{
\item \code{eigenvals} fails with \code{prcomp} results in
\R-devel. The next version of \code{prcomp} will have an
argument to limit the number of eigenvalues shown
(\code{rank.}), and this breaks \code{eigenvals} in \pkg{vegan}.
\item \code{calibrate} failed for \code{cca} and friends if
\code{rank} was given.
} % itemise
} % bug fixes
} % v2.3-5
\section{Changes in version 2.3-4}{
\subsection{BUG FIXES}{
\itemize{
\item \code{betadiver} index \code{19} had wrong sign in one of
its terms.
\item \code{linestack} failed when the \code{labels} were given,
but the input scores had no names. Reported by Jeff Wood (ANU,
Canberra, ACT).
} %itemize
} % bug fixes
\subsection{DEPRECATED}{
\itemize{
\item \code{vegandocs} is deprecated. Current \R{} provides better
tools for seeing extra documentation (\code{news()} and
\code{browseVignettes()}).
} %itemize
} %deprecated
\subsection{VIGNETTES}{
\itemize{
\item All vignettes are built with standard \R{} tools and can be
browsed with \code{browseVignettes}. \code{FAQ-vegan} and
\code{partitioning} were only accessible with \code{vegandocs}
function.
} %itemize
} %vignettes
\subsection{BUILDING}{
\itemize{
\item Dependence on external software \code{texi2dvi} was
removed. Version 6.1 of \code{texi2dvi} was incompatible with \R{}
and prevented building \pkg{vegan}. The \code{FAQ-vegan} that was
earlier built with \code{texi2dvi} uses now \pkg{knitr}. Because
of this, \pkg{vegan} is now dependent on \R-3.0.0. Fixes issue
\href{https://github.com/vegandevs/vegan/issues/158}{#158} in
Github.
} %itemize
} % building
} % v2.3-4
\section{Changes in version 2.3-3}{
\subsection{BUG FIXES}{
\itemize{
\item \code{metaMDS} and \code{monoMDS} could fail if input
dissimilarities were huge: in the reported case they were of
magnitude 1E85. Fixes issue
\href{https://github.com/vegandevs/vegan/issues/152}{#152} in
Github.
\item Permutations failed if they were defined as \pkg{permute}
control structures in \code{estaccum}, \code{ordiareatest},
\code{renyiaccum} and \code{tsallisaccum}. Reported by Dan
Gafta (Cluj-Napoca) for \code{renyiaccum}.
\item \code{rarefy} gave false warnings if input was a vector
or a single sampling unit.
\item Some extrapolated richness indices in \code{specpool}
needed the number of doubletons (= number of species occurring
in two sampling units), and these failed when only one sampling
unit was supplied. The extrapolated richness cannot be
estimated from a single sampling unit, but now such cases are
handled smoothly instead of failing: observed non-extrapolated
richness with zero standard error will be reported. The issue
was reported in
\href{http://stackoverflow.com/questions/34027496/error-message-when-using-specpool-in-vegan-package}{StackOverflow}.
} %itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{treedist} and \code{treedive} refuse to handle
      trees with reversals, i.e., higher levels are more homogeneous
than lower levels. Function \code{treeheight} will estimate
their total height with absolute values of branch
lengths. Function \code{treedive} refuses to handle trees with
negative branch heights indicating negative
dissimilarities. Function \code{treedive} is faster.
\item \code{gdispweight} works when input data are in a matrix
instead of a data frame.
\item Input dissimilarities supplied in symmetric matrices or
data frames are more robustly recognized by \code{anosim},
\code{bioenv} and \code{mrpp}.
} %itemize
} %new features
} %v2.3-3
\section{Changes in version 2.3-2}{
\subsection{BUG FIXES}{
\itemize{
\item Printing details of a gridded permutation design would fail
when the grid was at the within-plot level.
\item \code{ordicluster} joined the branches at wrong coordinates
in some cases.
\item \code{ordiellipse} ignored weights when calculating standard
errors (\code{kind = "se"}). This influenced plots of \code{cca},
and also influenced \code{ordiareatest}.
} % itemize
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{adonis} and \code{capscale} functions recognize
symmetric square matrices as dissimilarities. Formerly
dissimilarities had to be given as \code{"dist"} objects such as
produced by \code{dist} or \code{vegdist} functions, and data
frames and matrices were regarded as observations x variables
data which could confuse users (e.g., issue
\href{https://github.com/vegandevs/vegan/issues/147}{#147}).
\item \code{mso} accepts \code{"dist"} objects for the distances
among locations as an alternative to coordinates of locations.
\item \code{text}, \code{points} and \code{lines} functions for
\code{procrustes} analysis gained new argument \code{truemean}
which allows adding \code{procrustes} items to the plots of
original analysis.
\item \code{rrarefy} returns observed non-rarefied communities
(with a warning) when users request subsamples that are larger
than the observed community instead of failing. Function
\code{drarefy} has been similar and returned sampling
probabilities of 1, but now it also issues a warning. Fixes issue
\href{https://github.com/vegandevs/vegan/issues/144}{#144} in
Github.
} % itemize
} % new features
} %v2.3-2
\section{Changes in version 2.3-1}{
\subsection{BUG FIXES}{
\itemize{
\item Permutation tests did not always correctly recognize ties
with the observed statistic and this could result in too low
\eqn{P}-values. This would happen in particular when all predictor
variables were factors (classes). The changes concern functions
\code{adonis}, \code{anosim}, \code{anova} and \code{permutest}
functions for \code{cca}, \code{rda} and \code{capscale},
\code{permutest} for \code{betadisper}, \code{envfit},
\code{mantel} and \code{mantel.partial}, \code{mrpp}, \code{mso},
\code{oecosimu}, \code{ordiareatest}, \code{protest} and
\code{simper}. This also fixes issues
\href{https://github.com/vegandevs/vegan/issues/120}{#120} and
\href{https://github.com/vegandevs/vegan/issues/132}{#132} in
GitHub.
\item Automated model building in constrained ordination
(\code{cca}, \code{rda}, \code{capscale}) with \code{step},
\code{ordistep} and \code{ordiR2step} could fail if there were
aliased candidate variables, or constraints that were completely
explained by other variables already in the model. This was a
regression introduced in \pkg{vegan} 2.2-0.
\item Constrained ordination methods \code{cca}, \code{rda} and
\code{capscale} treat character variables as factors in analysis,
but did not return their centroids for plotting.
\item Recovery of original data in \code{metaMDS} when computing
WA scores for species would fail if the expression supplied to
argument \code{comm} was long & got deparsed to multiple
strings. \code{metaMDSdist} now returns the (possibly modified)
data frame of community data \code{comm} as attribute
\code{"comm"} of the returned \code{dist} object. \code{metaMDS}
now uses this to compute the WA species scores for the NMDS. In
addition, the deparsed expression for \code{comm} is now robust to
long expressions. Reported by Richard Telford.
\item \code{metaMDS} and \code{monoMDS} rejected dissimilarities
with missing values.
\item Function \code{rarecurve} did not check its input and this
could cause confusing error messages. Now function checks that
input data are integers that can be interpreted as counts on
individuals and all sampling units have some species. Unchecked
bad inputs were the reason for problems reported in
\href{http://stackoverflow.com/questions/30856909/error-while-using-rarecurve-in-r}{Stackoverflow}.
}
} % bug fixes
\subsection{NEW FEATURES AND FUNCTIONS}{
\itemize{
\item Scaling of ordination axes in \code{cca}, \code{rda} and
\code{capscale} can now be expressed with descriptive strings
\code{"none"}, \code{"sites"}, \code{"species"} or
\code{"symmetric"} to tell which kind of scores should be scaled by
eigenvalues. These can be further modified with arguments
\code{hill} in \code{cca} and \code{correlation} in \code{rda}. The
old numeric scaling can still be used.
\item The permutation data can be extracted from \code{anova}
results of constrained ordination (\code{cca}, \code{rda},
\code{capscale}) and further analysed with \code{permustats}
function.
\item New data set \code{BCI.env} of site information for the Barro
Colorado Island tree community data. Most useful variables are the
UTM coordinates of sample plots. Other variables are constant or
nearly constant and of little use in normal analysis.
}
} % new features and functions
}
\section{Changes in version 2.3-0}{
\subsection{BUG FIXES}{
\itemize{
\item Constrained ordination functions \code{cca}, \code{rda} and
\code{capscale} are now more robust. Scoping of data set names and
variable names is much improved. This should fix numerous
long-standing problems, for instance those reported by Benedicte
Bachelot (in email) and Richard Telford (in Twitter), as well as
issues \href{https://github.com/vegandevs/vegan/issues/16}{#16}
and \href{https://github.com/vegandevs/vegan/issues/100}{#100} in
GitHub.
\item Ordination functions \code{cca} and \code{rda} silently
accepted dissimilarities as input although their analysis makes
no sense with these methods. Dissimilarities should be analysed
with distance-based redundancy analysis (\code{capscale}).
\item The variance of the conditional component was over-estimated
in \code{goodness} of \code{rda} results, and results were wrong
for partial RDA. The problems were reported in an
\href{https://stat.ethz.ch/pipermail/r-sig-ecology/2015-March/004936.html}{R-sig-ecology}
message by Christoph von Redwitz.
}
} % bug fixes
\subsection{WINDOWS}{
\itemize{
\item \code{orditkplot} did not add file type identifier to saved
graphics in Windows although that is required. The problem only
concerned Windows OS.
}
} % windows
\subsection{NEW FEATURES AND FUNCTIONS}{
\itemize{
\item \code{goodness} function for constrained ordination
(\code{cca}, \code{rda}, \code{capscale}) was redesigned. Function
gained argument \code{addprevious} to add the variation explained
by previous ordination components to axes when \code{statistic =
"explained"}. With this option, \code{model = "CCA"} will include
the variation explained by partialled-out conditions, and
\code{model = "CA"} will include the accumulated variation
explained by conditions and constraints. The former behaviour was
\code{addprevious = TRUE} for \code{model = "CCA"}, and
\code{addprevious = FALSE} for \code{model = "CA"}. The argument
will have no effect when \code{statistic = "distance"}, but this
will always show the residual distance after all previous
components. Formerly it displayed the residual distance only for
the currently analysed model.
\item Functions \code{ordiArrowMul} and \code{ordiArrowTextXY} are
exported and can be used in normal interactive sessions. These
      functions are used to scale a bunch of arrows to fit ordination
graphics, and formerly they were internal functions used within
other \pkg{vegan} functions.
\item \code{orditkplot} can export graphics in SVG format. SVG is
a vector graphics format which can be edited with several external
programs, such as Illustrator and Inkscape.
\item Rarefaction curve (\code{rarecurve}) and species
accumulation models (\code{specaccum}, \code{fitspecaccum})
gained new functions to estimate the slope of curve at given
location. Originally this was based on a response to an
\href{https://stat.ethz.ch/pipermail/r-sig-ecology/2015-May/005038.html}{R-SIG-ecology}
query. For rarefaction curves, the function is \code{rareslope},
and for species accumulation models it is \code{specslope}.
The functions are based on analytic equations, and can also be
evaluated at interpolated non-integer values. In
\code{specaccum} models the functions can be only evaluated for
analytic models \code{"exact"}, \code{"rarefaction"} and
\code{"coleman"}. With \code{"random"} and \code{"collector"}
methods you can only use finite differences
(\code{diff(fitted(<result.object>))}). Analytic functions for
slope are used for all non-linear regression models known to
\code{fitspecaccum}.
\item Species accumulation models (\code{specaccum}) and
      non-linear regression models for species accumulation
(\code{fitspecaccum}) work more consistently with weights. In
all cases, the models are defined using the number of sites as
independent variable, which with weights means that observations
can be non-integer numbers of virtual sites. The \code{predict}
models also use the number of sites with \code{newdata},
and for analytic models they can estimate the expected values
for non-integer number of sites, and for non-analytic randomized
or collector models they can interpolate on non-integer values.
\item \code{fitspecaccum} gained support functions \code{AIC}
and \code{deviance}.
\item The \code{varpart} plots of four-component models were
redesigned following Legendre, Borcard & Roberts \emph{Ecology}
93, 1234--1240 (2012), and they use now four ellipses instead of
three circles and two rectangles. The components are now labelled
in plots, and the circles and ellipses can be easily filled with
transparent background colour.
}
} % new features
} % v2.2-2
\section{Changes in version 2.2-1}{
\subsection{GENERAL}{
\itemize{
\item This is a maintenance release to avoid warning messages
caused by changes in CRAN repository. The namespace usage is also
more stringent to avoid warnings and notes in development versions
of \R.
}
}% end general
\subsection{INSTALLATION}{
\itemize{
\item \pkg{vegan} can be installed and loaded without \pkg{tcltk}
package. The \pkg{tcltk} package is needed in \code{orditkplot}
function for interactive editing of ordination graphics.
}
} % installation
\subsection{BUG FIXES}{
\itemize{
\item \code{ordisurf} failed if \pkg{gam} package was loaded due
to namespace issues: some support functions of \pkg{gam} were used
instead of \pkg{mgcv} functions.
\item \code{tolerance} function failed for unconstrained
correspondence analysis.
}
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{estimateR} uses a more exact variance formula for
bias-corrected Chao estimate of extrapolated number of
species. The new formula may be unpublished, but it was derived
following the guidelines of Chiu, Wang, Walther & Chao,
\emph{Biometrics} 70, 671--682 (2014),
\href{http://onlinelibrary.wiley.com/doi/10.1111/biom.12200/suppinfo}{online
supplementary material}.
\item Diversity accumulation functions \code{specaccum},
\code{renyiaccum}, \code{tsallisaccum}, \code{poolaccum} and
\code{estaccumR} use now \pkg{permute} package for permutations
of the order of sampling sites. Normally these functions only
need simple random permutation of sites, but restricted
permutation of the \pkg{permute} package and user-supplied
permutation matrices can be used.
\item \code{estaccumR} function can use parallel processing.
\item \code{linestack} accepts now expressions as labels. This
allows using mathematical symbols and formula given as
mathematical expressions.
}
} % new features
} % v2.2-1
\section{Changes in version 2.2-0}{
\subsection{GENERAL}{
\itemize{
\item Several \pkg{vegan} functions can now use parallel
processing for slow and repeating calculations. All these
functions have argument \code{parallel}. The argument can be an
integer giving the number of parallel processes. In unix-alikes
(Mac OS, Linux) this will launch \code{"multicore"} processing
      and in Windows it will set up \code{"snow"} clusters as described
in the documentation of the \pkg{parallel} package. If \code{option}
\code{"mc.cores"} is set to an integer > 1, this will be used to
automatically start parallel processing. Finally, the argument
can also be a previously set up \code{"snow"} cluster which will
be used both in Windows and in unix-alikes. \pkg{Vegan} vignette
      on Design decision explains the implementation (use
      \code{vegandocs("decission")}), and the \pkg{parallel} package has
      more extensive documentation on parallel processing in \R.
      The following functions use parallel processing in analysing
permutation statistics: \code{adonis}, \code{anosim},
\code{anova.cca} (and \code{permutest.cca}), \code{mantel} (and
\code{mantel.partial}), \code{mrpp}, \code{ordiareatest},
\code{permutest.betadisper} and \code{simper}. In addition,
\code{bioenv} can compare several candidate sets of models in
      parallel, \code{metaMDS} can launch several random starts in
parallel, and \code{oecosimu} can evaluate test statistics for
several null models in parallel.
\item All permutation tests are based on the \pkg{permute} package
which offers strong tools for restricted permutation. All these
functions have argument \code{permutations}. The default usage of
simple non-restricted permutations is achieved by giving a single
integer number. Restricted permutations can be defined using the
\code{how} function of the \pkg{permute} package. Finally, the
argument can be a permutation matrix where rows define
permutations. It is possible to use external or user constructed
permutations.
See \code{help(permutations)} for a brief introduction on
permutations in \pkg{vegan}, and \pkg{permute} package for the
      full documentation. The vignette of the \pkg{permute} package can
be read from \pkg{vegan} with command
\code{vegandocs("permutations")}.
The following functions use the \pkg{permute} package:
\code{CCorA}, \code{adonis}, \code{anosim}, \code{anova.cca} (plus
associated \code{permutest.cca}, \code{add1.cca},
\code{drop1.cca}, \code{ordistep}, \code{ordiR2step}),
\code{envfit} (plus associated \code{factorfit} and
\code{vectorfit}), \code{mantel} (and \code{mantel.partial}),
\code{mrpp}, \code{mso}, \code{ordiareatest},
\code{permutest.betadisper}, \code{protest} and \code{simper}.
\item Community null model generation has been completely
redesigned and rewritten. The communities are constructed with
new \code{nullmodel} function and defined in a low level
\code{commsim} function. The actual null models are generated
with a \code{simulate} function that builds an array of null
models. The new null models include a wide array of quantitative
models in addition to the old binary models, and users can plug
in their own generating functions. The basic tool invoking and
analysing null models is \code{oecosimu}. The null models are
often used only for the analysis of nestedness, but the
implementation in \code{oecosimu} allows analysing any
statistic, and null models are better seen as an alternative to
permutation tests.
} %end itemize
} % end general
\subsection{INSTALLATION}{
\itemize{
\item \pkg{vegan} package dependencies and namespace imports
were adapted to changes in \R, and no more trigger warnings and
notes in package tests.
\item Three-dimensional ordination graphics using
\pkg{scatterplot3d} for static plots and \pkg{rgl} for dynamic
plots were removed from \pkg{vegan} and moved to a companion
package \pkg{vegan3d}. The package is available in CRAN.
} %end itemize
} % end installation
\subsection{NEW FUNCTIONS}{
\itemize{
\item Function \code{dispweight} implements dispersion weighting
of Clarke et al. (\emph{Marine Ecology Progress Series}, 320,
11--27). In addition, we implemented a new method for
generalized dispersion weighting \code{gdispweight}. Both
methods downweight species that are significantly
over-dispersed.
\item New \code{hclust} support functions \code{reorder},
\code{rev} and \code{scores}. Functions \code{reorder} and
\code{rev} are similar as these functions for \code{dendrogram}
objects in base \R. However, \code{reorder} can use (and defaults
to) weighted mean. In weighted mean the node average is always the
mean of member leaves, whereas the \code{dendrogram} uses always
unweighted means of joined branches.
\item Function \code{ordiareatest} supplements \code{ordihull} and
\code{ordiellipse} and provides a randomization test for the
one-sided alternative hypothesis that convex hulls or ellipses in
two-dimensional ordination space have smaller areas than with
randomized groups.
\item Function \code{permustats} extracts and inspects permutation
results with support functions \code{summary}, \code{density},
\code{densityplot}, \code{qqnorm} and \code{qqmath}. The
\code{density} and \code{qqnorm} are standard \R{} tools that only
work with one statistic, and \code{densityplot} and \code{qqmath}
are \pkg{lattice} graphics that work with univariate and
multivariate statistics. The results of following functions can be
extracted: \code{anosim}, \code{adonis}, \code{mantel} (and
\code{mantel.partial}), \code{mrpp}, \code{oecosimu},
      \code{permutest.cca} (but not the corresponding \code{anova}
methods), \code{permutest.betadisper}, and \code{protest}.
\item \code{stressplot} functions display the ordination distances
at given number of dimensions against original distances. The
      method functions are similar to \code{stressplot} for
\code{metaMDS}, and always use the inherent distances of each
ordination method. The functions are available for the results
\code{capscale}, \code{cca}, \code{princomp}, \code{prcomp},
\code{rda}, and \code{wcmdscale}.
} % end itemize
} % end new functions
\subsection{BUG FIXES}{
\itemize{
\item \code{cascadeKM} of only one group will be \code{NA} instead
of a random value.
\item \code{ordiellipse} can handle points exactly on a line,
including only two points (with a warning).
\item plotting \code{radfit} results for several species failed if
any of the communities had no species or had only one species.
\item \code{RsquareAdj} for \code{capscale} with negative
eigenvalues will now report \code{NA} instead of using biased
method of \code{rda} results.
\item \code{simper} failed when a group had only a single member.
}% end itemize
} % end bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{anova.cca} functions were re-written to use the
\pkg{permute} package. Old results may not be exactly
reproduced, and models with missing data may fail in several
cases. There is a new option of analysing a sequence of models
against each other.
\item \code{simulate} functions for \code{cca} and \code{rda}
can return several simulations in a \code{nullmodel} compatible
object. The functions can produce simulations with correlated
errors (also for \code{capscale}) in parametric simulation with
Gaussian error.
\item \code{bioenv} can use Manhattan, Gower and Mahalanobis
distances in addition to the default Euclidean. New helper
function \code{bioenvdist} can extract the dissimilarities
applied in best model or any other model.
\item \code{metaMDS(..., trace = 2)} will show convergence
information with the default \code{monoMDS} engine.
\item Function \code{MDSrotate} can rotate a \eqn{k}-dimensional
ordination to \eqn{k-1} variables. When these variables are
correlated (like usually is the case), the vectors can also be
correlated to previously rotated dimensions, but will be
uncorrelated to all later ones.
\item \pkg{vegan} 2.0-10 changed the weighted \code{nestednodf}
so that weighted analysis of binary data was equivalent to
binary analysis. However, this broke the equivalence to the
original method. Now the function has an argument \code{wbinary}
to select the method of analysis. The problem was reported and a
fix submitted by Vanderlei Debastiani (Universidade Federal do
Rio Grande do Sul, Brasil).
\item \code{ordiellipse}, \code{ordihull} and \code{ordiellipse}
can handle missing values in \code{groups}.
\item \code{ordispider} can now use spatial medians instead of
means.
\item \code{rankindex} can use Manhattan, Gower and Mahalanobis
distance in addition to the default Euclidean.
\item User can set colours and line types in function
\code{rarecurve} for plotting rarefaction curves.
\item \code{spantree} gained a support function \code{as.hclust}
to change the minimum spanning tree into an \code{hclust} tree.
\item \code{fitspecaccum} can do weighted analysis. Gained
\code{lines} method.
\item Functions for extrapolated number of species or for the size
of species pool using Chao method were modified following Chiu et
al., \emph{Biometrics} 70, 671--682 (2014).
Incidence based \code{specpool} can now use (and defaults to)
small sample correction with number of sites as the sample
size. Function uses basic Chao extrapolation based on the ratio of
singletons and doubletons, but switches now to bias corrected Chao
extrapolation if there are no doubletons (species found
twice). The variance formula for bias corrected Chao was derived
following the supporting
\href{http://onlinelibrary.wiley.com/doi/10.1111/biom.12200/suppinfo}{online material}
and differs slightly from Chiu et al. (2014).
The \code{poolaccum} function was changed similarly, but the small
sample correction is used always.
The abundance based \code{estimateR} uses bias corrected Chao
extrapolation, but earlier it estimated its variance with classic
Chao model. Now we use the widespread
\href{http://viceroy.eeb.uconn.edu/EstimateS/EstimateSPages/EstSUsersGuide/EstimateSUsersGuide.htm#AppendixB}{approximate
equation} for variance.
With these changes these functions are more similar to
\href{http://viceroy.eeb.uconn.edu/EstimateS/EstimateSPages/EstSUsersGuide/EstimateSUsersGuide.htm#AppendixB}{EstimateS}.
\item \code{tabasco} uses now \code{reorder.hclust} for
\code{hclust} object for better ordering than previously when it
cast trees to \code{dendrogram} objects.
\item \code{treedive} and \code{treedist} default now to
\code{match.force = TRUE} and can be silenced with
\code{verbose = FALSE}.
\item \code{vegdist} gained Mahalanobis distance.
\item Nomenclature updated in plant community data with the help
of \pkg{Taxonstand} and \pkg{taxize} packages. The taxonomy of
the \code{dune} data was adapted to the same sources and APG
III. \code{varespec} and \code{dune} use 8-character names (4
from genus + 4 from species epithet). New data set on
phylogenetic distances for \code{dune} was extracted from Zanne
et al. (\emph{Nature} 506, 89--92; 2014).
\item User configurable plots for \code{rarecurve}.
} %end itemize
  } % end new features
\subsection{DEPRECATED AND DEFUNCT}{
\itemize{
\item \code{strata} are deprecated in permutations. It is still
accepted but will be phased out in next releases. Use \code{how}
of \pkg{permute} package.
\item \code{cca}, \code{rda} and \code{capscale} do not return
scores scaled by eigenvalues: use \code{scores} function to
extract scaled results.
\item \code{commsimulator} is deprecated. Replace
\code{commsimulator(x, method)} with
\code{simulate(nullmodel(x, method))}.
\item \code{density} and \code{densityplot} for permutation
results are deprecated: use \code{permustats} with its
\code{density} and \code{densityplot} method.
} %end itemize
} % end deprecated
} % end version 2.2-0
\section{Changes in version 2.0-10}{
\subsection{GENERAL}{
\itemize{
\item This version is adapted to the changes in \pkg{permute}
package version 0.8-0 and no more triggers NOTEs in package
checks. This release may be the last of the 2.0 series, and the
next \pkg{vegan} release is scheduled to be a major release with
newly designed \code{oecosimu} and community pattern simulation,
support for parallel processing, and full support of the
\pkg{permute} package. If you are interested in these
developments, you may try the development versions of
\pkg{vegan} in
\href{http://r-forge.r-project.org/projects/vegan/}{R-Forge} or
\href{https://github.com/jarioksa/vegan}{GitHub} and report the
problems and user experience to us. } } % end general
\subsection{BUG FIXES}{
\itemize{
\item \code{envfit} function assumed that all external variables
were either numeric or factors, and failed if they were, say,
character strings. Now only numeric variables are taken as
continuous vectors, and all other variables (character strings,
logical) are coerced to factors if possible. The function also
should work with degenerate data, like only one level of a
factor or a constant value of a continuous environmental
      variable. The ties were handled wrongly in assessing permutation
      \eqn{P}-values in \code{vectorfit}.
\item \code{nestednodf} with quantitative data was not
consistent with binary models, and the fill was wrongly
calculated with quantitative data.
\item \code{oecosimu} now correctly adapts displayed quantiles
of simulated values to the \code{alternative} test direction.
\item \code{renyiaccum} plotting failed if only one level of
diversity \code{scale} was used.
}
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item The Kempton and Taylor algorithm was found unreliable in
\code{fisherfit} and \code{fisher.alpha}, and now the estimation
of Fisher \eqn{\alpha}{alpha} is only based on the number of
species and the number of individuals. The estimation of
standard errors and profile confidence intervals also had to be
scrapped.
\item \code{renyiaccum}, \code{specaccum} and
\code{tsallisaccum} functions gained \code{subset} argument.
      \item \code{renyiaccum} can now add a \code{collector} curve
      to the analysis. The collector curve is the diversity
accumulation in the order of the sampling units. With an
interesting ordering or sampling units this allows comparing
actual species accumulations with the expected randomized
accumulation.
\item \code{specaccum} can now perform weighted accumulation
using the sampling effort as weights.
}
} % new features
} % end 2.0-10
\section{Changes in version 2.0-9}{
\itemize{
\item This version is released due to changes in programming
interface and testing procedures in \R{} 3.0.2. If you are using an
older version of \R, there is no need to upgrade \pkg{vegan}. There
are no new features nor bug fixes. The only user-visible changes
are in documentation and in output messages and formatting. Because
of \R{} changes, this version is dependent on \R{} version 2.14.0
or newer and on \pkg{lattice} package.
}
}
\section{Changes in version 2.0-8}{
\subsection{GENERAL}{
\itemize{
\item This is a maintenance release that fixes some issues
      raised by changes in the \R{} toolset for processing vignettes. At
      the same time we also fix some typographic issues in the vignettes.
}
} % general
\subsection{NEW FEATURES}{
\itemize{
\item \code{ordisurf} gained new arguments for more flexible
definition of fitted models to better utilize the
\pkg{mgcv}\code{::gam} function.
The linewidth of contours can
now be set with the argument \code{lwd}.
\item Labels to arrows are positioned in a better way in
\code{plot} functions for the results of \code{envfit},
\code{cca}, \code{rda} and \code{capscale}. The labels should no
longer overlap the arrow tips.
\item The setting test direction is clearer in \code{oecosimu}.
\item \code{ordipointlabel} gained a \code{plot} method that can
be used to replot the saved result.
}
} % new features
}
\section{Changes in version 2.0-7}{
\subsection{NEW FUNCTIONS}{
\itemize{
\item \code{tabasco()} is a new function for graphical display
of community data matrix. Technically it is an interface to \R
\code{heatmap}, but its use is closer to \pkg{vegan} function
\code{vegemite}. The function can reorder the community data
matrix similarly as \code{vegemite}, for instance, by ordination
results. Unlike \code{heatmap}, it only displays dendrograms if
supplied by the user, and it defaults to re-order the
dendrograms by correspondence analysis. Species are ordered to
match site ordering or like determined by the user.
}
} % new functions
\subsection{BUG FIXES}{
\itemize{
\item Function \code{fitspecaccum(..., model = "asymp")} fitted
logistic model instead of asymptotic model (or the same as
\code{model = "logis"}).
\item \code{nestedtemp()} failed with very sparse data (fill
\eqn{< 0.38}\%).
}
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item The \code{plot} function for constrained ordination
results (\code{cca}, \code{rda}, \code{capscale}) gained
argument \code{axis.bp} (defaults \code{TRUE}) which can be used
to suppress axis scale for biplot arrays.
\item Number of iterations in nonmetric multidimensional scaling
(NMDS) can be set with keyword \code{maxit} (defaults
\code{200}) in \code{metaMDS}.
}
} % new features
\subsection{DEPRECATED}{
\itemize{
\item The result objects of \code{cca}, \code{rda} and
\code{capscale} will no longer have scores \code{u.eig},
\code{v.eig} and \code{wa.eig} in the future versions of
\pkg{vegan}. This change does not influence normal usage,
because \pkg{vegan} functions do not need these items. However,
external scripts and packages may need changes in the future
versions of \pkg{vegan}.
}
} % deprecated
} % vegan 2.0-7
\section{Changes in version 2.0-6}{
\subsection{BUG FIXES}{
\itemize{
\item The species scores were scaled wrongly in
\code{capscale()}. They were scaled correctly only when Euclidean
distances were used, but usually \code{capscale()} is used with
non-Euclidean distances. Most graphics will change and should be
redone. The change of scaling mainly influences the spread of
species scores with respect to the site scores.
\item Function \code{clamtest()} failed to set the minimum
abundance threshold in some cases. In addition, the output was
wrong when some of the possible species groups were missing. Both
problems were reported by Richard Telford (Bergen, Norway).
\item Plotting an object fitted by \code{envfit()} would fail if
\code{p.max} was used and there were unused levels for one or
more factors. The unused levels could result from deletion of
observations with missing values or simply as the result of
supplying a subset of a larger data set to \code{envfit()}.
\item \code{multipart()} printed wrong information about the
analysis type (but did the analysis correctly). Reported by
Valerie Coudrain.
\item \code{oecosimu()} failed if its \code{nestedfun} returned a
data frame. A more fundamental fix will be in \pkg{vegan} 2.2-0,
where the structure of the \code{oecosimu()} result will change.
\item The plot of two-dimensional \code{procrustes()} solutions
      often drew the original axes at a wrong angle. The problem was
reported by Elizabeth Ottesen (MIT).
\item Function \code{treedive()} for functional or phylogenetic
diversity did not correctly match the species names between the
community data and species tree when the tree contained species
that did not occur in the data. Related function
\code{treedist()} for phylogenetic distances did not try to match
the names at all.
}
} % bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item The output of \code{capscale()} displays the value of the
additive constant when argument \code{add = TRUE} was used.
\item \code{fitted()} functions for \code{cca()}, \code{rda()} and
\code{capscale()} can now return conditioned (partial) component
of the response: Argument \code{model} gained a new alternative
\code{model = "pCCA"}.
\item \code{dispindmorisita()} output gained a new column for
Chi-squared based probabilities that the null hypothesis (random
distribution) is true.
\item \code{metaMDS()} and \code{monoMDS()} have new default
convergence criteria. Most importantly, scale factor of the
gradient (\code{sfgrmin}) is stricter. The former limit was too
slack with large data sets and iterations stopped early without
getting close to the solution. In addition, \code{scores()}
ignore now requests to dimensions beyond those calculated
instead of failing, and \code{scores()} for \code{metaMDS()}
results do not drop dimensions.
\item \code{msoplot()} gained \code{legend} argument for
positioning the legend.
\item Nestedness function \code{nestednodf()} gained a \code{plot}
method.
\item \code{ordiR2step()} gained new argument \code{R2scope}
(defaults \code{TRUE}) which can be used to turn off the criterion
of stopping when the adjusted \eqn{R^2}{R-squared} of the current
model exceeds that of the scope. This option allows model
building when the \code{scope} would be overdetermined (number of
predictors higher than number of observations).
\code{ordiR2step()} now handles partial redundancy analysis
(pRDA).
\item \code{orditorp()} gained argument \code{select} to select
the rows or columns of the results to display.
\item \code{protest()} prints the standardized residual statistic
\eqn{m_{12}^2}{squared m12} in addition to the squared Procrustes
correlation \eqn{R^2}{R-squared}. Both were calculated, but only
the latter was displayed.
Permutation tests are much faster in \code{protest()}. Instead
of calling repeatedly \code{procrustes()}, the goodness of fit
statistic is evaluated within the function.
\item \code{wcmdscale()} gained methods for \code{print},
\code{plot} etc. of the results. These methods are only used if
the full \code{wcmdscale} result is returned with, e.g., argument
\code{eig = TRUE}. The default is still to return only a matrix of
scores similarly as the standard \R function \code{cmdscale()},
and in that case the new methods are not used.
}
} % new features
} % end 2.0-6
\section{Changes in version 2.0-5}{
\subsection{BUG FIXES}{
\itemize{
\item \code{anova(<cca_object>, ...)} failed with
\code{by = "axis"} and \code{by = "term"}. The bug was reported by
Dr Sven Neulinger (Christian Albrecht University, Kiel, Germany).
\item \code{radlattice} did not honour argument \code{BIC = TRUE},
but always displayed AIC.
}
} % bug fixes
\subsection{NEW FUNCTIONS}{
\itemize{
\item Most \pkg{vegan} functions with permutation tests have now a
\code{density} method that can be used to find empirical
probability distributions of permutations. There is a new
\code{plot} method for these functions that displays both the
density and the observed statistic. The \code{density} function is
available for \code{adonis}, \code{anosim}, \code{mantel},
\code{mantel.partial}, \code{mrpp}, \code{permutest.cca} and
\code{procrustes}.
Function \code{adonis} can return several statistics, and it has
now a \code{densityplot} method (based on \pkg{lattice}).
Function \code{oecosimu} already had \code{density} and
\code{densityplot}, but they are now similar to other \pkg{vegan}
methods, and also work with \code{adipart}, \code{hiersimu} and
\code{multipart}.
\item \code{radfit} functions got a \code{predict} method that
also accepts arguments \code{newdata} and \code{total} for new
ranks and site totals for prediction. The functions can also
interpolate to non-integer \dQuote{ranks}, and in some models
also extrapolate.
}
} % new functions
\subsection{NEW FEATURES}{
\itemize{
\item Labels can now be set in the \code{plot} of \code{envfit}
results. The labels must be given in the same order that the
function uses internally, and new support function \code{labels}
can be used to display the default labels in their correct order.
\item Mantel tests (functions \code{mantel} and
\code{mantel.partial}) gained argument \code{na.rm} which can be
used to remove missing values. This options should be used with
care: Permutation tests can be biased if the missing values were
originally in matching or fixed positions.
\item \code{radfit} results can be consistently accessed with
the same methods whether they were a single model for a single
site, all models for a single site or all models for all sites
in the data. All functions now have methods \code{AIC},
\code{coef}, \code{deviance}, \code{logLik}, \code{fitted},
\code{predict} and \code{residuals}.
}
} % new features
\subsection{INSTALLATION AND BUILDING}{
\itemize{
\item Building of \pkg{vegan} vignettes failed with the latest
version of LaTeX (TeXLive 2012).
\item \R{} versions later than 2.15-1 (including development
version) report warnings and errors when installing and checking
\pkg{vegan}, and you must upgrade \pkg{vegan} to this version.
The warnings concern functions \code{cIndexKM} and
\code{betadisper}, and the error occurs in \code{betadisper}.
These errors and warnings were triggered by internal changes in
\R.
}
} % installation and building
} % version 2.0-5
\section{Changes in version 2.0-4}{
\subsection{BUG FIXES}{
\itemize{
\item \code{adipart} assumed constant gamma diversity in
simulations when assessing the \eqn{P}-value. This could give
biased results if the null model produces variable gamma
diversities and option \code{weights = "prop"} is used. The
default null model (\code{"r2dtable"}) and the default option
(\code{weights = "unif"}) were analysed correctly.
\item \code{anova(<prc-object>, by = "axis")} and other
\code{by} cases failed due to \file{NAMESPACE} issues.
\item \code{clamtest} wrongly used frequencies instead of the
counts when calculating sample coverage. No detectable
differences were produced when rerunning examples from Chazdon
et al. 2011 and \pkg{vegan} help page.
\item \code{envfit} failed with unused factor levels.
\item \code{predict} for \code{cca} results with
\code{type = "response"} or \code{type = "working"} failed with
\code{newdata} if the number of rows did not match with the
original data. Now the \code{newdata} is ignored if it has a
wrong number of rows. The number of rows must match because
the results in \code{cca} must be weighted by original row
totals. The problem did not concern \code{rda} or
\code{capscale} results which do not need row weights.
Reported by Glenn De'ath.
}
}% end bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item Functions for diversity partitioning (\code{adipart},
\code{hiersimu} and \code{multipart}) have now \code{formula}
and \code{default} methods. The \code{formula} method is
identical to the previous functions, but the \code{default}
method can take two matrices as input.
Functions \code{adipart} and \code{multipart} can be used for
fast and easy overall partitioning to alpha, beta and gamma
diversities by omitting the argument describing the hierarchy.
\item The method in \code{betadisper} is biased with small
sample sizes. The effects of the bias are strongest with
unequal sample sizes. A bias adjusted version was developed by
Adrian Stier and Ben Bolker, and can be invoked with argument
\code{bias.adjust} (defaults to \code{FALSE}).
\item \code{bioenv} accepts dissimilarities (or square matrices
that can be interpreted as dissimilarities) as an alternative to
community data. This allows using other dissimilarities than
those available in \code{vegdist}.
\item \code{plot} function for \code{envfit} results gained new
argument \code{bg} that can be used to set background colour for
plotted labels.
\item \code{msoplot} is more configurable, and allows, for
instance, setting y-axis limits.
\item Hulls and ellipses are now filled using semitransparent
colours in \code{ordihull} and \code{ordiellipse}, and the
user can set the degree of transparency with a new argument
\code{alpha}. The filled shapes are used when these functions
are called with argument \code{draw = "polygon"}. Function
\code{ordihull} puts labels (with argument \code{label = TRUE})
now in the real polygon centre.
\item \code{ordiplot3d} returns function \code{envfit.convert}
and the projected location of the \code{origin}. Together
these can be used to add \code{envfit} results to existing
\code{ordiplot3d} plots.
Equal aspect ratio cannot be set exactly in \code{ordiplot3d}
because underlying core routines do not allow this. Now
\code{ordiplot3d} sets equal axis ranges, and the documents
urge users to verify that the aspect ratio is reasonably equal
and the graph looks like a cube. If the problems cannot be
solved in the future, \code{ordiplot3d} may be removed from
next releases of \pkg{vegan}.
\item Function \code{ordipointlabel} gained argument to
\code{select} only some of the items for plotting. The
argument can be used only with one set of points.
}
} % end new features
}%end version 2.0-4
\section{Changes in version 2.0-3}{
\subsection{NEW FUNCTIONS}{
\itemize{
\item Added new nestedness functions \code{nestedbetasor} and
\code{nestedbetajac} that implement multiple-site dissimilarity
indices and their decomposition into turnover and nestedness
components following Baselga (\emph{Global Ecology and
Biogeography} 19, 134--143; 2010).
\item Added function \code{rarecurve} to draw rarefaction curves
for each row (sampling unit) of the input data, optionally with
lines showing rarefied species richness with given sample size
for each curve.
\item Added function \code{simper} that implements
\dQuote{similarity percentages} of Clarke (\emph{Australian
Journal of Ecology} 18, 117--143; 1993). The method compares
two or more groups and decomposes the average between-group
Bray-Curtis dissimilarity index to contributions by individual
species. The code was developed in
\href{https://github.com/jarioksa/vegan}{GitHub}
by Eduard Szöcs (Uni Landau, Germany).
}
} % end new functions
\subsection{BUG FIXES}{
\itemize{
\item \code{betadisper()} failed when the \code{groups} was a
factor with empty levels.
\item Some constrained ordination methods and their support
functions are more robust in border cases (completely aliased
      effects, saturated models, user requests for non-existing scores
etc). Concerns \code{capscale}, \code{ordistep}, \code{varpart},
\code{plot} function for constrained ordination, and
\code{anova(<cca.object>, by = "margin")}.
\item The \code{scores} function for \code{monoMDS} did not
honour \code{choices} argument and hence dimensions could not be
chosen in \code{plot}.
\item The default \code{scores} method failed if the number of
requested axes was higher than the ordination object had. This
was reported as an error in \code{ordiplot} in
\href{https://stat.ethz.ch/pipermail/r-sig-ecology/2012-February/002768.html}{R-sig-ecology} mailing list.
}
} % end bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{metaMDS} argument \code{noshare = 0} is now
regarded as a numeric threshold that always triggers extended
dissimilarities (\code{stepacross}), instead of being treated
as synonymous with \code{noshare = FALSE} which always
suppresses extended dissimilarities.
\item Nestedness discrepancy index \code{nesteddisc} gained a
new argument that allows user to set the number of iterations
in optimizing the index.
\item \code{oecosimu} displays the mean of simulations and
describes alternative hypothesis more clearly in the printed
output.
\item Implemented adjusted \eqn{R^2}{R-squared} for partial
RDA. For partial model \code{rda(Y ~ X1 + Condition(X2))} this
is the same as the component \code{[a] = X1|X2} in variance
partition in \code{varpart} and describes the marginal (unique)
effect of constraining term to adjusted \eqn{R^2}{R-squared}.
\item Added Cao dissimilarity (CYd) as a new dissimilarity
method in \code{vegdist} following Cao et al., \emph{Water
Envir Res} 69, 95--106 (1997). The index should be good for
data with high beta diversity and variable sampling
intensity. Thanks to consultation to Yong Cao (Univ Illinois,
USA).
}
} % end new features
} % end version 2.0-3
\section{Changes in version 2.0-2}{
\subsection{BUG FIXES}{
\itemize{
\item Function \code{capscale} failed if constrained component
had zero rank. This happened most likely in partial models when
the conditions aliased constraints. The problem was observed in
\code{anova(..., by ="margin")} which uses partial models to
      analyse the marginal effects, and was reported in an email
message to
\href{https://stat.ethz.ch/pipermail/r-help/2011-October/293077.html}{R-News
mailing list}.
\item \code{stressplot} and \code{goodness} sometimes failed when
\code{metaMDS} was based on \code{isoMDS} (\pkg{MASS} package)
because \code{metaMDSdist} did not use the same defaults for
step-across (extended) dissimilarities as
\code{metaMDS(..., engine = "isoMDS")}. The change of defaults can
also influence triggering of step-across in
\code{capscale(..., metaMDSdist = TRUE)}.
\item \code{adonis} contained a minor bug resulting from
incomplete implementation of a speed-up that did not affect the
results. In fixing this bug, a further bug was identified in
transposing the hat matrices. This second bug was only active
following fixing of the first bug. In fixing both bugs, a
speed-up in the internal f.test() function is fully
realised. Reported by Nicholas Lewin-Koh.
}
} % end bug fixes
\subsection{NEW FEATURES}{
\itemize{
\item \code{ordiarrows} and \code{ordisegments} gained argument
\code{order.by} that gives a variable to sort points within
\code{groups}. Earlier the points were assumed to be in order.
\item Function \code{ordispider} invisibly returns the
coordinates to which the points were connected. Typically these
are class centroids of each point, but for constrained ordination
with no \code{groups} they are the LC scores.
}
} %end new features
} %end version 2.0-2
\section{Changes in version 2.0-1}{
\subsection{NEW FUNCTIONS}{
\itemize{
\item \code{clamtest}: new function to classify species as
generalists and specialists in two distinct habitats (CLAM test of
Chazdon et al., \emph{Ecology} 92, 1332--1343; 2011). The test is
based on multinomial distribution of individuals in two habitat
types or sampling units, and it is applicable only to count data
with no over-dispersion.
\item \code{as.preston} gained \code{plot} and \code{lines}
methods, and \code{as.fisher} gained \code{plot} method (which
also can add items to existing plots). These are similar as
\code{plot} and \code{lines} for \code{prestonfit} and
\code{fisherfit}, but display only data without the fitted lines.
\item \code{raupcrick}: new function to implement Raup-Crick
dissimilarity as a probability of number of co-occurring species
with occurrence probabilities proportional to species
frequencies. \pkg{Vegan} has Raup-Crick index as a choice in
\code{vegdist}, but that uses equal sampling probabilities for
species and analytic equations. The new \code{raupcrick}
function uses simulation with \code{oecosimu}. The function
follows Chase et al. (2011) \emph{Ecosphere} 2:art24
[\href{http://www.esajournals.org/doi/abs/10.1890/ES10-00117.1}{doi:10.1890/ES10-00117.1}],
and was developed with the consultation of Brian Inouye.
}
} % end NEW FUNCTIONS
\subsection{BUG FIXES}{
\itemize{
\item Function \code{meandist} could scramble items and give
wrong results, especially when the \code{grouping} was
numerical. The problem was reported by Dr Miguel Alvarez
(Univ. Bonn).
\item \code{metaMDS} did not reset \code{tries} when a new model
was started with a \code{previous.best} solution from a different
model.
\item Function \code{permatswap} for community null models using
quantitative swap never swapped items in a \eqn{2 \times 2}{2 by 2}
submatrix if all cells were filled.
\item The result from \code{permutest.cca} could not be
\code{update}d because of a \file{NAMESPACE} issue.
\item \R 2.14.0 changed so that it does not accept using
\code{sd()} function for matrices (which was the behaviour at
least since \R 1.0-0), and several \pkg{vegan} functions were
changed to adapt to this change (\code{rda}, \code{capscale},
\code{simulate} methods for \code{rda}, \code{cca} and
\code{capscale}). The change in \R 2.14.0 does not influence the
results but you probably wish to upgrade \pkg{vegan} to avoid
annoying warnings.
}
} % end BUG FIXES
\subsection{ANALYSES}{
\itemize{
\item \code{nesteddisc} is slacker and hence faster when trying
to optimize the statistic for tied column frequencies. Tracing
showed that in most cases an improved ordering was found rather
early in tries, and the results are equally good in most cases.
}
} % end ANALYSES
} % end version 2.0-1
\section{Changes in version 2.0-0}{
\subsection{GENERAL}{
\itemize{
\item Peter Minchin joins the \pkg{vegan} team.
\item \pkg{vegan} implements standard \R \file{NAMESPACE}. In
general, \code{S3} methods are not exported which means that you
cannot directly use or see contents of functions like
\code{cca.default}, \code{plot.cca} or \code{anova.ccabyterm}. To
use these functions you should rely on \R delegation and simply
use \code{cca} and for its result objects use \code{plot} and
\code{anova} without suffix \code{.cca}. To see the contents of
the function you can use \code{:::}, such as
\code{vegan:::cca.default}. This change may break packages,
documents or scripts that rely on non-exported names.
\item \pkg{vegan} depends on the \pkg{permute} package. This
package provides powerful tools for restricted permutation
schemes. All \pkg{vegan} permutation will gradually move to use
\pkg{permute}, but currently only \code{betadisper} uses the new
feature.
}
} % end GENERAL
\subsection{NEW FUNCTIONS}{
\itemize{
\item \code{monoMDS}: a new function for non-metric
multidimensional scaling (NMDS). This function replaces
\code{MASS::isoMDS} as the default method in
\code{metaMDS}. Major advantages of \code{monoMDS} are that it
has \sQuote{weak} (\sQuote{primary}) tie treatment which means
that it can split tied observed dissimilarities. \sQuote{Weak}
tie treatment improves ordination of heterogeneous data sets,
because maximum dissimilarities of \eqn{1} can be split. In
addition to global NMDS, \code{monoMDS} can perform local and
hybrid NMDS and metric MDS. It can also handle missing and zero
dissimilarities. Moreover, \code{monoMDS} is faster than
previous alternatives. The function uses \code{Fortran} code
written by Peter Minchin.
\item \code{MDSrotate} a new function to replace
\code{metaMDSrotate}. This function can rotate both \code{metaMDS}
and \code{monoMDS} results so that the first axis is parallel to
an environmental vector.
\item \code{eventstar} finds the minimum of the evenness profile
on the Tsallis entropy, and uses this to find the corresponding
values of diversity, evenness and numbers equivalent following
Mendes et al. (\emph{Ecography} 31, 450-456; 2008). The code was
contributed by Eduardo Ribeira Cunha and Heloisa Beatriz Antoniazi
Evangelista and adapted to \pkg{vegan} by Peter Solymos.
\item \code{fitspecaccum} fits non-linear regression models to
the species accumulation results from \code{specaccum}. The
function can use new self-starting species accumulation models
in \pkg{vegan} or other self-starting non-linear regression
models in \R. The function can fit Arrhenius, Gleason, Gitay,
Lomolino (in \pkg{vegan}), asymptotic, Gompertz,
Michaelis-Menten, logistic and Weibull (in base \R) models. The
function has \code{plot} and \code{predict} methods.
\item Self-starting non-linear species accumulation models
\code{SSarrhenius}, \code{SSgleason}, \code{SSgitay} and
\code{SSlomolino}. These can be used with \code{fitspecaccum} or
directly in non-linear regression with \code{nls}. These functions
were implemented because they were found good for species-area
models by Dengler (\emph{J. Biogeogr.} 36, 728-744; 2009).
}
} % end NEW FUNCTIONS
\subsection{NEW FEATURES}{
\itemize{
\item \code{adonis}, \code{anosim}, \code{meandist} and
\code{mrpp} warn on negative dissimilarities, and
\code{betadisper} refuses to analyse them. All these functions
expect dissimilarities, and giving something else (like
correlations) probably is a user error.
\item \code{betadisper} uses restricted permutation of the
\pkg{permute} package.
\item \code{metaMDS} uses \code{monoMDS} as its default ordination
engine. Function gains new argument \code{engine} that can be used
to alternatively select \code{MASS::isoMDS}. The default is not
to use \code{stepacross} with \code{monoMDS} because its
\sQuote{weak} tie treatment can cope with tied maximum
dissimilarities of one. However, \code{stepacross} is the default
with \code{isoMDS} because it cannot handle adequately these tied
maximum dissimilarities.
\item \code{specaccum} gained \code{predict} method which uses
either linear or spline interpolation for data between observed
points. Extrapolation is possible with spline interpolation, but
may make little sense.
\item \code{specpool} can handle missing values or empty factor
levels in the grouping factor \code{pool}. Now also checks that
the length of the \code{pool} matches the number of
observations.
}
} % end NEW FEATURES
\subsection{DEPRECATED AND DEFUNCT}{
\itemize{
\item \code{metaMDSrotate} was replaced with \code{MDSrotate}
that can also handle the results of \code{monoMDS}.
\item \code{permuted.index2} and other \dQuote{new} permutation
code was removed in favour of the \pkg{permute} package. This code
was not intended for normal use, but packages depending on that
code in \pkg{vegan} should instead depend on \pkg{permute}.
}
} % end DEPRECATED
\subsection{ANALYSES}{
\itemize{
\item \code{treeheight} uses much snappier code. The results
should be unchanged.
}
} % end ANALYSES
}% end VERSION 2.0
|
# Toy example: lasso-penalized logistic regression predicting asthma from
# age, BMI percentile, and dummy-coded categorical covariates.
library(glmnet)  # was missing: the script calls glmnet() below and fails without it

age <- c(4, 8, 7, 12, 6, 9, 10, 14, 7)
gender <- as.factor(c(1, 0, 1, 1, 1, 0, 1, 0, 0))
bmi_p <- c(0.86, 0.45, 0.99, 0.84, 0.85, 0.67, 0.91, 0.29, 0.88)
m_edu <- as.factor(c(0, 1, 1, 2, 2, 3, 2, 0, 1))   # mother's education level
p_edu <- as.factor(c(0, 2, 2, 2, 2, 3, 2, 0, 0))   # father's education level
f_color <- as.factor(c("blue", "blue", "yellow", "red", "red", "yellow",
                       "yellow", "red", "yellow"))
asthma <- c(1, 1, 0, 1, 0, 0, 0, 1, 1)             # binary outcome

# Use model.matrix() to dummy-encode the categorical features; drop the
# intercept column ([, -1]) because glmnet fits its own intercept.
xfactors <- model.matrix(asthma ~ gender + m_edu + p_edu + f_color)[, -1]
x <- as.matrix(data.frame(age, bmi_p, xfactors))

# Note alpha = 1 for lasso only; values between 1 and 0 blend in the ridge
# penalty, down to alpha = 0 (ridge only).
glmmod <- glmnet(x, y = as.factor(asthma), alpha = 1, family = "binomial")

# Plot variable coefficients vs. shrinkage parameter lambda.
plot(glmmod, xvar = "lambda")
| /Lab1/dummy_coding.R | permissive | quartermaine/Introduction-to-Machine-Learning | R | false | false | 888 | r | age <- c(4, 8, 7, 12, 6, 9, 10, 14, 7)
gender <- as.factor(c(1, 0, 1, 1, 1, 0, 1, 0, 0))
bmi_p <- c(0.86, 0.45, 0.99, 0.84, 0.85, 0.67, 0.91, 0.29, 0.88)
m_edu <- as.factor(c(0, 1, 1, 2, 2, 3, 2, 0, 1))
p_edu <- as.factor(c(0, 2, 2, 2, 2, 3, 2, 0, 0))
f_color <- as.factor(c("blue", "blue", "yellow", "red", "red", "yellow",
"yellow", "red", "yellow"))
asthma <- c(1, 1, 0, 1, 0, 0, 0, 1, 1)
xfactors <- model.matrix(asthma ~ gender + m_edu + p_edu + f_color)[, -1] #we use model matrix to encode the categorical features
x<- as.matrix(data.frame(age, bmi_p, xfactors))
# Note alpha=1 for lasso only and can blend with ridge penalty down to
# alpha=0 ridge only.
glmmod <- glmnet(x, y=as.factor(asthma), alpha=1, family="binomial")
# Plot variable coefficients vs. shrinkage parameter lambda.
plot(glmmod, xvar="lambda")
|
library(ggplot2)

# Download and unpack the NEI/SCC data if it is not already present.
if (!file.exists("summarySCC_PM25.rds")) {
  fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
  download.file(fileURL, destfile = "dataset.zip", method = "curl")
  unzip("dataset.zip")
  unlink("dataset.zip")
}

# Read in data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

## Across the United States, how have emissions from coal combustion-related
## sources changed from 1999-2008?

# Merge NEI with SCC on the source classification code.
mergeData <- merge(x = NEI, y = SCC, by = "SCC")

# Isolate emissions related to coal combustion.
coal <- mergeData[grep("Coal", mergeData$SCC.Level.Four), ]
coalcomb <- coal[grep("Combustion", coal$SCC.Level.One), ]
agg <- aggregate(Emissions ~ year, coalcomb, sum)

# Build the plot and print() it explicitly: a top-level ggplot object is only
# auto-printed at the interactive prompt, so when this script is run via
# source()/Rscript nothing would be drawn and dev.copy() below would have
# nothing (or no open device) to copy from.
p <- ggplot(data = agg, aes(x = factor(year), y = Emissions/1000)) +
  geom_bar(stat = "identity", width = 0.6, fill = "gray50") +
  geom_text(aes(label = round(Emissions/1000, digits = 2), vjust = 1.5)) +
  ggtitle(expression("U.S. Coal Combustion " * PM[2.5] * " Emissions")) +
  xlab("Year") + ylab(expression(PM[2.5] * " Emissions (kilotons)"))
print(p)

# Save to png file
dev.copy(png, file = "plot4.png", height = 480, width = 480)
dev.off()
dev.off() | /plot4.R | no_license | gitcub/ExData_Project2 | R | false | false | 1,267 | r | library(ggplot2)
# download data if needed
if(!file.exists("summarySCC_PM25.rds")) {
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(fileURL, destfile = "dataset.zip", method = "curl")
unzip("dataset.zip")
unlink("dataset.zip")
}
# read in data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Across the United States, how have emissions from coal combustion-related
## sources changed from 1999–2008?
# merge NEI and subsetted SCC data
mergeData <- merge(x = NEI, y = SCC, by = "SCC")
# isolate emissions related to coal combustion
coal <- mergeData[grep("Coal", mergeData$SCC.Level.Four), ]
coalcomb <- coal[grep("Combustion", coal$SCC.Level.One), ]
agg <- aggregate(Emissions ~ year, coalcomb, sum)
# plot
ggplot(data = agg, aes(x = factor(year), y = Emissions/1000)) +
geom_bar(stat = "identity", width = 0.6, fill = "gray50") +
geom_text(aes(label = round(Emissions/1000, digits = 2), vjust = 1.5)) +
ggtitle(expression("U.S. Coal Combustion " * PM[2.5] * " Emissions")) +
xlab("Year") + ylab(expression(PM[2.5] * " Emissions (kilotons)"))
# save to png file
dev.copy(png, file = "plot4.png", height = 480, width = 480)
dev.off() |
library(ggplot2)
twitter_data = read.csv("twitter-fulldata.csv")
imdb_data = read.csv("imdb-fulldata.csv")
amazon_data = read.csv("amazon-fulldata.csv")
twitter_data_emb = twitter_data[twitter_data$EXTENSION != "nn" , ]
twitter_data_nn = twitter_data[twitter_data$EXTENSION != "emb" , ]
imdb_data_emb = imdb_data[imdb_data$EXTENSION != "nn" , ]
imdb_data_nn = imdb_data[imdb_data$EXTENSION != "emb" , ]
amazon_data_emb = amazon_data[amazon_data$EXTENSION != "nn" , ]
amazon_data_nn = amazon_data[amazon_data$EXTENSION != "emb" , ]
full = rbind(twitter_data, imdb_data, amazon_data)
full_nn = rbind(twitter_data_nn, imdb_data_nn)
full_emb = rbind(twitter_data_emb, imdb_data_emb, amazon_data_emb)
## FULL DATA PLOT (RTP-P)
ggplot(full, aes(x=P, y=RTP, color = D, shape = AT)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+ labs(y = "M", x = "P")+theme_bw()
## RTP - P CNN DATA
ggplot(full_nn, aes(x=P, y=RTP, color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+theme_bw()
ggplot(full_emb, aes(x=P, y=RTP, color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+ labs(y = "M", x = "P")+theme_bw()
## FULL DATA PLOT (SW-P)
ggplot(full, aes(x=P, y=SW, color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+ labs(y = "M", x = "P")+theme_bw()
## FULL DATA PLOT (ACC-P)
ggplot(full, aes(x=P, y=A,color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+theme_bw()
## CNN DATA PLOT (ACC-P)
ggplot(full_nn, aes(x=P, y=A, color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+theme_bw()
## DENSE DATA PLOT (ACC-P)
ggplot(full_emb, aes(x=P, y=A,color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+theme_bw()
| /src/analysis/visualizer_text.R | no_license | roger-creus/Which-Design-Decisions-in-AI-enabled-MobileApplications-Contribute-to-Greener-AI | R | false | false | 2,083 | r | library(ggplot2)
twitter_data = read.csv("twitter-fulldata.csv")
imdb_data = read.csv("imdb-fulldata.csv")
amazon_data = read.csv("amazon-fulldata.csv")
twitter_data_emb = twitter_data[twitter_data$EXTENSION != "nn" , ]
twitter_data_nn = twitter_data[twitter_data$EXTENSION != "emb" , ]
imdb_data_emb = imdb_data[imdb_data$EXTENSION != "nn" , ]
imdb_data_nn = imdb_data[imdb_data$EXTENSION != "emb" , ]
amazon_data_emb = amazon_data[amazon_data$EXTENSION != "nn" , ]
amazon_data_nn = amazon_data[amazon_data$EXTENSION != "emb" , ]
full = rbind(twitter_data, imdb_data, amazon_data)
full_nn = rbind(twitter_data_nn, imdb_data_nn)
full_emb = rbind(twitter_data_emb, imdb_data_emb, amazon_data_emb)
## FULL DATA PLOT (RTP-P)
ggplot(full, aes(x=P, y=RTP, color = D, shape = AT)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+ labs(y = "M", x = "P")+theme_bw()
## RTP - P CNN DATA
ggplot(full_nn, aes(x=P, y=RTP, color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+theme_bw()
ggplot(full_emb, aes(x=P, y=RTP, color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+ labs(y = "M", x = "P")+theme_bw()
## FULL DATA PLOT (SW-P)
ggplot(full, aes(x=P, y=SW, color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+ labs(y = "M", x = "P")+theme_bw()
## FULL DATA PLOT (ACC-P)
ggplot(full, aes(x=P, y=A,color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+theme_bw()
## CNN DATA PLOT (ACC-P)
ggplot(full_nn, aes(x=P, y=A, color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+theme_bw()
## DENSE DATA PLOT (ACC-P)
ggplot(full_emb, aes(x=P, y=A,color = D, shape = AT, size = 3)) +
geom_point(size = 4) +
geom_smooth(method=lm,formula= (y ~ x), se = FALSE, size = 1)+theme_bw()
|
library(Matrix)
source("formula_parsers.R")
# Fit a ridge-penalized polynomial regression.
#
# `formula` may contain special polynomial terms (extracted by the helpers
# sourced from formula_parsers.R); the remaining terms form the base linear
# model.  The ridge penalty is implemented by augmenting the design matrix
# with shrinkage pseudo-rows (see .poly_ridge_regression_design_matrix), so a
# plain lm.fit() on the augmented system yields the penalized coefficients.
poly_ridge_regression <- function(formula, data, lambda=1) {
  obj <- structure(list(), class="poly.ridge.lm")
  # Split the formula into plain terms vs. special polynomial terms.
  obj$base_formula <- formula(get_nonspecial_terms(formula))
  obj$response_name <- get_response_name(formula)
  obj$poly_terms <- parse_special_terms(get_special_terms(formula))
  # Augmented design matrix: data rows plus shrinkage rows per poly term.
  obj$train_matrix <- .poly_ridge_regression_design_matrix(
    obj$base_formula, obj$poly_terms, data, lambda
  )
  # Response padded with zeros, one per shrinkage pseudo-row.
  obj$train_response <- .poly_ridge_regression_response(
    data[, obj$response_name], obj$poly_terms
  )
  obj$lm <- lm.fit(obj$train_matrix, obj$train_response)
  # Dress the lm.fit result up as an "lm" object so summary()/print() work.
  obj$lm$terms <- terms(formula)
  class(obj$lm) <- "lm"
  obj
}
# S3 summary method: report the underlying (augmented) least-squares fit.
summary.poly.ridge.lm <- function(obj) {
  fit <- obj[["lm"]]
  summary(fit)
}
# Predict from a fitted polynomial ridge regression.
#
# The design-matrix builder appends ridge pseudo-rows below the data rows;
# those are only needed at fit time, so the matrix is rebuilt for `newdata`
# (the lambda value is irrelevant here) and only the first n_row rows are
# kept before multiplying by the fitted coefficients.
#
# NOTE(review): .make_poly_matrix() calls poly() on `newdata` itself, which
# recomputes the orthogonal polynomial basis from the new data instead of
# reusing the basis fitted on the training data -- predictions presumably
# drift when newdata's distribution differs from training; verify.
predict.poly.ridge.lm <- function(obj, newdata) {
  n_row <- nrow(newdata)
  dm <- .poly_ridge_regression_design_matrix(
    obj$base_formula, obj$poly_terms, newdata, lambda=1
  )
  # Drop the appended pseudo-rows.  (When n_row == 1 this collapses to a
  # vector, which %*% still treats as a row; drop = FALSE would be clearer.)
  dm <- dm[1:n_row, ]
  coefs <- obj$lm$coefficients
  dm %*% coefs
}
# Make a design matrix for polynomial ridge regression.
#
# Block layout, with one block pair appended per polynomial term:
#
#   [ base model matrix | poly basis (pmatrix) ]   <- data rows
#   [ zeros (bblock)    | shrinkage (rmatrix)  ]   <- ridge pseudo-rows
#
# sblock pads previously appended pseudo-rows with zeros so the matrix stays
# rectangular as terms accumulate.
.poly_ridge_regression_design_matrix <- function(base_formula, poly_terms, data, lambda) {
  base_matrix <- model.matrix(base_formula, data)
  # Iteratively append one (polynomial basis, shrinkage) block pair per term.
  # NOTE(review): if poly_terms were a non-NULL *empty* list, 1:n_poly would
  # iterate over c(1, 0) and fail; presumably parse_special_terms() returns
  # NULL in that case -- confirm (seq_along would be safer).
  if(!is.null(poly_terms)) {
    n_col_added_sofar <- 0
    n_poly <- length(poly_terms)
    for(i in 1:n_poly) {
      poly_term <- poly_terms[[i]]
      new_blocks <- .make_poly_blocks(
        poly_term, data, lambda,
        n_col_added_sofar=n_col_added_sofar,
        curr_ncol=ncol(base_matrix)
      )
      # Right-hand column block: data rows + zero padding for earlier terms.
      side_block <- rbind(new_blocks$pmatrix, new_blocks$sblock)
      # Bottom row block: zero padding + the diagonal shrinkage matrix.
      bottom_block <- cbind(new_blocks$bblock, new_blocks$rmatrix)
      base_matrix <- cbind(base_matrix, side_block)
      base_matrix <- rbind(base_matrix, bottom_block)
      n_col_added_sofar <- n_col_added_sofar + poly_term$cdeg
    }
  }
  # The appended pseudo-rows carry 0 in the intercept column; this resets the
  # whole column to 1.
  # NOTE(review): conventional ridge augmentation leaves the intercept entries
  # of the pseudo-rows at 0 (so the intercept stays unpenalized); forcing them
  # to 1 couples the intercept into the penalty -- confirm this is intended.
  base_matrix[, "(Intercept)"] <- 1
  base_matrix
}
# Pad the observed response with one zero per shrinkage pseudo-row so its
# length matches the augmented design matrix built by
# .poly_ridge_regression_design_matrix().
#
# Fix: the original looped `for(i in 1:n_poly)`, which iterates over c(1, 0)
# and fails when poly_terms is an empty (non-NULL) list; summing the degrees
# with vapply() handles that case and drops the manual accumulator.
.poly_ridge_regression_response <- function(raw_response, poly_terms) {
  if(is.null(poly_terms)) {
    return(raw_response)
  }
  # Total number of pseudo-rows = sum of the polynomial degrees.
  degrees <- vapply(poly_terms, function(term) term$cdeg, numeric(1))
  c(raw_response, rep(0, sum(degrees)))
}
# Assemble the four building blocks appended to the design matrix for one
# polynomial term: the polynomial basis (pmatrix), its diagonal shrinkage
# block (rmatrix), and the two zero-filler blocks (bblock/sblock) that keep
# the augmented matrix rectangular.
.make_poly_blocks <- function(poly_term, data, lambda, n_col_added_sofar, curr_ncol) {
  var_name <- as.character(poly_term$cvar)
  degree <- poly_term$cdeg
  list(
    pmatrix = .make_poly_matrix(data, var_name, degree),
    rmatrix = .make_shrinkage_matrix(var_name, degree, lambda),
    bblock = .make_bottom_block(curr_ncol, degree),
    sblock = .make_side_block(n_col_added_sofar, degree)
  )
}
# Zero filler placed under the existing columns: degree rows x curr_ncol cols.
.make_bottom_block <- function(curr_ncol, degree) {
  matrix(0, nrow = degree, ncol = curr_ncol)
}
# Zero filler placed beside earlier pseudo-rows: one row per previously
# appended column, one column per degree of the current term.
.make_side_block <- function(n_col_added_sofar, degree) {
  matrix(0, nrow = n_col_added_sofar, ncol = degree)
}
# Orthogonal polynomial basis (degree vdegree) for one numeric column, with
# columns labelled "<var>.d.<k>" so the fitted coefficients are identifiable.
.make_poly_matrix <- function(data, vname, vdegree) {
  basis <- poly(data[, vname], degree = vdegree)
  colnames(basis) <- paste(vname, seq_len(vdegree), sep = ".d.")
  basis
}
# Diagonal ridge-penalty block: entry k is sqrt(lambda * k), so higher
# polynomial degrees are shrunk harder.  Column names match the corresponding
# polynomial basis columns.  (lambda is a scalar throughout this file.)
.make_shrinkage_matrix <- function(vname, vdegree, lambda) {
  penalty <- sqrt(lambda * seq_len(vdegree))
  # nrow is given explicitly so a length-1 penalty yields a 1x1 matrix
  # rather than triggering diag()'s scalar-argument behavior.
  block <- diag(penalty, nrow = vdegree)
  colnames(block) <- paste(vname, seq_len(vdegree), sep = ".d.")
  block
}
source("formula_parsers.R")
poly_ridge_regression <- function(formula, data, lambda=1) {
obj <- structure(list(), class="poly.ridge.lm")
obj$base_formula <- formula(get_nonspecial_terms(formula))
obj$response_name <- get_response_name(formula)
obj$poly_terms <- parse_special_terms(get_special_terms(formula))
obj$train_matrix <- .poly_ridge_regression_design_matrix(
obj$base_formula, obj$poly_terms, data, lambda
)
obj$train_response <- .poly_ridge_regression_response(
data[, obj$response_name], obj$poly_terms
)
obj$lm <- lm.fit(obj$train_matrix, obj$train_response)
obj$lm$terms <- terms(formula)
class(obj$lm) <- "lm"
obj
}
summary.poly.ridge.lm <- function(obj) {
summary(obj$lm)
}
predict.poly.ridge.lm <- function(obj, newdata) {
n_row <- nrow(newdata)
dm <- .poly_ridge_regression_design_matrix(
obj$base_formula, obj$poly_terms, newdata, lambda=1
)
dm <- dm[1:n_row, ]
coefs <- obj$lm$coefficients
dm %*% coefs
}
# Make a design matrix for polynomial ridge regression.
.poly_ridge_regression_design_matrix <- function(base_formula, poly_terms, data, lambda) {
base_matrix <- model.matrix(base_formula, data)
# Iterively add blocks
if(!is.null(poly_terms)) {
n_col_added_sofar <- 0
n_poly <- length(poly_terms)
for(i in 1:n_poly) {
poly_term <- poly_terms[[i]]
new_blocks <- .make_poly_blocks(
poly_term, data, lambda,
n_col_added_sofar=n_col_added_sofar,
curr_ncol=ncol(base_matrix)
)
side_block <- rbind(new_blocks$pmatrix, new_blocks$sblock)
bottom_block <- cbind(new_blocks$bblock, new_blocks$rmatrix)
base_matrix <- cbind(base_matrix, side_block)
base_matrix <- rbind(base_matrix, bottom_block)
n_col_added_sofar <- n_col_added_sofar + poly_term$cdeg
}
}
base_matrix[, "(Intercept)"] <- 1
base_matrix
}
.poly_ridge_regression_response <- function(raw_response, poly_terms) {
if(is.null(poly_terms)) {
.return <- raw_response
} else {
n_poly <- length(poly_terms)
tot_degree <- 0
for(i in 1:n_poly) {tot_degree <- tot_degree + poly_terms[[i]]$cdeg}
.return <- c(raw_response, rep(0, tot_degree))
}
.return
}
.make_poly_blocks <- function(poly_term, data, lambda, n_col_added_sofar, curr_ncol) {
.return <- list()
vname <- as.character(poly_term$cvar)
vdegree <- poly_term$cdeg
.return$pmatrix <- .make_poly_matrix(data, vname, vdegree)
.return$rmatrix <- .make_shrinkage_matrix(vname, vdegree, lambda)
.return$bblock <- .make_bottom_block(curr_ncol, vdegree)
.return$sblock <- .make_side_block(n_col_added_sofar, vdegree)
.return
}
.make_bottom_block <- function(curr_ncol, degree) {
bm <- rep(0, degree*curr_ncol)
dim(bm) <- c(degree, curr_ncol)
bm
}
.make_side_block <- function(n_col_added_sofar, degree) {
sm <- rep(0, n_col_added_sofar*degree)
dim(sm) <- c(n_col_added_sofar, degree)
sm
}
.make_poly_matrix <- function(data, vname, vdegree) {
pmatrix <- poly(data[, vname], degree=vdegree)
colnames(pmatrix) <- paste(vname, 1:vdegree, sep=".d.")
pmatrix
}
.make_shrinkage_matrix <- function(vname, vdegree, lambda) {
rmatrix <- as.matrix(Diagonal(x=sqrt(rep(lambda, vdegree)*1:vdegree)))
colnames(rmatrix) <- paste(vname, 1:vdegree, sep=".d.")
rmatrix
} |
# Pairwise (manejo x diversidad) comparisons of mean permanence: one PDF per
# permanence field, one heat-map page per perturbation scenario.
#
# For every pair of configurations a two-sample z score is computed from the
# stored (mean, variance) pairs (variances are divided by 100, presumably the
# number of replicates).  Differences that are not significant at the
# Sidak-corrected level are displayed as 0.
source("~/Dropbox/Chido/comparaciones/resultados/plotImage_todos_vs_todos(copia)(copia).R")
load("~/Dropbox/Chido/permanencias.RData")

a <- permanencias.perturbacion

# Perturbation scenarios: index 1 is the unperturbed (deterministic) control.
perturbaciones.main <- c("Sin perturbar",
                         "Perturbando las lluvias (sequías)",
                         "Perturbando a las arveneses (malezas)",
                         "Perturbando a los herbívoros (plagas)")
perturbacion <- c("det", "precipitacion", "arvenses", "herbivoros")
niveles <- c("0", "0.1", "0.1", "0.1")

manejo <- c("desyer", "desyerPlagui", "herb", "plaguiHerb", "Roundup")
diversidad <- c("milpa", "mzcb", "mzfre", "mz", "cb")

# One matrix row/column per (diversidad, manejo) combination.  (The original
# used length(diversidad)^2, which only worked because both vectors have the
# same length.)
dimen <- length(diversidad) * length(manejo)

# Sidak-style correction for the multiple pairwise comparisons.
alfa <- 1 - (1 - 0.05)^(1 / (dimen - 1))
zeta <- qnorm(1 - (alfa / 2))

# Render one PDF of pairwise-comparison heat maps for a single permanence
# field (e.g. "MzG_MzJ").  `titulo` is the first line of every page title.
compara_manejos <- function(campo, titulo, archivo) {
  pdf(archivo, height = 7, width = 10)
  for (h in seq_along(perturbacion)) {
    # The deterministic control only has perturbation level 0 (slot 1);
    # every real perturbation is compared at slot 4 (the "110910" level).
    level <- if (h == 1) 1 else 4
    comp <- matrix(0, dimen, dimen)
    for (j in seq_along(diversidad)) {
      for (i in seq_along(manejo)) {
        fila <- length(diversidad) * (j - 1) + i
        p1 <- a[[h]][[i]][[j]][[level]][[campo]]  # c(mean, variance)
        for (n in seq_along(diversidad)) {
          for (m in seq_along(manejo)) {
            col <- length(diversidad) * (n - 1) + m
            p2 <- a[[h]][[m]][[n]][[level]][[campo]]
            dif <- p1[1] - p2[1]
            b <- dif / sqrt(p1[2] / 100 + p2[2] / 100)
            # Keep the raw mean difference only when the z score is defined
            # and significant; degenerate cells (zero variance) stay at 0.
            if (!is.nan(b) && p1[2] != 0 && p2[2] != 0 &&
                (b > zeta || b < -zeta)) {
              comp[fila, col] <- dif
            }
          }
        }
      }
    }
    # Fix: the calabaza/quelites/conjunta plots previously hard-coded
    # "Nivel de pert: 0.1" even for the unperturbed scenario; every title now
    # uses niveles[h], as the maíz and frijol plots already did.
    myImagePlot(comp,
                paste0(titulo, "\n", perturbaciones.main[h],
                       " Nivel de pert: ", niveles[h]),
                manejo = "otro")
  }
  dev.off()
}

compara_manejos("MzG_MzJ",
                "Comparaciones múltiples de la permanencia del maíz",
                "~/Dropbox/Chido/comparaciones/resultados/mz_manejo_vs_manejo_119010.pdf")
compara_manejos("FreG_Fre",
                "Comparaciones múltiples de la permanencia del frijol",
                "~/Dropbox/Chido/comparaciones/resultados/fre_manejo_vs_manejo_119010.pdf")
compara_manejos("CbG_CbJ",
                "Comparaciones múltiples de la permanencia de la calabaza",
                "~/Dropbox/Chido/comparaciones/resultados/cb_manejo_vs_manejo_119010.pdf")
compara_manejos("Quelites",
                "Comparaciones múltiples de la permanencia de los quelites",
                "~/Dropbox/Chido/comparaciones/resultados/quel_manejo_vs_manejo_119010.pdf")
compara_manejos("conj",
                "Comparaciones múltiples de la permanencia conjunta",
                "~/Dropbox/Chido/comparaciones/resultados/conj_manejo_vs_manejo_119010.pdf")
| /comparaciones/resultados/comparaciones_manejo_vs_manejo_110910.R | no_license | laparcela/modelo_red_booleana_milpa_rafa | R | false | false | 9,953 | r | source("~/Dropbox/Chido/comparaciones/resultados/plotImage_todos_vs_todos(copia)(copia).R")
load("~/Dropbox/Chido/permanencias.RData")
a<-permanencias.perturbacion
perturbaciones.main<-c("Sin perturbar","Perturbando las lluvias (sequías)", "Perturbando a las arveneses (malezas)", "Perturbando a los herbívoros (plagas)")
perturbacion=c("det","precipitacion","arvenses","herbivoros")
niveles=c("0","0.1","0.1","0.1")
#manejo=c("desyer")
manejo=c("desyer", "desyerPlagui", "herb", "plaguiHerb", "Roundup")
#manejo=c("desyer", "desyerPlagui", "herb", "plaguiHerb", "Roundup")
diversidad=c("milpa", "mzcb", "mzfre", "mz", "cb")
nivel=c("1212", "1434", "1767", "110910")
#nivel=c("0","1212", "1434", "1767", "110910")
dimen=(length(diversidad)*length(diversidad))
matrizMZ<-matrix(0,dimen,dimen)
matrizFR<-matrix(0,dimen,dimen)
matrizCB<-matrix(0,dimen,dimen)
matrizQ<-matrix(0,dimen,dimen)
matrizSH<-matrix(0,dimen,dimen)
alfa=1-(1-0.05)^(1/(dimen-1))
zeta=qnorm(1-(alfa/2))
#numero=0
pdf(paste0("~/Dropbox/Chido/comparaciones/resultados/mz_manejo_vs_manejo_119010.pdf"),height=7,width=10)
for(h in 1:length(perturbacion)){
for(j in 1:length(diversidad)){
for(i in 1:length(manejo)){
if(h==1) {nivel=c("0"); level=1}
if(h!=1) {nivel=c("110910"); level=4}
# for(k in level){
# for(m in 1:length(manejo)){
for(n in 1:length(diversidad)){
for(m in 1:length(manejo)){
# if(l==1) {nivel=c("0"); level=1}
# if(l!=1) {nivel=c("110910"); level=4}
# for(o in level){
# numero=numero+1
# print(numero)
b<-(a[[h]][[i]][[j]][[level]]$MzG_MzJ[1]-a[[h]][[m]][[n]][[level]]$MzG_MzJ[1])/sqrt(a[[h]][[i]][[j]][[level]]$MzG_MzJ[2]/100+a[[h]][[m]][[n]][[level]]$MzG_MzJ[2]/100)
# print(b)
if(b!="NaN" & a[[h]][[i]][[j]][[level]]$MzG_MzJ[2]!=0 & a[[h]][[m]][[n]][[level]]$MzG_MzJ[2]!=0){
if(b>zeta | b<(-zeta)){
matrizMZ[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-a[[h]][[i]][[j]][[level]]$MzG_MzJ[1]-a[[h]][[m]][[n]][[level]]$MzG_MzJ[1]
}else{
matrizMZ[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-0
}
}else{
matrizMZ[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-0
}
# }
}
}
# }
# }
}
}
myImagePlot(matrizMZ,paste0("Comparaciones múltiples de la permanencia del maíz\n",perturbaciones.main[h]," Nivel de pert: ",niveles[h]),manejo="otro")
}
dev.off()
pdf(paste0("~/Dropbox/Chido/comparaciones/resultados/fre_manejo_vs_manejo_119010.pdf"),height=7,width=10)
for(h in 1:length(perturbacion)){
for(j in 1:length(diversidad)){
for(i in 1:length(manejo)){
if(h==1) {nivel=c("0"); level=1}
if(h!=1) {nivel=c("110910"); level=4}
# for(k in level){
# for(m in 1:length(manejo)){
for(n in 1:length(diversidad)){
for(m in 1:length(manejo)){
# if(l==1) {nivel=c("0"); level=1}
# if(l!=1) {nivel=c("110910"); level=4}
# for(o in level){
# numero=numero+1
# print(numero)
b<-(a[[h]][[i]][[j]][[level]]$FreG_Fre[1]-a[[h]][[m]][[n]][[level]]$FreG_Fre[1])/sqrt(a[[h]][[i]][[j]][[level]]$FreG_Fre[2]/100+a[[h]][[m]][[n]][[level]]$FreG_Fre[2]/100)
# print(b)
if(b!="NaN" & a[[h]][[i]][[j]][[level]]$FreG_Fre[2]!=0 & a[[h]][[m]][[n]][[level]]$FreG_Fre[2]!=0){
if(b>zeta | b<(-zeta)){
matrizFR[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-a[[h]][[i]][[j]][[level]]$FreG_Fre[1]-a[[h]][[m]][[n]][[level]]$FreG_Fre[1]
}else{
matrizFR[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-0
}
}else{
matrizFR[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-0
}
# }
}
}
# }
# }
}
}
myImagePlot(matrizFR,paste0("Comparaciones múltiples de la permanencia del frijol\n",perturbaciones.main[h]," Nivel de pert: ",niveles[h]),manejo="otro")
}
dev.off()
pdf(paste0("~/Dropbox/Chido/comparaciones/resultados/cb_manejo_vs_manejo_119010.pdf"),height=7,width=10)
for(h in 1:length(perturbacion)){
for(j in 1:length(diversidad)){
for(i in 1:length(manejo)){
if(h==1) {nivel=c("0"); level=1}
if(h!=1) {nivel=c("110910"); level=4}
# for(k in level){
# for(m in 1:length(manejo)){
for(n in 1:length(diversidad)){
for(m in 1:length(manejo)){
# if(l==1) {nivel=c("0"); level=1}
# if(l!=1) {nivel=c("110910"); level=4}
# for(o in level){
# numero=numero+1
# print(numero)
b<-(a[[h]][[i]][[j]][[level]]$CbG_CbJ[1]-a[[h]][[m]][[n]][[level]]$CbG_CbJ[1])/sqrt(a[[h]][[i]][[j]][[level]]$CbG_CbJ[2]/100+a[[h]][[m]][[n]][[level]]$CbG_CbJ[2]/100)
# print(b)
if(b!="NaN" & a[[h]][[i]][[j]][[level]]$CbG_CbJ[2]!=0 & a[[h]][[m]][[n]][[level]]$CbG_CbJ[2]!=0){
if(b>zeta | b<(-zeta)){
matrizCB[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-a[[h]][[i]][[j]][[level]]$CbG_CbJ[1]-a[[h]][[m]][[n]][[level]]$CbG_CbJ[1]
}else{
matrizCB[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-0
}
}else{
matrizCB[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-0
}
# }
}
}
# }
# }
}
}
myImagePlot(matrizCB,paste0("Comparaciones múltiples de la permanencia de la calabaza\n",perturbaciones.main[h]," Nivel de pert: 0.1"),manejo="otro")
}
dev.off()
pdf(paste0("~/Dropbox/Chido/comparaciones/resultados/quel_manejo_vs_manejo_119010.pdf"),height=7,width=10)
for(h in 1:length(perturbacion)){
for(j in 1:length(diversidad)){
for(i in 1:length(manejo)){
if(h==1) {nivel=c("0"); level=1}
if(h!=1) {nivel=c("110910"); level=4}
# for(k in level){
# for(m in 1:length(manejo)){
for(n in 1:length(diversidad)){
for(m in 1:length(manejo)){
# if(l==1) {nivel=c("0"); level=1}
# if(l!=1) {nivel=c("110910"); level=4}
# for(o in level){
# numero=numero+1
# print(numero)
b<-(a[[h]][[i]][[j]][[level]]$Quelites[1]-a[[h]][[m]][[n]][[level]]$Quelites[1])/sqrt(a[[h]][[i]][[j]][[level]]$Quelites[2]/100+a[[h]][[m]][[n]][[level]]$Quelites[2]/100)
# print(b)
if(b!="NaN" & a[[h]][[i]][[j]][[level]]$Quelites[2]!=0 & a[[h]][[m]][[n]][[level]]$Quelites[2]!=0){
if(b>zeta | b<(-zeta)){
matrizQ[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-a[[h]][[i]][[j]][[level]]$Quelites[1]-a[[h]][[m]][[n]][[level]]$Quelites[1]
}else{
matrizQ[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-0
}
}else{
matrizQ[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-0
}
# }
}
}
# }
# }
}
}
myImagePlot(matrizQ,paste0("Comparaciones múltiples de la permanencia de los quelites\n",perturbaciones.main[h]," Nivel de pert: 0.1"),manejo="otro")
}
dev.off()
pdf(paste0("~/Dropbox/Chido/comparaciones/resultados/conj_manejo_vs_manejo_119010.pdf"),height=7,width=10)
for(h in 1:length(perturbacion)){
for(j in 1:length(diversidad)){
for(i in 1:length(manejo)){
if(h==1) {nivel=c("0"); level=1}
if(h!=1) {nivel=c("110910"); level=4}
# for(k in level){
# for(m in 1:length(manejo)){
for(n in 1:length(diversidad)){
for(m in 1:length(manejo)){
# if(l==1) {nivel=c("0"); level=1}
# if(l!=1) {nivel=c("110910"); level=4}
# for(o in level){
# numero=numero+1
# print(numero)
b<-(a[[h]][[i]][[j]][[level]]$conj[1]-a[[h]][[m]][[n]][[level]]$conj[1])/sqrt(a[[h]][[i]][[j]][[level]]$conj[2]/100+a[[h]][[m]][[n]][[level]]$conj[2]/100)
# print(b)
if(b!="NaN" & a[[h]][[i]][[j]][[level]]$conj[2]!=0 & a[[h]][[m]][[n]][[level]]$conj[2]!=0){
if(b>zeta | b<(-zeta)){
matrizSH[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-a[[h]][[i]][[j]][[level]]$conj[1]-a[[h]][[m]][[n]][[level]]$conj[1]
}else{
matrizSH[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-0
}
}else{
matrizSH[length(diversidad)*(j-1)+i, length(diversidad)*(n-1)+m]<-0
}
# }
}
}
# }
# }
}
}
myImagePlot(matrizSH,paste0("Comparaciones múltiples de la permanencia conjunta\n",perturbaciones.main[h]," Nivel de pert: 0.1"),manejo="otro")
}
dev.off()
#cerosMZ<-which(apply(matrizMZ,1,sum)==0)
#matrizMZ<-abs(matrizMZ[-cerosMZ,-cerosMZ])
#cerosFR<-which(apply(matrizFR,1,sum)==0)
#matrizFR<-abs(matrizFR[-cerosFR,-cerosFR])
#cerosCB<-which(apply(matrizCB,1,sum)==0)
#matrizCB<-abs(matrizCB[-cerosCB,-cerosCB])
##cerosQ<-which(apply(matrizQ,1,sum)==0)
#matrizQ<-abs(matrizQ)#[-cerosQ,-cerosQ])
##cerosSH<-which(apply(matrizSH,1,sum)==0)
#matrizSH<-abs(matrizSH)#[-cerosSH,-cerosSH]
#pdf("~/Dropbox/Chido/comparaciones/resultados/sentido_vs_sentido_110910_vs_0.pdf",height=10, width=16)
#source("~/Dropbox/Chido/comparaciones/resultados/plotImage_filtrado_mz.R")
#myImagePlot(matrizMZ,"Comparaciones múltiples de la permanencia promedio del maíz \n Todas las perturbaciones, nivel=1/2, control determinista",manejo="otro")
#source("~/Dropbox/Chido/comparaciones/resultados/plotImage_filtrado_fre.R")
#myImagePlot(matrizFR,"Comparaciones múltiples de la permanencia promedio del frijol\n Todas las perturbaciones, nivel=1/2, control determinista",manejo="otro")
#source("~/Dropbox/Chido/comparaciones/resultados/plotImage_filtrado_cb.R")
#myImagePlot(matrizCB,"Comparaciones múltiples de la permanencia promedio de la calabaza\n Todas las perturbaciones, nivel=1/2, control determinista",manejo="otro")
#source("~/Dropbox/Chido/comparaciones/resultados/plotImage_todos_vs_todos.R")
#myImagePlot(matrizQ,"Comparaciones múltiples de la permanencia promedio de los quelites\n Todas las perturbaciones, nivel=1/2, control determinista",manejo="otro")
#myImagePlot(matrizSH,"Comparaciones múltiples de la permanencia conjunta\n Todas las perturbaciones, nivel=1/2, control determinista",manejo="otro")
#dev.off()
|
#Class implementing an Association Rules Algorithm
#Implements the GeneticFuzzyApriori_A KEEL association rules algorithm
#Author: Oliver Sanchez
#Convenience constructor for the GeneticFuzzyApriori_A association-rules
#algorithm: instantiates the R6 implementation class and forwards every
#tuning parameter to setParameters() unchanged.
GeneticFuzzyApriori_A <- function(dat, seed=1286082570,NumberofEvaluations=10000,PopulationSize=50,ProbabilityofMutation=0.01,ProbabilityofCrossover=0.8,ParameterdforMMACrossover=0.35,NumberofFuzzyRegionsforNumericAttributes=3,UseMaxOperatorfor1FrequentItemsets="false",MinimumSupport=0.1,MinimumConfidence=0.8){
  algorithm <- RKEEL::R6_GeneticFuzzyApriori_A$new()
  algorithm$setParameters(dat, seed, NumberofEvaluations, PopulationSize,
                          ProbabilityofMutation, ProbabilityofCrossover,
                          ParameterdforMMACrossover,
                          NumberofFuzzyRegionsforNumericAttributes,
                          UseMaxOperatorfor1FrequentItemsets,
                          MinimumSupport, MinimumConfidence)
  algorithm
}
#R6 implementation class for the KEEL GeneticFuzzyApriori_A algorithm.
#Public fields hold the tuning parameters (defaults mirror the
#GeneticFuzzyApriori_A() convenience constructor); the inherited
#AssociationRulesAlgorithm machinery presumably writes getParametersText()
#into the KEEL config file before running the bundled jar.
R6_GeneticFuzzyApriori_A <- R6::R6Class("R6_GeneticFuzzyApriori_A",
  inherit = AssociationRulesAlgorithm,
  public = list(
    #Public properties: algorithm parameters written to the KEEL config file.
    #Seed for the (stochastic) genetic search.
    seed=1286082570,
    #Genetic-algorithm budget and population settings.
    NumberofEvaluations=10000,
    PopulationSize=50,
    ProbabilityofMutation=0.01,
    ProbabilityofCrossover=0.8,
    ParameterdforMMACrossover=0.35,
    #Fuzzy-partition granularity for numeric attributes.
    NumberofFuzzyRegionsforNumericAttributes=3,
    #Stored as the string "false", not a logical: the config file is text.
    UseMaxOperatorfor1FrequentItemsets="false",
    #Rule-mining thresholds.
    MinimumSupport=0.1,
    MinimumConfidence=0.8,
    #Public functions
    #Store the dataset (via the parent class) and every tuning parameter.
    setParameters = function(dat, seed=1286082570,NumberofEvaluations=10000,PopulationSize=50,ProbabilityofMutation=0.01,ProbabilityofCrossover=0.8,ParameterdforMMACrossover=0.35,NumberofFuzzyRegionsforNumericAttributes=3,UseMaxOperatorfor1FrequentItemsets="false",MinimumSupport=0.1,MinimumConfidence=0.8){
      super$setParameters(dat)
      self$seed <- seed
      self$NumberofEvaluations <- NumberofEvaluations
      self$PopulationSize <- PopulationSize
      self$ProbabilityofMutation <- ProbabilityofMutation
      self$ProbabilityofCrossover <- ProbabilityofCrossover
      self$ParameterdforMMACrossover <- ParameterdforMMACrossover
      self$NumberofFuzzyRegionsforNumericAttributes <- NumberofFuzzyRegionsforNumericAttributes
      self$UseMaxOperatorfor1FrequentItemsets <- UseMaxOperatorfor1FrequentItemsets
      self$MinimumSupport <- MinimumSupport
      self$MinimumConfidence <- MinimumConfidence
    }
  ),
  private = list(
    #Private properties used by the parent class to locate and label the jar.
    #jar Filename
    jarName = "GeneticFuzzyApriori.jar",
    #algorithm name
    algorithmName = "GeneticFuzzyApriori_A",
    #String with algorithm name
    algorithmString = "GeneticFuzzyApriori_A",
    #Number of .txt output files the KEEL jar produces -- TODO confirm.
    algorithmOutputNumTxt = 2,
    #Private functions
    #Build the parameter section of the KEEL config file, one "key = value"
    #line per parameter.  The key strings presumably must match the jar's
    #expected spelling exactly -- do not rename them.
    getParametersText = function(){
      text <- ""
      text <- paste0(text, "seed = ", self$seed, "\n")
      text <- paste0(text, "Number of Evaluations = ", self$NumberofEvaluations, "\n")
      text <- paste0(text, "Population Size = ", self$PopulationSize, "\n")
      text <- paste0(text, "Probability of Mutation = ", self$ProbabilityofMutation, "\n")
      text <- paste0(text, "Probability of Crossover = ", self$ProbabilityofCrossover, "\n")
      text <- paste0(text, "Parameter d for MMA Crossover = ", self$ParameterdforMMACrossover, "\n")
      text <- paste0(text, "Number of Fuzzy Regions for Numeric Attributes = ", self$NumberofFuzzyRegionsforNumericAttributes, "\n")
      text <- paste0(text, "Use Max Operator for 1-Frequent Itemsets = ", self$UseMaxOperatorfor1FrequentItemsets, "\n")
      text <- paste0(text, "Minimum Support = ", self$MinimumSupport, "\n")
      text <- paste0(text, "Minimum Confidence = ", self$MinimumConfidence, "\n")
      return(text)
    }
  )
)
| /RKEEL/R/GeneticFuzzyApriori-A.R | no_license | i02momuj/RKEEL | R | false | false | 3,726 | r | #Class implementing an Association Rules Algorithm
#Implements the GeneticFuzzyApriori_A KEEL association rules algorithm
#Author: Oliver Sanchez
GeneticFuzzyApriori_A <- function(dat, seed=1286082570,NumberofEvaluations=10000,PopulationSize=50,ProbabilityofMutation=0.01,ProbabilityofCrossover=0.8,ParameterdforMMACrossover=0.35,NumberofFuzzyRegionsforNumericAttributes=3,UseMaxOperatorfor1FrequentItemsets="false",MinimumSupport=0.1,MinimumConfidence=0.8){
alg <- RKEEL::R6_GeneticFuzzyApriori_A$new()
alg$setParameters(dat,seed,NumberofEvaluations,PopulationSize,ProbabilityofMutation,ProbabilityofCrossover,ParameterdforMMACrossover,NumberofFuzzyRegionsforNumericAttributes,UseMaxOperatorfor1FrequentItemsets,MinimumSupport,MinimumConfidence)
return (alg)
}
R6_GeneticFuzzyApriori_A <- R6::R6Class("R6_GeneticFuzzyApriori_A",
  inherit = AssociationRulesAlgorithm,
  public = list(
    #Public properties: the GeneticFuzzyApriori_A parameters, with KEEL's
    #default values.
    seed=1286082570,
    NumberofEvaluations=10000,
    PopulationSize=50,
    ProbabilityofMutation=0.01,
    ProbabilityofCrossover=0.8,
    ParameterdforMMACrossover=0.35,
    NumberofFuzzyRegionsforNumericAttributes=3,
    UseMaxOperatorfor1FrequentItemsets="false",
    MinimumSupport=0.1,
    MinimumConfidence=0.8,
    #Public functions
    #Register the dataset with the parent class and store every parameter
    #on the object.
    setParameters = function(dat, seed=1286082570,NumberofEvaluations=10000,PopulationSize=50,ProbabilityofMutation=0.01,ProbabilityofCrossover=0.8,ParameterdforMMACrossover=0.35,NumberofFuzzyRegionsforNumericAttributes=3,UseMaxOperatorfor1FrequentItemsets="false",MinimumSupport=0.1,MinimumConfidence=0.8){
      super$setParameters(dat)
      self$seed <- seed
      self$NumberofEvaluations <- NumberofEvaluations
      self$PopulationSize <- PopulationSize
      self$ProbabilityofMutation <- ProbabilityofMutation
      self$ProbabilityofCrossover <- ProbabilityofCrossover
      self$ParameterdforMMACrossover <- ParameterdforMMACrossover
      self$NumberofFuzzyRegionsforNumericAttributes <- NumberofFuzzyRegionsforNumericAttributes
      self$UseMaxOperatorfor1FrequentItemsets <- UseMaxOperatorfor1FrequentItemsets
      self$MinimumSupport <- MinimumSupport
      self$MinimumConfidence <- MinimumConfidence
    }
  ),
  private = list(
    #Private properties
    #Name of the KEEL jar file implementing the algorithm
    jarName = "GeneticFuzzyApriori.jar",
    #Algorithm name
    algorithmName = "GeneticFuzzyApriori_A",
    #String with the algorithm name used in the config file
    algorithmString = "GeneticFuzzyApriori_A",
    #Number of .txt output files produced by the algorithm
    algorithmOutputNumTxt = 2,
    #Private functions
    #Build the parameter section of the KEEL config file: one
    #newline-terminated "name = value" line per parameter, in the order
    #the KEEL jar expects.
    getParametersText = function(){
      param_lines <- c(
        paste0("seed = ", self$seed),
        paste0("Number of Evaluations = ", self$NumberofEvaluations),
        paste0("Population Size = ", self$PopulationSize),
        paste0("Probability of Mutation = ", self$ProbabilityofMutation),
        paste0("Probability of Crossover = ", self$ProbabilityofCrossover),
        paste0("Parameter d for MMA Crossover = ", self$ParameterdforMMACrossover),
        paste0("Number of Fuzzy Regions for Numeric Attributes = ", self$NumberofFuzzyRegionsforNumericAttributes),
        paste0("Use Max Operator for 1-Frequent Itemsets = ", self$UseMaxOperatorfor1FrequentItemsets),
        paste0("Minimum Support = ", self$MinimumSupport),
        paste0("Minimum Confidence = ", self$MinimumConfidence)
      )
      #The trailing "\n" keeps the result byte-identical to the original
      #line-by-line paste0 accumulation (every line newline-terminated).
      paste0(paste(param_lines, collapse = "\n"), "\n")
    }
  )
)
|
## BIO8069
### Assignment - Part 1: Wildlife Acoustics
# Install (if necessary) and load the packages used across this script.
# Not every package listed is used directly; the list was built up over the
# module practicals so it can be reused between scripts, which prevents
# repeated re-installation.
necessary.packages<-c("devtools","behaviouR","tuneR","seewave","ggplot2","dplyr",
                      "warbleR","leaflet","lubridate","sp","sf","raster","mapview",
                      "leafem","BIRDS","xts","zoo", "stringr","vegan","rmarkdown","shiny")
# Which of the required packages are already installed?
already.installed <- necessary.packages%in%installed.packages()[,'Package']
if (length(necessary.packages[!already.installed])>=1) { # install any that are missing
  # 'dependencies = TRUE' spelt out in full: the original 'dep=1' relied on
  # partial argument matching and a numeric stand-in for a logical value.
  install.packages(necessary.packages[!already.installed], dependencies = TRUE)
}
# Load every package; TRUE is used rather than the reassignable shorthand T.
sapply(necessary.packages, function(p){require(p,quietly = TRUE,character.only = TRUE)})
#The analysis was conducted in three parts.
#The first part examined and compared the calls and songs of the European Robin.
#The second part compared the calls of European Robins with two other common garden birds.
#Finally, the third part compared the songs of European Robins with the songs of other members of the
#Subfamily Erithacinae.
#### Part 1
# European Robins (Erithacus rubecula)
# Using query_xc() to check for presence of recordings on the xeno-canto website prior to download.
# download = FALSE - prevents recordings from being downloaded, while 'cnt:' specifies the country,
# 'type:' specifies the call type and 'len:' specifies the length of the recording.
# requires the package warbleR
robin_song <-query_xc(qword = 'Erithacus rubecula cnt:"united kingdom" type:song len:5-25', download = FALSE)
robin_call<-query_xc(qword = 'Erithacus rubecula cnt:"united kingdom" type:call len:5-25', download = FALSE)
#using the map_xc() function and the leaflet package the site of each recording can be visualised.
#Clicking on the pop-up will give links to spectrograms and 'listen' links on the xeno-canto website.
map_xc(robin_song, leaflet.map = TRUE)
#Now that the sets of recordings have been specified, they can then be downloaded for analysis.
#Sub-folders are then created in the RStudio Project for songs and calls.
#As the robin songs and calls will be used in two separate analyses, multiple sub-folders have been
#created (one copy per analysis, so files can be converted and tidied independently).
dir.create(file.path("robin_song"))
dir.create(file.path("robin_song2"))
dir.create(file.path("robin_call"))
dir.create(file.path("robin_call2"))
#The .MP3 files can then be downloaded into the separate sub-folders
query_xc(X = robin_song, path="robin_song")
query_xc(X = robin_song, path="robin_song2")
query_xc(X = robin_call, path="robin_call")
query_xc(X = robin_call, path="robin_call2")
#Renaming files
#Using the _stringr_ package, the structure of the names of the .MP3 files was changed using the code below.
#This allowed for more succinct and manageable file names.
#str_split() divides the name into 3 pieces
#str_c() concatenates the file name together merging the scientific name followed by -song_ and adding in the file
#number .mp3. For example; Erithacusrubecula-song_374144.mp3.
#seq_along() is used instead of 1:length(): if a download failed and a folder
#is empty, 1:length(old_files) evaluates to 1:0 and the loop body would run
#with an out-of-bounds index, whereas seq_along() simply skips the loop.
#songs
old_files <- list.files("robin_song", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-song_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#songs2
old_files <- list.files("robin_song2", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-song_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#calls
old_files <- list.files("robin_call", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-call_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#call2
old_files <- list.files("robin_call2", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-call_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#Three separate analyses will be run - one comparing robin songs and calls, one comparing common
#garden bird calls and finally, one containing the songs of the sub-family Erithacinae.
#So three separate folders are created.
#Robins
dir.create(file.path("robin_audio"))
file.copy(from=paste0("robin_song/",list.files("robin_song")),
          to="robin_audio")
file.copy(from=paste0("robin_call/",list.files("robin_call")),
          to="robin_audio")
#Common garden birds calls (birds)
dir.create(file.path("birds_audio"))
file.copy(from=paste0("robin_call2/",list.files("robin_call2")),
          to="birds_audio")
#Sub-family Erithacinae
dir.create(file.path("erithacinae_audio"))
file.copy(from=paste0("robin_song2/",list.files("robin_song2")),
          to="erithacinae_audio")
#Change files from MP3 to WAV files using the mp32wav() function from the warbler package.
#The .mp3 files are then stored as a new object and subsequently removed to save disk space,
#before removing the .mp3 files check that the conversion has happened.
#NOTE(review): pattern is a regular expression, not a glob; "*.mp3" only works
#here because ".mp3" also matches as a regex - "\\.mp3$" would be stricter.
mp32wav(path="robin_audio", dest.path="robin_audio")
unwanted_mp3 <- dir(path="robin_audio", pattern="*.mp3")
file.remove(paste0("robin_audio/", unwanted_mp3))
#Visualisation and analysis of the song and alarm calls can be carried out.
#An oscillogram is generated using the function oscillo() from the seewave package.
#Single robin song oscillogram
#first a single robin song is read using the readWave() function found in the package tuneR.
#This reading is stored in a new object - robin_wav.
robin_wav<- readWave("robin_audio/Erithacusrubecula-song_374144.wav")
robin_wav
#The oscillo() function is then run on the object to plot the full frequency diagram.
oscillo(robin_wav)
#To view the frequency diagram in greater detail it is possible to zoom in.
#Here the section from 0.59 s to 0.60 s has been specified.
oscillo(robin_wav, from = 0.59, to = 0.60)
#Additionally the SpectrogramSingle() function from the DenaJGibbon/behaviouR package
#can be used to visualise the spectrum of frequencies over time, which can be presented in colour.
SpectrogramSingle(sound.file = "robin_audio/Erithacusrubecula-song_374144.wav",
                  Colors = "Colors")
#Single robin call oscillogram and spectrogram (same steps as for the song above)
robinc_wav<- readWave("robin_audio/Erithacusrubecula-call_70122.wav")
oscillo(robinc_wav)
oscillo(robinc_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "robin_audio/Erithacusrubecula-call_70122.wav",
                  Colors = "Colors")
#MFCC of robin song and calls
#Before the PCA was carried out the data was simplified by pushing it through
#Mel-frequency cepstral coefficients (MFCC), which identifies repeated patterns
#and extracts them to form a simplified data set that can be used in the PCA.
#An MFCC can be applied simply by using the MFCCFunction().
source("nes8010.R") #use NES8010.R as a source for stored functions used in the PCA
robin_mfcc <- MFCCFunction(input.dir = "robin_audio",
                           max.freq=7000)
dim(robin_mfcc) #shows the key components have been extracted simplifying the data to 178 components.
#PCA of Robin songs and calls
#the vegan package is required.
#Using the ordi_pca() function and the ordi_scores() function from the source script to carry
#out the PCA.
robin_pca <- ordi_pca(robin_mfcc[, -1], scale=TRUE)# Use [, -1] to keep all rows but omit first column
summary(robin_pca)
robin_sco <- ordi_scores(robin_pca, display="sites")
robin_sco <- mutate(robin_sco, group_code = robin_mfcc$Class)
#robin_sco can then be plotted using ggplot - allowing for the variation between call types to be visualised.
ggplot(robin_sco, aes(x=PC1, y=PC2, colour=group_code)) +
  geom_point() +
  scale_colour_discrete(name = "Call Type",
                        labels = c("Red Breasted Robin call", "Red Breasted Robin song")) +
  theme_classic()
#### Part 2
#Part 2 of the analysis, the robin call was then compared with the calls of two other
#common garden birds found in the United Kingdom, the house sparrow (Passer domesticus)
#and the coal tit (Periparus ater).
#This analysis will follow the same process as Part 1.
## Using query_xc () to check for presence of recordings on the xeno-canto website prior to download
#House Sparrow
sparrow_call<-query_xc(qword = 'Passer domesticus cnt:"united kingdom" type:call len:5-25', download = FALSE)
#Coal tit
coaltit_call<-query_xc(qword = 'Periparus ater cnt:"united kingdom" type:call len:5-25', download = FALSE)
#Sub-folders are then created in the RStudio Project for calls.
#Recordings are downloaded into these folders
#House sparrow
dir.create(file.path("sparrow_call"))
query_xc(X = sparrow_call, path="sparrow_call")
#Coal tit
dir.create(file.path("coaltit_call"))
query_xc(X = coaltit_call, path="coaltit_call")
#Renaming files
#Using the _stringr_ package, the structure of the names of the .MP3 files was changed using the code below.
#This allowed for more succinct and manageable file names.
#seq_along() replaces 1:length() so the loop is skipped (rather than run with
#index 1:0) when a folder is empty, e.g. after a failed download.
#House sparrow
old_files <- list.files("sparrow_call", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-call_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#Coal tit
old_files <- list.files("coaltit_call", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-call_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#House sparrow and Coal tit recordings are then copied to the birds_audio folder
file.copy(from=paste0("sparrow_call/",list.files("sparrow_call")),
          to="birds_audio")
file.copy(from=paste0("coaltit_call/",list.files("coaltit_call")),
          to="birds_audio")
#Change files from MP3 to WAV files using the mp32wav() function from the warbler package.
#The .mp3 files are then stored as a new object and subsequently removed to save disk space,
#before removing the .mp3 files check that the conversion has happened.
mp32wav(path="birds_audio", dest.path="birds_audio")
#"\\.mp3$" is a proper regular expression: the original glob "*.mp3" only
#matched by accident because ".mp3" also matches as a regex.
unwanted_mp3 <- dir(path="birds_audio", pattern="\\.mp3$")
file.remove(paste0("birds_audio/", unwanted_mp3))
#Visualisation and analysis of the calls can be carried out using oscillograms and spectrograms
#(same steps as for the robin recordings in Part 1).
#House sparrow
sparrow_wav<- readWave("birds_audio/Passerdomesticus-call_208481.wav")
oscillo(sparrow_wav)
oscillo(sparrow_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "birds_audio/Passerdomesticus-call_208481.wav",
                  Colors = "Colors")
#Coal tit
coal_wav<- readWave("birds_audio/Periparusater-call_307342.wav")
oscillo(coal_wav)
oscillo(coal_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "birds_audio/Periparusater-call_307342.wav",
                  Colors = "Colors")
#MFCC of common garden bird calls - simplifies the data before the PCA
birds_mfcc <- MFCCFunction(input.dir = "birds_audio",
                           max.freq=7000)
dim(birds_mfcc)#reduced to 178 components
#PCA of common bird calls (ordi_pca()/ordi_scores() come from nes8010.R, sourced in Part 1)
birds_pca <- ordi_pca(birds_mfcc[, -1], scale=TRUE)
summary(birds_pca)
birds_sco <- ordi_scores(birds_pca, display="sites")
birds_sco <- mutate(birds_sco, group_code = birds_mfcc$Class)
summary(birds_sco)
#Plot the generated scores using ggplot - adding labels to specify bird type
ggplot(birds_sco, aes(x=PC1, y=PC2, colour=group_code)) +
  geom_point() +
  scale_colour_discrete(name = "Bird Type",
                        labels = c("Red Breasted Robin", "House Sparrow", "Coal Tit")) +
  theme_classic()
#### Part 3
#This section explores the variation in the songs of Old World Flycatchers,
#focusing on the Subfamily Erithacinae. This analysis included the European Robin,
#the Cape Robin-chat (Cossypha caffra), the Spotted Palm Thrush (Cichladusa guttata)
#and the Forest Robin (Stiphrornis erythrothorax).
#This analysis will follow the same process as Part 1.
# Using query_xc () to check for presence of recordings on the xeno-canto website prior to download
#Cape robin-chat
crobin_song <-query_xc(qword = 'Cossypha caffra cnt:"south africa" type:song len:5-25', download = FALSE) #country specified: South Africa
#Spotted Palm Thrush
palm_song <-query_xc(qword = 'Cichladusa guttata cnt:"kenya" type:song len:5-25', download = FALSE) #country specified: Kenya
#Forest robin
frobin_song <-query_xc(qword = 'Stiphrornis erythrothorax type:song len:5-25', download = FALSE)
#No country specification as the recordings were all within the central African region and some parts
#of Western Africa and there were too few recordings to limit by country.
#Sub-folders are then created in the RStudio Project for these songs.
#Recordings are then downloaded into these folders
#Cape robin-chat
dir.create(file.path("crobin_song"))
query_xc(X = crobin_song, path= "crobin_song")
#Spotted Palm Thrush
dir.create(file.path("palm_song"))
query_xc(X = palm_song, path="palm_song")
#Forest robin
dir.create(file.path("frobin_song"))
query_xc(X = frobin_song, path="frobin_song")
#Renaming files
#Using the _stringr_ package, the structure of the names of the .MP3 files was changed using the code below.
#This allowed for more succinct and manageable file names.
#NOTE(review): the label inserted here is "-call_" even though these recordings
#are songs; this looks like a copy-paste slip, but the later readWave() calls
#expect the "-call_" file names, so the label is deliberately left unchanged.
#seq_along() replaces 1:length() so an empty folder skips the loop instead of
#indexing with 1:0.
#Cape robin-chat
old_files <- list.files("crobin_song", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-call_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#Spotted Palm Thrush
old_files <- list.files("palm_song", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-call_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#Forest Robin
old_files <- list.files("frobin_song", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-call_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#The recordings are then copied to the erithacinae_audio folder
file.copy(from=paste0("crobin_song/",list.files("crobin_song")),
          to="erithacinae_audio")
file.copy(from=paste0("palm_song/",list.files("palm_song")),
          to="erithacinae_audio")
file.copy(from=paste0("frobin_song/",list.files("frobin_song")),
          to="erithacinae_audio")
#Change files from MP3 to WAV files using the mp32wav() function from the warbler package.
#The .mp3 files are then stored as a new object and subsequently removed to save disk space,
#before removing the .mp3 files check that the conversion has happened.
#NOTE(review): pattern is a regular expression, not a glob; "*.mp3" only works
#here because ".mp3" also matches as a regex - "\\.mp3$" would be stricter.
mp32wav(path="erithacinae_audio", dest.path="erithacinae_audio")
unwanted_mp3 <- dir(path="erithacinae_audio", pattern="*.mp3")
file.remove(paste0("erithacinae_audio/", unwanted_mp3))
#Visualisation and analysis of the songs can be carried out using oscillograms and spectrograms
#allowing comparisons between individual songs to be made.
#Cape Robin-chat
crobin_wav<- readWave("erithacinae_audio/Cossyphacaffra-call_324664.wav")
oscillo(crobin_wav)
oscillo(crobin_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "erithacinae_audio/Cossyphacaffra-call_324664.wav",
                  Colors = "Colors")
#Spotted Palm Thrush
palm_wav<- readWave("erithacinae_audio/Cichladusaguttata-call_371366.wav")
oscillo(palm_wav)
oscillo(palm_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "erithacinae_audio/Cichladusaguttata-call_371366.wav",
                  Colors = "Colors")
#Forest robin
forest_wav<- readWave("erithacinae_audio/Stiphrorniserythrothorax-call_284893.wav")
oscillo(forest_wav)
oscillo(forest_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "erithacinae_audio/Stiphrorniserythrothorax-call_284893.wav",
                  Colors = "Colors")
#MFCC of the sub-family Erithacinae bird songs to reduce data complexity.
erithacinae_mfcc <- MFCCFunction(input.dir = "erithacinae_audio",
                                 max.freq=7000)
dim(erithacinae_mfcc)#reduced to 178 components
#PCA of sub-family Erithacinae bird songs (ordi_pca()/ordi_scores() from nes8010.R, sourced in Part 1)
erithacinae_pca <- ordi_pca(erithacinae_mfcc[, -1], scale=TRUE)
summary(erithacinae_pca)
erith_sco <- ordi_scores(erithacinae_pca, display="sites")
erith_sco <- mutate(erith_sco, group_code = erithacinae_mfcc$Class)
#Plot the generated scores using ggplot - adding labels to specify bird type
ggplot(erith_sco, aes(x=PC1, y=PC2, colour=group_code)) +
  geom_point() +
  scale_colour_discrete(name = "Bird Type",
                        labels = c("Spotted Palm Thrush", "Cape Robin-chat", "Red Breasted Robin", "Forest Robin")) +
  theme_classic()
| /Assignment_Part_1.R | no_license | SheenaDavis/BIO8068_Assignment_Part1 | R | false | false | 17,452 | r | ## BIO8069
### Assignment - Part 1:Wildife Acoustics
#read in relevant packages. The packages listed below won't all be used.
#I have set up this code over the course of the practicals for this module as I can then
#easily copy and paste it into scripts, which has helped prevent re-installation of packages.
necessary.packages<-c("devtools","behaviouR","tuneR","seewave","ggplot2","dplyr",
"warbleR","leaflet","lubridate","sp","sf","raster","mapview",
"leafem","BIRDS","xts","zoo", "stringr","vegan","rmarkdown","shiny")
already.installed <- necessary.packages%in%installed.packages()[,'Package'] #asks if the necessary packages are already installed
if (length(necessary.packages[!already.installed])>=1) { #if not installed download now
install.packages(necessary.packages[!already.installed],dep=1)
}
sapply(necessary.packages, function(p){require(p,quietly = T,character.only = T)})
#The analysis was conducted in three parts.
#The first part examined and compared the calls and songs of the European Robin.
#The second part compared the calls of European Robins with two other common garden birds.
#Finally, the third part compared the songs of European Robins with the songs of other members of the
#Subfamily Erithacinae.
#### Part 1
# European Robins (Erithacus rubecula)
# Using query_xc () to check for presence of recordings on the xeno-canto website prior to download.
# download = FALSE - prevents recordings from being downloaded, while 'cnt:' specifies the country,
#'type:' specifies the call type and 'len:' specifies the length of the recording.
# requires the package warbleR
robin_song <-query_xc(qword = 'Erithacus rubecula cnt:"united kingdom" type:song len:5-25', download = FALSE)
robin_call<-query_xc(qword = 'Erithacus rubecula cnt:"united kingdom" type:call len:5-25', download = FALSE)
#using the map_xc() function and the leaflet package the site of each recording can be visualised.
#Clicking on the pop-up will give links to spectograms and 'listen' links on the xeno-canto website.
map_xc(robin_song, leaflet.map = TRUE)
#Now that the sets of recordings have been specified, they can then be downloaded for analysis.
#Sub-folders are then created in the RStudio Project for songs and calls.
#As the robin songs and calls will be used in two separate analyses, multiple sub-folders have been created
dir.create(file.path("robin_song"))
dir.create(file.path("robin_song2"))
dir.create(file.path("robin_call"))
dir.create(file.path("robin_call2"))
#The .MP3 files can then be downloaded into the separate sub-folders
query_xc(X = robin_song, path="robin_song")
query_xc(X = robin_song, path="robin_song2")
query_xc(X = robin_call, path="robin_call")
query_xc(X = robin_call, path="robin_call2")
#Renaming files
#Using the _stringr_ package, the structure of the names of the .MP3 files was changed using the code below.
#This allowed for more succinct and manageable file names.
#str_split() divides the name into 3 pieces
#str_c()concatenates the file name together merging the scientific name followed by -song_ and adding in the file
#number .mp3. For example; Erithacusrubecula-song_374144.mp3.
#songs
#seq_along() is used instead of 1:length(): an empty folder would otherwise
#yield the index sequence 1:0 and run the loop body out of bounds.
old_files <- list.files("robin_song", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-song_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#songs2
old_files <- list.files("robin_song2", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-song_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#calls
old_files <- list.files("robin_call", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-call_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#call2
old_files <- list.files("robin_call2", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-call_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#Three separate analyses will be run - one comparing sparrow sounds and one comparing common garden bird calls
#and finally, one containing the songs of the sub-family Erithacinae.
#So three separate folders are created.
#Robins
dir.create(file.path("robin_audio"))
file.copy(from=paste0("robin_song/",list.files("robin_song")),
to="robin_audio")
file.copy(from=paste0("robin_call/",list.files("robin_call")),
to="robin_audio")
#Common garden birds calls (birds)
dir.create(file.path("birds_audio"))
file.copy(from=paste0("robin_call2/",list.files("robin_call2")),
to="birds_audio")
#Sub-family Erithacinae
dir.create(file.path("erithacinae_audio"))
file.copy(from=paste0("robin_song2/",list.files("robin_song2")),
to="erithacinae_audio")
#Change files from MP3 to WAV files using the mp32wav() function from the warbler package.
#The .mp3 files are then stored as a new object and subsequently removed to save disk space,
#before removing the .mp3 files check that the conversion has happened.
mp32wav(path="robin_audio", dest.path="robin_audio")
unwanted_mp3 <- dir(path="robin_audio", pattern="*.mp3")
file.remove(paste0("robin_audio/", unwanted_mp3))
#Visualisation and analysis of the song and alarm calls can be carried out
#An oscillogram is generated using the function oscillo() from the seewave package
#Single robin song oscillogram
#first a single robin song is read using the readWave() fuction found in the package TuneR.
#This reading is stored in a new object - robin_wav.
robin_wav<- readWave("robin_audio/Erithacusrubecula-song_374144.wav")
robin_wav
#The oscillo() function is then run on the object to plot the full frequency diagram.
oscillo(robin_wav)
#To view the frquency diagram in greater detail it is possible to zoom in.
#Here section 0.59 to 0.60 has been specified.
oscillo(robin_wav, from = 0.59, to = 0.60)
#Additionally the SpectrogramSingle() function from the DenaJGibbon/behaviouR package
#can be used to visualise the spectrum of frequencies over time, which can be presented in colour.
SpectrogramSingle(sound.file = "robin_audio/Erithacusrubecula-song_374144.wav",
Colors = "Colors")
#Single robin call oscillogram and spectrogram
robinc_wav<- readWave("robin_audio/Erithacusrubecula-call_70122.wav")
oscillo(robinc_wav)
oscillo(robinc_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "robin_audio/Erithacusrubecula-call_70122.wav",
Colors = "Colors")
#MFCC of robin song and calls
#Before the PCA was carried out the data was simplified by pushing it through
#Mel-frequency cepstral coefficients (MFCC), which identifies repeated patterns
#and extracts them to form a simplified data set that can be used in the PCA.
#An MFCC can be applied simply by using the MFCCfunction().
source("nes8010.R") #use NES8010.R as a source for stored functions used in the PCA
robin_mfcc <- MFCCFunction(input.dir = "robin_audio",
max.freq=7000)
dim(robin_mfcc) #shows the key components have been extracted simplifying the data to 178 components.
#PCA of Robin songs and calls
#the vegan package is required.
#Using the ordi_pca() function and the ordi_scores() function from the source script to carry
#out the PCA.
robin_pca <- ordi_pca(robin_mfcc[, -1], scale=TRUE)# Use [, -1] to keep all rows but omit first column
summary(robin_pca)
robin_sco <- ordi_scores(robin_pca, display="sites")
robin_sco <- mutate(robin_sco, group_code = robin_mfcc$Class)
#robin_sco can then be plotted using ggplot - allowing for the variation between call types to be visualised.
ggplot(robin_sco, aes(x=PC1, y=PC2, colour=group_code)) +
geom_point() +
scale_colour_discrete(name = "Call Type",
labels = c("Red Breasted Robin call", "Red Breasted Robin song")) +
theme_classic()
#### Part 2
#Part 2 of the analysis, the robin call was then compared with the calls of two other
#common garden birds found in the United Kingdom, the house sparrow (Passer domesticus)
#and the coal tit (Periparus ater).
#This analysis will follow the same process as Part 1.
## Using query_xc () to check for presence of recordings on the xeno-canto website prior to download
#House Sparrow
sparrow_call<-query_xc(qword = 'Passer domesticus cnt:"united kingdom" type:call len:5-25', download = FALSE)
#Coal tit
coaltit_call<-query_xc(qword = 'Periparus ater cnt:"united kingdom" type:call len:5-25', download = FALSE)
#Sub-folders are then created in the RStudio Project for calls.
#Recordings are downloaded into these folders
#House sparrow
dir.create(file.path("sparrow_call"))
query_xc(X = sparrow_call, path="sparrow_call")
#Coal tit
dir.create(file.path("coaltit_call"))
query_xc(X = coaltit_call, path="coaltit_call")
#Renaming files
#Using the _stringr_ package, the structure of the names of the .MP3 files was changed using the code below.
#This allowed for more succinct and manageable file names.
#House sparrow
#seq_along() is used instead of 1:length() so an empty folder skips the loop
#rather than indexing with 1:0.
old_files <- list.files("sparrow_call", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-call_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#Coal tit
old_files <- list.files("coaltit_call", full.names=TRUE)
new_files <- NULL
for(file in seq_along(old_files)){
  curr_file <- str_split(old_files[file], "-")
  new_name <- str_c(c(curr_file[[1]][1:2], "-call_", curr_file[[1]][3]), collapse="")
  new_files <- c(new_files, new_name)
}
file.rename(old_files, new_files)
#House sparrow and Coal tit recordings are then copied to the birds_audio folder
file.copy(from=paste0("sparrow_call/",list.files("sparrow_call")),
to="birds_audio")
file.copy(from=paste0("coaltit_call/",list.files("coaltit_call")),
to="birds_audio")
#Change files from MP3 to WAV files using the mp32wav() function from the warbler package.
#The .mp3 files are then stored as a new object and subsequently removed to save disk space,
#before removing the .mp3 files check that the conversion has happened.
mp32wav(path="birds_audio", dest.path="birds_audio")
unwanted_mp3 <- dir(path="birds_audio", pattern="*.mp3")
file.remove(paste0("birds_audio/", unwanted_mp3))
#Visualisation and analysis of the calls can be carried out using oscillograms and spectrograms
#House sparrow
sparrow_wav<- readWave("birds_audio/Passerdomesticus-call_208481.wav")
oscillo(sparrow_wav)
oscillo(sparrow_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "birds_audio/Passerdomesticus-call_208481.wav",
Colors = "Colors")
#Coal tit
coal_wav<- readWave("birds_audio/Periparusater-call_307342.wav")
oscillo(coal_wav)
oscillo(coal_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "birds_audio/Periparusater-call_307342.wav",
Colors = "Colors")
#MFCC of common garden bird calls
birds_mfcc <- MFCCFunction(input.dir = "birds_audio",
max.freq=7000)
dim(birds_mfcc)#reduced to 178 components
#PCA of common bird calls
birds_pca <- ordi_pca(birds_mfcc[, -1], scale=TRUE)
summary(birds_pca)
birds_sco <- ordi_scores(birds_pca, display="sites")
birds_sco <- mutate(birds_sco, group_code = birds_mfcc$Class)
summary(birds_sco)
#Plot the generated scores using ggplot - adding labels to specify bird type
ggplot(birds_sco, aes(x=PC1, y=PC2, colour=group_code)) +
geom_point() +
scale_colour_discrete(name = "Bird Type",
labels = c("Red Breasted Robin", "House Sparrow", "Coal Tit")) +
theme_classic()
#### Part 3
#This section explores the variation in the songs of Old World Flycatchers,
#focusing on the Subfamily Erithacinae. This analysis included the European Robin,
#the Cape Robin-chat (Cossypha caffra), the Spotted Palm Thrush (Cichladusa guttata)
#and the Forest Robin (Stiphrornis erythrothorax).
#This analysis will follow the same process as Part 1.
# Using query_xc () to check for presence of recordings on the xeno-canto website prior to download
# (download = FALSE returns only the metadata, so the hit count can be
# inspected before committing to a download).
#Cape robin-chat
crobin_song <-query_xc(qword = 'Cossypha caffra cnt:"south africa" type:song len:5-25', download = FALSE) #country specified: South Africa
#Spotted Palm Thrush
palm_song <-query_xc(qword = 'Cichladusa guttata cnt:"kenya" type:song len:5-25', download = FALSE) #country specified: Kenya
#Forest robin
frobin_song <-query_xc(qword = 'Stiphrornis erythrothorax type:song len:5-25', download = FALSE)
#No country specification as the recordings were all within the central African region and some parts
#of Western Africa and there were too few recordings to limit by country.
#Sub-folders are then created in the RStudio Project for these songs.
#Recordings are then downloaded into these folders
#Cape robin-chat
dir.create(file.path("crobin_song"))
query_xc(X = crobin_song, path= "crobin_song")
#Spotted Palm Thrush
dir.create(file.path("palm_song"))
query_xc(X = palm_song, path="palm_song")
#Forest robin
dir.create(file.path("frobin_song"))
query_xc(X = frobin_song, path="frobin_song")
#Renaming files
#Using the _stringr_ package, the structure of the names of the .MP3 files was changed using the code below.
#This allowed for more succinct and manageable file names.
# Helper: rename every xeno-canto download in `dir` from
# "dir/Genus-species-ID.mp3" to "dir/Genusspecies-call_ID.mp3".
# Replaces three copy-pasted loops; also uses seq_along() instead of the
# unsafe 1:length(x), which misbehaves on an empty folder.
rename_xc_files <- function(dir) {
  old_files <- list.files(dir, full.names = TRUE)
  new_files <- character(length(old_files))
  for (i in seq_along(old_files)) {
    # Split "dir/Genus-species-ID.mp3" on "-" and rebuild without hyphens.
    parts <- str_split(old_files[i], "-")[[1]]
    new_files[i] <- str_c(c(parts[1:2], "-call_", parts[3]), collapse = "")
  }
  file.rename(old_files, new_files)
}
#Cape robin-chat
rename_xc_files("crobin_song")
#Spotted Palm Thrush
rename_xc_files("palm_song")
#Forest Robin
rename_xc_files("frobin_song")
#The recordings are then copied to the erithacinae_audio folder
file.copy(from=paste0("crobin_song/",list.files("crobin_song")),
to="erithacinae_audio")
file.copy(from=paste0("palm_song/",list.files("palm_song")),
to="erithacinae_audio")
file.copy(from=paste0("frobin_song/",list.files("frobin_song")),
to="erithacinae_audio")
#Change files from MP3 to WAV files using the mp32wav() function from the warbler package.
#The .mp3 files are then stored as a new object and subsequently removed to save disk space,
#before removing the .mp3 files check that the conversion has happened.
mp32wav(path="erithacinae_audio", dest.path="erithacinae_audio")
unwanted_mp3 <- dir(path="erithacinae_audio", pattern="*.mp3")
file.remove(paste0("erithacinae_audio/", unwanted_mp3))
#Visualisation and analysis of the songs can be carried out using oscillograms and spectrograms
#allowing comparisons between individual songs to be made.
#Cape Robin-chat
crobin_wav<- readWave("erithacinae_audio/Cossyphacaffra-call_324664.wav")
oscillo(crobin_wav)
# Zoom in on a 10 ms window (0.59-0.60 s) of the song.
oscillo(crobin_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "erithacinae_audio/Cossyphacaffra-call_324664.wav",
Colors = "Colors")
#Spotted Palm Thrush
palm_wav<- readWave("erithacinae_audio/Cichladusaguttata-call_371366.wav")
oscillo(palm_wav)
oscillo(palm_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "erithacinae_audio/Cichladusaguttata-call_371366.wav",
Colors = "Colors")
#Forest robin
forest_wav<- readWave("erithacinae_audio/Stiphrorniserythrothorax-call_284893.wav")
oscillo(forest_wav)
oscillo(forest_wav, from = 0.59, to = 0.60)
SpectrogramSingle(sound.file = "erithacinae_audio/Stiphrorniserythrothorax-call_284893.wav",
Colors = "Colors")
#MFCC of the sub-family Erithacinae bird songs to reduce data complexity.
erithacinae_mfcc <- MFCCFunction(input.dir = "erithacinae_audio",
max.freq=7000)
dim(erithacinae_mfcc)#reduced to 178 components
#PCA of sub-family Erithacinae bird songs
# Column 1 holds the class label, so it is excluded from the ordination.
erithacinae_pca <- ordi_pca(erithacinae_mfcc[, -1], scale=TRUE)
summary(erithacinae_pca)
erith_sco <- ordi_scores(erithacinae_pca, display="sites")
erith_sco <- mutate(erith_sco, group_code = erithacinae_mfcc$Class)
#Plot the generated scores using ggplot - adding labels to specify bird type
# NOTE(review): labels are matched to group_code's factor levels in
# alphabetical order -- confirm the four names correspond to the levels.
ggplot(erith_sco, aes(x=PC1, y=PC2, colour=group_code)) +
geom_point() +
scale_colour_discrete(name = "Bird Type",
labels = c("Spotted Palm Thrush", "Cape Robin-chat", "Red Breasted Robin", "Forest Robin")) +
theme_classic()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topoJSON_data.R
\docType{data}
\name{mxmunicipio.topoJSON}
\alias{mxmunicipio.topoJSON}
\title{Map of all Mexican municipios and delegaciones}
\usage{
data(mxmunicipio.topoJSON)
}
\description{
A data.frame which contains a map of all Mexican municipios plus
boroughs of the Federal District in topoJSON format.
}
\references{
Downloaded from the "Cartografia Geoestadistica Urbana y
Rural Amanzanada. Planeacion de la Encuesta Intercensal 2015" shapefiles
(https://gist.github.com/diegovalle/aa3eef87c085d6ea034f)
}
| /man/mxmunicipio.topoJSON.Rd | permissive | DennyMtz/mxmaps | R | false | true | 599 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topoJSON_data.R
\docType{data}
\name{mxmunicipio.topoJSON}
\alias{mxmunicipio.topoJSON}
\title{Map of all Mexican municipios and delegaciones}
\usage{
data(mxmunicipio.topoJSON)
}
\description{
A data.frame which contains a map of all Mexican municipios plus
boroughs of the Federal District in topoJSON format.
}
\references{
Downloaded from the "Cartografia Geoestadistica Urbana y
Rural Amanzanada. Planeacion de la Encuesta Intercensal 2015" shapefiles
(https://gist.github.com/diegovalle/aa3eef87c085d6ea034f)
}
|
# Load required packages (proxy provides the "cosine" distance method)
library(proxy)
# Set the working directory
# NOTE(review): hard-coded absolute path + setwd() ties the script to one
# machine; consider a project-relative path instead.
workDir <- "D:\\Adam_nowy\\TextMining\\TextMining"
setwd(workDir)
# Directory containing the helper scripts
scriptDir <- ".\\scripts"
# Load the frequency-matrix script -- presumably it defines
# dtmTfidfBoundsMatrix and outputDir used below; confirm against
# frequency_matrix.R.
sourceFile <- paste(scriptDir,
                    "frequency_matrix.R",
                    sep = "\\")
source(sourceFile)
# Multidimensional scaling (MDS) on cosine distances between documents,
# reduced to k = 2 synthetic dimensions for plotting.
distCos <- dist(dtmTfidfBoundsMatrix, method = "cosine")
distCosMatrix <- as.matrix(distCos)
mds <- cmdscale(distCos, eig = TRUE, k = 2)
# Draw the plot in the application window
x <- mds$points[, 1]
y <- mds$points[, 2]
plot(
  x,
  y,
  xlab = "Synthetic variable 1",
  ylab = "Synthetic variable 2",
  main = "Multidimensional scaling"  # typo "scalling" fixed
)
# Label each point with its document name.
text(
  x,
  y,
  labels = row.names(distCosMatrix),
  cex = .7
)
# Export the same plot to a .png file
plotFile <- paste(outputDir,
                  "mds.png",
                  sep = "\\")
png(file = plotFile)
plot(
  x,
  y,
  xlab = "Synthetic variable 1",
  ylab = "Synthetic variable 2",
  main = "Multidimensional scaling",  # typo "scalling" fixed
  col = "orange",
  xlim = c(-0.5, 0.5)
)
text(
  x,
  y,
  labels = row.names(distCosMatrix),
  cex = .7,
  col = "orange"
)
dev.off()
| /scripts/mds.R | no_license | adam96op/TextMining | R | false | false | 1,196 | r | # załadowanie bibliotek
library(proxy)
#zmiana katologu roboczego
workDir <- "D:\\Adam_nowy\\TextMining\\TextMining"
setwd(workDir)
#definicja katalogu ze skryptami
scriptDir <- ".\\scripts"
#załadowanie skryptu
sourceFile <- paste(scriptDir,
"frequency_matrix.R",
sep="\\"
)
source(sourceFile)
#skalowanie wielowymiarowe (MDS)
distCos <- dist(dtmTfidfBoundsMatrix, method = "cosine")
distCosMatrix <- as.matrix(distCos)
mds <-cmdscale(distCos, eig = TRUE, k=2)
#rysowanie wykresu w oknie aplikacji
x <- mds$points[,1]
y <- mds$points[,2]
plot(
x,
y,
xlab = "Synthetic variable 1",
ylab = "Synthetic variable 2",
main = "Multidimensional scalling"
)
text(
x,
y,
labels = row.names(distCosMatrix),
cex = .7
)
#eksport wykresu do pliku .png
plotFile <- paste(outputDir,
"mds.png",
sep="\\"
)
png(file = plotFile)
plot(
x,
y,
xlab = "Synthetic variable 1",
ylab = "Synthetic variable 2",
main = "Multidimensional scalling",
col = "orange",
xlim = c(-0.5,0.5)
)
text(
x,
y,
labels = row.names(distCosMatrix),
cex = .7,
col = "orange"
)
dev.off()
|
# Entry point for the Shiny application: load the UI and server definitions
# from sibling files, then launch the app.
library(shiny)
source("ui.R")
source("server.R")
shinyApp(ui, server)
#runApp("shiny_v1")
| /scripts/R_run/davidyu_stock/v1/app.R | permissive | davidyuqiwei/davidyu_stock | R | false | false | 91 | r | library(shiny)
source("ui.R")
source("server.R")
shinyApp(ui, server)
#runApp("shiny_v1")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parafac_plot_functions.R
\name{splithalf_plot}
\alias{splithalf_plot}
\title{Plot results from a splithalf analysis}
\usage{
splithalf_plot(fits)
}
\arguments{
\item{fits}{list of components data}
}
\value{
ggplot
}
\description{
Graphs of all components of all models are plotted to be compared.
}
\examples{
data(sh)
splithalf_plot(sh)
str(sh)
}
\seealso{
\code{\link[staRdom]{splithalf}}
}
| /man/splithalf_plot.Rd | no_license | MatthiasPucher/staRdom | R | false | true | 472 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parafac_plot_functions.R
\name{splithalf_plot}
\alias{splithalf_plot}
\title{Plot results from a splithalf analysis}
\usage{
splithalf_plot(fits)
}
\arguments{
\item{fits}{list of components data}
}
\value{
ggplot
}
\description{
Graphs of all components of all models are plotted to be compared.
}
\examples{
data(sh)
splithalf_plot(sh)
str(sh)
}
\seealso{
\code{\link[staRdom]{splithalf}}
}
|
# Extract the durations of "high" events on one bit of a logic capture.
#
# fileName  : path to a tab-separated file with a header row; column 1 is
#             the time stamp, column 2 the sampled value (a byte).
# bitNumber : which bit of the value column to monitor (0 = least
#             significant).
#
# The capture is expected to begin with a synchronisation pattern: a sample
# of 255 immediately followed by a sample of 0; scanning starts just after
# that pattern.  Returns a numeric vector containing, for every sample
# where the monitored bit is high, the time until the next sample.
analysis = function(fileName, bitNumber) {
  # Constants for detecting the initial synchronisation pattern
  STEP1 <- 255
  STEP2 <- 0
  EVENT <- 1
  # Read the capture data
  file_data <- read.table(fileName, sep = "\t", header = TRUE)
  time_stamp <- file_data[, 1]
  value <- file_data[, 2]
  # Select the requested bit from each sampled value
  monitor <- (value %/% (2^bitNumber)) %% 2
  # Locate the sample just after the initial 255 -> 0 pattern
  start_point <- 0
  for (i in 2:length(value)) {
    if (value[i - 1] == STEP1 && value[i] == STEP2) {
      start_point <- i + 1
      break
    }
  }
  # BUG FIX: the original silently scanned from index 0 when the pattern
  # was absent; fail loudly instead.
  if (start_point == 0) {
    stop("Initial 255/0 synchronisation pattern not found in ", fileName,
         call. = FALSE)
  }
  # Vectorised replacement for the original grow-by-c() loop: indices of
  # samples at or after the start point (excluding the last sample) where
  # the monitored bit is high.
  idx <- seq_len(length(monitor) - 1)
  idx <- idx[idx >= start_point & monitor[idx] == EVENT]
  # One duration per high sample.  Returns numeric(0) when there are no
  # events (the original returned a misleading scalar 0 in that case).
  time_stamp[idx + 1] - time_stamp[idx]
}
| /data_col/analysis.R | no_license | nesl/umpu | R | false | false | 1,038 | r | analysis = function(fileName,bitNumber){
# Constants for detecting the initial pattern
STEP1 = 255
STEP2 = 0
EVENT = 1
# Read the data from the file
file_data = read.table(fileName,sep = "\t",header = TRUE)
# List of variables used in this function
time_stamp = 0
value = 0
monitor = 0
start_point = 0
nevents = 0
events_list = 0
avg = 0
# Assigning values to the variables
time_stamp = file_data[,1]
value = file_data[,2]
# Selecting the bit from the value
monitor = (value %/% (2 ^ bitNumber)) %% 2
# Detecting the initial pattern
for(i in 2:length(value)) {
if(value[i - 1] == STEP1 & value[i] == STEP2) {
start_point = i + 1
break;
}
}
# Constructing events_list
# events_list contains the time period that an event stays high
for(i in start_point:(length(monitor) - 1)) {
if(monitor[i] == EVENT) {
events_list[nevents + 1] = time_stamp[i+1] - time_stamp[i]
nevents = nevents + 1
}
}
# returning the events_list
events_list
}
|
# testthat suite for rawresidual(): raw residual diagnostics of fitted
# point processes (hpp / hp / mmhp objects from the ppdiag package).
test_that("test rawresidual messages", {
## pp obj not correct
# An object whose class is not a recognised point-process type must
# produce the informative model-selection message rather than a result.
obj <- pp_hpp(lambda = 1)
class(obj) <- "non-pp"
expect_output(rawresidual(object = obj,events = c(1,2)),
"Please input the right model. Select from hp, hpp and mmhp.")
})
test_that("test simple cases", {
## special cases of point process, Poisson
# lambda = 0: the test pins the raw residual at 2 for two events.
obj <- pp_hpp(lambda = 0)
expect_identical(rawresidual(object = obj, events = c(1,2)), 2)
# lambda = 1: residual expected to be 0 for events at 1 and 2.
obj <- pp_hpp(lambda = 1)
expect_identical(rawresidual(object = obj, events = c(1,2)), 0)
expect_identical(rawresidual(object = obj, events = c(1,2,2.5)), 0.5)
# Supplying an explicit end time should be acknowledged with a message.
expect_message(rawresidual(object = obj, events = c(1,2),
end = 3),
"RR calculated to specified end time.")
## special cases for Hawkes
# alpha = 0 degenerates the Hawkes process to a homogeneous Poisson,
# so the expected values mirror the hpp cases above.
obj <- pp_hp(lambda = 1, alpha = 0, beta = 1)
expect_identical(rawresidual(object = obj, events = c(1,2)), 0)
obj <- pp_hp(lambda = 0, alpha = 0, beta = 1)
expect_identical(rawresidual(object = obj, events = c(1,2)), 2)
## special cases for mmpp
## special cases for mmhp
# NOTE(review): the mmhp case below is left disabled in the original;
# kept verbatim for future re-enabling.
# Q <- matrix(c(-0.4, 0.4, 0.2, -0.2), ncol = 2, byrow = TRUE)
# obj <- pp_mmhp(Q, delta = c(1 / 3, 2 / 3), lambda0 = 1, lambda1 = 1,
# alpha = 0, beta = 1)
# expect_identical(rawresidual(object = obj, events = c(0,1)), 1)
})
}) | /tests/testthat/test-rawresidual.R | permissive | wjakethompson/ppdiag | R | false | false | 1,329 | r | test_that("test rawresidual messages", {
## pp obj not correct
obj <- pp_hpp(lambda = 1)
class(obj) <- "non-pp"
expect_output(rawresidual(object = obj,events = c(1,2)),
"Please input the right model. Select from hp, hpp and mmhp.")
})
test_that("test simple cases", {
## special cases of point process, Poisson
obj <- pp_hpp(lambda = 0)
expect_identical(rawresidual(object = obj, events = c(1,2)), 2)
obj <- pp_hpp(lambda = 1)
expect_identical(rawresidual(object = obj, events = c(1,2)), 0)
expect_identical(rawresidual(object = obj, events = c(1,2,2.5)), 0.5)
expect_message(rawresidual(object = obj, events = c(1,2),
end = 3),
"RR calculated to specified end time.")
## special cases for Hawkes
obj <- pp_hp(lambda = 1, alpha = 0, beta = 1)
expect_identical(rawresidual(object = obj, events = c(1,2)), 0)
obj <- pp_hp(lambda = 0, alpha = 0, beta = 1)
expect_identical(rawresidual(object = obj, events = c(1,2)), 2)
## special cases for mmpp
## special cases for mmhp
# Q <- matrix(c(-0.4, 0.4, 0.2, -0.2), ncol = 2, byrow = TRUE)
# obj <- pp_mmhp(Q, delta = c(1 / 3, 2 / 3), lambda0 = 1, lambda1 = 1,
# alpha = 0, beta = 1)
# expect_identical(rawresidual(object = obj, events = c(0,1)), 1)
}) |
## Plotting results -- intercepts
# Heller & Kauffman (2014): keep the baseline (B0) rows as the intercept series.
heller <- './Data/Coats May 2018/Heller 2014 regression table.csv' %>% read.csv(stringsAsFactors = F)
heller.int <- heller %>% subset(Var == 'B0')
heller.int <- data.frame(year = heller.int$year, inter = heller.int$Overall,
                         source = 'H&K 2014')
# Seltzer (2010): coefficient columns arrive as strings; the first five
# characters are kept before conversion -- presumably to strip trailing
# markers (confirm against the CSV).
seltzer <- './Data/Coats May 2018/Seltzer 2010 tables.csv' %>% read.csv(stringsAsFactors = F)
seltzer$WDB.North <- seltzer$WDB.North %>% substr(1, 5) %>% as.numeric
seltzer$WDB.London <- seltzer$WDB.London %>% substr(1, 5) %>% as.numeric
# The constant plus the 46 year-dummy rows; element 47 is used below as the
# base term.
seltzer.int_temp <- seltzer %>% subset(Var %in% c('Constant', paste('year', 1890:1935, sep ='')))
# BUG FIX: this inspection line referenced `seltzer.int`, which is never
# defined anywhere in the script; the object being checked is
# `seltzer.int_temp`.
seltzer.int_temp[47, ]
# Yearly intercepts: exp(base + year dummy) converts log-salary to salary.
seltzer.int1 <- data.frame(year = 1890:1935,
                           inter = (seltzer.int_temp$WDB.North[47] + c(seltzer.int_temp$WDB.North[- 47])) %>% exp,
                           source = 'Seltzer 2010 WDB North')
seltzer.int2 <- data.frame(year = 1890:1935,
                           inter = (seltzer.int_temp$WDB.London[47] + c(seltzer.int_temp$WDB.London[- 47])) %>% exp,
                           source = 'Seltzer 2010 WDB London')
# Coats intercepts produced by this project's own regressions.
tab.male <- './Results/Whole group regression male.csv' %>% read.csv
tab.male.int <- data.frame(year = 1889:1930, inter = tab.male$int,
                           source = 'Coats male')
tab.female <- './Results/Whole group regression female.csv' %>% read.csv
tab.female <- tab.female %>% subset(n >= 40) # only takes out the first row
tab.female.int <- data.frame(year = tab.female$year, inter = tab.female$int,
                             source = 'Coats female')
# Stack all intercept series and plot them on a common set of axes.
tabs <- do.call(rbind, list(heller.int, seltzer.int1, seltzer.int2,
                            tab.male.int, tab.female.int))
ggplot(data = tabs, aes(x = year, y = inter, colour = source)) + geom_line(size = 2) +
  ylab('Annual salary £') + ggtitle('Tenure adjusted baseline salary')  # typo "basline" fixed
## plot of tenure effects at 5, 15 and so forth
# Coats male slopes.  The leading rows are dropped before use --
# presumably low-sample early years; confirm against the CSVs.
tab.male.slope5 <- './Results/0 - 9 regression male.csv' %>% read.csv
tab.male.slope5 <- tab.male.slope5[- c(1:2), ]
tab.male.slope5 <- data.frame(year = tab.male.slope5$year,
inter = tab.male.slope5$slope,
source = 'Coats male 0 - 9 years')
tab.male.slope15 <- './Results/10 - 19 regression male.csv' %>% read.csv
tab.male.slope15 <- tab.male.slope15[-c(1:11), ]
tab.male.slope15 <- data.frame(year = tab.male.slope15$year,
inter = tab.male.slope15$slope,
source = 'Coats male 10 - 19 years')
## female
tab.female.slope5 <- './Results/0 - 9 regression female.csv' %>% read.csv
tab.female.slope5 <- tab.female.slope5[- 1, ]
tab.female.slope5 <- data.frame(year = tab.female.slope5$year,
inter = tab.female.slope5$slope,
source = 'Coats female 0 - 9 years')
tab.female.slope15 <- './Results/10 - 19 regression female.csv' %>% read.csv
tab.female.slope15 <- tab.female.slope15[-c(1:15), ]
tab.female.slope15 <- data.frame(year = tab.female.slope15$year,
inter = tab.female.slope15$slope,
source = 'Coats female 10 - 19 years')
## Seltzer
# Tenure coefficient plus its year interactions; element 1 of the subset
# is the base tenure term added to each interaction below.
seltzer.slope_temp <- seltzer %>% subset(Var %in% c('tenure', paste('yearten', 1890:1935, sep ='')))
seltzer.slope5.temp1 <- seltzer.slope_temp$WDB.North[1] + seltzer.slope_temp$WDB.North[-1]
seltzer.slope5.1 <- data.frame(year = 1890:1935,
inter = (seltzer.int_temp$WDB.North[47] + c(seltzer.int_temp$WDB.North[- 47])),
source = 'Seltzer 2010 WDB North 5 years')
# Marginal return at 5 years of tenure: predicted salary at tenure 5 minus
# predicted salary at tenure 4 (log-linear model, hence the exp()).
seltzer.slope5.1$inter <- exp(seltzer.slope5.1$inter + seltzer.slope5.temp1 * 5) - exp(seltzer.slope5.1$inter + seltzer.slope5.temp1 * 4)
seltzer.slope15.1 <- data.frame(year = 1890:1935,
inter = (seltzer.int_temp$WDB.North[47] + c(seltzer.int_temp$WDB.North[- 47])),
source = 'Seltzer 2010 WDB North 15 years')
# Same marginal return, evaluated at 15 vs 14 years of tenure.
seltzer.slope15.1$inter <- exp(seltzer.slope15.1$inter + seltzer.slope5.temp1 * 15) - exp(seltzer.slope15.1$inter + seltzer.slope5.temp1 * 14)
## lnd
# London branch: identical construction using the WDB.London coefficients.
seltzer.slope5.temp2 <- seltzer.slope_temp$WDB.London[1] + seltzer.slope_temp$WDB.London[-1]
seltzer.slope5.2 <- data.frame(year = 1890:1935,
inter = (seltzer.int_temp$WDB.London[47] + c(seltzer.int_temp$WDB.London[- 47])),
source = 'Seltzer 2010 WDB London 5 years')
seltzer.slope5.2$inter <- exp(seltzer.slope5.2$inter + seltzer.slope5.temp2 * 5) - exp(seltzer.slope5.2$inter + seltzer.slope5.temp2 * 4)
seltzer.slope15.2 <- data.frame(year = 1890:1935,
inter = (seltzer.int_temp$WDB.London[47] + c(seltzer.int_temp$WDB.London[- 47])),
source = 'Seltzer 2010 WDB London 15 years')
seltzer.slope15.2$inter <- exp(seltzer.slope15.2$inter + seltzer.slope5.temp2 * 15) - exp(seltzer.slope15.2$inter + seltzer.slope5.temp2 * 14)
## Heller
# (Re-reads the CSV already loaded above; harmless but redundant.)
heller <- './Data/Coats May 2018/Heller 2014 regression table.csv' %>% read.csv(stringsAsFactors = F)
heller.slope_temp <- heller[grep('Btenure', heller$Var), ]
heller.slope5 <- data.frame(year = heller.slope_temp$year,
inter = heller.slope_temp$X0.9.years,
source = 'H&K 2014 5 years')
heller.slope15 <- data.frame(year = heller.slope_temp$year,
inter = heller.slope_temp$X10.19.years,
source = 'H&K 2014 15 years')
## All
# Combine the tenure-return series from every study into one long table.
slope.tabs <- do.call(rbind, list(heller.slope5, heller.slope15,
                                  seltzer.slope5.1, seltzer.slope5.2,
                                  seltzer.slope15.1, seltzer.slope15.2,
                                  tab.male.slope15, tab.male.slope5,
                                  tab.female.slope15, tab.female.slope5))
slope.tabs
# Early-tenure comparison (series labelled "0 - 9" or "... 5 years").
ggplot(data = slope.tabs[c(grep(' 9 ', slope.tabs$source), grep(' 5 ', slope.tabs$source)), ], aes(x = year, y = inter, colour = source)) + geom_line(size = 2) +
  ylab('Expected annual salary increase £') + ggtitle('Return on additional year of tenure at 0 - 9 years service')
# Mid-tenure comparison (series labelled "10 - 19" or "... 15 years").
ggplot(data = slope.tabs[c(grep(' 19 ', slope.tabs$source), grep(' 15 ', slope.tabs$source)), ], aes(x = year, y = inter, colour = source)) + geom_line(size = 2) +
  ylab('Expected annual salary increase £') + ggtitle('Return on additional year of tenure at 10 - 19 years service')
# Coats-only series, with distinct line types and point shapes per group.
ggplot(data = slope.tabs[grep('Coats', slope.tabs$source), ], aes(x = year, y = inter, colour = source)) +
  geom_line(size = 1, alpha = 1, aes(linetype = source)) +
  geom_point(aes(shape = source), size = 3) +
  ylab('Expected annual salary increase £') + ggtitle('Return on additional year of tenure at Coats')
# Mean slope per source/year for the early- and mid-tenure groups.
diff1 <- aggregate(inter ~ source + year, slope.tabs[grep(' 5 ', slope.tabs$source), ], mean)
diff2 <- aggregate(inter ~ source + year, slope.tabs[grep(' 15 ', slope.tabs$source), ], mean)
# BUG FIX: diff3 was plotted below but never defined anywhere in the
# script; reconstructed here as the stacked early/mid-tenure means.
# TODO(review): confirm this matches the intended figure.
diff3 <- rbind(diff1, diff2)
ggplot(data = diff3, aes(x = year, y = inter, colour = source)) + geom_line(size = 2)
#tab.female.int5 <- './Results/0 - 9 regression female.csv' %>% read.csv
## Need to sort out stuff here figure 5
# Coats-only panel figure: split the source label into sex and tenure band.
coat.sub <- slope.tabs %>% subset(grepl('Coats', source))
coat.sub$f <- ifelse(grepl('female', coat.sub$source), 'Female', 'Male')
coat.sub$Tenure <- ifelse(grepl('19', coat.sub$source), '10 - 19 years', '0 - 9 years')
ggplot(data = coat.sub, aes(x = year, y = inter, colour = Tenure)) +
  geom_line(aes(linetype = Tenure), size = 2) +
  ylab('Expected annual salary increase £') + ggtitle('Return on additional year of tenure at Coats') + xlab('Year') +
  facet_grid(f ~ .)
| /Graphing and comparisons.R | no_license | MengLeZhang/Coats-paper | R | false | false | 7,693 | r | ## Plotting results -- intercepts
heller <- './Data/Coats May 2018/Heller 2014 regression table.csv' %>% read.csv(stringsAsFactors = F)
heller.int <- heller %>% subset(Var == 'B0')
heller.int <- data.frame(year = heller.int$year, inter = heller.int$Overall,
source = 'H&K 2014')
seltzer <- './Data/Coats May 2018/Seltzer 2010 tables.csv' %>% read.csv(stringsAsFactors = F)
seltzer$WDB.North <- seltzer$WDB.North %>% substr(1, 5) %>% as.numeric
seltzer$WDB.London <- seltzer$WDB.London %>% substr(1, 5) %>% as.numeric
seltzer.int_temp <- seltzer %>% subset(Var %in% c('Constant', paste('year', 1890:1935, sep ='')))
seltzer.int[47, ]
seltzer.int1 <- data.frame(year = 1890:1935,
inter = (seltzer.int_temp$WDB.North[47] + c(seltzer.int_temp$WDB.North[- 47])) %>% exp,
source = 'Seltzer 2010 WDB North')
seltzer.int2 <- data.frame(year = 1890:1935,
inter = (seltzer.int_temp$WDB.London[47] + c(seltzer.int_temp$WDB.London[- 47])) %>% exp,
source = 'Seltzer 2010 WDB London')
tab.male <- './Results/Whole group regression male.csv' %>% read.csv
tab.male.int <- data.frame(year = 1889:1930, inter = tab.male$int,
source = 'Coats male')
tab.female <- './Results/Whole group regression female.csv' %>% read.csv
tab.female <- tab.female %>% subset(n >= 40) # only takes out the first row
tab.female.int <- data.frame(year = tab.female$year, inter = tab.female$int,
source = 'Coats female')
tabs <- do.call(rbind, list(heller.int, seltzer.int1, seltzer.int2,
tab.male.int, tab.female.int))
ggplot(data = tabs, aes(x = year, y = inter, colour = source)) + geom_line(size = 2) +
ylab('Annual salary £') + ggtitle('Tenure adjusted basline salary')
## plot of tenure effects at 5, 15 and so forth
tab.male.slope5 <- './Results/0 - 9 regression male.csv' %>% read.csv
tab.male.slope5 <- tab.male.slope5[- c(1:2), ]
tab.male.slope5 <- data.frame(year = tab.male.slope5$year,
inter = tab.male.slope5$slope,
source = 'Coats male 0 - 9 years')
tab.male.slope15 <- './Results/10 - 19 regression male.csv' %>% read.csv
tab.male.slope15 <- tab.male.slope15[-c(1:11), ]
tab.male.slope15 <- data.frame(year = tab.male.slope15$year,
inter = tab.male.slope15$slope,
source = 'Coats male 10 - 19 years')
## female
tab.female.slope5 <- './Results/0 - 9 regression female.csv' %>% read.csv
tab.female.slope5 <- tab.female.slope5[- 1, ]
tab.female.slope5 <- data.frame(year = tab.female.slope5$year,
inter = tab.female.slope5$slope,
source = 'Coats female 0 - 9 years')
tab.female.slope15 <- './Results/10 - 19 regression female.csv' %>% read.csv
tab.female.slope15 <- tab.female.slope15[-c(1:15), ]
tab.female.slope15 <- data.frame(year = tab.female.slope15$year,
inter = tab.female.slope15$slope,
source = 'Coats female 10 - 19 years')
## Seltzer
seltzer.slope_temp <- seltzer %>% subset(Var %in% c('tenure', paste('yearten', 1890:1935, sep ='')))
seltzer.slope5.temp1 <- seltzer.slope_temp$WDB.North[1] + seltzer.slope_temp$WDB.North[-1]
seltzer.slope5.1 <- data.frame(year = 1890:1935,
inter = (seltzer.int_temp$WDB.North[47] + c(seltzer.int_temp$WDB.North[- 47])),
source = 'Seltzer 2010 WDB North 5 years')
seltzer.slope5.1$inter <- exp(seltzer.slope5.1$inter + seltzer.slope5.temp1 * 5) - exp(seltzer.slope5.1$inter + seltzer.slope5.temp1 * 4)
seltzer.slope15.1 <- data.frame(year = 1890:1935,
inter = (seltzer.int_temp$WDB.North[47] + c(seltzer.int_temp$WDB.North[- 47])),
source = 'Seltzer 2010 WDB North 15 years')
seltzer.slope15.1$inter <- exp(seltzer.slope15.1$inter + seltzer.slope5.temp1 * 15) - exp(seltzer.slope15.1$inter + seltzer.slope5.temp1 * 14)
## lnd
seltzer.slope5.temp2 <- seltzer.slope_temp$WDB.London[1] + seltzer.slope_temp$WDB.London[-1]
seltzer.slope5.2 <- data.frame(year = 1890:1935,
inter = (seltzer.int_temp$WDB.London[47] + c(seltzer.int_temp$WDB.London[- 47])),
source = 'Seltzer 2010 WDB London 5 years')
seltzer.slope5.2$inter <- exp(seltzer.slope5.2$inter + seltzer.slope5.temp2 * 5) - exp(seltzer.slope5.2$inter + seltzer.slope5.temp2 * 4)
seltzer.slope15.2 <- data.frame(year = 1890:1935,
inter = (seltzer.int_temp$WDB.London[47] + c(seltzer.int_temp$WDB.London[- 47])),
source = 'Seltzer 2010 WDB London 15 years')
seltzer.slope15.2$inter <- exp(seltzer.slope15.2$inter + seltzer.slope5.temp2 * 15) - exp(seltzer.slope15.2$inter + seltzer.slope5.temp2 * 14)
## Heller
heller <- './Data/Coats May 2018/Heller 2014 regression table.csv' %>% read.csv(stringsAsFactors = F)
heller.slope_temp <- heller[grep('Btenure', heller$Var), ]
heller.slope5 <- data.frame(year = heller.slope_temp$year,
inter = heller.slope_temp$X0.9.years,
source = 'H&K 2014 5 years')
heller.slope15 <- data.frame(year = heller.slope_temp$year,
inter = heller.slope_temp$X10.19.years,
source = 'H&K 2014 15 years')
## All
slope.tabs <- do.call(rbind, list(heller.slope5, heller.slope15,
seltzer.slope5.1, seltzer.slope5.2,
seltzer.slope15.1, seltzer.slope15.2,
tab.male.slope15, tab.male.slope5,
tab.female.slope15, tab.female.slope5))
slope.tabs
ggplot(data = slope.tabs[c(grep(' 9 ', slope.tabs$source), grep(' 5 ', slope.tabs$source)), ], aes(x = year, y = inter, colour = source)) + geom_line(size = 2) +
ylab('Expected annual salary increase £') + ggtitle('Return on additional year of tenure at 0 - 9 years service')
ggplot(data = slope.tabs[c(grep(' 19 ', slope.tabs$source), grep(' 15 ', slope.tabs$source)), ], aes(x = year, y = inter, colour = source)) + geom_line(size = 2) +
ylab('Expected annual salary increase £') + ggtitle('Return on additional year of tenure at 10 - 19 years service')
ggplot(data = slope.tabs[grep('Coats', slope.tabs$source), ], aes(x = year, y = inter, colour = source)) +
geom_line(size = 1, alpha = 1, aes(linetype = source)) +
geom_point(aes(shape = source), size = 3) +
ylab('Expected annual salary increase £') + ggtitle('Return on additional year of tenure at Coats')
diff1 <- aggregate(inter ~ source + year, slope.tabs[grep(' 5 ', slope.tabs$source), ], mean)
diff2 <- aggregate(inter ~ source + year, slope.tabs[grep(' 15 ', slope.tabs$source), ], mean)
ggplot(data = diff3, aes(x = year, y = inter, colour = source)) + geom_line(size = 2)
#tab.female.int5 <- './Results/0 - 9 regression female.csv' %>% read.csv
## Need to sort out stuff here figure 5
coat.sub <- slope.tabs %>% subset(grepl('Coats', source))
coat.sub$f <- ifelse(grepl('female', coat.sub$source), 'Female', 'Male')
coat.sub$Tenure <- ifelse(grepl('19', coat.sub$source), '10 - 19 years', '0 - 9 years')
ggplot(data = coat.sub, aes(x = year, y = inter, colour = Tenure)) +
geom_line(aes(linetype = Tenure), size = 2) +
ylab('Expected annual salary increase £') + ggtitle('Return on additional year of tenure at Coats') + xlab('Year') +
facet_grid(f ~ .)
|
# Estimate starting values c(location, scale) for maximum-likelihood fitting
# of a location-scale distribution to (possibly censored) life data.
#
# data         : a life-data object understood by Response(), case.weights()
#                and cdfest() (SMRD package internals).
# distribution : distribution name accepted by numdist()/quant()/
#                generic.distribution().
#
# Returns a length-2 numeric vector.  For the exponential case the result
# is c(log(weighted mean response), 1).
theta.start.est <-
function (data, distribution)
{
    ndist <- numdist(distribution)
    y <- Response(data)
    # Exponential special case: closed-form start from the weighted mean.
    if (generic.distribution(distribution) == "exponential") {
        the.case.weights <- case.weights(data)
        theta.start <- c(logb(sum(y * the.case.weights)/sum(the.case.weights)),
            1)
        return(theta.start)
    }
    cdfest.out <- cdfest(data)
    # With 10 or fewer estimated quantiles, fall back to simple moment
    # estimates (mean and sd of the first response column).
    # NOTE(review): even distribution codes appear to denote log-location-
    # scale families (responses are logged); confirm against numdist().
    if (length(cdfest.out$q) <= 10) {
        if (is.even(ndist))
            return(c(mean(logb(as.matrix(y)[, 1])), sqrt(var(logb(as.matrix(y)[,
                1])))))
        else return(c(mean(as.matrix(y)[, 1]), sqrt(var(as.matrix(y)[,
            1]))))
    }
    # Otherwise regress probability-plot points on the distribution's
    # standard quantiles; the fitted intercept/slope are the start values.
    cdpoints.out <- cdpoints(cdfest(data))
    if (is.even(ndist))
        trans.resp <- logb(cdpoints.out$yplot)
    else trans.resp <- cdpoints.out$yplot
    the.quantiles <- quant(cdpoints.out$pplot, distribution)
    theta.start.est <- coefficients(lm(trans.resp ~ the.quantiles))
    return(theta.start.est)
}
| /R/theta.start.est.R | no_license | anhnguyendepocen/SMRD | R | false | false | 964 | r | theta.start.est <-
function (data, distribution)
{
ndist <- numdist(distribution)
y <- Response(data)
if (generic.distribution(distribution) == "exponential") {
the.case.weights <- case.weights(data)
theta.start <- c(logb(sum(y * the.case.weights)/sum(the.case.weights)),
1)
return(theta.start)
}
cdfest.out <- cdfest(data)
if (length(cdfest.out$q) <= 10) {
if (is.even(ndist))
return(c(mean(logb(as.matrix(y)[, 1])), sqrt(var(logb(as.matrix(y)[,
1])))))
else return(c(mean(as.matrix(y)[, 1]), sqrt(var(as.matrix(y)[,
1]))))
}
cdpoints.out <- cdpoints(cdfest(data))
if (is.even(ndist))
trans.resp <- logb(cdpoints.out$yplot)
else trans.resp <- cdpoints.out$yplot
the.quantiles <- quant(cdpoints.out$pplot, distribution)
theta.start.est <- coefficients(lm(trans.resp ~ the.quantiles))
return(theta.start.est)
}
|
# NL-Logestic with bound V 0.4.5
# Payam Mokhtarian
##---------------------- Loading packages
library(shiny)
##---------------------- User interface
# Shiny UI: a sidebar of model controls (fitting pattern, polynomial degree,
# regularisation lambda, optimisation method) and a main panel showing the
# plot rendered by the server as output$da.plot.
shinyUI(fluidPage(
# Title
titlePanel("Boundery decison in non-linear logistic regression"),
# Sidebar controls
sidebarLayout(
sidebarPanel(
# Shape of the decision boundary to fit.
selectInput("pattern", "Fitting Pattern:",
c("Convex" = "Convex",
"Close" = "Close")),
# Degree of the polynomial feature expansion (1-20).
sliderInput("degree",
"Degree Polynomial:",
min = 1,
max = 20,
value = 1),
# Regularisation strength.
sliderInput("lambda",
"Lambda:",
min = 1,
max = 10,
value = 1),
# Optimiser passed through to the fitting routine (see server.R).
selectInput("opt", "Optimization Method:",
c("BFGS Quasi-Newton" = "BFGS",
"Nelder-Mead" = "Nelder-Mead",
"Conjugate Gradient" = "CG"))
),
mainPanel(h4("Effective Genotype and Soil Acidity modelling"),
plotOutput("da.plot")
)
)
))
| /ui.R | no_license | payamgit/NL-Logestic | R | false | false | 1,056 | r | # NL-Logestic with bound V 0.4.5
# Payam Mokhtarian
##---------------------- Loading packages
library(shiny)
##---------------------- User interface
shinyUI(fluidPage(
# Title
titlePanel("Boundery decison in non-linear logistic regression"),
# Sidebar controls
sidebarLayout(
sidebarPanel(
selectInput("pattern", "Fitting Pattern:",
c("Convex" = "Convex",
"Close" = "Close")),
sliderInput("degree",
"Degree Polynomial:",
min = 1,
max = 20,
value = 1),
sliderInput("lambda",
"Lambda:",
min = 1,
max = 10,
value = 1),
selectInput("opt", "Optimization Method:",
c("BFGS Quasi-Newton" = "BFGS",
"Nelder-Mead" = "Nelder-Mead",
"Conjugate Gradient" = "CG"))
),
mainPanel(h4("Effective Genotype and Soil Acidity modelling"),
plotOutput("da.plot")
)
)
))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.