content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# run_analysis.R -- builds the tidy data sets for the UCI HAR assignment.
# Rewritten to use base R only (the original called plyr::mapvalues,
# dplyr::data_frame and dplyr::group_by without loading either package).

# Read the raw test/train files into independent data frames.
data_subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
data_X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
data_y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
data_subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
data_X_train <- read.table("UCI HAR Dataset/train/X_train.txt")
data_y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
data_features <- read.table("UCI HAR Dataset/features.txt")
data_activity <- read.table("UCI HAR Dataset/activity_labels.txt")

# Merge the test and train partitions; subject, X and y stay row-aligned
# because all three are stacked in the same order.
subject <- rbind(data_subject_test, data_subject_train)
X <- rbind(data_X_test, data_X_train)
y <- rbind(data_y_test, data_y_train)

# Label the columns.
names(X) <- data_features[, 2]
names(y) <- "activity"
names(subject) <- "subject"

# Subset X for mean and std features.  NOTE: this pattern also keeps the
# meanFreq() columns, matching the original script's behaviour.
X_new <- X[, grep("mean|std", colnames(X))]

# Map each activity code to its descriptor using the dataset's own
# activity_labels.txt (column 1 = code, column 2 = label).  This replaces a
# hard-coded plyr::mapvalues() call, so the labels are guaranteed to agree
# with the labels file shipped with the data.
activity_descriptor <- as.character(data_activity[match(y$activity, data_activity[, 1]), 2])

# First tidy data set: subject, activity descriptor and the mean/std features.
data_set_1 <- cbind(subject, data.frame(activity_descriptor, stringsAsFactors = FALSE), X_new)

# Drop the intermediate objects; only the tidy data set is kept.
rm(list = setdiff(ls(), "data_set_1"))

# Second tidy data set: the average of each variable for each activity and
# each subject.  (The original also called dplyr::group_by here, but
# aggregate() ignores dplyr grouping, so the call was a no-op.)
data_set_2 <- aggregate(. ~ subject + activity_descriptor, data = data_set_1, mean)

# Write the table as requested in the assignment.  'row.names' is spelled out
# in full; the original relied on partial matching of 'row.name'.
write.table(data_set_2, "result.txt", row.names = FALSE, sep = " ")
|
/run_analysis.R
|
no_license
|
ryanpetm/cleandata_assignment
|
R
| false
| false
| 1,667
|
r
|
# read in txt files to independent data frames.
# (Duplicate copy of run_analysis.R -- see review notes inline.)
data_subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
data_X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
data_y_test <- read.table( "UCI HAR Dataset/test/y_test.txt")
data_subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
data_X_train <- read.table( "UCI HAR Dataset/train/X_train.txt")
data_y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
data_features <- read.table( "UCI HAR Dataset/features.txt")
# activity_labels.txt is loaded but never used below -- see note at mapvalues.
data_activity <- read.table( "UCI HAR Dataset/activity_labels.txt")
# merge dataframes; subject, X and y stay row-aligned because all three are
# stacked test-then-train.
subject <- rbind(data_subject_test, data_subject_train)
X <- rbind(data_X_test, data_X_train)
y<- rbind(data_y_test, data_y_train)
# label X,y,subject dataframes
names(X) <- data_features[,2]
names(y) <- c("activity")
names(subject) <- c("subject")
# subset X for mean and std; NOTE this regex also keeps meanFreq() columns.
X_new <- X[,grep("mean|std", colnames(X))]
# map activity number to activity descriptor.
# NOTE(review): mapvalues() is plyr (not loaded in this file -- presumably
# attached by the caller; confirm).  The hard-coded code->label mapping below
# should be checked against the loaded-but-unused data_activity table.
activity_descriptor <- mapvalues(y$activity,
from = c(1,2,3,4,5,6),
to = c("LAYING", "SITTING", "STANDING", "WALKING",
"WALKING_DOWNSTAIRS", "WALKING_UPSTAIRS"))
# combine subject, activity and X_new to build the first tidy data set.
# NOTE(review): data_frame() is deprecated dplyr; data.frame() would do.
data_set_1 <- cbind(subject,data_frame(activity_descriptor),X_new)
# drop all intermediates, keeping only the tidy data set.
rm(list=setdiff(ls(), "data_set_1"))
# NOTE(review): aggregate() ignores dplyr grouping, so this group_by is a
# no-op as far as data_set_2 is concerned.
subject_data <- group_by(data_set_1, subject)
# average of each variable for each activity and each subject
data_set_2 <- aggregate(. ~ subject+activity_descriptor , data = subject_data, mean)
# write table as requested in assignment ('row.name' relies on partial
# matching of write.table's 'row.names' argument).
write.table(data_set_2, "result.txt", row.name=FALSE ,sep = " ")
|
#### FOR ####
# empty for loop
test_that("flow_data works with an empty for loop",{
# Pins flow_data()'s node/edge tables for a for loop with an empty body.
# Negative ids appear to mark synthetic start/end nodes and the "next"/"<-"
# edge the loop-back -- inferred from the fixtures, confirm in flow_data().
fun <- function(x) {
for(x in foo) {}
}
data <- flow_data(fun)
# Commented helpers below regenerate the expected values:
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, -1, 3),
block_type = c("header", "for", "standard", "start", "return"),
code_str = c("fun(x)", "for (x in foo)", "", "", ""),
label = c("", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, -1),
to = c(1, 2, -1, -1, 3),
edge_label = c("", "", "", "next", ""),
arrow = c("->", "->", "->", "<-", "->"),
stringsAsFactors = FALSE))
})
# simple for loop
test_that("flow_data works with simple for loop",{
# Same graph shape as the empty-loop case, but the loop body node now
# carries the code string "x".
fun <- function(x) {
for(x in foo) x
}
data <- flow_data(fun)
# Commented helpers below regenerate the expected values:
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, -1, 3),
block_type = c("header", "for", "standard", "start", "return"),
code_str = c("fun(x)", "for (x in foo)", "x", "", ""),
label = c("", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, -1),
to = c(1, 2, -1, -1, 3),
edge_label = c("", "", "", "next", ""),
arrow = c("->", "->", "->", "<-", "->"),
stringsAsFactors = FALSE))
})
# simple while loop
test_that("flow_data works with simple while loop",{
# A while loop produces the same graph topology as a for loop; only the
# block_type and code_str of the loop node differ.
fun <- function(x) {
while(foo) x
}
data <- flow_data(fun)
# Commented helpers below regenerate the expected values:
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, -1, 3),
block_type = c("header", "while", "standard", "start", "return"),
code_str = c("fun(x)", "while (foo)", "x", "", ""),
label = c("", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, -1),
to = c(1, 2, -1, -1, 3),
edge_label = c("", "", "", "next", ""),
arrow = c("->", "->", "->", "<-", "->"),
stringsAsFactors = FALSE))
})
# simple repeat loop
# FIX: the description previously read "simple for loop" (copy/paste slip
# from the test above); it now names the repeat loop actually exercised.
test_that("flow_data works with a simple repeat loop", {
  fun <- function(x) {
    repeat x
  }
  data <- flow_data(fun)
  # Commented helpers below regenerate the expected values:
  # flow_data(fun)
  # dput2(data$nodes[1:4])
  # dput2(data$edges)
  expect_equal(
    data$nodes[1:4],
    data.frame(
      id = c(0, 1, 2, -1, 3),
      block_type = c("header", "repeat", "standard", "start", "return"),
      code_str = c("fun(x)", "repeat", "x", "", ""),
      label = c("", "", "", "", ""),
      stringsAsFactors = FALSE))
  expect_equal(
    data$edges,
    data.frame(
      from = c(0, 1, 2, 1, -1),
      to = c(1, 2, -1, -1, 3),
      edge_label = c("", "", "", "next", ""),
      arrow = c("->", "->", "->", "<-", "->"),
      stringsAsFactors = FALSE))
})
# if else call with for loops on each side
# FIX: the description previously read "simple for loop" (copy/paste slip);
# it now describes the if/else-with-for-loops case actually tested.
test_that("flow_data works with an if else call with for loops on each side", {
  fun <- function(x) {
    if(foo)
      for(x in bar) baz
    else
      for(x in qux) quux
  }
  data <- flow_data(fun)
  # Commented helpers below regenerate the expected values:
  # flow_data(fun)
  # dput2(data$nodes[1:4])
  # dput2(data$edges)
  expect_equal(
    data$nodes[1:4],
    data.frame(
      id = c(0, 1, 2, 3, -2, 4, 5, -4, -1, 6),
      block_type = c("header", "if", "for", "standard", "start", "for", "standard", "start",
                     "end", "return"),
      code_str = c("fun(x)", "if (foo)", "for (x in bar)", "baz", "", "for (x in qux)",
                   "quux", "", "", ""),
      label = c("", "", "", "", "", "", "", "", "", ""),
      stringsAsFactors = FALSE))
  expect_equal(
    data$edges,
    data.frame(
      from = c(0, 1, 2, 3, 2, -2, 1, 4, 5, 4, -4, -1),
      to = c(1, 2, 3, -2, -2, -1, 4, 5, -4, -4, -1, 6),
      edge_label = c("", "y", "", "", "next", "", "n", "", "", "next", "", ""),
      arrow = c("->", "->", "->", "->", "<-", "->", "->", "->", "->", "<-",
                "->", "->"),
      stringsAsFactors = FALSE))
})
|
/tests/testthat/test-03_flow_data_loops.R
|
no_license
|
yuewangpanda/flow
|
R
| false
| false
| 4,058
|
r
|
#### FOR ####
# empty for loop
test_that("flow_data works with an empty for loop",{
# Pins flow_data()'s node/edge tables for a for loop with an empty body.
# Negative ids appear to mark synthetic start/end nodes -- inferred from the
# fixtures, confirm in flow_data().
fun <- function(x) {
for(x in foo) {}
}
data <- flow_data(fun)
# Commented helpers below regenerate the expected values:
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, -1, 3),
block_type = c("header", "for", "standard", "start", "return"),
code_str = c("fun(x)", "for (x in foo)", "", "", ""),
label = c("", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, -1),
to = c(1, 2, -1, -1, 3),
edge_label = c("", "", "", "next", ""),
arrow = c("->", "->", "->", "<-", "->"),
stringsAsFactors = FALSE))
})
# simple for loop
test_that("flow_data works with simple for loop",{
# Same graph shape as the empty-loop case, but the loop body node now
# carries the code string "x".
fun <- function(x) {
for(x in foo) x
}
data <- flow_data(fun)
# Commented helpers below regenerate the expected values:
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, -1, 3),
block_type = c("header", "for", "standard", "start", "return"),
code_str = c("fun(x)", "for (x in foo)", "x", "", ""),
label = c("", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, -1),
to = c(1, 2, -1, -1, 3),
edge_label = c("", "", "", "next", ""),
arrow = c("->", "->", "->", "<-", "->"),
stringsAsFactors = FALSE))
})
# simple while loop
test_that("flow_data works with simple while loop",{
# A while loop produces the same graph topology as a for loop; only the
# block_type and code_str of the loop node differ.
fun <- function(x) {
while(foo) x
}
data <- flow_data(fun)
# Commented helpers below regenerate the expected values:
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, -1, 3),
block_type = c("header", "while", "standard", "start", "return"),
code_str = c("fun(x)", "while (foo)", "x", "", ""),
label = c("", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, -1),
to = c(1, 2, -1, -1, 3),
edge_label = c("", "", "", "next", ""),
arrow = c("->", "->", "->", "<-", "->"),
stringsAsFactors = FALSE))
})
# simple repeat loop
# FIX: the description previously read "simple for loop" (copy/paste slip
# from the test above); it now names the repeat loop actually exercised.
test_that("flow_data works with a simple repeat loop", {
  fun <- function(x) {
    repeat x
  }
  data <- flow_data(fun)
  # Commented helpers below regenerate the expected values:
  # flow_data(fun)
  # dput2(data$nodes[1:4])
  # dput2(data$edges)
  expect_equal(
    data$nodes[1:4],
    data.frame(
      id = c(0, 1, 2, -1, 3),
      block_type = c("header", "repeat", "standard", "start", "return"),
      code_str = c("fun(x)", "repeat", "x", "", ""),
      label = c("", "", "", "", ""),
      stringsAsFactors = FALSE))
  expect_equal(
    data$edges,
    data.frame(
      from = c(0, 1, 2, 1, -1),
      to = c(1, 2, -1, -1, 3),
      edge_label = c("", "", "", "next", ""),
      arrow = c("->", "->", "->", "<-", "->"),
      stringsAsFactors = FALSE))
})
# if else call with for loops on each side
# FIX: the description previously read "simple for loop" (copy/paste slip);
# it now describes the if/else-with-for-loops case actually tested.
test_that("flow_data works with an if else call with for loops on each side", {
  fun <- function(x) {
    if(foo)
      for(x in bar) baz
    else
      for(x in qux) quux
  }
  data <- flow_data(fun)
  # Commented helpers below regenerate the expected values:
  # flow_data(fun)
  # dput2(data$nodes[1:4])
  # dput2(data$edges)
  expect_equal(
    data$nodes[1:4],
    data.frame(
      id = c(0, 1, 2, 3, -2, 4, 5, -4, -1, 6),
      block_type = c("header", "if", "for", "standard", "start", "for", "standard", "start",
                     "end", "return"),
      code_str = c("fun(x)", "if (foo)", "for (x in bar)", "baz", "", "for (x in qux)",
                   "quux", "", "", ""),
      label = c("", "", "", "", "", "", "", "", "", ""),
      stringsAsFactors = FALSE))
  expect_equal(
    data$edges,
    data.frame(
      from = c(0, 1, 2, 3, 2, -2, 1, 4, 5, 4, -4, -1),
      to = c(1, 2, 3, -2, -2, -1, 4, 5, -4, -4, -1, 6),
      edge_label = c("", "y", "", "", "next", "", "n", "", "", "next", "", ""),
      arrow = c("->", "->", "->", "->", "<-", "->", "->", "->", "->", "<-",
                "->", "->"),
      stringsAsFactors = FALSE))
})
|
#
# Drafts for function signatures. Don't implement yet.
#
#' Transforms a FM and BV table into a length-stratified BV table
#'
#' @details
#' if BVtable is NULL, a BVtable with BVtype and BVvalue set to NA will be created
#'
#' @param FMtable FM (frequency measure) table for a single sample, or NULL
#' @param BVtable BV (biological variable) table to be stratified
#' @param stratificationYes code for encoding stratification in BVstratification. Included as default argument for now.
#' @noRd
makeBVtable <- function(FMtable, BVtable, stratificationYes="Y"){
  # With no FM (length-frequency) table there is nothing to stratify by:
  # hand the BV table back unchanged.
  if (is.null(FMtable)){
    return(BVtable)
  }
  # Use the FM length class as the stratum name.
  FMtable$stratname <- as.character(FMtable$FMclass)
  # Left join: keep every BV row and attach its length class.
  # (TRUE spelled out instead of the reassignable shorthand T.)
  BVtable <- merge(BVtable, FMtable[, c("FMid", "stratname")], by = "FMid", all.x = TRUE)
  BVtable$BVstratification <- stratificationYes
  # Make sure any additional stratification is added to the FM stratification:
  # final stratum names have the form "FM:<class>-<original BV stratum>".
  BVtable$BVstratumname <- paste(paste("FM:", BVtable$stratname, sep = ""), BVtable$BVstratumname, sep = "-")
  # Check that totals match the FM table: every BV row within an FM class must
  # report the same BVnumTotal, and that value must equal FMnumAtUnit.
  getfirst <- function(x){
    stopifnot(length(unique(x)) == 1)
    x[1]
  }
  totalsInFmclass <- aggregate(list(tot = BVtable$BVnumTotal), by = list(FMid = BVtable$FMid), FUN = getfirst)
  comptotals <- merge(totalsInFmclass, FMtable)
  stopifnot(all(comptotals$tot == comptotals$FMnumAtUnit))
  # Remove the helper column, which is not part of the BV table definition.
  BVtable$stratname <- NULL
  return(BVtable)
}
#' sampling unit totals
#'
#' @details contains design parameters and sample totals for a sampling unit
#' \describe{
#' \item{StatisticTable}{data.frame with the statistic of interest for each sampling unit}
#' \item{DesignTable}{data.frame with columns 'aboveId', 'id', and the design parameters for each sampling unit}
#' \item{jiProb}{matrix with joint inclusion probabilities}
#' }
#'
"sampUnitData"
#' Estimation object lower hierarchy
#'
#' @details list with the following members
#' \describe{
#' \item{Hierarchy}{character: Code identifying the type of lower hierarchy (A, B, C or D).}
#' \item{Value}{character: Code identifying the total provided in the PSU design table}
#' \item{sampleData}{list of data for each sample with members formatted as \code{\link{sampUnitData}}}
#' }
#'
#' @rdname DBEestimObjLow
#'
"DBEestimObjLow"
#' Prepares specimen parameters
#'
#' @description
#' Prepares data for estimation of statistics calculated for specimen parameters.
#' Should be applied to BV and FM table from a single sample. This is enforced.
#'
#' @details
#' Estimation of number at length can be based on individual parameters (BV table) or sorting in length groups (FM table).
#' This is not supported by this function.
#' (Perhaps FMtable could be prepared directly to the format 'DBEresultsTotalPointLow' returned by computeDBEresultsTotalPointLow)
#'
#' The parameter 'stat' specifies the statistic of interest and may be specified as:
#' \describe{
#' \item{number}{the total number of specimens}
#' }
#'
#' @param FMtable for a specific sample
#' @param BVtable for a specific sample
#' @param lowerHierarchy character identifying the lower hierarchy to extract
#' @param stat character identifying the statistic of interest, for now this supports the somewhat contrived options 'number' and 'countAtAge6'
#' @return \code{\link{sampUnitData}} lower hierarchy data prepared for estimation
doDBEestimationObjLowSpecimenParams <- function(FMtable=NULL, BVtable=NULL, lowerHierarchy=c("A","C"), stat=c("number", "numberAtAge6", "numberAtAge"), ages=1:20){
  # Defaults list the supported choices; use the first entry when the caller
  # did not pick one.  (The original compared the whole default vector with
  # "==", which errors inside if() on R >= 4.2, and its 'stat' default
  # repeated "numberAtAge6" where "numberAtAge" was intended.)
  lowerHierarchy <- lowerHierarchy[[1]]
  stat <- stat[[1]]
  # --- Select and validate the lower-hierarchy data ------------------------
  if (lowerHierarchy == "A"){
    # Hierarchy A: FM + BV; the BV table is re-stratified by FM length class.
    if (any(is.na(FMtable$SAid)) || length(unique(FMtable$SAid)) > 1){
      stop("Need unique sample (SAid)")
    }
    if (any(is.na(BVtable$FMid)) || !all(BVtable$FMid %in% FMtable$FMid)){
      stop("BVtable does not correspond to FMtable")
    }
    BVtable <- makeBVtable(FMtable, BVtable)
  }
  else if (lowerHierarchy == "B"){
    stop("No estimation from specimen parameters is possible for lower hierarchy B.")
  }
  else if (lowerHierarchy == "C"){
    # Hierarchy C: BV only; supplying an FM table is an error.
    stopifnot(is.null(FMtable))
    if (any(is.na(BVtable$SAid)) || length(unique(BVtable$SAid)) > 1){
      stop("Need unique sample (SAid)")
    }
  }
  else if (lowerHierarchy == "D"){
    stop("No lower hierarchy estimation possible for lower hierarchy D.")
  }
  else{
    # FIX: the original built this message with "+", which is not string
    # concatenation in R, so this branch crashed with "non-numeric argument
    # to binary operator" instead of its intended message.
    stop("Lower hierarchy ", lowerHierarchy, " is not implemented.")
  }
  # --- Derive the statistic column(s) --------------------------------------
  if (stat == "number"){
    # Total number of specimens: one row per fish, each counting 1.
    BVtable <- BVtable[!duplicated(BVtable$BVfishId), ]
    BVtable$count <- 1
    var <- "count"
  }
  else if (stat == "numberAtAge6"){
    # Indicator for age-6 fish (contrived example statistic).
    BVtable <- BVtable[BVtable$BVtype == "Age", ]
    stopifnot(!any(duplicated(BVtable$BVfishId)))
    BVtable$countAtAge6 <- as.numeric(BVtable$BVvalue == 6)
    var <- "countAtAge6"
  }
  else if (stat == "numberAtAge"){
    # One indicator column per age in 'ages'.  Column names are "Age <n>"
    # (with a space, from paste()) -- kept exactly as in the original.
    BVtable <- BVtable[BVtable$BVtype == "Age", ]
    stopifnot(!any(duplicated(BVtable$BVfishId)))
    catnames <- character(0)
    for (cat in ages){
      catn <- paste("Age", cat)
      BVtable[, catn] <- as.numeric(BVtable$BVvalue == cat)
      catnames <- c(catnames, catn)
    }
    var <- catnames
  }
  else{
    stop("Option ", stat, " is not supported for parameter 'stat'.")
  }
  # --- Assemble the sampUnitData-style output ------------------------------
  output <- list()
  output$StatisticTable <- BVtable[, c("BVfishId", var)]
  names(output$StatisticTable) <- c("id", var)
  output$DesignTable <- BVtable[, c("BVfishId", "BVstratification", "BVstratumname", "BVselectMeth", "BVnumTotal", "BVnumSamp", "BVselProp", "BVinclProp")]
  names(output$DesignTable) <- c("id", "stratification", "stratumname", "selectMeth", "numTotal", "numSamp", "selProb", "inclProb")
  # Joint inclusion probabilities are left NA; callers fill them in when the
  # sampling design provides them.
  output$jiProb <- matrix(nrow = nrow(output$DesignTable), ncol = nrow(output$DesignTable))
  colnames(output$jiProb) <- output$DesignTable$id
  rownames(output$jiProb) <- output$DesignTable$id
  return(output)
}
|
/WKRDB-EST2/subGroup5/funs/doDBEestimationObjLow.R
|
no_license
|
ices-eg/WK_RDBES
|
R
| false
| false
| 5,655
|
r
|
#
# Drafts for function signatures. Don't implement yet.
#
#' Transforms a FM and BV table into a length-stratified BV table
#'
#' @details
#' if BVtable is NULL, a BVtable with BVtype and BVvalue set to NA will be created
#'
#' @param FMtable
#' @param BVtable
#' @param stratificationYes code for encoding stratification in BVstratification. Included as default argument for now.
#' @noRd
makeBVtable <- function(FMtable, BVtable, stratificationYes="Y"){
# No FM (length-frequency) table: nothing to stratify by, return BV as-is.
if (is.null(FMtable)){
return(BVtable)
}
# Use the FM length class as the stratum name.
FMtable$stratname <- as.character(FMtable$FMclass)
# Left join: keep every BV row, attach its length class.
BVtable <- merge(BVtable, FMtable[,c("FMid", "stratname")], by="FMid", all.x=T)
BVtable$BVstratification <- stratificationYes
# make sure any additional stratification is added to the FM-stratification;
# final stratum names have the form "FM:<class>-<original BV stratum>".
BVtable$BVstratumname <- paste(paste("FM:", BVtable$stratname, sep=""), BVtable$BVstratumname, sep="-")
# check that totals match FM table: getfirst errors unless every value in a
# group is identical; each class total must then equal FMnumAtUnit.
getfirst <- function(x){stopifnot(length(unique(x))==1); return(x[1]);}
totalsInFmclass <- aggregate(list(tot=BVtable$BVnumTotal), by=list(FMid=BVtable$FMid), FUN=getfirst)
comptotals <- merge(totalsInFmclass, FMtable)
stopifnot(all(comptotals$tot == comptotals$FMnumAtUnit))
# remove helper column not in BV definition
BVtable$stratname <- NULL
return(BVtable)
}
#' sampling unit totals
#'
#' @details contains design parameters and sample totals for a sampling unit
#' \describe{
#' \item{StatisticTable}{data.frame with the statistic of interest for each sampling unit}
#' \item{DesignTable}{data.frame with columns 'aboveId', 'id', and the design parameters for each sampling unit}
#' \item{jiProb}{matrix with joint inclusion probabilites}
#' }
#'
"sampUnitData"
#' Estimation object lower hierarchy
#'
#' @details list with the following members
#' \describe{
#' \item{Hierarchy}{character: Code identifying the type of lower hiearchy (A,B,C or D).}
#' \item{Value}{character: Code identifying the total provided in the PSU design table}
#' \item{sampleData}{list of data for each sample with members formatted as \code{\link{sampUnitData}}}
#' }
#'
#' @rdname DBEestimObjLow
#'
"DBEestimObjLow"
#' Prepares specimen parameters
#'
#' @description
#' Prepares data for estimation of statistics calculated for specimen parameters.
#' Should be applied to BV and FM table from a single sample. This is enforced.
#'
#' @details
#' Estimation of number at length can be based on individual parameters (BV table) or sorting in length groups (FM table).
#' This is not supported by this function.
#' (Perhaps FMtable could be prepared directly to the format 'DBEresultsTotalPointLow' returned by computeDBEresultsTotalPointLow)
#'
#' The parameter 'stat' specifies the statstic of interest and may be specified as:
#' \describe{
#' \item{number}{the total number of specimens}
#' }
#'
#' @param FMtable for a specific sample
#' @param BVtable for a specific sample
#' @param lowerHierarchy character identifying the lower hierarchy to extract
#' @param stat character identifying the statistic of interest, for now this supports the somewhat contrived options 'number' and 'countAtAge6'
#' @return \code{\link{sampUnitData}} lower hiearchy data prepared for estimation
# Prepares lower-hierarchy (BV-level) data for estimation; returns a
# sampUnitData-style list (StatisticTable, DesignTable, jiProb).
# NOTE(review): the 'stat' default repeats "numberAtAge6"; the third choice
# was presumably meant to be "numberAtAge" (handled below) -- confirm.
doDBEestimationObjLowSpecimenParams <- function(FMtable=NULL, BVtable=NULL, lowerHierarchy=c("A","C"), stat=c("number", "numberAtAge6", "numberAtAge6"), ages=1:20){
# Hierarchy A: FM + BV; BV is re-stratified by FM length class.
if (lowerHierarchy == "A"){
if (any(is.na(FMtable$SAid)) | length(unique(FMtable$SAid))>1){
stop("Need unique sample (SAid)")
}
if (any(is.na(BVtable$FMid)) | !all(BVtable$FMid %in% FMtable$FMid)){
stop("BVtable does not correspond to FMtable")
}
BVtable <- makeBVtable(FMtable, BVtable)
}
else if (lowerHierarchy == "B"){
stop("No estimation from specimen parameters is possible for lower hierarchy B.") }
# Hierarchy C: BV only; supplying an FM table is an error.
else if (lowerHierarchy == "C"){
stopifnot(is.null(FMtable))
if (any(is.na(BVtable$SAid)) | length(unique(BVtable$SAid))>1){
stop("Need unique sample (SAid)")
}
BVtable <- BVtable
}
else if (lowerHierarchy == "D"){
stop("No lower hierarchy estimation possible for lower hierarchy D.")
}
else{
# BUG (not fixed in this copy): "+" is not string concatenation in R, so
# this branch crashes with "non-numeric argument to binary operator".
# Intended: stop("Lower hierarchy ", lowerHierarchy, " is not implemented.")
stop("Lower hierarchy " + lowerHierarchy + " is not implemented.")
}
# Total number of specimens: one row per fish, each counting 1.
if (stat=="number"){
BVtable <- BVtable[!duplicated(BVtable$BVfishId),]
BVtable$count <- 1
var <- "count"
}
# Indicator for age-6 fish (contrived example statistic).
else if (stat=="numberAtAge6"){
BVtable <- BVtable[BVtable$BVtype=="Age",]
stopifnot(!any(duplicated(BVtable$BVfishId)))
BVtable$countAtAge6 <- as.numeric(BVtable$BVvalue == 6)
var <- "countAtAge6"
}
# One indicator column per age in 'ages'; names are "Age <n>" (with a space).
else if (stat=="numberAtAge"){
BVtable <- BVtable[BVtable$BVtype=="Age",]
stopifnot(!any(duplicated(BVtable$BVfishId)))
catnames <- c()
for (cat in ages){
catn <- paste("Age", cat)
BVtable[,catn] <- as.numeric(BVtable$BVvalue == cat)
catnames <- c(catnames, catn)
}
var <- catnames
}
else{
stop("Option ", stat, " is not supported for parameter 'stat'.")
}
# Assemble the sampUnitData-style output; jiProb is left NA for callers to
# fill in when the design provides joint inclusion probabilities.
output <- list()
output$StatisticTable <- BVtable[,c("BVfishId", var)]
names(output$StatisticTable) <- c("id", var)
output$DesignTable <- BVtable[,c("BVfishId", "BVstratification", "BVstratumname", "BVselectMeth", "BVnumTotal", "BVnumSamp", "BVselProp", "BVinclProp")]
names(output$DesignTable) <- c("id", "stratification", "stratumname", "selectMeth", "numTotal", "numSamp", "selProb", "inclProb")
output$jiProb <- matrix(nrow=nrow(output$DesignTable), ncol=nrow(output$DesignTable))
colnames(output$jiProb) <- output$DesignTable$id
rownames(output$jiProb) <- output$DesignTable$id
return(output)
}
|
#' Evaluation Function for Forecasting Models
#' @export ts_backtesting
#' @param ts.obj A univariate time series object of a class "ts"
#' @param models String, define the type of models to use in the training function:
#'
#' 'a' - auto.arima (forecast package)
#'
#' 'b' - Bayesian Structural Time Series (bsts package)
#'
#' 'e' - ets (forecast package)
#'
#' 'h' - hybrid time series model (forecastHybrid package)
#'
#' 'n' - Neural Network Time Series (forecast package)
#'
#' 't' - tbats (forecast package)
#'
#' 'w' - Holt Winters (stats package)
#'
#' @param periods The number of periods to evaluate the models (with a minimum of 2)
#' @param error The type of error to evaluate by - "MAPE" (default) or "RMSE"
#' @param window_size An integer, the size of the backtesting window
#' @param h Integer, the horizon of the selected forecasting model
#' @param plot Logical, if TRUE display a plot with the backtesting progress
#' @param a.arg A list, an optional arguments to pass to the \code{\link[forecast]{auto.arima}} function
#' @param b.arg A list, an optional arguments to pass to the \code{\link[bsts]{bsts}} function
#' @param e.arg A list, an optional argument to pass to the \code{\link[forecast]{ets}} function
#' @param h.arg A list, an optional argument to pass to the \code{\link[forecastHybrid]{hybridModel}} function
#' @param n.arg A list, an optional argument to pass to the \code{\link[forecast]{nnetar}} function
#' @param t.arg A list, an optional arguments to pass to the \code{\link[forecast]{tbats}} function
#' @param w.arg A list, an optional arguments to pass to the \code{\link[stats]{HoltWinters}} function
#' @param parallel Logical, if TRUE use parallel option when applicable (auto.arima, hybridModel)
#' @param xreg.h A data.frame or matrix, optional argument,
#' set the future values of the external regressors in case of using the
#' 'xreg' argument in one of the models (auto.arima, nnetar, hybrid)
#' @description Performance evaluation function for forecasting models, by training and testing the performance
#' of each model over a sequence of periods to identify the performance of a model over time
#' (both accuracy and stability)
#' @examples
#' \dontrun{
#' data(USgas)
#' USgas_backtesting <- ts_backtesting(USgas,
#' periods = 6,
#' window_size = 24,
#' h = 60,
#' error = "RMSE")
#'
#' # Selecting a specific models (auto.arima, ets and nnetar)
#' USgas_backtesting <- ts_backtesting(USgas,
#' models = "aen",
#' periods = 6,
#' window_size = 24,
#' h = 60)
#'
#' # Retrieve the models leaderboard
#' USgas_backtesting$leaderboard
#'
#'
#' # Retrieve the best forecast results
#' USgas_backtesting$leadForecast$mean
#'
#' # Retrieve the final forecast of the ets model
#' USgas_backtesting$Forecast_Final$ets$mean
#'
#' # Retrieve the ets forecast during the first period of testing
#' USgas_backtesting$period_1$ets$forecast$mean
#'
#' # Get the final plot of the models performance and the selected forecasting model
#' USgas_backtesting$summary_plot
#' }
ts_backtesting <- function(ts.obj,
models = "abehntw",
periods = 6,
error = "MAPE",
window_size = 3,
h = 3,
plot = TRUE,
a.arg = NULL,
b.arg = NULL,
e.arg = NULL,
h.arg = NULL,
n.arg = NULL,
t.arg = NULL,
w.arg = NULL,
xreg.h = NULL,
parallel = FALSE){
base::.Deprecated(new = "train_model", msg = "The 'ts_backtesting' function is deprecated, please use 'train_model' instead")
`%>%` <- magrittr::`%>%`
a <- model_list <- model_char <- color_ramp <- forecast_list <- obj.name <- NULL
variable <- value <- avgMAPE <- avgRMSE <- NULL
obj.name <- base::deparse(base::substitute(ts.obj))
# Define the model type
for(s in 1:nchar(models)){
if(!substr(models, s, s) %in% c("a", "w", "e", "n", "t", "b", "h")){
stop("The 'models' argument is not valide")
}
}
# Error handling
# Check if xreg argument is valid
if(!base::is.null(xreg.h)){
if(!"xreg" %in% names(a.arg) &
!"xreg" %in% names(n.arg) &
!"xreg" %in% names(h.arg$a.args) &
!"xreg" %in% names(h.arg$n.args) &
!"xreg" %in% names(h.arg$s.args)){
warning("There is no 'xreg' argument in any of the models arguments,",
"'xreg.h' will be ignored")
} else {
if(base::nrow(xreg.h) != h){
stop("The length of the 'xreg.h' argument is not equal to the forecast horizon")
}
}
}
# Check the xreg in a.arg is valid (if exists)
if("xreg" %in% names(a.arg)){
xreg.arima <- NULL
xreg.arima <- a.arg$xreg
if(base::nrow(xreg.arima) != base::length(ts.obj)){
stop("The length of the 'xreg' in the 'a.arg' argument is not equal to the series length")
}
}
if("xreg" %in% names(n.arg)){
xreg.nnetar <- NULL
xreg.nnetar <- n.arg$xreg
if(base::nrow(xreg.nnetar) != base::length(ts.obj)){
stop("The length of the 'xreg' in the 'n.arg' argument is not equal to the series length")
}
}
if("xreg" %in% names(h.arg$a.args)){
xreg.hybrid.arima <- NULL
xreg.hybrid.arima <- h.arg$a.args$xreg
if(base::nrow(xreg.hybrid.arima) != base::length(ts.obj)){
stop("The length of the 'xreg' of the auto.arima model in the 'h.arg' argument is not equal to the series length")
}
}
if("xreg" %in% names(h.arg$n.args)){
xreg.hybrid.nnetar <- NULL
xreg.hybrid.nnetar <- h.arg$n.args$xreg
if(base::nrow(xreg.hybrid.nnetar) != base::length(ts.obj)){
stop("The length of the 'xreg' of the nnetar model in the 'h.arg' argument is not equal to the series length")
}
}
if("xreg" %in% names(h.arg$s.args)){
xreg.hybrid.stlm <- NULL
xreg.hybrid.stlm <- h.arg$s.args$xreg
if(base::nrow(xreg.hybrid.stlm) != base::length(ts.obj)){
stop("The length of the 'xreg' of the stlm model in the 'h.arg' argument is not equal to the series length")
}
}
if(!base::is.numeric(periods) | periods != base::round(periods) | periods <= 0){
stop("The value of the 'periods' parameters is no valid")
} else {
if((base::length(ts.obj) - periods - window_size) < 2 * stats::frequency(ts.obj)){
stop("The length of the series is long enough to create a forecast")
}
}
if(!base::is.numeric(window_size) | window_size != base::round(window_size) | window_size <= 0){
stop("The value of the 'window_size' parameters is no valid")
} else {
if((base::length(ts.obj) - periods - window_size) < 2 * stats::frequency(ts.obj)){
stop("The length of the series is long enough to create a forecast")
}
}
if (stats::is.ts(ts.obj)) {
if (stats::is.mts(ts.obj)) {
warning("The 'ts.obj' has multiple columns, only the first column will be plot")
ts.obj <- ts.obj[, 1]
}
}else {
stop("The 'ts.obj' is not a 'ts' class")
}
if(!error %in% c("MAPE", "RMSE")){
warning("The value of the 'error' parameter is invalid, using the default setting - 'MAPE'")
error <- "MAPE"
}
if(!base::is.logical(plot)){
warning("The value of the 'plot' parameter is invalid, using default option TRUE")
plot <- TRUE
}
# Setting the output object
modelOutput <- list()
# Define the plot colors
if(base::nchar(models) < 3){
color_ramp <- RColorBrewer::brewer.pal(3,"Dark2")[1:base::nchar(models)]
} else{
color_ramp <- RColorBrewer::brewer.pal(base::nchar(models),"Dark2")
}
model_char <- base::unlist(base::strsplit(models, split = ""))
modelOutput$Models_Final <- list()
modelOutput$Forecast_Final <- list()
# Final forecast
if("a" %in% model_char){
model_list <- c(model_list, "auto.arima")
md_auto.arima <- fc_auto.arima <- NULL
a.arg$parallel <- parallel
md_auto.arima <- base::do.call(forecast::auto.arima, c(list(ts.obj), a.arg))
if("xreg" %in% base::names(a.arg)){
fc_auto.arima <- forecast::forecast(md_auto.arima, h = h, xreg = xreg.h)
} else{
fc_auto.arima <- forecast::forecast(md_auto.arima, h = h)
}
modelOutput$Models_Final$auto.arima <- md_auto.arima
modelOutput$Forecast_Final$auto.arima <- fc_auto.arima
}
if("w" %in% model_char){
model_list <- c(model_list, "HoltWinters")
md_HoltWinters <- fc_HoltWinters <- NULL
md_HoltWinters <- base::do.call(stats::HoltWinters, c(list(ts.obj), w.arg))
fc_HoltWinters <- forecast::forecast(md_HoltWinters, h = h)
modelOutput$Models_Final$HoltWinters <- md_HoltWinters
modelOutput$Forecast_Final$HoltWinters <- fc_HoltWinters
}
if("e" %in% model_char){
model_list <- c(model_list, "ets")
md_ets <- fc_ets <- NULL
md_ets <- base::do.call(forecast::ets, c(list(ts.obj), e.arg))
fc_ets <- forecast::forecast(md_ets, h = h)
modelOutput$Models_Final$ets <- md_ets
modelOutput$Forecast_Final$ets <- fc_ets
}
if("n" %in% model_char){
model_list <- c(model_list, "nnetar")
md_nnetar <- fc_nnetar <- NULL
md_nnetar <- base::do.call(forecast::nnetar, c(list(ts.obj), n.arg))
if("xreg" %in% base::names(n.arg)){
fc_nnetar <- forecast::forecast(md_nnetar, h = h, xreg = xreg.h)
} else{
fc_nnetar <- forecast::forecast(md_nnetar, h = h)
}
modelOutput$Models_Final$nnetar <- md_nnetar
modelOutput$Forecast_Final$nnetar <- fc_nnetar
}
if("t" %in% model_char){
model_list <- c(model_list, "tbats")
md_tbats <- fc_tbats <- NULL
t.arg$use.parallel <- parallel
md_tbats <- base::do.call(forecast::tbats, c(list(ts.obj), t.arg))
fc_tbats <- forecast::forecast(md_tbats, h = h)
modelOutput$Models_Final$tbats <- md_tbats
modelOutput$Forecast_Final$tbats <- fc_tbats
}
if("b" %in% model_char){
# Check if the bsts arguments are valid
if(is.null(b.arg)){
b.arg <- list(linear_trend = TRUE,
seasonal = TRUE,
niter = 1000,
ping = 0,
family = "gaussian",
seed=1234)
} else{
if("linear_trend" %in% names(b.arg)){
if(!b.arg$linear_trend %in% c(TRUE, FALSE)){
warning("The value of the 'linear_trend' argument of the bsts model is invalid, using default (TRUE)")
b.arg$linear_trend <- TRUE
}
} else {
warning("The 'linear_trend' was not defined, using TRUE as default")
b.arg$linear_trend <- TRUE
}
if("seasonal" %in% names(b.arg)){
if(!b.arg$seasonal %in% c(TRUE, FALSE)){
warning("The value of the 'seasonal' argument of the bsts model is invalid, using TRUE as default")
b.arg$seasonal <- TRUE
}
} else {
warning("The 'seasonal' argument was not defined, using TRUE as default")
b.arg$seasonal <- TRUE
}
if("niter" %in% names(b.arg)){
if(!base::is.numeric(b.arg$niter)){
warning("The value of the 'niter' argument of the bsts model is invalid, setting the argument to 1000")
b.arg$niter <- 1000
} else if(b.arg$niter %% 1 != 0){
warning("The value of the 'niter' argument of the bsts model is not integer, setting the argument to 1000")
b.arg$niter <- 1000
}
} else {
warning("The 'niter' argument was not defined, setting the argument to 1000")
b.arg$niter <- 1000
}
if("ping" %in% names(b.arg)){
if(!base::is.numeric(b.arg$ping)){
warning("The value of the 'ping' argument of the bsts model is invalid, setting the argument to 100")
b.arg$ping <- 100
} else if(b.arg$ping %% 1 != 0){
warning("The value of the 'ping' argument of the bsts model is not integer, setting the argument to 100")
b.arg$ping <- 1000
}
} else {
warning("The 'ping' argument was not defined, setting the argument to 100")
b.arg$ping <- 100
}
if("seed" %in% names(b.arg)){
if(!base::is.numeric(b.arg$seed)){
warning("The value of the 'seed' argument of the bsts model is invalid, setting the argument to 1234")
b.arg$seed <- 1234
} else if(b.arg$seed %% 1 != 0){
warning("The value of the 'seed' argument of the bsts model is not integer, setting the argument to 1234")
b.arg$seed <- 1234
}
} else {
warning("The 'seed' argument was not defined, setting the argument to 1234")
b.arg$seed <- 1234
}
if("family" %in% names(b.arg)){
if(!b.arg$family %in% c("gaussian", "logit", "poisson", "student")){
warning("The value of the 'family' argument of the bsts model is invalid, using 'gaussian' as default")
b.arg$family <- "gaussian"
}
} else{
warning("The value of the 'family' argument is missing, using 'gaussian' as default")
b.arg$family <- "gaussian"
}
}
model_list <- c(model_list, "bsts")
md_bsts <- fc_bsts <- ss <- fit.bsts <- burn <- NULL
ss <- list()
if(b.arg$linear_trend){
ss <- bsts::AddLocalLinearTrend(ss, ts.obj)
}
if(b.arg$seasonal){
ss <- bsts::AddSeasonal(ss, ts.obj,
nseasons = stats::frequency(ts.obj))
}
md_bsts <- bsts::bsts(ts.obj,
state.specification = ss,
niter = b.arg$niter,
ping= b.arg$ping,
seed= b.arg$seed,
family = b.arg$family)
fc_bsts <- stats::predict(md_bsts, horizon = h, quantiles = c(.025, .975))
modelOutput$Models_Final$bsts <- md_bsts
modelOutput$Forecast_Final$bsts <- fc_bsts
}
if("h" %in% model_char){
model_list <- c(model_list, "hybrid")
md_hybrid <- fc_hybrid <- NULL
h.arg$parallel <- parallel
md_hybrid <- base::do.call(forecastHybrid::hybridModel, c(list(ts.obj), h.arg))
if("xreg" %in% names(h.arg$a.args) ||
"xreg" %in% names(h.arg$n.args) ||
"xreg" %in% names(h.arg$s.args)){
fc_hybrid <- forecast::forecast(md_hybrid, h = h, xreg = base::as.data.frame(xreg.h))
} else{
fc_hybrid <- forecast::forecast(md_hybrid, h = h)
}
modelOutput$Models_Final$hybrid <- md_hybrid
modelOutput$Forecast_Final$hybrid <- fc_hybrid
}
s <- length(ts.obj) - periods + 1
e <- length(ts.obj)
MAPE_df <- NULL
MAPE_df <- base::data.frame(matrix(NA, ncol = length(model_list) + 1 , nrow = periods))
names(MAPE_df) <- c("Period", model_list)
MAPE_df$Period <- s:e - s + 1
RMSE_df <- NULL
RMSE_df <- base::data.frame(matrix(NA, ncol = length(model_list) + 1 , nrow = periods))
names(RMSE_df) <- c("Period", model_list)
RMSE_df$Period <- s:e - s + 1
# Loop over the series
for(i in s:e){
period_name <- NULL
period_name <- paste("period", (i - s + 1), sep = "_")
eval(parse(text = paste("modelOutput$", period_name, "<- list()", sep = "")))
ts.subset <- split_ts <- train <- test <- NULL
ts.subset <- stats::window(ts.obj, start = stats::time(ts.obj)[1], end = stats::time(ts.obj)[i])
split_ts <- TSstudio::ts_split(ts.subset, sample.out = window_size)
train <- split_ts$train
test <- split_ts$test
if("a" %in% model_char){
md <- fc <- NULL
if("xreg" %in% names(a.arg)){
a.xreg.train <- xreg.arima[1:length(train),]
a.xreg.test <- xreg.arima[(length(train) + 1):(length(train) + window_size),]
a.arg.xreg <- a.arg
a.arg.xreg$xreg <- a.xreg.train
md <- base::do.call(forecast::auto.arima, c(list(train), a.arg.xreg))
fc <- forecast::forecast(md, h = window_size, xreg = a.xreg.test)
} else {
md <- base::do.call(forecast::auto.arima, c(list(train), a.arg))
fc <- forecast::forecast(md, h = window_size)
}
MAPE_df$auto.arima[i - s + 1] <- base::round(forecast::accuracy(fc,test)[10], 2)
RMSE_df$auto.arima[i - s + 1] <- base::round(forecast::accuracy(fc,test)[4], 2)
eval(parse(text = paste("modelOutput$", period_name, "$auto.arima <- list(model = md, forecast = fc)", sep = "")))
}
if("w" %in% model_char){
md <- fc <- NULL
md <- base::do.call(stats::HoltWinters, c(list(train), w.arg))
fc <- forecast::forecast(md, h = window_size)
MAPE_df$HoltWinters[i - s + 1] <- base::round(forecast::accuracy(fc, test)[10], 2)
RMSE_df$HoltWinters[i - s + 1] <- base::round(forecast::accuracy(fc, test)[4], 2)
eval(parse(text = paste("modelOutput$", period_name, "$HoltWinters <- list(model = md, forecast = fc)", sep = "")))
}
if("e" %in% model_char){
md <- fc <- NULL
md <- base::do.call(forecast::ets, c(list(train), e.arg))
fc <- forecast::forecast(train, h = window_size)
MAPE_df$ets[i - s + 1] <- base::round(forecast::accuracy(fc, test)[10], 2)
RMSE_df$ets[i - s + 1] <- base::round(forecast::accuracy(fc, test)[4], 2)
eval(parse(text = paste("modelOutput$", period_name, "$ets <- list(model = md, forecast = fc)", sep = "")))
}
if("n" %in% model_char){
md <- fc <- NULL
if("xreg" %in% names(n.arg)){
n.xreg.train <- xreg.arima[1:length(train),]
n.xreg.test <- xreg.arima[(length(train) + 1):(length(train) + window_size),]
n.arg.xreg <- n.arg
n.arg.xreg$xreg <- n.xreg.train
md <- base::do.call(forecast::nnetar, c(list(train), n.arg.xreg))
fc <- forecast::forecast(md, h = window_size, xreg = n.xreg.test)
} else {
md <- base::do.call(forecast::nnetar, c(list(train), n.arg))
fc <- forecast::forecast(md, h = window_size)
}
MAPE_df$nnetar[i - s + 1] <- base::round(forecast::accuracy(fc, test)[10],2)
RMSE_df$nnetar[i - s + 1] <- base::round(forecast::accuracy(fc, test)[4],2)
eval(parse(text = paste("modelOutput$", period_name, "$nnetar <- list(model = md, forecast = fc)", sep = "")))
}
if("t" %in% model_char){
md <- fc <- NULL
md <- base::do.call(forecast::tbats, c(list(train), t.arg))
fc <- forecast::forecast(md, h = window_size)
MAPE_df$tbats[i - s + 1] <- base::round(forecast::accuracy(fc, test)[10], 2)
RMSE_df$tbats[i - s + 1] <- base::round(forecast::accuracy(fc, test)[4], 2)
eval(parse(text = paste("modelOutput$", period_name, "$tbats <- list(model = md, forecast = fc)", sep = "")))
}
if("b" %in% model_char){
md <- fc <- ss <- NULL
ss <- list()
if(b.arg$linear_trend){
ss <- bsts::AddLocalLinearTrend(ss, ts.obj)
}
if(b.arg$seasonal){
ss <- bsts::AddSeasonal(ss, ts.obj,
nseasons = stats::frequency(ts.obj))
}
md <- bsts::bsts(train,
state.specification = ss,
niter = b.arg$niter,
ping= b.arg$ping,
seed= b.arg$seed,
family = b.arg$family)
fc <- stats::predict(md, horizon = window_size, quantiles = c(.025, .975))
pred <- fc$mean
MAPE_df$bsts[i - s + 1] <- base::round(mean(100 * base::abs((test - pred) / test)), 2)
RMSE_df$bsts[i - s + 1] <- base::round((mean((test - pred)^ 2)) ^ 0.5, 2)
eval(parse(text = paste("modelOutput$", period_name, "$bsts <- list(model = md, forecast = fc)", sep = "")))
}
if("h" %in% model_char){
md <- fc <- NULL
if("xreg" %in% names(h.arg$a.args) ||
"xreg" %in% names(h.arg$n.args) ||
"xreg" %in% names(h.arg$s.args)){
h.arg.xreg <- h.test <- NULL
h.arg.xreg <- h.arg
if("xreg" %in% names(h.arg$a.args)){
h.arg.xreg$a.args$xreg <- xreg.hybrid.arima[1:length(train),]
h.test <- xreg.hybrid.arima[(length(train) + 1):(length(train) + window_size),]
}
if("xreg" %in% names(h.arg$n.args)){
h.arg.xreg$n.args$xreg <- xreg.hybrid.nnetar[1:length(train),]
h.test <- xreg.hybrid.nnetar[(length(train) + 1):(length(train) + window_size),]
}
if("xreg" %in% names(h.arg$s.args)){
h.arg.xreg$s.args$xreg <- xreg.hybrid.stlm[1:length(train),]
h.test <- xreg.hybrid.stlm[(length(train) + 1):(length(train) + window_size),]
}
md <- base::do.call(forecastHybrid::hybridModel, c(list(train), h.arg.xreg))
fc <- forecast::forecast(md, h = window_size, xreg = base::as.data.frame(h.test))
} else {
md <- base::do.call(forecastHybrid::hybridModel, c(list(train), h.arg))
fc <- forecast::forecast(md, h = window_size)
}
eval(parse(text = paste("modelOutput$", period_name, "$hybrid <- list(model = md, forecast = fc)", sep = "")))
MAPE_df$hybrid[i - s + 1] <- base::round(forecast::accuracy(fc, test)[10], 2)
RMSE_df$hybrid[i - s + 1] <- base::round(forecast::accuracy(fc, test)[4], 2)
}
if((i -s + 1) >= 1){
p <- p1 <- p2 <- p3 <- p4 <- p5 <- p6 <-NULL
p <- base::suppressWarnings(plotly::plot_ly(x = stats::time(train), y = base::as.numeric(train), mode = "lines", name = "Training", type = "scatter", line = list(color = "#00526d")) %>%
plotly::add_lines(x = stats::time(test), y = base::as.numeric(test), line = list(color = "green", width = 4, dash = "dash"), name = "Testing") %>%
plotly::layout(xaxis = list(range = c(base::min(stats::time(ts.obj)), base::max(stats::time(ts.obj)))),
title = base::paste(obj.name, " Backtesting - Error Distribution by Period/Model", sep = ""), annotations = a))
p1 <- base::suppressWarnings(plotly::plot_ly(data = MAPE_df))
for(r1 in 2:ncol(MAPE_df)){
p1 <- base::suppressWarnings(p1 %>% plotly::add_lines(x = MAPE_df[, 1],
y = MAPE_df[, r1],
name = names(MAPE_df)[r1],
line = list(color = color_ramp[(r1 -1)])))
}
p1 <- base::suppressWarnings(p1 %>% plotly::layout(xaxis = list(tickvals = MAPE_df[, 1], ticktext = MAPE_df[, 1],
range = c(min(MAPE_df$Period), max(MAPE_df$Period)))))
p2 <- base::suppressWarnings(plotly::plot_ly(data = MAPE_df))
for(r2 in 2:base::ncol(MAPE_df)){
p2 <- base::suppressWarnings(p2 %>% plotly::add_trace(y = MAPE_df[, r2],
type = "box",
boxpoints = "all",
jitter = 0.3,
pointpos = -1.8,
name = names(MAPE_df)[r2],
marker = list(color = color_ramp[(r2 -1)]),
line = list(color = color_ramp[(r2 -1)]),
showlegend=F
))
}
p1 <- base::suppressWarnings(p1 %>% plotly::layout(title = "Error by Period",
yaxis = list(title = "MAPE"),
xaxis = list(title = "Period", tickvals = MAPE_df[, 1], ticktext = MAPE_df[, 1])))
p2 <- base::suppressWarnings(p2 %>% plotly::layout(title = "Error Distribution by Model",
yaxis = list(title = "MAPE")))
p3 <- base::suppressWarnings(plotly::subplot(p1, p2, nrows = 2, titleY = TRUE, titleX = TRUE, margin = 0.06))
p4 <- base::suppressWarnings(plotly::plot_ly(data = RMSE_df))
for(r1 in 2:ncol(RMSE_df)){
p4 <- base::suppressWarnings(p4 %>% plotly::add_lines(x = RMSE_df[, 1],
y = RMSE_df[, r1],
name = names(RMSE_df)[r1],
line = list(color = color_ramp[(r1 -1)])))
}
p4 <- base::suppressWarnings(p4 %>% plotly::layout(xaxis = list(tickvals = RMSE_df[, 1], ticktext = RMSE_df[, 1],
range = c(min(RMSE_df$Period), max(RMSE_df$Period)))))
p5 <- base::suppressWarnings(plotly::plot_ly(data = RMSE_df))
for(r2 in 2:base::ncol(RMSE_df)){
p5 <- base::suppressWarnings(p5 %>% plotly::add_trace(y = RMSE_df[, r2],
type = "box",
boxpoints = "all",
jitter = 0.3,
pointpos = -1.8,
name = names(RMSE_df)[r2],
marker = list(color = color_ramp[(r2 -1)]),
line = list(color = color_ramp[(r2 -1)]),
showlegend=F
))
}
p4 <- base::suppressWarnings(p4 %>% plotly::layout(title = "Error by Period",
yaxis = list(title = "RMSE"),
xaxis = list(title = "Period", tickvals = RMSE_df[, 1], ticktext = RMSE_df[, 1])))
p5 <- base::suppressWarnings(p5 %>% plotly::layout(title = "Error Distribution by Model",
yaxis = list(title = "RMSE")))
p6 <- base::suppressWarnings(plotly::subplot(p4, p5, nrows = 2, titleY = TRUE, titleX = TRUE, margin = 0.1))
if(error == "MAPE" & plot & periods > 1){
p7 <- base::suppressWarnings(plotly::subplot(plotly::subplot(p1, p2, nrows = 1, titleY = TRUE, shareY = TRUE, margin = 0.02, titleX = TRUE),
p, nrows = 2, margin = 0.08, titleY = TRUE))
print(p7)
} else if(error == "RMSE" & plot & periods > 1){
p7 <- base::suppressWarnings(plotly::subplot(plotly::subplot(p4, p5, nrows = 1, titleY = TRUE, shareY = TRUE, margin = 0.02, titleX = TRUE),
p, nrows = 2, margin = 0.08, titleY = TRUE))
print(p7)
}
}
}
modelOutput$MAPE_score <- MAPE_df
modelOutput$RMSE_score <- RMSE_df
if(periods > 1){
modelOutput$MAPE_plot <- p3
modelOutput$RMSE_plot <- p6
}
leaderboard <- base::suppressMessages(
(modelOutput$MAPE_score %>% reshape2::melt(id.vars = c("Period")) %>%
dplyr::group_by(variable) %>%
dplyr::summarise(avgMAPE = base::mean(value),
sdMAPE = stats::sd(value))) %>%
dplyr::left_join(
modelOutput$RMSE_score %>% reshape2::melt(id.vars = c("Period")) %>%
dplyr::group_by(variable) %>%
dplyr::summarise(avgRMSE = base::mean(value),
sdRMSE = stats::sd(value))
)
)
names(leaderboard)[1] <- "Model_Name"
modelOutput$leaderboard <- leaderboard
forecast_final_plot_arg <- list(
text = paste(obj.name, " Best Forecast by ", error, " - ", leaderboard$Model_Name[1], sep = ""),
xref = "paper",
yref = "paper",
yanchor = "bottom",
xanchor = "center",
align = "center",
x = 0.5,
y = 1,
showarrow = FALSE
)
if(error == "MAPE"){
leaderboard <- leaderboard %>% dplyr::arrange(avgMAPE)
eval(parse(text = paste("modelOutput$leadForecast <- modelOutput$Forecast_Final$", leaderboard$Model_Name[1], sep = "")))
if(periods > 1){
forecast_final_plot_arg <- list(
text = paste(obj.name, " Best Forecast by ", error, " - ", leaderboard$Model_Name[1], sep = ""),
xref = "paper",
yref = "paper",
yanchor = "bottom",
xanchor = "center",
align = "center",
x = 0.5,
y = 1,
showarrow = FALSE
)
final_forecast_plot <- base::suppressWarnings(TSstudio::plot_forecast(modelOutput$leadForecast) %>%
plotly::layout(annotations = forecast_final_plot_arg,
title = base::paste(obj.name, " Backtesting - Error Distribution by Period/Model", sep = "")))
final_plot <- base::suppressWarnings(plotly::subplot(plotly::subplot(p1, p2, nrows = 1, titleY = TRUE, shareY = TRUE, margin = 0.02, titleX = TRUE),
final_forecast_plot, nrows = 2, margin = 0.1, titleY = TRUE))
}
leaderboard <- leaderboard %>% dplyr::arrange(avgMAPE) %>% as.data.frame()
modelOutput$leaderboard <- leaderboard
} else if(error == "RMSE"){
leaderboard <- leaderboard %>% dplyr::arrange(avgRMSE)
eval(parse(text = paste("modelOutput$leadForecast <- modelOutput$Forecast_Final$", leaderboard$Model_Name[1], sep = "")))
if(periods > 1){
forecast_final_plot_arg <- list(
text = paste(obj.name, " Best Forecast by ", error, " - ", leaderboard$Model_Name[1], sep = ""),
xref = "paper",
yref = "paper",
yanchor = "bottom",
xanchor = "center",
align = "center",
x = 0.5,
y = 1,
showarrow = FALSE
)
final_forecast_plot <- base::suppressWarnings(TSstudio::plot_forecast(modelOutput$leadForecast) %>%
plotly::layout(annotations = forecast_final_plot_arg))
final_plot <- base::suppressWarnings(plotly::subplot(plotly::subplot(p4, p5, nrows = 1, titleY = TRUE, shareY = TRUE, margin = 0.02, titleX = TRUE),
final_forecast_plot, nrows = 2, margin = 0.1, titleY = TRUE))
}
leaderboard <- leaderboard %>% dplyr::arrange(avgRMSE) %>% as.data.frame()
modelOutput$leaderboard <- leaderboard
}
modelOutput$summary_plot <- final_plot
if(plot){
print(final_plot)
}
print(leaderboard)
class(modelOutput) <- "ts_backtest"
return(modelOutput)
}
#' Tuning Time Series Forecasting Models Parameters with Grid Search
#' @export ts_grid
#' @param ts.obj A univariate time series object of a class "ts"
#' @param model A string, defines the model
#' @param optim A string, set the optimization method - c("MAPE", "RMSE")
#' @param periods An integer, set the number of backtesting periods
#' @param window_length An integer, defines the length of the backtesting training window.
#' If set to NULL (default) will use an expanding window starting from the first observation,
#' otherwise will use a sliding window.
#' @param window_space An integer, set the space length between each of the backtesting training partition
#' @param window_test An integer, set the length of the backtesting testing partition
#' @param hyper_params A list, defines the tuning parameters and their range
#' @param parallel Logical, if TRUE use multiple cores in parallel
#' @param n.cores Set the number of cores to use if the parallel argument is set to TRUE.
#' If set to "auto" (default), will use all available cores in the system minus 1
#' @description Tuning time series models with grid search approach using backtesting method.
#' @return A list
#' @examples
#' \dontrun{
#' data(USgas)
#'
#' # Starting with a shallow search (sequence between 0 and 1 with jumps of 0.1)
#' # To speed up the process, will set the parallel option to TRUE
#' # to run the search in parallel using 8 cores
#'
#' hw_grid_shallow <- ts_grid(ts.obj = USgas,
#' periods = 6,
#' model = "HoltWinters",
#' optim = "MAPE",
#' window_space = 6,
#' window_test = 12,
#' hyper_params = list(alpha = seq(0.01, 1,0.1),
#' beta = seq(0.01, 1,0.1),
#' gamma = seq(0.01, 1,0.1)),
#' parallel = TRUE,
#' n.cores = 8)
#'
#'
#' # Use the parameter range of the top 20 models
#' # to set a narrow but more aggressive search
#'
#' a_min <- min(hw_grid_shallow$grid_df$alpha[1:20])
#' a_max <- max(hw_grid_shallow$grid_df$alpha[1:20])
#'
#' b_min <- min(hw_grid_shallow$grid_df$beta[1:20])
#' b_max <- max(hw_grid_shallow$grid_df$beta[1:20])
#'
#' g_min <- min(hw_grid_shallow$grid_df$gamma[1:20])
#' g_max <- max(hw_grid_shallow$grid_df$gamma[1:20])
#'
#' hw_grid_second <- ts_grid(ts.obj = USgas,
#' periods = 6,
#' model = "HoltWinters",
#' optim = "MAPE",
#' window_space = 6,
#' window_test = 12,
#' hyper_params = list(alpha = seq(a_min, a_max,0.05),
#' beta = seq(b_min, b_max,0.05),
#' gamma = seq(g_min, g_max,0.05)),
#' parallel = TRUE,
#' n.cores = 8)
#'
#' md <- HoltWinters(USgas,
#' alpha = hw_grid_second$alpha,
#' beta = hw_grid_second$beta,
#' gamma = hw_grid_second$gamma)
#'
#' library(forecast)
#'
#' fc <- forecast(md, h = 60)
#'
#' plot_forecast(fc)
#'
#' }
ts_grid <- function(ts.obj,
                    model,
                    optim = "MAPE",
                    periods,
                    window_length = NULL,
                    window_space,
                    window_test,
                    hyper_params,
                    parallel = TRUE,
                    n.cores = "auto"){
  # Tune a forecasting model's hyper parameters with a grid search, scoring
  # each parameter combination over multiple backtesting partitions.
  # Currently only the Holt-Winters model is supported.
  error <- period <- NULL
  `%>%` <- magrittr::`%>%`
  # ---- Error handling ----
  if(!stats::is.ts(ts.obj)){
    stop("The input object is not 'ts' object")
  } else if(stats::is.mts(ts.obj)){
    stop("The input object is 'mts' object, please use 'ts'")
  }
  if(!optim %in% c("MAPE", "RMSE") || base::length(optim) != 1){
    warning("The value of the optim argument is not valid, using default option (MAPE)")
    optim <- "MAPE"
  }
  if(!base::is.logical(parallel)){
    warning("The 'parallel' argument is not a boolean operator, setting it to TRUE")
    parallel <- TRUE
  }
  # Validate the requested number of cores, falling back to 'auto' mode
  # (all available cores minus one) on any invalid input
  if(n.cores != "auto"){
    if(!base::is.numeric(n.cores)){
      warning("The value of the 'n.cores' argument is not valid,",
              " setting it to 'auto' mode")
      n.cores <- "auto"
    } else if(n.cores %% 1 != 0 || n.cores < 1){
      warning("The value of the 'n.cores' argument is not valid,",
              " setting it to 'auto' mode")
      n.cores <- "auto"
    } else if(future::availableCores() < n.cores){
      warning("The value of the 'n.cores' argument is not valid,",
              "(the requested number of cores are greater than available)",
              ", setting it to 'auto' mode")
      n.cores <- "auto"
    }
  }
  if(n.cores == "auto"){
    n.cores <- base::as.numeric(future::availableCores() - 1)
  }
  # Bug fix: base::exists("model") is always TRUE inside the function body
  # (the formal argument exists as a promise even when no value was passed),
  # so the original check could never fire. missing() is the correct test.
  if(base::missing(model)){
    stop("The 'model' argument is missing")
  } else if(!model %in% c("HoltWinters")){
    stop("The 'model' argument is not valid")
  }
  # ---- Set the backtesting partitions ----
  s <- length(ts.obj) - window_space * (periods - 1) # the length of the first partition
  e <- length(ts.obj) # the end of the backtesting partition
  w_end <- seq(from = s, by = window_space, to = e) # cutting points of the partitions
  if(!base::is.null(window_length)){
    # Sliding window - every training partition has a fixed length
    w_start <- w_end - window_test - window_length + 1
  } else {
    # Expanding window - every training partition starts at the first observation
    w_start <- base::rep(1, base::length(w_end))
  }
  if(model == "HoltWinters"){
    hyper_input <- hyper_null <- hyper_false <- NULL
    hw_par <- c("alpha", "beta", "gamma")
    if(!base::all(base::names(hyper_params) %in% hw_par)){
      stop("The 'hyper_params' argument is invalid")
    }
    # Validate a numeric search range: values must be within [0, 1];
    # zeros are replaced with 1e-5 (HoltWinters does not accept 0)
    check_range <- function(values, par_name){
      if(base::any(values < 0, na.rm = TRUE) ||
         base::any(values > 1, na.rm = TRUE)){
        stop("The value of the '", par_name, "' parameter is out of range,",
             " cannot exceed 1 or be less or equal to 0")
      }
      if(base::any(values == 0, na.rm = TRUE)){
        values[base::which(values == 0)] <- 1e-5
        warning("The value of the '", par_name, "' parameter cannot be equal to 0",
                " replacing 0 with 1e-5")
      }
      values
    }
    if("alpha" %in% base::names(hyper_params)){
      if(base::is.null(hyper_params$alpha)){
        hyper_null <- c(hyper_null, "alpha")
      } else if(base::is.logical(hyper_params$alpha)){
        # Unlike beta/gamma, alpha cannot be disabled with FALSE
        stop("The value of the 'alpha' argument must be numeric")
      } else {
        hyper_params$alpha <- check_range(hyper_params$alpha, "alpha")
        hyper_input <- c(hyper_input, "alpha")
      }
    }
    if("beta" %in% base::names(hyper_params)){
      if(base::is.null(hyper_params$beta)){
        hyper_null <- c(hyper_null, "beta")
      } else if(base::is.logical(hyper_params$beta) &&
                !base::isTRUE(hyper_params$beta)){
        # beta = FALSE fits a model without a trend component
        hyper_false <- c(hyper_false, "beta")
      } else {
        hyper_params$beta <- check_range(hyper_params$beta, "beta")
        hyper_input <- c(hyper_input, "beta")
      }
    }
    if("gamma" %in% base::names(hyper_params)){
      if(base::is.null(hyper_params$gamma)){
        hyper_null <- c(hyper_null, "gamma")
      } else if(base::is.logical(hyper_params$gamma) &&
                !base::isTRUE(hyper_params$gamma)){
        # gamma = FALSE fits a non-seasonal model.
        # Bug fix: the original code appended "beta" here, which wrongly
        # forced the beta column (not gamma) to FALSE in the search grid
        hyper_false <- c(hyper_false, "gamma")
      } else {
        hyper_params$gamma <- check_range(hyper_params$gamma, "gamma")
        hyper_input <- c(hyper_input, "gamma")
      }
    }
    # Build the search grid - the Cartesian product of the numeric ranges
    grid_df <- base::do.call(base::expand.grid, hyper_params[hyper_input])
    base::names(grid_df) <- hyper_input
    # Parameters that were explicitly disabled are fixed to FALSE
    if(!base::is.null(hyper_false)){
      for(f in hyper_false){
        grid_df[f] <- FALSE
      }
    }
    # Fit a Holt-Winters model on a training partition with one row of the
    # search grid (replaces the original eval(parse(text = ...)) construction).
    # Parameters absent from the grid keep their HoltWinters() default (NULL,
    # i.e., estimated from the data)
    fit_model <- function(train, param_row){
      md_args <- list(x = train)
      for(p in hw_par){
        if(p %in% base::names(grid_df)){
          md_args[[p]] <- param_row[[p]]
        }
      }
      base::do.call(stats::HoltWinters, md_args)
    }
  }
  # Score every grid row on backtesting partition n, returning the grid
  # with the forecast error (MAPE or RMSE) of each parameter combination
  score_period <- function(n){
    search_df <- grid_df
    search_df$period <- n
    search_df$error <- NA
    ts_sub <- stats::window(ts.obj,
                            start = stats::time(ts.obj)[w_start[n]],
                            end = stats::time(ts.obj)[w_end[n]])
    partition <- TSstudio::ts_split(ts_sub, sample.out = window_test)
    train <- partition$train
    test <- partition$test
    for(i in base::seq_len(base::nrow(search_df))){
      md <- fit_model(train, search_df[i, ])
      fc <- forecast::forecast(md, h = window_test)
      if(optim == "MAPE"){
        search_df$error[i] <- forecast::accuracy(fc, test)[10]
      } else if(optim == "RMSE"){
        search_df$error[i] <- forecast::accuracy(fc, test)[4]
      }
    }
    return(search_df)
  }
  if(parallel){
    # 'multisession' replaces the deprecated/removed 'multiprocess' strategy.
    # NOTE(review): as in the original code, the future plan is left in place
    # after the function returns
    future::plan(future::multisession, workers = n.cores)
    grid_output <- future.apply::future_lapply(base::seq_len(periods), score_period) %>%
      dplyr::bind_rows() %>%
      tidyr::spread(key = period, value = error)
  } else {
    grid_output <- base::lapply(base::seq_len(periods), score_period) %>%
      dplyr::bind_rows() %>%
      tidyr::spread(key = period, value = error)
  }
  # Average the error across all backtesting periods and rank the grid
  col_mean <- base::which(!base::names(grid_output) %in% base::names(hyper_params))
  grid_output$mean <- base::rowMeans(grid_output[, col_mean])
  grid_output <- grid_output %>% dplyr::arrange(mean)
  final_output <- list(grid_df = grid_output)
  # Attach the best (lowest average error) value of each tuned parameter
  for(i in base::names(hyper_params)){
    final_output[[i]] <- grid_output[1, i]
  }
  final_output[["parameters"]] <- list(series = ts.obj,
                                       model = model,
                                       optim = optim,
                                       periods = periods,
                                       window_length = window_length,
                                       window_space = window_space,
                                       window_test = window_test,
                                       hyper_params = hyper_params,
                                       parallel = parallel,
                                       n.cores = n.cores)
  base::class(final_output) <- "ts_grid"
  return(final_output)
}
#' Visualizing Grid Search Results
#' @export plot_grid
#' @param grid.obj A ts_grid output object
#' @param top An integer, set the number of hyper-parameters combinations to visualize
#' (ordered by accuracy). If set to NULL (default), will plot the top 100 combinations
#' @param type The plot type, either "3D" for 3D plot or
#' "parcoords" for parallel coordinates plot.
#' Note: the 3D plot option is applicable whenever there are three tuning parameters,
#' otherwise will use a 2D plot for two tuning parameters.
#' @param highlight A proportion between 0 (excluding) and 1,
#' set the number of hyper-parameters combinations to highlight
#' (by accuracy), if the type argument is set to "parcoords"
#' @param colors A list of plotly arguments for the color scale setting:
#'
#' showscale - display the color scale if set to TRUE.
#'
#' reversescale - reverse the color scale if set to TRUE
#'
#' colorscale set the color scale of the plot, possible palettes are:
#' Greys, YlGnBu, Greens , YlOrRd,
#' Bluered, RdBu, Reds, Blues, Picnic,
#' Rainbow, Portland, Jet, Hot, Blackbody,
#' Earth, Electric, Viridis, Cividis
plot_grid <- function(grid.obj,
                      top = NULL,
                      highlight = 0.1,
                      type = "parcoords",
                      colors = list(showscale = TRUE,
                                    reversescale = FALSE,
                                    colorscale = "Jet")){
  # Visualize the results of a ts_grid search, either as a parallel
  # coordinates plot ("parcoords") or as a 3D/2D scatter plot ("3D")
  # Setting the pipe operator
  `%>%` <- magrittr::`%>%`
  # Setting variables
  p <- par_names <- sizeref <- NULL
  # List of supported plotly color scales
  color_option <- c("Greys","YlGnBu", "Greens", "YlOrRd",
                    "Bluered", "RdBu", "Reds", "Blues", "Picnic",
                    "Rainbow", "Portland", "Jet", "Hot", "Blackbody",
                    "Earth", "Electric", "Viridis", "Cividis")
  # ---- Error handling ----
  # inherits() is the robust class test (class(x) may have length > 1)
  if(!base::inherits(grid.obj, "ts_grid")){
    stop("The input object is not a 'ts_grid' class")
  }
  if(!base::is.list(colors)){
    warning("The 'colors' argument is not valid, using default option")
    colors <- base::list(showscale = TRUE,
                         reversescale = FALSE,
                         colorscale = "Jet")
  } else if(!all(base::names(colors) %in% c("showscale", "reversescale", "colorscale"))){
    warning("The 'colors' argument is not valid, using default option")
    colors <- base::list(showscale = TRUE,
                         reversescale = FALSE,
                         colorscale = "Jet")
  }
  if(!base::is.logical(colors$showscale)){
    warning("The 'showscale' parameter of the 'colors' argument is not logical, using default option (TRUE)")
    colors$showscale <- TRUE
  }
  if(!base::is.logical(colors$reversescale)){
    warning("The 'reversescale' parameter of the 'colors' argument is not logical, using default option (FALSE)")
    colors$reversescale <- FALSE
  }
  if(!base::is.character(colors$colorscale) ||
     base::length(colors$colorscale) != 1 ||
     !colors$colorscale %in% color_option){
    # Bug fix: the original code warned but never reset the invalid value
    warning("The 'colorscale' parameter of the 'colors' argument is not valid, using default option (Jet)")
    colors$colorscale <- "Jet"
  }
  if(type != "parcoords" && type != "3D"){
    warning("The value of the 'type' argument is not valid, using default option (parcoords)")
    type <- "parcoords"
  }
  # Cap the number of models to display at the grid size (default: top 100)
  if(!base::is.null(top)){
    if(!base::is.numeric(top) || top %% 1 != 0){
      warning("The value of the 'top' argument is not valid, using default option (top 100 models)")
      top <- ifelse(base::nrow(grid.obj$grid_df) > 100, 100, base::nrow(grid.obj$grid_df))
    }
    if(top > base::nrow(grid.obj$grid_df)){
      warning("The value of the 'top' argument exceeding the number of models, using default option (top 100 models)")
      top <- ifelse(base::nrow(grid.obj$grid_df) > 100, 100, base::nrow(grid.obj$grid_df))
    }
  } else {
    top <- ifelse(base::nrow(grid.obj$grid_df) > 100, 100, base::nrow(grid.obj$grid_df))
  }
  if(!base::is.numeric(highlight) || highlight <= 0 || highlight > 1){
    warning("The value of the 'highlight' argument is not valid, using default (0.1)")
    highlight <- 0.1
  }
  # Keep only the parameters that were actually tuned - ones set to NULL or
  # FALSE have no search range to display.
  # Bug fix: isFALSE() replaces 'x == FALSE' inside if(), which is an invalid
  # (length > 1) condition when the parameter holds a numeric search range
  par_names <- base::Filter(function(par){
    value <- grid.obj$parameters$hyper_params[[par]]
    !base::is.null(value) && !base::isFALSE(value)
  }, base::names(grid.obj$parameters$hyper_params))
  if(type == "parcoords"){
    if(grid.obj$parameters$model == "HoltWinters"){
      if(base::length(par_names) < 2){
        stop("Cannot create a parallel coordinates plot for a single hyper parameter")
      }
      # One axis per tuned parameter; the constraint range highlights the
      # parameter span of the best ceiling(top * highlight) models
      # (replaces the original eval(parse(text = ...)) construction)
      top_highlight <- base::ceiling(top * highlight)
      hw_dim <- base::lapply(par_names, function(par){
        base::list(range = c(0, 1),
                   constraintrange = c(base::min(grid.obj$grid_df[1:top_highlight, par]),
                                       base::max(grid.obj$grid_df[1:top_highlight, par])),
                   label = par,
                   values = grid.obj$grid_df[1:top, par])
      })
      p <- grid.obj$grid_df[1:top,] %>%
        plotly::plot_ly(type = 'parcoords',
                        line = list(color = ~ mean,
                                    colorscale = colors$colorscale,
                                    showscale = colors$showscale,
                                    reversescale = colors$reversescale,
                                    cmin = base::min(grid.obj$grid_df$mean),
                                    cmax = base::max(grid.obj$grid_df$mean[1:top]),
                                    colorbar=list(
                                      title= base::paste("Avg.", grid.obj$parameters$optim, sep = " ")
                                    )),
                        dimensions = hw_dim
        ) %>% plotly::layout(title = base::paste(grid.obj$parameters$model,
                                                 " Parameters Grid Search Results (Avg. ",
                                                 grid.obj$parameters$optim,
                                                 ") for Top ",
                                                 top,
                                                 " Models", sep = ""),
                             xaxis = list(title = base::paste("Testing Over", grid.obj$parameters$periods, "Periods", sep = " ")))
    }
  }else if(type == "3D"){
    if(grid.obj$parameters$model == "HoltWinters"){
      if(base::length(par_names) == 3){
        # Three tuned parameters - a full 3D scatter, colored by avg. error
        p <- plotly::plot_ly(data = grid.obj$grid_df[1:top,],
                             type="scatter3d",
                             mode = "markers",
                             x = ~ alpha,
                             y = ~ beta,
                             z = ~ gamma,
                             hoverinfo = 'text',
                             text = paste(base::paste("Avg.", grid.obj$parameters$optim, sep = " "),
                                          ": ", base::round(grid.obj$grid_df[1:top, "mean"], 2),
                                          "<br>", par_names[1],": ", grid.obj$grid_df[1:top, par_names[1]],
                                          "<br>", par_names[2],": ", grid.obj$grid_df[1:top, par_names[2]],
                                          "<br>", par_names[3],": ", grid.obj$grid_df[1:top, par_names[3]],
                                          sep = ""),
                             marker = list(color = ~ mean,
                                           colorscale = colors$colorscale,
                                           showscale = colors$showscale,
                                           reversescale = colors$reversescale,
                                           colorbar=list(
                                             title= base::paste("Avg.", grid.obj$parameters$optim, sep = " ")
                                           ))) %>%
          plotly::layout(title = base::paste(grid.obj$parameters$model,
                                             " Parameters Grid Search Results (Avg. ",
                                             grid.obj$parameters$optim,
                                             ") for Top ",
                                             top,
                                             " Models", sep = ""),
                         xaxis = list(title = base::paste("Testing Over", grid.obj$parameters$periods, "Periods", sep = " ")))
      } else if(base::length(par_names) == 2){
        warning("Cannot create a 3D plot for two hyper parameters")
        # Fall back to a 2D bubble chart; scale the bubble size by avg. error
        sizeref <- 2.0 * max(grid.obj$grid_df$mean[1:top]) / (20^2)
        p <- plotly::plot_ly(x = grid.obj$grid_df[1:top, par_names[1]],
                             y = grid.obj$grid_df[1:top, par_names[2]],
                             type = "scatter",
                             mode = "markers",
                             hoverinfo = 'text',
                             text = paste(base::paste("Avg.", grid.obj$parameters$optim, sep = " "),
                                          ": ", base::round(grid.obj$grid_df[1:top, "mean"], 2),
                                          "<br>", par_names[1],": ", grid.obj$grid_df[1:top, par_names[1]],
                                          "<br>", par_names[2],": ", grid.obj$grid_df[1:top, par_names[2]],
                                          sep = ""),
                             marker = list(color = grid.obj$grid_df[1:top, "mean"],
                                           size = grid.obj$grid_df[1:top, "mean"],
                                           sizemode = 'area', sizeref = sizeref,
                                           colorscale = colors$colorscale,
                                           showscale = colors$showscale,
                                           reversescale = colors$reversescale,
                                           colorbar=list(
                                             title= base::paste("Avg.", grid.obj$parameters$optim, sep = " ")
                                           ))
        ) %>%
          plotly::layout(title = base::paste(grid.obj$parameters$model,
                                             "Parameters Grid Search Results (Avg.",
                                             base::paste(grid.obj$parameters$optim, ")", sep = ""),
                                             "for Top",
                                             top,
                                             "Models",
                                             sep = " "),
                         xaxis = list(title = par_names[1]),
                         yaxis = list(title = par_names[2]))
      } else if(base::length(par_names) <= 1){
        stop("Cannot create a 3D plot for a single hyper parameter")
      }
    }
  }
  return(p)
}
#' Train, Test, Evaluate, and Forecast Multiple Time Series Forecasting Models
#' @export
#' @description Method for training, testing, and comparing multiple time series models, using either a single partition (i.e., sample out)
#' or multiple partitions (backtesting)
#' @param input A univariate time series object (ts class)
#' @param methods A list, defines the models to use for training and forecasting the series.
#' The list must include a sub list with the model type, and the model's arguments (when applicable) and notes about the model.
#' The sub-list name will be used as the model ID. Possible models:
#'
#' \code{\link[stats]{arima}} - model from the stats package
#'
#' \code{\link[forecast]{auto.arima}} - model from the forecast package
#'
#' \code{\link[forecast]{ets}} - model from the forecast package
#'
#' \code{\link[stats]{HoltWinters}} - model from the stats package
#'
#' \code{\link[forecast]{nnetar}} - model from the forecast package
#'
#' \code{\link[forecast]{tslm}} - model from the forecast package (note that the 'tslm' model must have the formula argument in the 'method_arg' argument)
#'
#' @param train_method A list, defines the backtesting parameters:
#'
#' partitions - an integer, set the number of training and testing partitions to be used in the backtesting process,
#' where when partition is set to 1 it is a simple holdout training approach
#'
#' space - an integer, defines the length of the backtesting window expansion
#'
#' sample.in - an integer, optional, defines the length of the training partitions, and therefore the backtesting window structure.
#' By default, it is set to NULL, and therefore the backtesting uses an expanding window.
#' Otherwise, when sample.in is defined, the training partitions have a fixed length and the window structure is sliding.
#' The train_method list therefore defines the training approach, either using a single testing partition (sample out)
#' or multiple testing partitions (backtesting). The list should include the training method arguments (please see 'details' for the structure of the argument)
#' @param horizon An integer, defines the forecast horizon
#' @param xreg Optional, a list with two vectors (e.g., data.frame or matrix) of external regressors,
#' one vector corresponding to the input series and second to the forecast itself
#' (e.g., must have the same length as the input and forecast horizon, respectively)
#' @param error A character, defines the error metrics to be used to sort the models leaderboard. Possible metric - "MAPE" or "RMSE"
#' @param level An integer, set the confidence level of the prediction intervals
#' @examples
#'
#' # Defining the models and their arguments
#' methods <- list(ets1 = list(method = "ets",
#' method_arg = list(opt.crit = "lik"),
#' notes = "ETS model with opt.crit = lik"),
#' ets2 = list(method = "ets",
#' method_arg = list(opt.crit = "amse"),
#' notes = "ETS model with opt.crit = amse"),
#' arima1 = list(method = "arima",
#' method_arg = list(order = c(2,1,0)),
#' notes = "ARIMA(2,1,0)"),
#' arima2 = list(method = "arima",
#' method_arg = list(order = c(2,1,2),
#' seasonal = list(order = c(1,1,1))),
#' notes = "SARIMA(2,1,2)(1,1,1)"),
#' auto_arima = list(method = "auto.arima",
#' method_arg = NULL,
#' notes = "auto.arima model"),
#' hw = list(method = "HoltWinters",
#' method_arg = NULL,
#' notes = "HoltWinters Model"),
#' tslm = list(method = "tslm",
#' method_arg = list(formula = input ~ trend + season),
#' notes = "tslm model with trend and seasonal components"))
#' # Training the models with backtesting
#' md <- train_model(input = USgas,
#' methods = methods,
#' train_method = list(partitions = 6,
#' sample.out = 12,
#' space = 3),
#' horizon = 12,
#' error = "MAPE")
#' # View the model performance on the backtesting partitions
#' md$leaderboard
#'
train_model <- function(input,
                        methods,
                        train_method,
                        horizon,
                        error = "MAPE",
                        xreg = NULL,
                        level = c(80, 95)){
  # Setting the pipe operator
  `%>%` <- magrittr::`%>%`
  # Declaring the variables used below to avoid R CMD check NOTEs
  method_list <- input_freq <- input_length <- w <- s1 <- s2 <- NULL
  grid_df <- models_df <- w_range <- notes <- NULL
  methods_selected <- model_id <- start <- end <- partition <- NULL
  model <- avg_mape <- avg_rmse <- NULL
  # Whenever updating, need to update the add_method function as well
  method_list <- list("arima", "auto.arima", "ets", "HoltWinters", "nnetar", "tslm")
  ### Error Handling
  # Check the level argument
  if(base::all(!is.numeric(level)) ||
     base::any(level %% 1 != 0) ||
     base::any(level <= 0 | level > 100)){
    stop("Error on the 'level' argument: the argument is out of range (0,100]")
  }
  # Check the error argument
  if(base::is.null(error) || !base::is.character(error) || base::length(error) !=1){
    stop("Error on the 'error' argument: the input is not valid, please use either 'RMSE' or 'MAPE'")
  } else if( error != "MAPE" && error != "RMSE"){
    stop("Error on the 'error' argument: the input is not valid, please use either 'RMSE' or 'MAPE'")
  }
  # Checking the input argument
  if(!stats::is.ts(input)){
    stop("The input argument is not a valid 'ts' object")
  } else if(stats::is.mts(input)){
    stop("Cannot use multiple time series object as input")
  }
  # Getting the attributes of the input object
  input_freq <- stats::frequency(input)
  input_length <- base::length(input)
  # Validating the methods argument
  if(!base::is.list(methods)){
    stop("Error on the 'methods' argument: the argument is not a list")
  } else if(base::is.null(base::names(methods))){
    stop("Error on the 'methods' argument: could not find the models IDs")
  } else if(base::any("NULL" %in% base::as.character(methods %>% purrr::map(~.x[["method"]])))){
    stop("Error on the 'methods' argument: at least one of the methods is missing the 'method' argument")
  }
  models_df <- base::data.frame(model_id = base::names(methods),
                                methods_selected = base::as.character(methods %>% purrr::map_chr(~.x[["method"]])),
                                notes = base::as.character(methods %>% purrr::map(~.x[["notes"]])),
                                stringsAsFactors = FALSE)
  if(!base::all(models_df$methods_selected %in% method_list)){
    stop("Error on the 'methods' argument: at least one of the models methods is not valid")
  }
  # Checking the train argument
  if(!base::is.list(train_method)){
    stop("Error on the 'train_method' argument: the argument is not a list")
  } else if(!"partitions" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'partition' argument is missing")
  } else if(!"space" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'space' argument is missing")
  } else if(!"sample.out" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'sample.out' argument is missing")
  } else if(!base::is.numeric(train_method$sample.out) ||
            train_method$sample.out < 1 ||
            train_method$sample.out %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'sample.out' argument is not valide, please use a positive integer")
  } else if(!base::is.numeric(train_method$partitions) ||
            train_method$partitions < 1 ||
            train_method$partitions %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'partitions' argument is not valide, please use a positive integer")
  } else if(!base::is.numeric(train_method$space) ||
            train_method$space < 1 ||
            train_method$space %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'space' argument is not valide, please use a positive integer")
  }
  # End points of the training partitions (one per backtesting partition)
  w <- seq(from = input_length - train_method$space * (train_method$partitions - 1),
           by = train_method$space,
           length.out = train_method$partitions)
  if(min(w) < input_freq * 2){
    stop("Error on the 'train_method' argument: the length of the first partition is not sufficient to train a model",
         " (must leave at least two full cycles for the sample in partition)")
  }
  # If not using sample.in, will define the start point as 1 (expanding window)
  if(!"sample.in" %in% base::names(train_method) ||
     ("sample.in" %in% base::names(train_method) &&
      base::is.null(train_method$sample.in))){
    s1 <- s2 <- 1
    w_range <- base::data.frame(start = c(base::rep(s1, base::length(w)), s2),
                                end = c(w, input_length),
                                type = c(base::rep("train", base::length(w)), "forecast"),
                                partition = c(base::paste0("partition_", 1:base::length(w), sep = ""), "final_partition"),
                                stringsAsFactors = FALSE)
  } else if("sample.in" %in% base::names(train_method)){
    # If defining the sample.in -> check that the argument is valid (sliding window)
    if(!base::is.numeric(train_method$sample.in) ||
       train_method$sample.in < 1 ||
       train_method$sample.in %% 1 != 0){
      stop("Error on the 'train_method' argument: the training partition length (sample in) of the backtesting is not valid. Please use a positive integer")
    } else if( train_method$sample.in < input_freq * 2){
      stop("Error on the 'train_method' argument: the training partition length (sample in) must have at least two cycles")
    }
    s1 <- w - train_method$sample.out - train_method$sample.in + 1
    s2 <- input_length - train_method$sample.in + 1
    w_range <- base::data.frame(start = c(s1, s2),
                                end = c(w, input_length),
                                type = c(base::rep("train", base::length(w)), "forecast"),
                                partition = c(base::paste0("partition_", 1:base::length(w), sep = ""), "final_partition"),
                                stringsAsFactors = FALSE)
  }
  # Checking the horizon argument
  # Note: the is.numeric test must run first, otherwise '%% 1' errors on
  # non-numeric input before the informative message can be raised
  if(!base::is.numeric(horizon) || horizon %% 1 != 0 || horizon <= 0){
    stop("Error on the 'horizon' argument: the 'horizon' is not valid, please make sure using positive integer")
  }
  # Checking the xreg argument
  if(!base::is.null(xreg)){
    if(!all(c("train", "forecast") %in% base::names(xreg))){
      stop("Error on the 'xreg' argument: the 'xreg' list is not valid, please make sure setting the correspinding regressor",
           " inputs for the 'input' argument (train) and for the forecast horizon (forecast)")
    } else if(base::nrow(xreg$train) != base::length(input)){
      stop("Error on the 'xreg' argument: the length of the xreg train input is not aligned with the length of the input series")
    } else if(base::nrow(xreg$forecast) != horizon){
      stop("Error on the 'xreg' argument: the length of the xreg forecast input is not aligned with the forecast horizon")
    }
  }
  # Creating grid of all the modeling combinations (backtesting + final forecast)
  grid_df <- base::expand.grid(models_df$model_id, s1, train_method$sample.out, stringsAsFactors = FALSE) %>%
    stats::setNames(c("model_id", "start", "horizon")) %>%
    dplyr::left_join(models_df, by = c("model_id")) %>%
    dplyr::mutate(type = "train") %>% dplyr::bind_rows(
      base::expand.grid(models_df$model_id, s2, horizon, stringsAsFactors = FALSE) %>%
        stats::setNames(c("model_id", "start", "horizon")) %>%
        dplyr::left_join(models_df, by = c("model_id")) %>%
        dplyr::mutate(type = "forecast")
    ) %>%
    dplyr::left_join(w_range, by = c("start", "type"))
  # Fitting each model on each partition ("train") and on the full series ("forecast")
  fc_output <- lapply(base::seq_along(grid_df$model_id), function(i){
    ts.obj <- train <- test <- md <- fc <- arg <- NULL
    ts.obj <- stats::window(input,
                            start = stats::time(input)[grid_df$start[i]],
                            end = stats::time(input)[grid_df$end[i]])
    if(grid_df$type[i] == "train"){
      ts_partitions <- TSstudio::ts_split(ts.obj = ts.obj, sample.out = train_method$sample.out)
      train <- ts_partitions$train
      test <- ts_partitions$test
      if(!base::is.null(xreg)){
        xreg_base <- xreg$train[grid_df$start[i]:grid_df$end[i],]
        xreg_train <- xreg_base[1:base::length(train),]
        xreg_test <- xreg_base[(base::length(train) + 1):nrow(xreg_base),]
      }
      if(grid_df$methods_selected[i] == "arima"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(stats::arima,c(base::list(train), arg_xreg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_test[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(stats::arima,c(base::list(train), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "HoltWinters"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        md <- do.call(stats::HoltWinters,c(base::list(train), arg))
        fc <- forecast::forecast(md,
                                 h = grid_df$horizon[i],
                                 level = level)
      }
      if(grid_df$methods_selected[i] == "auto.arima"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(forecast::auto.arima,c(base::list(train), arg_xreg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_test[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(forecast::auto.arima,c(base::list(train), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "ets"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        md <- do.call(forecast::ets,c(base::list(train), arg))
        fc <- forecast::forecast(md,
                                 h = grid_df$horizon[i],
                                 level = level)
      }
      if(grid_df$methods_selected[i] == "nnetar"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(forecast::nnetar,c(base::list(train), arg_xreg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_test[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(forecast::nnetar,c(base::list(train), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "tslm"){
        # tslm model must have formula argument
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
          # Validate the formula
          if(!"formula" %in% base::names(arg)){
            stop("Error on the 'train_method' argument: cannot run 'tslm' model without the 'formula' argument")
          }
          f <- base::Reduce(base::paste, base::deparse(arg$formula))
          tilde <- base::regexpr("~", f) %>% base::as.numeric()
          # If the tilde is missing return error
          if(tilde == -1){
            stop("Error on the 'train_method' argument: cannot run 'tslm' model without the 'formula' argument")
          }
          # If the formula is good, customize the xreg argument
          # Parsing the formula
          f1 <- base::substr(f, tilde + 1, base::nchar(f))
          f2 <- base::gsub('[\\+]' , "", base::gsub('\"', "", f1))
          f3 <- base::unlist(base::strsplit(x = f2, split = " "))
          f4 <- f3[base::which(f3 != "")]
          # Checking for external variables
          if(any(!f4 %in% c("trend", "season"))){
            if(!f4[which(!f4 %in% c("trend", "season"))] %in% base::names(xreg$train)){
              stop(base::paste("Error on the tslm model formula: the ",
                               f4[which(!f4 %in% c("trend", "season"))],
                               "variables could not be found on the xreg input",
                               sep = " "))
            }
            arg$data <- xreg_train
            arg$formula <- stats::as.formula(base::paste("train ~ ",
                                                         base::paste0(f4, collapse = "+"),
                                                         sep = ""))
            md <- do.call(forecast::tslm, arg)
            fc <- forecast::forecast(md,
                                     h = grid_df$horizon[i],
                                     newdata = xreg_test,
                                     level = level)
          } else {
            arg$formula <- stats::as.formula(base::paste("train ~ ",
                                                         base::paste0(f4, collapse = "+"),
                                                         sep = ""))
            md <- do.call(forecast::tslm, arg)
            fc <- forecast::forecast(md,
                                     h = grid_df$horizon[i],
                                     level = level)
          }
        } else {
          stop("Error on the 'train_method' argument: cannot run 'tslm' model without the function's arguments")
        }
      }
    } else if(grid_df$type[i] == "forecast"){
      if(!base::is.null(xreg)){
        xreg_forecast <- xreg_train <- NULL
        xreg_train <- xreg$train[grid_df$start[i]:grid_df$end[i],]
        xreg_forecast <- xreg$forecast
      }
      if(grid_df$methods_selected[i] == "arima"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(stats::arima,c(base::list(ts.obj), arg_xreg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_forecast[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(stats::arima,c(base::list(ts.obj), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "HoltWinters"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        md <- do.call(stats::HoltWinters,c(base::list(ts.obj), arg))
        fc <- forecast::forecast(md,
                                 h = grid_df$horizon[i],
                                 level = level)
      }
      if(grid_df$methods_selected[i] == "auto.arima"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(forecast::auto.arima,c(base::list(ts.obj), arg_xreg))
          # Bug fix: the final forecast must use the future regressors
          # (xreg_forecast); xreg_test is not defined in this branch
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_forecast[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(forecast::auto.arima,c(base::list(ts.obj), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "ets"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        md <- do.call(forecast::ets,c(base::list(ts.obj), arg))
        fc <- forecast::forecast(md,
                                 h = grid_df$horizon[i],
                                 level = level)
      }
      if(grid_df$methods_selected[i] == "nnetar"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(forecast::nnetar,c(base::list(ts.obj), arg_xreg))
          # Bug fix: the final forecast must use the future regressors
          # (xreg_forecast); xreg_test is not defined in this branch
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_forecast[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(forecast::nnetar,c(base::list(ts.obj), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "tslm"){
        # tslm model must have formula argument
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
          # Validate the formula
          if(!"formula" %in% base::names(arg)){
            stop("Error on the 'train_method' argument: cannot run 'tslm' model without the 'formula' argument")
          }
          f <- base::Reduce(base::paste, base::deparse(arg$formula))
          tilde <- base::regexpr("~", f) %>% base::as.numeric()
          # If the tilde is missing return error
          if(tilde == -1){
            stop("Error on the 'train_method' argument: cannot run 'tslm' model without the 'formula' argument")
          }
          # If the formula is good, customize the xreg argument
          # Parsing the formula
          f1 <- base::substr(f, tilde + 1, base::nchar(f))
          f2 <- base::gsub('[\\+]' , "", base::gsub('\"', "", f1))
          f3 <- base::unlist(base::strsplit(x = f2, split = " "))
          f4 <- f3[base::which(f3 != "")]
          # Checking for external variables
          if(any(!f4 %in% c("trend", "season"))){
            if(!f4[which(!f4 %in% c("trend", "season"))] %in% base::names(xreg$train)){
              stop(base::paste("Error on the tslm model formula: the ",
                               f4[which(!f4 %in% c("trend", "season"))],
                               "variables could not be found on the xreg input",
                               sep = " "))
            }
            arg$data <- xreg_train
            arg$formula <- stats::as.formula(base::paste("ts.obj ~ ",
                                                         base::paste0(f4, collapse = "+"),
                                                         sep = ""))
            md <- do.call(forecast::tslm, arg)
            fc <- forecast::forecast(md,
                                     h = grid_df$horizon[i],
                                     newdata = xreg_forecast,
                                     level = level)
          } else {
            arg$formula <- stats::as.formula(base::paste("ts.obj ~ ",
                                                         base::paste0(f4, collapse = "+"),
                                                         sep = ""))
            md <- do.call(forecast::tslm, arg)
            fc <- forecast::forecast(md,
                                     h = grid_df$horizon[i],
                                     level = level)
          }
        } else {
          stop("Error on the 'train_method' argument: cannot run 'tslm' model without the function's arguments")
        }
      }
    }
    output <- list(model = md,
                   forecast = fc,
                   parameters = base::list(
                     type = grid_df$type[i],
                     model_id = grid_df$model_id[i],
                     method = grid_df$methods_selected[i],
                     horizon = grid_df$horizon[i],
                     partition = grid_df$partition[i]))
    return(output)
  })
  # Reorganizing the raw output by partition, first for the training partitions
  input_window <- grid_df %>% dplyr::select(start, end, horizon, partition) %>% dplyr::distinct()
  t <- base::which(fc_output %>% purrr::map("parameters") %>% purrr::map_chr("type") == "train")
  p1 <- fc_output[t] %>% purrr::map("parameters") %>% purrr::map_chr("partition") %>% base::unique()
  training <- lapply(base::seq_along(p1), function(i1){
    l <- NULL
    l <- base::which(fc_output[t] %>% purrr::map("parameters") %>% purrr::map_chr("partition") == p1[i1])
    md_id <- fc_output[l] %>% purrr::map("parameters") %>% purrr::map_chr("model_id")
    ts.obj <- ts_partitions <- train <- test <- NULL
    ts.obj <- stats::window(input,
                            start = stats::time(input)[input_window$start[which(input_window$partition == p1[i1])]],
                            end = stats::time(input)[input_window$end[which(input_window$partition == p1[i1])]])
    ts_partitions <- TSstudio::ts_split(ts.obj = ts.obj, sample.out = input_window$horizon[which(input_window$partition == p1[i1])])
    partition_output <- lapply(l, function(i2){
      x <- fc_output[[i2]]
      y <- base::list()
      y[[x$parameters$model_id]] <- list(model = x$model, forecast = x$forecast, parameters = x$parameters)
    }) %>% stats::setNames(md_id)
    partition_output$train <- ts_partitions$train
    partition_output$test <- ts_partitions$test
    return(partition_output)
  }) %>% stats::setNames(p1)
  # And then for the final forecast partition
  f <- base::which(fc_output %>% purrr::map("parameters") %>% purrr::map_chr("type") == "forecast")
  p2 <- fc_output[f] %>% purrr::map("parameters") %>% purrr::map_chr("partition") %>% base::unique()
  forecast <- lapply(base::seq_along(p2), function(i1){
    l <- NULL
    l <- base::which(fc_output[f] %>% purrr::map("parameters") %>% purrr::map_chr("partition") == p2[i1])
    md_id <- fc_output[l] %>% purrr::map("parameters") %>% purrr::map_chr("model_id")
    partition_output <- lapply(l, function(i2){
      x <- fc_output[[i2]]
      y <- base::list()
      y[[x$parameters$model_id]] <- list(model = x$model, forecast = x$forecast, parameters = x$parameters)
    }) %>% stats::setNames(md_id)
    ts.obj <- NULL
    ts.obj <- stats::window(input,
                            start = stats::time(input)[input_window$start[which(input_window$partition == p2[i1])]],
                            end = stats::time(input)[input_window$end[which(input_window$partition == p2[i1])]])
    partition_output$train <- ts.obj
    return(partition_output)
  }) %>% stats::setNames(p2)
  # Scoring each model on the backtesting partitions (MAPE, RMSE, PI coverage)
  error_summary <- lapply(models_df$model_id, function(m){
    f <- training[p1] %>% purrr::map(m) %>% purrr::map("forecast") %>% purrr::map("mean")
    p <- f %>% base::names()
    a <- training[p1] %>% purrr::map("test")
    u <- training[p1] %>% purrr::map(m) %>% purrr::map("forecast") %>% purrr::map("upper")
    l <- training[p1] %>% purrr::map(m) %>% purrr::map("forecast") %>% purrr::map("lower")
    levels <- training[p1] %>% purrr::map(m) %>% purrr::map("forecast") %>% purrr::map("level")
    error_df <- lapply(base::seq_along(p),function(n){
      df <- coverage_df <- NULL
      # A single confidence level returns a vector - coerce to a matrix
      if(base::is.null(base::colnames(u[[p[n]]]))){
        if(base::is.null(base::dim(u[[p[n]]]))){
          u[[p[n]]] <- u[[p[n]]] %>% as.matrix()
          l[[p[n]]] <- l[[p[n]]] %>% as.matrix()
        }
      }
      base::colnames(u[[p[n]]]) <- base::paste0(levels[[p[n]]], "%")
      base::colnames(l[[p[n]]]) <- base::paste0(levels[[p[n]]], "%")
      # Share of actuals that fall inside the prediction intervals
      coverage_df <- lapply(base::colnames(u[[p[n]]]), function(i){
        df <- base::data.frame(coverage = base::sum(ifelse(u[[p[n]]][, i] >= a[[p[n]]] & l[[p[n]]][, i] <= a[[p[n]]], 1, 0)) / base::length(u[[p[n]]][, i]))
        return(df)
      }) %>% dplyr::bind_rows() %>%
        base::t() %>%
        base::as.data.frame() %>%
        stats::setNames(base::paste0("coverage_", base::colnames(u[[p[n]]])))
      df <- base::cbind(base::data.frame(partition = n,
                                         model_id = m,
                                         mape = base::mean(base::abs(f[[p[n]]] - a[[p[n]]]) / a[[p[n]]]),
                                         rmse = (base::mean((a[[p[n]]] - f[[p[n]]]) ^ 2)) ^ 0.5,
                                         stringsAsFactors = FALSE),
                        coverage_df)
      return(df)
    }) %>% dplyr::bind_rows()
    return(error_df)
  }) %>% stats::setNames(models_df$model_id)
  # Averaging the error metrics across partitions to build the leaderboard
  leaderboard <- error_summary %>% dplyr::bind_rows() %>%
    dplyr::group_by(model_id) %>%
    dplyr::summarise_all(~mean(.)) %>% dplyr::select(-partition) %>%
    dplyr::left_join(models_df %>%
                       dplyr::select(model_id, model = methods_selected, notes),
                     by = "model_id") %>%
    dplyr::select(model_id, model, notes, dplyr::everything())
  base::names(leaderboard) <- c("model_id",
                                "model",
                                "notes",
                                base::paste0("avg_", base::names(leaderboard)[4:base::ncol(leaderboard)]))
  if(error == "MAPE"){
    leaderboard <- leaderboard %>% dplyr::arrange(avg_mape)
  } else if(error == "RMSE"){
    leaderboard <- leaderboard %>% dplyr::arrange(avg_rmse)
  }
  output <- base::list(train = training,
                       forecast = forecast$final_partition,
                       input = input,
                       error_summary = error_summary,
                       leaderboard = leaderboard,
                       parameters = list(methods = methods,
                                         train_method = train_method,
                                         horizon = horizon,
                                         xreg = xreg,
                                         error_metric = error,
                                         level = level))
  print(leaderboard)
  class(output) <- "train_model"
  return(output)
}
#' Build the \code{\link[TSstudio]{train_model}} Function's Components
#' @description Add, edit, or remove the components of the \code{\link[TSstudio]{train_model}} function
#' @export
#' @param model.obj The train_model skeleton, created by the create_model
#' function or edited by add_input, add_methods, remove_methods, add_train_method or add_horizon
#' @param input A univariate time series object (ts class)
#' @param methods A list, defines the models to use for training and forecasting the series.
#' The list must include a sub list with the model type, and the model's arguments (when applicable) and notes about the model.
#' The sub-list name will be used as the model ID. Possible models:
#'
#' \code{\link[stats]{arima}} - model from the stats package
#'
#' \code{\link[forecast]{auto.arima}} - model from the forecast package
#'
#' \code{\link[forecast]{ets}} - model from the forecast package
#'
#' \code{\link[stats]{HoltWinters}} - model from the stats package
#'
#' \code{\link[forecast]{nnetar}} - model from the forecast package
#'
#' \code{\link[forecast]{tslm}} - model from the forecast package (note that the 'tslm' model must have the formula argument in the 'method_arg' argument)
#'
#' @param train_method A list, defines the train approach, either using a single testing partition (sample out)
#' or use multiple testing partitions (backtesting). The list should include the training method argument, (please see 'details' for the structure of the argument)
#' @param method_ids A character, defines the IDs of the model methods to be remove with the remove_methods function
#' @param horizon An integer, defines the forecast horizon
#' @param xreg Optional, a list with two vectors (e.g., data.frame or matrix) of external regressors,
#' one vector corresponding to the input series and second to the forecast itself
#' (e.g., must have the same length as the input and forecast horizon, respectively)
#' @param error A character, defines the error metrics to be used to sort the models leaderboard. Possible metric - "MAPE" or "RMSE"
#' @param level An integer, set the confidence level of the prediction intervals
#' @examples
#'
#' ### Building train_model function by adding its different components
#' # Create a skeleton model
#' md <- create_model()
#'
#' class(md)
#'
#' # Add input
#' data(USgas)
#' md <- add_input(model.obj = md, input = USgas)
#'
#' # Add methods
#' methods <- list(ets1 = list(method = "ets",
#' method_arg = list(opt.crit = "lik"),
#' notes = "ETS model with opt.crit = lik"),
#' ets2 = list(method = "ets",
#' method_arg = list(opt.crit = "amse"),
#' notes = "ETS model with opt.crit = amse"),
#' arima1 = list(method = "arima",
#' method_arg = list(order = c(1,1,1),
#' seasonal = list(order = c(1,0,1))),
#' notes = "SARIMA(1,1,1)(1,0,1)"))
#'
#' md <- add_methods(model.obj = md, methods = methods)
#'
#' # Add additional methods
#' methods2 <- list(arima2 = list(method = "arima",
#' method_arg = list(order = c(2,1,2),
#' seasonal = list(order = c(1,1,1))),
#' notes = "SARIMA(2,1,2)(1,1,1)"),
#' hw = list(method = "HoltWinters",
#' method_arg = NULL,
#' notes = "HoltWinters Model"),
#' tslm = list(method = "tslm",
#' method_arg = list(formula = input ~ trend + season),
#' notes = "tslm model with trend and seasonal components"))
#'
#' md <- add_methods(model.obj = md, methods = methods2)
#'
#' # Remove methods
#' md <- remove_methods(model.obj = md, method_ids = c("ets2", "auto_arima"))
#'
#' # Add train method
#' md <- add_train_method(model.obj = md, train_method = list(partitions = 6,
#' sample.out = 12,
#' space = 3))
#'
#'
#' # Set the forecast horizon
#' md <- add_horizon(model.obj = md, horizon = 12)
#'
#' # Add the forecast prediction intervals confidence level
#' md <- add_level(model.obj = md, level = c(90, 95))
#'
#' ### Alternatively, pipe the function with the magrittr package
#'
#' library(magrittr)
#'
#' md <- create_model() %>%
#' add_input(input = USgas) %>%
#' add_methods(methods = methods) %>%
#' add_methods(methods = methods2) %>%
#' add_train_method(train_method = list(partitions = 6,
#' sample.out = 12,
#' space = 3)) %>%
#' add_horizon(horizon = 12) %>%
#' add_level(level = c(90, 95))
#'
#' # Run the model
#' fc <- md %>% build_model()
create_model <- function(){
  # Initialize an empty "train_model" skeleton. Each component is NULL until
  # it is populated by add_input, add_methods, add_train_method, or add_horizon.
  skeleton <- base::vector(mode = "list", length = 4L)
  base::names(skeleton) <- c("input", "methods", "train_method", "horizon")
  class(skeleton) <- "train_model"
  return(skeleton)
}
#' @export
#' @rdname create_model
#'
add_input <- function(model.obj, input){
  # Attach a univariate ts object to a "train_model" skeleton.
  # If an input already exists, the user is prompted (interactively)
  # before it is overwritten.
  # Error handling
  # Checking the input object
  if(!stats::is.ts(input)){
    stop("The input argument is not a valid 'ts' object")
  } else if(stats::is.mts(input)){
    stop("Cannot use multiple time series object as an input")
  }
  # Checking the model.obj
  # inherits() is the robust way to test a class (class() may return a
  # vector of length > 1, which breaks an equality comparison)
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  } else if("input" %in% base::names(model.obj) && base::is.null(model.obj$input)){
    model.obj$input <- input
  } else if("input" %in% base::names(model.obj) && !base::is.null(model.obj$input)){
    # Existing input - ask the user before overwriting it
    q <- base::tolower(base::readline("The 'model.obj' already has input object, do you want to overwrite it? yes/no "))
    if(q == "y" || q == "yes"){
      model.obj$input <- input
    } else if( q == "n" || q == "no"){
      warning("The 'input' was not added to the model object")
    } else {
      stop("Invalid input...")
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
add_methods <- function(model.obj, methods){
  # Add (or merge) a named list of modeling methods into a "train_model"
  # skeleton. Each element must contain a 'method' field that matches one of
  # the supported models; existing IDs trigger an interactive overwrite prompt.
  method_list <- models_df <- NULL
  method_list <- list("arima", "auto.arima", "ets", "HoltWinters", "nnetar", "tslm")
  # Error handling
  # Checking the model.obj class - inherits() is robust when the object
  # carries more than one class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Validating the methods object
  if(!base::is.list(methods)){
    stop("Error on the 'methods' argument: the argument is not a list")
  } else if(base::is.null(base::names(methods))){
    stop("Error on the 'methods' argument: could not find the models IDs")
  } else if(base::any("NULL" %in% base::as.character(base::lapply(methods, function(m) m[["method"]])))){
    # as.character() turns a missing 'method' (NULL) into the string "NULL"
    stop("Error on the 'methods' argument: at least one of the methods is missing the 'method' argument")
  }
  if(!base::all(base::vapply(methods, function(m) m[["method"]], base::character(1)) %in% method_list)){
    stop("Error on the 'methods' argument: at least one of the models methods is not valid")
  }
  # Adding the methods to the model.obj object
  if(("methods" %in% base::names(model.obj) && base::is.null(model.obj$methods))|| !"methods" %in% base::names(model.obj)){
    model.obj$methods <- methods
    # In case the object has existing methods
  } else if("methods" %in% base::names(model.obj) && !base::is.null(model.obj$methods)) {
    # Validating that the method ID does not exist already
    for(i in base::names(methods)){
      if(i %in% base::names(model.obj$methods)){
        q <- base::tolower(base::readline(base::paste("The", i, "method already exists in the model object, do you wish to overwrite it? yes/no ", sep = " ")))
        if(q == "y" || q == "yes"){
          model.obj$methods[[i]] <- methods[[i]]
        } else{
          warning(base::paste("Method", i, "were not added", sep = " "))
        }
      } else {
        model.obj$methods[[i]] <- methods[[i]]
      }
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
remove_methods <- function(model.obj, method_ids){
  # Remove one or more methods, identified by their IDs, from a
  # 'train_model' object. Unknown IDs trigger a warning only.
  #
  # model.obj  - a 'train_model' object with a non-empty 'methods' slot
  # method_ids - a character vector of method IDs to drop
  # Returns the updated 'train_model' object.

  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Checking the method_ids argument
  if(!base::is.character(method_ids)){
    stop("The 'method_ids' argument is not valid input")
  }
  if(base::is.null(model.obj$methods)){
    stop("The input model object does not have any available method")
  }
  for(i in method_ids){
    if(i %in% base::names(model.obj$methods)){
      # Assigning NULL into a list element deletes it
      model.obj$methods[[i]] <- NULL
    } else {
      warning(base::paste("The", i, "does not exist on the model object"))
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
add_train_method <- function(model.obj, train_method){
  # Set the backtesting parameters of a 'train_model' object.
  #
  # train_method must be a list with three positive integers:
  #   partitions - number of training/testing partitions
  #   sample.out - length (in observations) of each testing partition
  #   space      - expanding-window step between partitions
  # An existing train_method is only overwritten after confirmation
  # (an empty answer counts as "yes").
  q <- NULL
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Checking the train argument
  if(!base::is.list(train_method)){
    stop("Error on the 'train_method' argument: the argument is not a list")
  } else if(!"partitions" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'partition' argument is missing")
  } else if(!"space" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'space' argument is missing")
  } else if(!"sample.out" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'sample.out' argument is missing")
  } else if(!base::is.numeric(train_method$sample.out) ||
            train_method$sample.out < 1 ||
            train_method$sample.out %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'sample.out' argument is not valid, please use a positive integer")
  } else if(!base::is.numeric(train_method$partitions) ||
            train_method$partitions < 1 ||
            train_method$partitions %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'partitions' argument is not valid, please use a positive integer")
  } else if(!base::is.numeric(train_method$space) ||
            train_method$space < 1 ||
            train_method$space %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'space' argument is not valid, please use a positive integer")
  }
  # Adding the train object
  if(base::is.null(model.obj$train_method)){
    model.obj$train_method <- train_method
  } else {
    q <- base::tolower(base::readline("The model object already has train method, do you wish to overwrite it? (yes) "))
    if(q == "y" || q == "yes" || q == ""){
      model.obj$train_method <- train_method
    } else{
      warning("Did not update the train method")
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
add_horizon <- function(model.obj, horizon){
  # Set the forecast horizon of a 'train_model' object. An existing
  # horizon is only overwritten after user confirmation.
  #
  # model.obj - a 'train_model' object
  # horizon   - the number of periods to forecast
  # Returns the updated 'train_model' object.

  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  if(base::is.null(model.obj$horizon)){
    model.obj$horizon <- horizon
  } else {
    q <- base::tolower(base::readline("The model object already has horizon, do you wish to overwrite it? yes/no "))
    if(q == "y" || q == "yes"){
      model.obj$horizon <- horizon
    } else{
      warning("No change had made on the model 'horizon' argument")
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
build_model <- function(model.obj){
  # Validate that a 'train_model' skeleton is complete and dispatch it
  # to TSstudio::train_model(). Missing optional components receive
  # defaults: error = "MAPE", level = c(80, 95), xreg = NULL.
  #
  # Returns the output of TSstudio::train_model().

  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Mandatory components - fail fast before calling train_model.
  # [[ uses exact matching (no $ partial-name surprises) and returns
  # NULL for both a missing and a NULL slot.
  if(base::is.null(model.obj[["horizon"]])){
    stop("Cannot build a model, the 'horizon' argument is missing")
  }
  if(base::is.null(model.obj[["methods"]])){
    stop("Cannot build a model, the 'methods' argument is missing")
  }
  if(base::is.null(model.obj[["train_method"]])){
    stop("Cannot build a model, the 'train_method' argument is missing")
  }
  if(base::is.null(model.obj[["input"]])){
    stop("Cannot build a model, the 'input' argument is missing")
  }
  # Optional components - set the defaults when not defined
  if(!"error" %in% base::names(model.obj)){
    model.obj$error <- "MAPE"
  }
  if(!"level" %in% base::names(model.obj)){
    model.obj$level <- c(80, 95)
  }
  if(!"xreg" %in% base::names(model.obj)){
    model.obj$xreg <- NULL
  }
  output <- TSstudio::train_model(input = model.obj$input,
                                  methods = model.obj$methods,
                                  train_method = model.obj$train_method,
                                  horizon = model.obj$horizon,
                                  xreg = model.obj$xreg,
                                  error = model.obj$error,
                                  level = model.obj$level)
  return(output)
}
#' @export
#' @rdname create_model
#'
set_error <- function(model.obj, error){
  # Set the error metric of a 'train_model' object - either "MAPE" or
  # "RMSE". An existing metric is only overwritten after confirmation
  # (an empty answer counts as "yes").
  #
  # Returns the updated 'train_model' object.

  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Check the error argument (must be a single valid metric name)
  if(base::is.null(error) || !base::is.character(error) ||
     base::length(error) != 1 || !error %in% c("MAPE", "RMSE")){
    stop("Error on the 'error' argument: the input is not valid, please use either 'RMSE' or 'MAPE'")
  }
  # is.null covers both a NULL 'error' slot and a missing element
  if(base::is.null(model.obj$error)){
    model.obj$error <- error
  } else {
    q <- base::tolower(base::readline("The model object already has 'error' argument, do you wish to overwrite it? (yes) "))
    if(q == "y" || q == "yes" || q == ""){
      model.obj$error <- error
    } else{
      warning("No change had made on the model 'error' argument")
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
add_xreg <- function(model.obj, xreg){
  # Set the external regressors of a 'train_model' object. 'xreg' must
  # be NULL or a list with two components: 'train' (regressors aligned
  # with the input series) and 'forecast' (regressors for the forecast
  # horizon). An existing xreg is only overwritten after confirmation.
  q <- NULL
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Checking the xreg argument
  if(!base::is.null(xreg)){
    if(!all(c("train", "forecast") %in% base::names(xreg))){
      stop("Error on the 'xreg' argument: the 'xreg' list is not valid, please make sure setting the corresponding regressor",
           " inputs for the 'input' argument (train) and for the forecast horizon (forecast)")
    }
  }
  # is.null covers both a NULL 'xreg' slot and a missing element
  if(base::is.null(model.obj$xreg)){
    model.obj$xreg <- xreg
  } else {
    q <- base::tolower(base::readline("The model object already has 'xreg' argument, do you wish to overwrite it? (yes) "))
    if(q == "y" || q == "yes" || q == ""){
      model.obj$xreg <- xreg
    } else{
      warning("No change had made on the model 'xreg' argument")
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
add_level <- function(model.obj, level){
  # Set the confidence levels of a 'train_model' object - a vector of
  # integers in the range (0, 100]. An existing level is only
  # overwritten after confirmation (an empty answer counts as "yes").
  q <- NULL
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Check the level argument - integers within (0, 100]
  if(base::all(!base::is.numeric(level)) ||
     base::any(level %% 1 != 0) ||
     base::any(level <= 0 | level > 100)){
    stop("Error on the 'level' argument: the argument is out of range (0,100]")
  }
  if(base::is.null(model.obj$level)){
    model.obj$level <- level
  } else {
    q <- base::tolower(base::readline("The model object already has 'level', do you wish to overwrite it? (yes) "))
    if(q == "y" || q == "yes" || q == ""){
      model.obj$level <- level
    } else{
      warning("No change had made on the model 'level' argument")
    }
  }
  return(model.obj)
}
#' Plot the Models Performance on the Testing Partitions
#' @export
#' @details The plot_model provides a visualization of the models performance on the testing partitions for the train_model function output
#' @param model.obj A train_model object
#' @param model_ids A character, defines the trained models to plot, if set to NULL (default), will plot all the models
#' @return Animation of models forecast on the testing partitions compared to the actuals
#' @examples
#' # Defining the models and their arguments
#' methods <- list(ets1 = list(method = "ets",
#' method_arg = list(opt.crit = "lik"),
#' notes = "ETS model with opt.crit = lik"),
#' ets2 = list(method = "ets",
#' method_arg = list(opt.crit = "amse"),
#' notes = "ETS model with opt.crit = amse"),
#' arima1 = list(method = "arima",
#' method_arg = list(order = c(2,1,0)),
#' notes = "ARIMA(2,1,0)"),
#' arima2 = list(method = "arima",
#' method_arg = list(order = c(2,1,2),
#' seasonal = list(order = c(1,1,1))),
#' notes = "SARIMA(2,1,2)(1,1,1)"),
#' hw = list(method = "HoltWinters",
#' method_arg = NULL,
#' notes = "HoltWinters Model"),
#' tslm = list(method = "tslm",
#' method_arg = list(formula = input ~ trend + season),
#' notes = "tslm model with trend and seasonal components"))
#' # Training the models with backtesting
#' md <- train_model(input = USgas,
#' methods = methods,
#' train_method = list(partitions = 6,
#' sample.out = 12,
#' space = 3),
#' horizon = 12,
#' error = "MAPE")
#' # Plot the models performance on the testing partitions
#' plot_model(model.obj = md)
#'
#' # Plot only the ETS models
#' plot_model(model.obj = md , model_ids = c("ets1", "ets2"))
#'
plot_model <- function(model.obj, model_ids = NULL){
  # Animated plot of the trained models point forecasts against the
  # actuals over the backtesting testing partitions.
  #
  # model.obj - output of the train_model function
  # model_ids - optional character vector, subset of model IDs to plot
  # Returns a plotly object with one animation frame per partition.
  m <- p <- ac_df <- fc_df <- df <- output <- obj_name <- NULL
  obj_name <- obj.name <- base::deparse(base::substitute(model.obj))
  # Error handling - validate before touching any suggested package
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  `%>%` <- magrittr::`%>%`
  m <- model.obj$parameters$methods %>% base::names()
  if(base::is.null(m)){
    stop("Error on the 'model.obj' argument: cannot find any method in the 'model.obj' argument")
  }
  if(!base::is.null(model_ids)){
    if(!base::all(model_ids %in% m)){
      stop("Error on the 'model_ids' argument: cannot find some (or all) of the model ids in the 'model.obj' object")
    }
    m <- model_ids
  }
  p <- model.obj$parameters$train_method$partitions
  # The actuals, replicated once per partition (one animation frame each)
  ac_df <- base::data.frame(y = rep(base::as.numeric(model.obj$input), p),
                            time = base::rep(base::as.numeric(stats::time(model.obj$input)), p),
                            partition = base::rep(1:p, each = base::length(model.obj$input)),
                            type = "actual")
  # The point forecasts of each model on each testing partition
  fc_df <- lapply(m, function(i){
    df1 <- df2 <- NULL
    df1 <- model.obj$train %>%
      purrr::map(~.x[[i]]) %>%
      purrr::map(~.x[["forecast"]]) %>%
      purrr::map(~.x[["mean"]]) %>%
      dplyr::bind_cols()
    for(c in 1:base::ncol(df1)){
      temp <- df3 <- NULL
      temp <- df1[, c] %>%
        as.data.frame() %>%
        stats::setNames("y")
      df3 <- base::data.frame(y = base::as.numeric(temp$y),
                              time = base::as.numeric(stats::time(temp$y)),
                              partition = c,
                              type = i,
                              stringsAsFactors = FALSE)
      df2 <- dplyr::bind_rows(df2, df3)
    }
    # NOTE(review): the original recomputed df1 here after the loop;
    # that result was never used and has been removed as dead code.
    return(df2)
  }) %>% dplyr::bind_rows()
  df <- rbind(fc_df, ac_df)
  output <- plotly::plot_ly(data = df,
                            x = ~ time,
                            y = ~ y,
                            split = ~ type,
                            frame = ~ partition,
                            type = 'scatter',
                            mode = 'lines',
                            line = list(simplyfy = F)) %>%
    # The original passed 'title' twice (the second, empty, one won);
    # keep only the informative title
    plotly::layout(title = base::paste(obj_name, "Models Performance by Testing Partitions", sep = " "),
                   margin = 50,
                   xaxis = list(
                     title = "Date",
                     zeroline = F),
                   yaxis = list(
                     title = "",
                     zeroline = F
                   ),
                   font = list(color = "black"),
                   plot_bgcolor = "white",
                   paper_bgcolor = "white"
    ) %>%
    plotly::animation_opts(
      frame = 500,
      transition = 0,
      redraw = F
    ) %>%
    plotly::animation_slider(
      hide = F
    ) %>%
    plotly::animation_button(
      x = 1, xanchor = "right", y = 0, yanchor = "bottom"
    )
  return(output)
}
#' Plot the Models Error Metric on the Testing Partitions
#' @export
#' @details The plot_error provides a visualization of the models error rates on the testing partitions for the train_model function output
#' @param model.obj A train_model object
#' @param error A character, defines the type of error metrics to plot, possible metric - "MAPE" or "RMSE"
#' @param palette A character, defines the color palette to use on the plot; use row.names(RColorBrewer::brewer.pal.info) to view the possible color palettes
#' @return A plot with a summary of the models error rate by testing partition
#' @examples
#' # Defining the models and their arguments
#' methods <- list(ets1 = list(method = "ets",
#' method_arg = list(opt.crit = "lik"),
#' notes = "ETS model with opt.crit = lik"),
#' ets2 = list(method = "ets",
#' method_arg = list(opt.crit = "amse"),
#' notes = "ETS model with opt.crit = amse"),
#' arima1 = list(method = "arima",
#' method_arg = list(order = c(2,1,0)),
#' notes = "ARIMA(2,1,0)"),
#' arima2 = list(method = "arima",
#' method_arg = list(order = c(2,1,2),
#' seasonal = list(order = c(1,1,1))),
#' notes = "SARIMA(2,1,2)(1,1,1)"),
#' hw = list(method = "HoltWinters",
#' method_arg = NULL,
#' notes = "HoltWinters Model"),
#' tslm = list(method = "tslm",
#' method_arg = list(formula = input ~ trend + season),
#' notes = "tslm model with trend and seasonal components"))
#' # Training the models with backtesting
#' md <- train_model(input = USgas,
#' methods = methods,
#' train_method = list(partitions = 6,
#' sample.out = 12,
#' space = 3),
#' horizon = 12,
#' error = "MAPE")
#'
#' # Plot the models performance on the testing partitions
#' plot_error(model.obj = md)
#'
plot_error <- function(model.obj, error = "MAPE", palette = "Set1"){
  # Line plot + box plot summary of the models error metric across the
  # backtesting testing partitions.
  #
  # model.obj - output of the train_model function
  # error     - "MAPE" (default) or "RMSE"
  # palette   - an RColorBrewer palette name
  # Returns a plotly subplot (per-partition lines, per-model boxes).

  # Error handling - validate before touching any suggested package
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Checking the error argument
  if(error != "MAPE" && error != "RMSE"){
    stop("Error on the 'error' argument: invalid error metric, can use either 'MAPE' or 'RMSE'")
  }
  `%>%` <- magrittr::`%>%`
  m <- n_colors <- colors_list <- p1 <- p2 <- output <- error_df <- model_id <- NULL
  # Convert a hex color to an "r,g,b" string (for the rgba() fill color)
  hex_to_rgb <- function(hex){
    rgb <- base::paste0(as.numeric(grDevices::col2rgb(hex) %>% base::t()), collapse = ",")
    return(rgb)
  }
  error_df <- model.obj$error_summary %>% dplyr::bind_rows()
  m <- unique(error_df$model_id)
  palette_list <- base::row.names(RColorBrewer::brewer.pal.info)
  if(base::length(palette) != 1 || !palette %in% palette_list){
    stop("Error on the 'palette' argument: cannot find the color palette on the RColorBrewer palettes list, ",
         "use row.names(RColorBrewer::brewer.pal.info) to view possible color palettes")
  }
  # One distinct color per model, interpolated from the chosen palette
  n_colors <- RColorBrewer::brewer.pal.info$maxcolors[palette_list == palette]
  colors_list <- grDevices::colorRampPalette(RColorBrewer::brewer.pal(n_colors, palette))(base::length(m))
  # The MAPE values are stored as fractions, hence scaled by 100 below;
  # RMSE is plotted as is (the two branches otherwise matched, so they
  # are unified here)
  yaxis_opt <- if(error == "MAPE"){
    list(title = "MAPE", ticksuffix = '%')
  } else {
    list(title = "RMSE")
  }
  p1 <- plotly::plot_ly()
  p2 <- plotly::plot_ly()
  for(i in base::seq_along(m)){
    df <- y_vals <- NULL
    df <- error_df %>% dplyr::filter(model_id == m[i])
    y_vals <- if(error == "MAPE") df$mape * 100 else df$rmse
    p1 <- p1 %>% plotly::add_lines(x = df$partition, y = y_vals, name = m[i],
                                   showlegend = TRUE,
                                   legendgroup = m[i],
                                   line = list(color = colors_list[i]))
    p2 <- p2 %>% plotly::add_trace(y = y_vals, name = m[i],
                                   type = "box",
                                   fillcolor = base::paste("rgba(", hex_to_rgb(colors_list[i]), ", 0.5)", sep = ""),
                                   line = list(color = colors_list[i]),
                                   marker = list(color = colors_list[i]),
                                   boxpoints = "all",
                                   jitter = 0.3,
                                   pointpos = -1.8,
                                   showlegend = FALSE,
                                   legendgroup = m[i])
  }
  p1 <- p1 %>% plotly::layout(yaxis = yaxis_opt,
                              xaxis = list(title = "Partition"))
  p2 <- p2 %>% plotly::layout(yaxis = yaxis_opt,
                              xaxis = list(title = "Partition"))
  output <- plotly::subplot(p1, p2, nrows = 1, shareY = T) %>%
    plotly::layout(title = base::paste("Model Performance by Testing Partition -", error, sep = " "))
  return(output)
}
|
/R/train_functions.R
|
permissive
|
tarinishukla/TSstudio
|
R
| false
| false
| 118,894
|
r
|
#' Evaluation Function for Forecasting Models
#' @export ts_backtesting
#' @param ts.obj A univariate time series object of a class "ts"
#' @param models String, define the type of models to use in the training function:
#'
#' 'a' - auto.arima (forecast package)
#'
#' 'b' - Bayesian Structural Time Series (bsts package)
#'
#' 'e' - ets (forecast package)
#'
#' 'h' - hybrid time series model (forecastHybrid package)
#'
#' 'n' - Neural Network Time Series (forecast package)
#'
#' 't' - tbats (forecast package)
#'
#' 'w' - Holt Winters (stats package)
#'
#' @param periods The number of periods to evaluate the models (with a minimum of 2)
#' @param error The type of error to evaluate by - "MAPE" (default) or "RMSE"
#' @param window_size An integer, the size of the backtesting window
#' @param h Integer, the horizon of the selected forecasting model
#' @param plot Logical, if TRUE display a plot with the backtesting progress
#' @param a.arg A list, an optional arguments to pass to the \code{\link[forecast]{auto.arima}} function
#' @param b.arg A list, an optional arguments to pass to the \code{\link[bsts]{bsts}} function
#' @param e.arg A list, an optional argument to pass to the \code{\link[forecast]{ets}} function
#' @param h.arg A list, an optional argument to pass to the \code{\link[forecastHybrid]{hybridModel}} function
#' @param n.arg A list, an optional argument to pass to the \code{\link[forecast]{nnetar}} function
#' @param t.arg A list, an optional arguments to pass to the \code{\link[forecast]{tbats}} function
#' @param w.arg A list, an optional arguments to pass to the \code{\link[stats]{HoltWinters}} function
#' @param parallel Logical, if TRUE use parallel option when applicable (auto.arima, hybridModel)
#' @param xreg.h A data.frame or matrix, optional argument,
#' set the future values of the external regressors in case of using the
#' 'xreg' argument in one of the models (auto.arima, nnetar, hybrid)
#' @description Performance evaluation function for forecasting models, by training and testing the performance
#' of each model over a sequence of periods to identify the performance of a model over time
#' (both accuracy and stability)
#' @examples
#' \dontrun{
#' data(USgas)
#' USgas_backtesting <- ts_backtesting(USgas,
#' periods = 6,
#' window_size = 24,
#' h = 60,
#' error = "RMSE")
#'
#' # Selecting a specific models (auto.arima, ets and nnetar)
#' USgas_backtesting <- ts_backtesting(USgas,
#' models = "aen",
#' periods = 6,
#' window_size = 24,
#' h = 60)
#'
#' # Retrieve the models leaderboard
#' USgas_backtesting$leaderboard
#'
#'
#' # Retrieve the best forecast results
#' USgas_backtesting$leadForecast$mean
#'
#' # Retrieve the final forecast of the ets model
#' USgas_backtesting$Forecast_Final$ets$mean
#'
#' # Retrieve the ets forecast during the first period of testing
#' USgas_backtesting$period_1$ets$forecast$mean
#'
#' # Get the final plot of the models performance and the selected forecasting model
#' USgas_backtesting$summary_plot
#' }
ts_backtesting <- function(ts.obj,
models = "abehntw",
periods = 6,
error = "MAPE",
window_size = 3,
h = 3,
plot = TRUE,
a.arg = NULL,
b.arg = NULL,
e.arg = NULL,
h.arg = NULL,
n.arg = NULL,
t.arg = NULL,
w.arg = NULL,
xreg.h = NULL,
parallel = FALSE){
base::.Deprecated(new = "train_model", msg = "The 'ts_backtesting' function is deprecated, please use 'train_model' instead")
`%>%` <- magrittr::`%>%`
a <- model_list <- model_char <- color_ramp <- forecast_list <- obj.name <- NULL
variable <- value <- avgMAPE <- avgRMSE <- NULL
obj.name <- base::deparse(base::substitute(ts.obj))
# Define the model type
for(s in 1:nchar(models)){
if(!substr(models, s, s) %in% c("a", "w", "e", "n", "t", "b", "h")){
stop("The 'models' argument is not valide")
}
}
# Error handling
# Check if xreg argument is valid
if(!base::is.null(xreg.h)){
if(!"xreg" %in% names(a.arg) &
!"xreg" %in% names(n.arg) &
!"xreg" %in% names(h.arg$a.args) &
!"xreg" %in% names(h.arg$n.args) &
!"xreg" %in% names(h.arg$s.args)){
warning("There is no 'xreg' argument in any of the models arguments,",
"'xreg.h' will be ignored")
} else {
if(base::nrow(xreg.h) != h){
stop("The length of the 'xreg.h' argument is not equal to the forecast horizon")
}
}
}
# Check the xreg in a.arg is valid (if exists)
if("xreg" %in% names(a.arg)){
xreg.arima <- NULL
xreg.arima <- a.arg$xreg
if(base::nrow(xreg.arima) != base::length(ts.obj)){
stop("The length of the 'xreg' in the 'a.arg' argument is not equal to the series length")
}
}
if("xreg" %in% names(n.arg)){
xreg.nnetar <- NULL
xreg.nnetar <- n.arg$xreg
if(base::nrow(xreg.nnetar) != base::length(ts.obj)){
stop("The length of the 'xreg' in the 'n.arg' argument is not equal to the series length")
}
}
if("xreg" %in% names(h.arg$a.args)){
xreg.hybrid.arima <- NULL
xreg.hybrid.arima <- h.arg$a.args$xreg
if(base::nrow(xreg.hybrid.arima) != base::length(ts.obj)){
stop("The length of the 'xreg' of the auto.arima model in the 'h.arg' argument is not equal to the series length")
}
}
if("xreg" %in% names(h.arg$n.args)){
xreg.hybrid.nnetar <- NULL
xreg.hybrid.nnetar <- h.arg$n.args$xreg
if(base::nrow(xreg.hybrid.nnetar) != base::length(ts.obj)){
stop("The length of the 'xreg' of the nnetar model in the 'h.arg' argument is not equal to the series length")
}
}
if("xreg" %in% names(h.arg$s.args)){
xreg.hybrid.stlm <- NULL
xreg.hybrid.stlm <- h.arg$s.args$xreg
if(base::nrow(xreg.hybrid.stlm) != base::length(ts.obj)){
stop("The length of the 'xreg' of the stlm model in the 'h.arg' argument is not equal to the series length")
}
}
if(!base::is.numeric(periods) | periods != base::round(periods) | periods <= 0){
stop("The value of the 'periods' parameters is no valid")
} else {
if((base::length(ts.obj) - periods - window_size) < 2 * stats::frequency(ts.obj)){
stop("The length of the series is long enough to create a forecast")
}
}
if(!base::is.numeric(window_size) | window_size != base::round(window_size) | window_size <= 0){
stop("The value of the 'window_size' parameters is no valid")
} else {
if((base::length(ts.obj) - periods - window_size) < 2 * stats::frequency(ts.obj)){
stop("The length of the series is long enough to create a forecast")
}
}
if (stats::is.ts(ts.obj)) {
if (stats::is.mts(ts.obj)) {
warning("The 'ts.obj' has multiple columns, only the first column will be plot")
ts.obj <- ts.obj[, 1]
}
}else {
stop("The 'ts.obj' is not a 'ts' class")
}
if(!error %in% c("MAPE", "RMSE")){
warning("The value of the 'error' parameter is invalid, using the default setting - 'MAPE'")
error <- "MAPE"
}
if(!base::is.logical(plot)){
warning("The value of the 'plot' parameter is invalid, using default option TRUE")
plot <- TRUE
}
# Setting the output object
modelOutput <- list()
# Define the plot colors
if(base::nchar(models) < 3){
color_ramp <- RColorBrewer::brewer.pal(3,"Dark2")[1:base::nchar(models)]
} else{
color_ramp <- RColorBrewer::brewer.pal(base::nchar(models),"Dark2")
}
model_char <- base::unlist(base::strsplit(models, split = ""))
modelOutput$Models_Final <- list()
modelOutput$Forecast_Final <- list()
# Final forecast
if("a" %in% model_char){
model_list <- c(model_list, "auto.arima")
md_auto.arima <- fc_auto.arima <- NULL
a.arg$parallel <- parallel
md_auto.arima <- base::do.call(forecast::auto.arima, c(list(ts.obj), a.arg))
if("xreg" %in% base::names(a.arg)){
fc_auto.arima <- forecast::forecast(md_auto.arima, h = h, xreg = xreg.h)
} else{
fc_auto.arima <- forecast::forecast(md_auto.arima, h = h)
}
modelOutput$Models_Final$auto.arima <- md_auto.arima
modelOutput$Forecast_Final$auto.arima <- fc_auto.arima
}
if("w" %in% model_char){
model_list <- c(model_list, "HoltWinters")
md_HoltWinters <- fc_HoltWinters <- NULL
md_HoltWinters <- base::do.call(stats::HoltWinters, c(list(ts.obj), w.arg))
fc_HoltWinters <- forecast::forecast(md_HoltWinters, h = h)
modelOutput$Models_Final$HoltWinters <- md_HoltWinters
modelOutput$Forecast_Final$HoltWinters <- fc_HoltWinters
}
if("e" %in% model_char){
model_list <- c(model_list, "ets")
md_ets <- fc_ets <- NULL
md_ets <- base::do.call(forecast::ets, c(list(ts.obj), e.arg))
fc_ets <- forecast::forecast(md_ets, h = h)
modelOutput$Models_Final$ets <- md_ets
modelOutput$Forecast_Final$ets <- fc_ets
}
if("n" %in% model_char){
model_list <- c(model_list, "nnetar")
md_nnetar <- fc_nnetar <- NULL
md_nnetar <- base::do.call(forecast::nnetar, c(list(ts.obj), n.arg))
if("xreg" %in% base::names(n.arg)){
fc_nnetar <- forecast::forecast(md_nnetar, h = h, xreg = xreg.h)
} else{
fc_nnetar <- forecast::forecast(md_nnetar, h = h)
}
modelOutput$Models_Final$nnetar <- md_nnetar
modelOutput$Forecast_Final$nnetar <- fc_nnetar
}
if("t" %in% model_char){
model_list <- c(model_list, "tbats")
md_tbats <- fc_tbats <- NULL
t.arg$use.parallel <- parallel
md_tbats <- base::do.call(forecast::tbats, c(list(ts.obj), t.arg))
fc_tbats <- forecast::forecast(md_tbats, h = h)
modelOutput$Models_Final$tbats <- md_tbats
modelOutput$Forecast_Final$tbats <- fc_tbats
}
if("b" %in% model_char){
# Check if the bsts arguments are valid
if(is.null(b.arg)){
b.arg <- list(linear_trend = TRUE,
seasonal = TRUE,
niter = 1000,
ping = 0,
family = "gaussian",
seed=1234)
} else{
if("linear_trend" %in% names(b.arg)){
if(!b.arg$linear_trend %in% c(TRUE, FALSE)){
warning("The value of the 'linear_trend' argument of the bsts model is invalid, using default (TRUE)")
b.arg$linear_trend <- TRUE
}
} else {
warning("The 'linear_trend' was not defined, using TRUE as default")
b.arg$linear_trend <- TRUE
}
if("seasonal" %in% names(b.arg)){
if(!b.arg$seasonal %in% c(TRUE, FALSE)){
warning("The value of the 'seasonal' argument of the bsts model is invalid, using TRUE as default")
b.arg$seasonal <- TRUE
}
} else {
warning("The 'seasonal' argument was not defined, using TRUE as default")
b.arg$seasonal <- TRUE
}
if("niter" %in% names(b.arg)){
if(!base::is.numeric(b.arg$niter)){
warning("The value of the 'niter' argument of the bsts model is invalid, setting the argument to 1000")
b.arg$niter <- 1000
} else if(b.arg$niter %% 1 != 0){
warning("The value of the 'niter' argument of the bsts model is not integer, setting the argument to 1000")
b.arg$niter <- 1000
}
} else {
warning("The 'niter' argument was not defined, setting the argument to 1000")
b.arg$niter <- 1000
}
if("ping" %in% names(b.arg)){
if(!base::is.numeric(b.arg$ping)){
warning("The value of the 'ping' argument of the bsts model is invalid, setting the argument to 100")
b.arg$ping <- 100
} else if(b.arg$ping %% 1 != 0){
warning("The value of the 'ping' argument of the bsts model is not integer, setting the argument to 100")
b.arg$ping <- 1000
}
} else {
warning("The 'ping' argument was not defined, setting the argument to 100")
b.arg$ping <- 100
}
if("seed" %in% names(b.arg)){
if(!base::is.numeric(b.arg$seed)){
warning("The value of the 'seed' argument of the bsts model is invalid, setting the argument to 1234")
b.arg$seed <- 1234
} else if(b.arg$seed %% 1 != 0){
warning("The value of the 'seed' argument of the bsts model is not integer, setting the argument to 1234")
b.arg$seed <- 1234
}
} else {
warning("The 'seed' argument was not defined, setting the argument to 1234")
b.arg$seed <- 1234
}
if("family" %in% names(b.arg)){
if(!b.arg$family %in% c("gaussian", "logit", "poisson", "student")){
warning("The value of the 'family' argument of the bsts model is invalid, using 'gaussian' as default")
b.arg$family <- "gaussian"
}
} else{
warning("The value of the 'family' argument is missing, using 'gaussian' as default")
b.arg$family <- "gaussian"
}
}
model_list <- c(model_list, "bsts")
md_bsts <- fc_bsts <- ss <- fit.bsts <- burn <- NULL
ss <- list()
if(b.arg$linear_trend){
ss <- bsts::AddLocalLinearTrend(ss, ts.obj)
}
if(b.arg$seasonal){
ss <- bsts::AddSeasonal(ss, ts.obj,
nseasons = stats::frequency(ts.obj))
}
md_bsts <- bsts::bsts(ts.obj,
state.specification = ss,
niter = b.arg$niter,
ping= b.arg$ping,
seed= b.arg$seed,
family = b.arg$family)
fc_bsts <- stats::predict(md_bsts, horizon = h, quantiles = c(.025, .975))
modelOutput$Models_Final$bsts <- md_bsts
modelOutput$Forecast_Final$bsts <- fc_bsts
}
if("h" %in% model_char){
model_list <- c(model_list, "hybrid")
md_hybrid <- fc_hybrid <- NULL
h.arg$parallel <- parallel
md_hybrid <- base::do.call(forecastHybrid::hybridModel, c(list(ts.obj), h.arg))
if("xreg" %in% names(h.arg$a.args) ||
"xreg" %in% names(h.arg$n.args) ||
"xreg" %in% names(h.arg$s.args)){
fc_hybrid <- forecast::forecast(md_hybrid, h = h, xreg = base::as.data.frame(xreg.h))
} else{
fc_hybrid <- forecast::forecast(md_hybrid, h = h)
}
modelOutput$Models_Final$hybrid <- md_hybrid
modelOutput$Forecast_Final$hybrid <- fc_hybrid
}
s <- length(ts.obj) - periods + 1
e <- length(ts.obj)
MAPE_df <- NULL
MAPE_df <- base::data.frame(matrix(NA, ncol = length(model_list) + 1 , nrow = periods))
names(MAPE_df) <- c("Period", model_list)
MAPE_df$Period <- s:e - s + 1
RMSE_df <- NULL
RMSE_df <- base::data.frame(matrix(NA, ncol = length(model_list) + 1 , nrow = periods))
names(RMSE_df) <- c("Period", model_list)
RMSE_df$Period <- s:e - s + 1
# Loop over the series
for(i in s:e){
period_name <- NULL
period_name <- paste("period", (i - s + 1), sep = "_")
eval(parse(text = paste("modelOutput$", period_name, "<- list()", sep = "")))
ts.subset <- split_ts <- train <- test <- NULL
ts.subset <- stats::window(ts.obj, start = stats::time(ts.obj)[1], end = stats::time(ts.obj)[i])
split_ts <- TSstudio::ts_split(ts.subset, sample.out = window_size)
train <- split_ts$train
test <- split_ts$test
if("a" %in% model_char){
md <- fc <- NULL
if("xreg" %in% names(a.arg)){
a.xreg.train <- xreg.arima[1:length(train),]
a.xreg.test <- xreg.arima[(length(train) + 1):(length(train) + window_size),]
a.arg.xreg <- a.arg
a.arg.xreg$xreg <- a.xreg.train
md <- base::do.call(forecast::auto.arima, c(list(train), a.arg.xreg))
fc <- forecast::forecast(md, h = window_size, xreg = a.xreg.test)
} else {
md <- base::do.call(forecast::auto.arima, c(list(train), a.arg))
fc <- forecast::forecast(md, h = window_size)
}
MAPE_df$auto.arima[i - s + 1] <- base::round(forecast::accuracy(fc,test)[10], 2)
RMSE_df$auto.arima[i - s + 1] <- base::round(forecast::accuracy(fc,test)[4], 2)
eval(parse(text = paste("modelOutput$", period_name, "$auto.arima <- list(model = md, forecast = fc)", sep = "")))
}
if("w" %in% model_char){
md <- fc <- NULL
md <- base::do.call(stats::HoltWinters, c(list(train), w.arg))
fc <- forecast::forecast(md, h = window_size)
MAPE_df$HoltWinters[i - s + 1] <- base::round(forecast::accuracy(fc, test)[10], 2)
RMSE_df$HoltWinters[i - s + 1] <- base::round(forecast::accuracy(fc, test)[4], 2)
eval(parse(text = paste("modelOutput$", period_name, "$HoltWinters <- list(model = md, forecast = fc)", sep = "")))
}
if("e" %in% model_char){
md <- fc <- NULL
md <- base::do.call(forecast::ets, c(list(train), e.arg))
fc <- forecast::forecast(train, h = window_size)
MAPE_df$ets[i - s + 1] <- base::round(forecast::accuracy(fc, test)[10], 2)
RMSE_df$ets[i - s + 1] <- base::round(forecast::accuracy(fc, test)[4], 2)
eval(parse(text = paste("modelOutput$", period_name, "$ets <- list(model = md, forecast = fc)", sep = "")))
}
if("n" %in% model_char){
md <- fc <- NULL
if("xreg" %in% names(n.arg)){
n.xreg.train <- xreg.arima[1:length(train),]
n.xreg.test <- xreg.arima[(length(train) + 1):(length(train) + window_size),]
n.arg.xreg <- n.arg
n.arg.xreg$xreg <- n.xreg.train
md <- base::do.call(forecast::nnetar, c(list(train), n.arg.xreg))
fc <- forecast::forecast(md, h = window_size, xreg = n.xreg.test)
} else {
md <- base::do.call(forecast::nnetar, c(list(train), n.arg))
fc <- forecast::forecast(md, h = window_size)
}
MAPE_df$nnetar[i - s + 1] <- base::round(forecast::accuracy(fc, test)[10],2)
RMSE_df$nnetar[i - s + 1] <- base::round(forecast::accuracy(fc, test)[4],2)
eval(parse(text = paste("modelOutput$", period_name, "$nnetar <- list(model = md, forecast = fc)", sep = "")))
}
if("t" %in% model_char){
md <- fc <- NULL
md <- base::do.call(forecast::tbats, c(list(train), t.arg))
fc <- forecast::forecast(md, h = window_size)
MAPE_df$tbats[i - s + 1] <- base::round(forecast::accuracy(fc, test)[10], 2)
RMSE_df$tbats[i - s + 1] <- base::round(forecast::accuracy(fc, test)[4], 2)
eval(parse(text = paste("modelOutput$", period_name, "$tbats <- list(model = md, forecast = fc)", sep = "")))
}
if("b" %in% model_char){
md <- fc <- ss <- NULL
ss <- list()
if(b.arg$linear_trend){
ss <- bsts::AddLocalLinearTrend(ss, ts.obj)
}
if(b.arg$seasonal){
ss <- bsts::AddSeasonal(ss, ts.obj,
nseasons = stats::frequency(ts.obj))
}
md <- bsts::bsts(train,
state.specification = ss,
niter = b.arg$niter,
ping= b.arg$ping,
seed= b.arg$seed,
family = b.arg$family)
fc <- stats::predict(md, horizon = window_size, quantiles = c(.025, .975))
pred <- fc$mean
MAPE_df$bsts[i - s + 1] <- base::round(mean(100 * base::abs((test - pred) / test)), 2)
RMSE_df$bsts[i - s + 1] <- base::round((mean((test - pred)^ 2)) ^ 0.5, 2)
eval(parse(text = paste("modelOutput$", period_name, "$bsts <- list(model = md, forecast = fc)", sep = "")))
}
if("h" %in% model_char){
md <- fc <- NULL
if("xreg" %in% names(h.arg$a.args) ||
"xreg" %in% names(h.arg$n.args) ||
"xreg" %in% names(h.arg$s.args)){
h.arg.xreg <- h.test <- NULL
h.arg.xreg <- h.arg
if("xreg" %in% names(h.arg$a.args)){
h.arg.xreg$a.args$xreg <- xreg.hybrid.arima[1:length(train),]
h.test <- xreg.hybrid.arima[(length(train) + 1):(length(train) + window_size),]
}
if("xreg" %in% names(h.arg$n.args)){
h.arg.xreg$n.args$xreg <- xreg.hybrid.nnetar[1:length(train),]
h.test <- xreg.hybrid.nnetar[(length(train) + 1):(length(train) + window_size),]
}
if("xreg" %in% names(h.arg$s.args)){
h.arg.xreg$s.args$xreg <- xreg.hybrid.stlm[1:length(train),]
h.test <- xreg.hybrid.stlm[(length(train) + 1):(length(train) + window_size),]
}
md <- base::do.call(forecastHybrid::hybridModel, c(list(train), h.arg.xreg))
fc <- forecast::forecast(md, h = window_size, xreg = base::as.data.frame(h.test))
} else {
md <- base::do.call(forecastHybrid::hybridModel, c(list(train), h.arg))
fc <- forecast::forecast(md, h = window_size)
}
eval(parse(text = paste("modelOutput$", period_name, "$hybrid <- list(model = md, forecast = fc)", sep = "")))
MAPE_df$hybrid[i - s + 1] <- base::round(forecast::accuracy(fc, test)[10], 2)
RMSE_df$hybrid[i - s + 1] <- base::round(forecast::accuracy(fc, test)[4], 2)
}
if((i -s + 1) >= 1){
p <- p1 <- p2 <- p3 <- p4 <- p5 <- p6 <-NULL
p <- base::suppressWarnings(plotly::plot_ly(x = stats::time(train), y = base::as.numeric(train), mode = "lines", name = "Training", type = "scatter", line = list(color = "#00526d")) %>%
plotly::add_lines(x = stats::time(test), y = base::as.numeric(test), line = list(color = "green", width = 4, dash = "dash"), name = "Testing") %>%
plotly::layout(xaxis = list(range = c(base::min(stats::time(ts.obj)), base::max(stats::time(ts.obj)))),
title = base::paste(obj.name, " Backtesting - Error Distribution by Period/Model", sep = ""), annotations = a))
p1 <- base::suppressWarnings(plotly::plot_ly(data = MAPE_df))
for(r1 in 2:ncol(MAPE_df)){
p1 <- base::suppressWarnings(p1 %>% plotly::add_lines(x = MAPE_df[, 1],
y = MAPE_df[, r1],
name = names(MAPE_df)[r1],
line = list(color = color_ramp[(r1 -1)])))
}
p1 <- base::suppressWarnings(p1 %>% plotly::layout(xaxis = list(tickvals = MAPE_df[, 1], ticktext = MAPE_df[, 1],
range = c(min(MAPE_df$Period), max(MAPE_df$Period)))))
p2 <- base::suppressWarnings(plotly::plot_ly(data = MAPE_df))
for(r2 in 2:base::ncol(MAPE_df)){
p2 <- base::suppressWarnings(p2 %>% plotly::add_trace(y = MAPE_df[, r2],
type = "box",
boxpoints = "all",
jitter = 0.3,
pointpos = -1.8,
name = names(MAPE_df)[r2],
marker = list(color = color_ramp[(r2 -1)]),
line = list(color = color_ramp[(r2 -1)]),
showlegend=F
))
}
p1 <- base::suppressWarnings(p1 %>% plotly::layout(title = "Error by Period",
yaxis = list(title = "MAPE"),
xaxis = list(title = "Period", tickvals = MAPE_df[, 1], ticktext = MAPE_df[, 1])))
p2 <- base::suppressWarnings(p2 %>% plotly::layout(title = "Error Distribution by Model",
yaxis = list(title = "MAPE")))
p3 <- base::suppressWarnings(plotly::subplot(p1, p2, nrows = 2, titleY = TRUE, titleX = TRUE, margin = 0.06))
p4 <- base::suppressWarnings(plotly::plot_ly(data = RMSE_df))
for(r1 in 2:ncol(RMSE_df)){
p4 <- base::suppressWarnings(p4 %>% plotly::add_lines(x = RMSE_df[, 1],
y = RMSE_df[, r1],
name = names(RMSE_df)[r1],
line = list(color = color_ramp[(r1 -1)])))
}
p4 <- base::suppressWarnings(p4 %>% plotly::layout(xaxis = list(tickvals = RMSE_df[, 1], ticktext = RMSE_df[, 1],
range = c(min(RMSE_df$Period), max(RMSE_df$Period)))))
p5 <- base::suppressWarnings(plotly::plot_ly(data = RMSE_df))
for(r2 in 2:base::ncol(RMSE_df)){
p5 <- base::suppressWarnings(p5 %>% plotly::add_trace(y = RMSE_df[, r2],
type = "box",
boxpoints = "all",
jitter = 0.3,
pointpos = -1.8,
name = names(RMSE_df)[r2],
marker = list(color = color_ramp[(r2 -1)]),
line = list(color = color_ramp[(r2 -1)]),
showlegend=F
))
}
p4 <- base::suppressWarnings(p4 %>% plotly::layout(title = "Error by Period",
yaxis = list(title = "RMSE"),
xaxis = list(title = "Period", tickvals = RMSE_df[, 1], ticktext = RMSE_df[, 1])))
p5 <- base::suppressWarnings(p5 %>% plotly::layout(title = "Error Distribution by Model",
yaxis = list(title = "RMSE")))
p6 <- base::suppressWarnings(plotly::subplot(p4, p5, nrows = 2, titleY = TRUE, titleX = TRUE, margin = 0.1))
if(error == "MAPE" & plot & periods > 1){
p7 <- base::suppressWarnings(plotly::subplot(plotly::subplot(p1, p2, nrows = 1, titleY = TRUE, shareY = TRUE, margin = 0.02, titleX = TRUE),
p, nrows = 2, margin = 0.08, titleY = TRUE))
print(p7)
} else if(error == "RMSE" & plot & periods > 1){
p7 <- base::suppressWarnings(plotly::subplot(plotly::subplot(p4, p5, nrows = 1, titleY = TRUE, shareY = TRUE, margin = 0.02, titleX = TRUE),
p, nrows = 2, margin = 0.08, titleY = TRUE))
print(p7)
}
}
}
modelOutput$MAPE_score <- MAPE_df
modelOutput$RMSE_score <- RMSE_df
if(periods > 1){
modelOutput$MAPE_plot <- p3
modelOutput$RMSE_plot <- p6
}
leaderboard <- base::suppressMessages(
(modelOutput$MAPE_score %>% reshape2::melt(id.vars = c("Period")) %>%
dplyr::group_by(variable) %>%
dplyr::summarise(avgMAPE = base::mean(value),
sdMAPE = stats::sd(value))) %>%
dplyr::left_join(
modelOutput$RMSE_score %>% reshape2::melt(id.vars = c("Period")) %>%
dplyr::group_by(variable) %>%
dplyr::summarise(avgRMSE = base::mean(value),
sdRMSE = stats::sd(value))
)
)
names(leaderboard)[1] <- "Model_Name"
modelOutput$leaderboard <- leaderboard
forecast_final_plot_arg <- list(
text = paste(obj.name, " Best Forecast by ", error, " - ", leaderboard$Model_Name[1], sep = ""),
xref = "paper",
yref = "paper",
yanchor = "bottom",
xanchor = "center",
align = "center",
x = 0.5,
y = 1,
showarrow = FALSE
)
if(error == "MAPE"){
leaderboard <- leaderboard %>% dplyr::arrange(avgMAPE)
eval(parse(text = paste("modelOutput$leadForecast <- modelOutput$Forecast_Final$", leaderboard$Model_Name[1], sep = "")))
if(periods > 1){
forecast_final_plot_arg <- list(
text = paste(obj.name, " Best Forecast by ", error, " - ", leaderboard$Model_Name[1], sep = ""),
xref = "paper",
yref = "paper",
yanchor = "bottom",
xanchor = "center",
align = "center",
x = 0.5,
y = 1,
showarrow = FALSE
)
final_forecast_plot <- base::suppressWarnings(TSstudio::plot_forecast(modelOutput$leadForecast) %>%
plotly::layout(annotations = forecast_final_plot_arg,
title = base::paste(obj.name, " Backtesting - Error Distribution by Period/Model", sep = "")))
final_plot <- base::suppressWarnings(plotly::subplot(plotly::subplot(p1, p2, nrows = 1, titleY = TRUE, shareY = TRUE, margin = 0.02, titleX = TRUE),
final_forecast_plot, nrows = 2, margin = 0.1, titleY = TRUE))
}
leaderboard <- leaderboard %>% dplyr::arrange(avgMAPE) %>% as.data.frame()
modelOutput$leaderboard <- leaderboard
} else if(error == "RMSE"){
leaderboard <- leaderboard %>% dplyr::arrange(avgRMSE)
eval(parse(text = paste("modelOutput$leadForecast <- modelOutput$Forecast_Final$", leaderboard$Model_Name[1], sep = "")))
if(periods > 1){
forecast_final_plot_arg <- list(
text = paste(obj.name, " Best Forecast by ", error, " - ", leaderboard$Model_Name[1], sep = ""),
xref = "paper",
yref = "paper",
yanchor = "bottom",
xanchor = "center",
align = "center",
x = 0.5,
y = 1,
showarrow = FALSE
)
final_forecast_plot <- base::suppressWarnings(TSstudio::plot_forecast(modelOutput$leadForecast) %>%
plotly::layout(annotations = forecast_final_plot_arg))
final_plot <- base::suppressWarnings(plotly::subplot(plotly::subplot(p4, p5, nrows = 1, titleY = TRUE, shareY = TRUE, margin = 0.02, titleX = TRUE),
final_forecast_plot, nrows = 2, margin = 0.1, titleY = TRUE))
}
leaderboard <- leaderboard %>% dplyr::arrange(avgRMSE) %>% as.data.frame()
modelOutput$leaderboard <- leaderboard
}
modelOutput$summary_plot <- final_plot
if(plot){
print(final_plot)
}
print(leaderboard)
class(modelOutput) <- "ts_backtest"
return(modelOutput)
}
#' Tuning Time Series Forecasting Models Parameters with Grid Search
#' @export ts_grid
#' @param ts.obj A univariate time series object of a class "ts"
#' @param model A string, defines the model
#' @param optim A string, set the optimization method - c("MAPE", "RMSE")
#' @param periods An integer, set the number of backtesting periods
#' @param window_length An integer, defines the length of the backtesting training window.
#' If set to NULL (default), will use an expanding window starting from the first observation;
#' otherwise, will use a sliding window.
#' @param window_space An integer, set the space length between each of the backtesting training partition
#' @param window_test An integer, set the length of the backtesting testing partition
#' @param hyper_params A list, defines the tuning parameters and their range
#' @param parallel Logical, if TRUE use multiple cores in parallel
#' @param n.cores Set the number of cores to use if the parallel argument is set to TRUE.
#' If set to "auto" (default), will use all available cores in the system minus 1
#' @description Tuning time series models with a grid search approach using the backtesting method.
#' @return A list
#' @examples
#' \dontrun{
#' data(USgas)
#'
#' # Starting with a shallow search (sequence between 0 and 1 with jumps of 0.1)
#' # To speed up the process, will set the parallel option to TRUE
#' # to run the search in parallel using 8 cores
#'
#' hw_grid_shallow <- ts_grid(ts.obj = USgas,
#' periods = 6,
#' model = "HoltWinters",
#' optim = "MAPE",
#' window_space = 6,
#' window_test = 12,
#' hyper_params = list(alpha = seq(0.01, 1,0.1),
#' beta = seq(0.01, 1,0.1),
#' gamma = seq(0.01, 1,0.1)),
#' parallel = TRUE,
#' n.cores = 8)
#'
#'
#' # Use the parameter range of the top 20 models
#' # to set a narrow but more aggressive search
#'
#' a_min <- min(hw_grid_shallow$grid_df$alpha[1:20])
#' a_max <- max(hw_grid_shallow$grid_df$alpha[1:20])
#'
#' b_min <- min(hw_grid_shallow$grid_df$beta[1:20])
#' b_max <- max(hw_grid_shallow$grid_df$beta[1:20])
#'
#' g_min <- min(hw_grid_shallow$grid_df$gamma[1:20])
#' g_max <- max(hw_grid_shallow$grid_df$gamma[1:20])
#'
#' hw_grid_second <- ts_grid(ts.obj = USgas,
#' periods = 6,
#' model = "HoltWinters",
#' optim = "MAPE",
#' window_space = 6,
#' window_test = 12,
#' hyper_params = list(alpha = seq(a_min, a_max,0.05),
#' beta = seq(b_min, b_max,0.05),
#' gamma = seq(g_min, g_max,0.05)),
#' parallel = TRUE,
#' n.cores = 8)
#'
#' md <- HoltWinters(USgas,
#' alpha = hw_grid_second$alpha,
#' beta = hw_grid_second$beta,
#' gamma = hw_grid_second$gamma)
#'
#' library(forecast)
#'
#' fc <- forecast(md, h = 60)
#'
#' plot_forecast(fc)
#'
#' }
ts_grid <- function(ts.obj,
                    model,
                    optim = "MAPE",
                    periods,
                    window_length = NULL,
                    window_space,
                    window_test,
                    hyper_params,
                    parallel = TRUE,
                    n.cores = "auto"){
  # Tune the hyper-parameters of a forecasting model (currently "HoltWinters"
  # only) with a grid search, scoring every parameter combination over
  # 'periods' backtesting partitions and ranking by the average MAPE or RMSE.
  # Returns a "ts_grid" object holding the scored grid, the best value found
  # for each tuning parameter, and the search settings.
  error <- period <- NULL
  `%>%` <- magrittr::`%>%`
  # --- Input validation ---
  if(!stats::is.ts(ts.obj)){
    stop("The input object is not 'ts' object")
  } else if(stats::is.mts(ts.obj)){
    stop("The input object is 'mts' object, please use 'ts'")
  }
  if(!optim %in% c("MAPE", "RMSE") || base::length(optim) != 1){
    warning("The value of the optim argument is not valid, using default option (MAPE)")
    optim <- "MAPE"
  }
  if(!base::is.logical(parallel)){
    warning("The 'parallel' argument is not a boolean operator, setting it to TRUE")
    parallel <- TRUE
  }
  # 'n.cores' must be "auto" or a positive whole number that does not exceed
  # the number of available cores
  if(n.cores != "auto"){
    if(!base::is.numeric(n.cores)){
      warning("The value of the 'n.cores' argument is not valid,",
              " setting it to 'auto' mode")
      n.cores <- "auto"
    } else if(base::is.numeric(n.cores) &&
              (n.cores %% 1 != 0 || n.cores < 1)){
      warning("The value of the 'n.cores' argument is not valid,",
              " setting it to 'auto' mode")
      n.cores <- "auto"
    } else{
      if(future::availableCores() < n.cores){
        warning("The value of the 'n.cores' argument is not valid,",
                "(the requested number of cores are greater than available)",
                ", setting it to 'auto' mode")
        n.cores <- "auto"
      }
    }
  }
  if(n.cores == "auto"){
    # Leave one core free for the parent session / OS
    n.cores <- base::as.numeric(future::availableCores() - 1)
  }
  # missing() is the correct test for an unsupplied formal argument;
  # the previous exists("model") check was always TRUE inside the function
  if(base::missing(model)){
    stop("The 'model' argument is missing")
  } else if(!model %in% c("HoltWinters")){
    stop("The 'model' argument is not valid")
  }
  # --- Set the backtesting partitions ---
  s <- length(ts.obj) - window_space * (periods - 1) # the length of the first partition
  e <- length(ts.obj) # the end of the backtesting partition
  w_end <- seq(from = s, by = window_space, to = e) # the cutting points for the backtesting partitions
  if(!base::is.null(window_length)){
    # Sliding window - each training window has a fixed length
    w_start <- w_end - window_test - window_length + 1
  } else {
    # Expanding window - always start from the first observation
    w_start <- base::rep(1, base::length(w_end))
  }
  if(model == "HoltWinters"){
    hw_par <- hyper_input <- hyper_null <- hyper_false <- NULL
    hw_par <- c("alpha", "beta", "gamma")
    if(!base::all(base::names(hyper_params) %in% hw_par)){
      stop("The 'hyper_params' argument is invalid")
    }
    # Each parameter may be a numeric search range, NULL (let the model
    # estimate it), or FALSE (disable the component; beta/gamma only)
    if("alpha" %in% base::names(hyper_params)){
      alpha <- NULL
      if(is.null(hyper_params$alpha)){
        hyper_null <- c(hyper_null, "alpha")
      } else if(base::is.logical(hyper_params$alpha)){
        stop("The value of the 'alpha' argument cannot be only numeric")
      } else {
        if(base::any(which(hyper_params$alpha < 0)) ||
           base::any(which(hyper_params$alpha > 1))){
          stop("The value of the 'alpha' parameter is out of range,",
               " cannot exceed 1 or be less or equal to 0")
        }
        if(any(which(hyper_params$alpha == 0))){
          # HoltWinters rejects 0; nudge to a small positive value
          hyper_params$alpha[base::which(hyper_params$alpha == 0)] <- 1e-5
          warning("The value of the 'alpha' parameter cannot be equal to 0",
                  " replacing 0 with 1e-5")
        }
        alpha <- hyper_params$alpha
        hyper_input <- c(hyper_input, "alpha")
      }
    }
    if("beta" %in% base::names(hyper_params)){
      beta <- NULL
      if(is.null(hyper_params$beta)){
        hyper_null <- c(hyper_null, "beta")
      } else if(base::is.logical(hyper_params$beta) &&
                !base::isTRUE(hyper_params$beta)){
        # beta = FALSE disables the trend component
        beta <- FALSE
        hyper_false <- c(hyper_false, "beta")
      } else {
        if(base::any(which(hyper_params$beta < 0)) ||
           base::any(which(hyper_params$beta > 1))){
          stop("The value of the 'beta' parameter is out of range,",
               " cannot exceed 1 or be less or equal to 0")
        }
        if(any(which(hyper_params$beta == 0))){
          hyper_params$beta[base::which(hyper_params$beta == 0)] <- 1e-5
          warning("The value of the 'beta' parameter cannot be equal to 0",
                  " replacing 0 with 1e-5")
        }
        beta <- hyper_params$beta
        hyper_input <- c(hyper_input, "beta")
      }
    }
    if("gamma" %in% base::names(hyper_params)){
      gamma <- NULL
      if(is.null(hyper_params$gamma)){
        hyper_null <- c(hyper_null, "gamma")
      } else if(base::is.logical(hyper_params$gamma) &&
                !base::isTRUE(hyper_params$gamma)){
        # gamma = FALSE disables the seasonal component
        gamma <- FALSE
        hyper_false <- c(hyper_false, "gamma") # bug fix: previously appended "beta" here
      } else {
        if(base::any(which(hyper_params$gamma < 0)) ||
           base::any(which(hyper_params$gamma > 1))){
          stop("The value of the 'gamma' parameter is out of range,",
               " cannot exceed 1 or be less or equal to 0")
        }
        if(any(which(hyper_params$gamma == 0))){
          hyper_params$gamma[base::which(hyper_params$gamma == 0)] <- 1e-5
          warning("The value of the 'gamma' parameter cannot be equal to 0",
                  " replacing 0 with 1e-5")
        }
        gamma <- hyper_params$gamma
        hyper_input <- c(hyper_input, "gamma")
      }
    }
    # Build the search grid from the parameters that have a numeric range
    # (do.call + mget replaces the previous eval(parse(...)) construction)
    grid_df <- base::do.call(base::expand.grid,
                             base::mget(base::as.character(hyper_input)))
    base::names(grid_df) <- hyper_input
    if(!base::is.null(hyper_false)){
      for(f in hyper_false){
        grid_df[f] <- FALSE
      }
    }
    # Build (as text) the model call evaluated for each row of the search
    # grid; parameters outside the grid are passed as NULL so that
    # HoltWinters estimates them
    grid_model <- base::paste("stats::HoltWinters(x = train", sep = "")
    for(i in hw_par){
      if(i %in% base::names(grid_df)){
        grid_model <- base::paste(grid_model, ", ", i, " = search_df$", i, "[i]",
                                  sep = "" )
      } else {
        grid_model <- base::paste(grid_model, ", ", i, " = NULL", sep = "")
      }
    }
    grid_model <- base::paste(grid_model, ")", sep = "")
  }
  # Train and score every row of the search grid on a single backtesting
  # partition (shared by the serial and parallel execution paths, which
  # previously duplicated this body)
  grid_search_period <- function(n){
    ts_sub <- train <- test <- search_df <- NULL
    search_df <- grid_df
    search_df$period <- n
    search_df$error <- NA
    ts_sub <- stats::window(ts.obj,
                            start = stats::time(ts.obj)[w_start[n]],
                            end = stats::time(ts.obj)[w_end[n]])
    partition <- TSstudio::ts_split(ts_sub, sample.out = window_test)
    train <- partition$train
    test <- partition$test
    for(i in base::seq_len(base::nrow(search_df))){
      md <- fc <- NULL
      md <- base::eval(base::parse(text = grid_model))
      fc <- forecast::forecast(md, h = window_test)
      # accuracy() cells: [10] = test-set MAPE, [4] = test-set RMSE
      if(optim == "MAPE"){
        search_df$error[i] <- forecast::accuracy(fc, test)[10]
      } else if(optim == "RMSE"){
        search_df$error[i] <- forecast::accuracy(fc, test)[4]
      }
    }
    return(search_df)
  }
  grid_output <- NULL
  if(!parallel){
    grid_output <- base::lapply(base::seq_len(periods), grid_search_period) %>%
      dplyr::bind_rows() %>%
      tidyr::spread(key = period, value = error)
  } else if(parallel){
    # 'multisession' is the documented replacement for the deprecated
    # (and later defunct) future::multiprocess plan
    future::plan(future::multisession, workers = n.cores)
    grid_output <- future.apply::future_lapply(base::seq_len(periods), grid_search_period) %>%
      dplyr::bind_rows() %>%
      tidyr::spread(key = period, value = error)
  }
  # Average the error across the backtesting periods and rank the grid
  col_mean <- base::which(!base::names(grid_output) %in% base::names(hyper_params) )
  grid_output$mean <- base::rowMeans(grid_output[, col_mean])
  grid_output <- grid_output %>% dplyr::arrange(mean)
  final_output <- list(grid_df = grid_output)
  # Expose the best (lowest average error) value of each tuning parameter
  for(i in base::names(hyper_params)){
    final_output[[i]] <- grid_output[1, i]
  }
  final_output[["parameters"]] <- list(series = ts.obj,
                                       model = model,
                                       optim = optim,
                                       periods = periods,
                                       window_length = window_length,
                                       window_space = window_space,
                                       window_test = window_test,
                                       hyper_params = hyper_params,
                                       parallel = parallel,
                                       n.cores = n.cores)
  base::class(final_output) <- "ts_grid"
  return(final_output)
}
#' Visualizing Grid Search Results
#' @export plot_grid
#' @param grid.obj A ts_grid output object
#' @param top An integer, set the number of hyper-parameters combinations to visualize
#' (ordered by accuracy). If set to NULL (default), will plot the top 100 combinations
#' @param type The plot type, either "3D" for 3D plot or
#' "parcoords" for parallel coordinates plot.
#' Note: the 3D plot option is applicable whenever there are three tuning parameters,
#' otherwise will use a 2D plot for two tuning parameters.
#' @param highlight A proportion between 0 (excluding) and 1,
#' set the number of hyper-parameters combinations to highlight
#' (by accuracy), if the type argument is set to "parcoords"
#' @param colors A list of plotly arguments for the color scale setting:
#'
#' showscale - display the color scale if set to TRUE.
#'
#' reversescale - reverse the color scale if set to TRUE
#'
#' colorscale set the color scale of the plot, possible palettes are:
#' Greys, YlGnBu, Greens , YlOrRd,
#' Bluered, RdBu, Reds, Blues, Picnic,
#' Rainbow, Portland, Jet, Hot, Blackbody,
#' Earth, Electric, Viridis, Cividis
plot_grid <- function(grid.obj,
                      top = NULL,
                      highlight = 0.1,
                      type = "parcoords",
                      colors = list(showscale = TRUE,
                                    reversescale = FALSE,
                                    colorscale = "Jet")){
  # Visualize the results of a ts_grid search, either as a parallel
  # coordinates plot ("parcoords") or as a 3D scatter plot ("3D" - falls
  # back to a 2D bubble plot when only two parameters were tuned).
  # Returns a plotly object.
  # Setting the pipe operator
  `%>%` <- magrittr::`%>%`
  # Setting variables
  color_option <- p <- par_names <- sizeref <- NULL
  # List of supported plotly color scale palettes
  color_option <- c("Greys","YlGnBu", "Greens", "YlOrRd",
                    "Bluered", "RdBu", "Reds", "Blues", "Picnic",
                    "Rainbow", "Portland", "Jet", "Hot", "Blackbody",
                    "Earth", "Electric", "Viridis", "Cividis")
  # --- Error handling ---
  # inherits() is the robust class test (objects may carry multiple classes)
  if(!base::inherits(grid.obj, "ts_grid")){
    stop("The input object is not a 'ts_grid' class")
  }
  if(!base::is.list(colors)){
    warning("The 'colors' argument is not valid, using default option")
    colors <- base::list(showscale = TRUE,
                         reversescale = FALSE,
                         colorscale = "Jet")
  } else if(!all(base::names(colors) %in% c("showscale", "reversescale", "colorscale"))){
    warning("The 'colors' argument is not valid, using default option")
    colors <- base::list(showscale = TRUE,
                         reversescale = FALSE,
                         colorscale = "Jet")
  }
  if(!base::is.logical(colors$showscale)){
    warning("The 'showscale' parameter of the 'colors' argument is not logical, using default option (TRUE)")
    colors$showscale <- TRUE
  }
  if(!base::is.logical(colors$reversescale)){
    warning("The 'reversescale' parameter of the 'colors' argument is not logical, using default option (FALSE)")
    colors$reversescale <- FALSE
  }
  if(!base::is.character(colors$colorscale) ||
     base::length(colors$colorscale) != 1 ||
     !colors$colorscale %in% color_option){
    warning("The 'colorscale' parameter of the 'colors' argument is not valid, using default option (Jet)")
    # bug fix: the fallback was previously warned about but never applied
    colors$colorscale <- "Jet"
  }
  if(type != "parcoords" && type != "3D"){
    warning("The value of the 'type' argument is not valid, using default option (parcoords)")
    type <- "parcoords"
  }
  # 'top' defaults to the best 100 models (or all of them, if fewer)
  if(!base::is.null(top)){
    if(!base::is.numeric(top) || top %% 1 != 0){
      warning("The value of the 'top' argument is not valid, using default option (top 100 models)")
      top <- ifelse(base::nrow(grid.obj$grid_df) > 100, 100, base::nrow(grid.obj$grid_df))
    }
    if(top > base::nrow(grid.obj$grid_df)){
      warning("The value of the 'top' argument exceeding the number of models, using default option (top 100 models)")
      top <- ifelse(base::nrow(grid.obj$grid_df) > 100, 100, base::nrow(grid.obj$grid_df))
    }
  } else {
    top <- ifelse(base::nrow(grid.obj$grid_df) > 100, 100, base::nrow(grid.obj$grid_df))
  }
  if(!base::is.numeric(highlight) || highlight <= 0 || highlight > 1){
    warning("The value of the 'highlight' argument is not valid, using default (0.1)")
    highlight <- 0.1
  }
  # Keep only the parameters that were actually tuned (drop the ones that
  # were set to NULL or FALSE in the grid search)
  par_names <- base::names(grid.obj$parameters$hyper_params)
  for(i in par_names){
    # isFALSE() avoids an invalid length > 1 condition (an error in modern
    # R) when the parameter holds a numeric search range
    if(base::is.null(grid.obj$parameters$hyper_params[[i]]) ||
       base::isFALSE(grid.obj$parameters$hyper_params[[i]])){
      par_names <- par_names[-which(par_names == i)]
    }
  }
  if(type == "parcoords"){
    if(grid.obj$parameters$model == "HoltWinters"){
      if(base::length(par_names) < 2){
        stop("Cannot create a parallel coordinates plot for a single hyper parameter")
      }
      # One dimension per tuned parameter; the constraint range highlights
      # the top (top * highlight) models. Columns are indexed by name
      # (previously by loop position, which could mislabel a dimension
      # when a parameter was dropped from the grid).
      n_highlight <- base::ceiling(top * highlight)
      hw_dim <- base::list()
      for(i in base::seq_along(par_names)){
        hw_dim[[i]] <- base::list(
          range = c(0, 1),
          constraintrange = c(base::min(grid.obj$grid_df[1:n_highlight, par_names[i]]),
                              base::max(grid.obj$grid_df[1:n_highlight, par_names[i]])),
          label = par_names[i],
          # a one-sided formula, evaluated by plotly against the plot data
          values = stats::as.formula(base::paste("~", par_names[i], sep = ""))
        )
      }
      p <- grid.obj$grid_df[1:top,] %>%
        plotly::plot_ly(type = 'parcoords',
                        line = list(color = ~ mean,
                                    colorscale = colors$colorscale,
                                    showscale = colors$showscale,
                                    reversescale = colors$reversescale,
                                    cmin = base::min(grid.obj$grid_df$mean),
                                    cmax = base::max(grid.obj$grid_df$mean[1:top]),
                                    colorbar=list(
                                      title= base::paste("Avg.", grid.obj$parameters$optim, sep = " ")
                                    )),
                        dimensions = hw_dim
        ) %>% plotly::layout(title = base::paste(grid.obj$parameters$model,
                                                 " Parameters Grid Search Results (Avg. ",
                                                 grid.obj$parameters$optim,
                                                 ") for Top ",
                                                 top,
                                                 " Models", sep = ""),
                             xaxis = list(title = base::paste("Testing Over", grid.obj$parameters$periods, "Periods", sep = " ")))
    }
  }else if(type == "3D"){
    if(grid.obj$parameters$model == "HoltWinters"){
      if(base::length(par_names) == 3){
        p <- plotly::plot_ly(data = grid.obj$grid_df[1:top,],
                             type="scatter3d",
                             mode = "markers",
                             x = ~ alpha,
                             y = ~ beta,
                             z = ~ gamma,
                             hoverinfo = 'text',
                             text = paste(base::paste("Avg.", grid.obj$parameters$optim, sep = " "),
                                          ": ", base::round(grid.obj$grid_df[1:top, "mean"], 2),
                                          "<br>", par_names[1],": ", grid.obj$grid_df[1:top, par_names[1]],
                                          "<br>", par_names[2],": ", grid.obj$grid_df[1:top, par_names[2]],
                                          "<br>", par_names[3],": ", grid.obj$grid_df[1:top, par_names[3]],
                                          sep = ""),
                             marker = list(color = ~ mean,
                                           colorscale = colors$colorscale,
                                           showscale = colors$showscale,
                                           reversescale = colors$reversescale,
                                           colorbar=list(
                                             title= base::paste("Avg.", grid.obj$parameters$optim, sep = " ")
                                           ))) %>%
          plotly::layout(title = base::paste(grid.obj$parameters$model,
                                             " Parameters Grid Search Results (Avg. ",
                                             grid.obj$parameters$optim,
                                             ") for Top ",
                                             top,
                                             " Models", sep = ""),
                         xaxis = list(title = base::paste("Testing Over", grid.obj$parameters$periods, "Periods", sep = " ")))
      } else if(base::length(par_names) == 2){
        warning("Cannot create a 3D plot for two hyper parameters")
        # Fall back to a 2D bubble plot, scaling bubble area by the
        # average error ('^' is the idiomatic R power operator)
        sizeref <- 2.0 * max(grid.obj$grid_df$mean[1:top]) / (20^2)
        p <- plotly::plot_ly(x = grid.obj$grid_df[1:top, par_names[1]],
                             y = grid.obj$grid_df[1:top, par_names[2]],
                             type = "scatter",
                             mode = "markers",
                             hoverinfo = 'text',
                             text = paste(base::paste("Avg.", grid.obj$parameters$optim, sep = " "),
                                          ": ", base::round(grid.obj$grid_df[1:top, "mean"], 2),
                                          "<br>", par_names[1],": ", grid.obj$grid_df[1:top, par_names[1]],
                                          "<br>", par_names[2],": ", grid.obj$grid_df[1:top, par_names[2]],
                                          sep = ""),
                             marker = list(color = grid.obj$grid_df[1:top, "mean"],
                                           size = grid.obj$grid_df[1:top, "mean"],
                                           sizemode = 'area', sizeref = sizeref,
                                           colorscale = colors$colorscale,
                                           showscale = colors$showscale,
                                           reversescale = colors$reversescale,
                                           colorbar=list(
                                             title= base::paste("Avg.", grid.obj$parameters$optim, sep = " ")
                                           ))
        ) %>%
          plotly::layout(title = base::paste(grid.obj$parameters$model,
                                             "Parameters Grid Search Results (Avg.",
                                             base::paste(grid.obj$parameters$optim, ")", sep = ""),
                                             "for Top",
                                             top,
                                             "Models",
                                             sep = " "),
                         xaxis = list(title = par_names[1]),
                         yaxis = list(title = par_names[2]))
      } else if(base::length(par_names) <= 1){
        stop("Cannot create a 3D plot for a single hyper parameter")
      }
    }
  }
  return(p)
}
#' Train, Test, Evaluate, and Forecast Multiple Time Series Forecasting Models
#' @export
#' @description Method to train, test, and compare multiple time series models using either a single partition (i.e., sample out)
#' or multiple partitions (backtesting)
#' @param input A univariate time series object (ts class)
#' @param methods A list, defines the models to use for training and forecasting the series.
#' The list must include a sub list with the model type, and the model's arguments (when applicable) and notes about the model.
#' The sub-list name will be used as the model ID. Possible models:
#'
#' \code{\link[stats]{arima}} - model from the stats package
#'
#' \code{\link[forecast]{auto.arima}} - model from the forecast package
#'
#' \code{\link[forecast]{ets}} - model from the forecast package
#'
#' \code{\link[stats]{HoltWinters}} - model from the stats package
#'
#' \code{\link[forecast]{nnetar}} - model from the forecast package
#'
#' \code{\link[forecast]{tslm}} - model from the forecast package (note that the 'tslm' model must have the formula argument in the 'method_arg' argument)
#'
#' @param train_method A list, defines the backtesting parameters:
#'
#' partitions - an integer, set the number of training and testing partitions to be used in the backtesting process,
#' where when partition is set to 1 it is a simple holdout training approach
#'
#' space - an integer, defines the length of the backtesting window expansion
#'
#' sample.out - an integer, defines the length of the testing partitions
#'
#' sample.in - an integer, optional, defines the length of the training partitions, and therefore the structure of the backtesting window.
#' By default, it is set to NULL, which implies that the backtesting uses an expanding window.
#' Otherwise, when the size of the training partition is defined, the backtesting uses a sliding window.
#'
#' The list defines the training approach, either using a single testing partition (sample out)
#' or multiple testing partitions (backtesting). The list should include the training method arguments (please see 'details' for the structure of the argument)
#' @param horizon An integer, defines the forecast horizon
#' @param xreg Optional, a list with two vectors (e.g., data.frame or matrix) of external regressors,
#' one vector corresponding to the input series and second to the forecast itself
#' (e.g., must have the same length as the input and forecast horizon, respectively)
#' @param error A character, defines the error metrics to be used to sort the models leaderboard. Possible metric - "MAPE" or "RMSE"
#' @param level An integer, set the confidence level of the prediction intervals
#' @examples
#'
#' # Defining the models and their arguments
#' methods <- list(ets1 = list(method = "ets",
#' method_arg = list(opt.crit = "lik"),
#' notes = "ETS model with opt.crit = lik"),
#' ets2 = list(method = "ets",
#' method_arg = list(opt.crit = "amse"),
#' notes = "ETS model with opt.crit = amse"),
#' arima1 = list(method = "arima",
#' method_arg = list(order = c(2,1,0)),
#' notes = "ARIMA(2,1,0)"),
#' arima2 = list(method = "arima",
#' method_arg = list(order = c(2,1,2),
#' seasonal = list(order = c(1,1,1))),
#' notes = "SARIMA(2,1,2)(1,1,1)"),
#' auto_arima = list(method = "auto.arima",
#' method_arg = NULL,
#' notes = "auto.arima model"),
#' hw = list(method = "HoltWinters",
#' method_arg = NULL,
#' notes = "HoltWinters Model"),
#' tslm = list(method = "tslm",
#' method_arg = list(formula = input ~ trend + season),
#' notes = "tslm model with trend and seasonal components"))
#' # Training the models with backtesting
#' md <- train_model(input = USgas,
#' methods = methods,
#' train_method = list(partitions = 6,
#' sample.out = 12,
#' space = 3),
#' horizon = 12,
#' error = "MAPE")
#' # View the model performance on the backtesting partitions
#' md$leaderboard
#'
train_model <- function(input,
                        methods,
                        train_method,
                        horizon,
                        error = "MAPE",
                        xreg = NULL,
                        level = c(80, 95)){
  # Train, test, and compare multiple time series models with backtesting.
  #
  # Workflow:
  #   1. Validate all the inputs (level, error metric, input series, methods,
  #      train_method, horizon, xreg)
  #   2. Build the backtesting windows (expanding by default, sliding when
  #      'sample.in' is set) and a grid of model x partition combinations
  #   3. Fit and forecast every combination
  #   4. Score the testing partitions (MAPE, RMSE, prediction-interval
  #      coverage) and build a sorted leaderboard
  #
  # Returns a 'train_model' object with the trained partitions, the final
  # forecast, the error summary, and the leaderboard.
  # Setting the pipe operator
  `%>%` <- magrittr::`%>%`
  # Declaring locals to avoid R CMD check notes on NSE variables
  method_list <- input_freq <- input_length <- w <- s1 <- s2 <- NULL
  grid_df <- models_df <- w_range <- notes <- NULL
  methods_selected <- model_id <- start <- end <- partition <- NULL
  model <- avg_mape <- avg_rmse <- NULL
  # Whenever updating, need to update the add_method function as well
  method_list <- list("arima", "auto.arima", "ets", "HoltWinters", "nnetar", "tslm")
  ### Error Handling
  # Check the level argument
  if(base::all(!is.numeric(level)) ||
     base::any(level %% 1 != 0) ||
     base::any(level <= 0 | level > 100)){
    stop("Error on the 'level' argument: the argument is out of range (0,100]")
  }
  # Check the error argument
  if(base::is.null(error) || !base::is.character(error) || base::length(error) !=1){
    stop("Error on the 'error' argument: the input is not valid, please use either 'RMSE' or 'MAPE'")
  } else if( error != "MAPE" && error != "RMSE"){
    stop("Error on the 'error' argument: the input is not valid, please use either 'RMSE' or 'MAPE'")
  }
  # Checking the input argument
  if(!stats::is.ts(input)){
    stop("The input argument is not a valid 'ts' object")
  } else if(stats::is.mts(input)){
    stop("Cannot use multiple time series object as input")
  }
  # Getting the attributes of the input object
  input_freq <- stats::frequency(input)
  input_length <- base::length(input)
  # Validating the methods argument
  if(!base::is.list(methods)){
    stop("Error on the 'methods' argument: the argument is not a list")
  } else if(base::is.null(base::names(methods))){
    stop("Error on the 'methods' argument: could not find the models IDs")
  } else if(base::any("NULL" %in% base::as.character(methods %>% purrr::map(~.x[["method"]])))){
    stop("Error on the 'methods' argument: at least one of the methods is missing the 'method' argument")
  }
  # Mapping the model IDs to the selected methods and notes
  models_df <- base::data.frame(model_id = base::names(methods),
                                methods_selected = base::as.character(methods %>% purrr::map_chr(~.x[["method"]])),
                                notes = base::as.character(methods %>% purrr::map(~.x[["notes"]])),
                                stringsAsFactors = FALSE)
  if(!base::all(models_df$methods_selected %in% method_list)){
    stop("Error on the 'methods' argument: at least one of the models methods is not valid")
  }
  # Checking the train argument
  if(!base::is.list(train_method)){
    stop("Error on the 'train_method' argument: the argument is not a list")
  } else if(!"partitions" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'partition' argument is missing")
  } else if(!"space" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'space' argument is missing")
  } else if(!"sample.out" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'sample.out' argument is missing")
  } else if(!base::is.numeric(train_method$sample.out) ||
            train_method$sample.out < 1 ||
            train_method$sample.out %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'sample.out' argument is not valide, please use a positive integer")
  } else if(!base::is.numeric(train_method$partitions) ||
            train_method$partitions < 1 ||
            train_method$partitions %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'partitions' argument is not valide, please use a positive integer")
  } else if(!base::is.numeric(train_method$space) ||
            train_method$space < 1 ||
            train_method$space %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'space' argument is not valide, please use a positive integer")
  }
  # End points of the backtesting partitions (each 'space' observations apart)
  w <- seq(from = input_length - train_method$space * (train_method$partitions - 1),
           by = train_method$space,
           length.out = train_method$partitions)
  if(min(w) < input_freq * 2){
    stop("Error on the 'train_method' argument: the length of the first partition is not sufficient to train a model",
         " (must leave at least two full cycles for the sample in partition)")
  }
  # If not using sample.in, will define the start point as 1 (expanding window)
  if(!"sample.in" %in% base::names(train_method) ||
     ("sample.in" %in% base::names(train_method) &&
      base::is.null(train_method$sample.in))){
    s1 <- s2 <- 1
    w_range <- base::data.frame(start = c(base::rep(s1, base::length(w)), s2),
                                end = c(w, input_length),
                                type = c(base::rep("train", base::length(w)), "forecast"),
                                partition = c(base::paste0("partition_", 1:base::length(w), sep = ""), "final_partition"),
                                stringsAsFactors = FALSE)
    # If defining the sample.in -> check that the argument is valid (sliding window)
  } else if("sample.in" %in% base::names(train_method)){
    # If defining the sample.in -> check that the argument is valid
    if(!base::is.numeric(train_method$sample.in) ||
       train_method$sample.in < 1 ||
       train_method$sample.in %% 1 != 0){
      stop("Error on the 'train_method' argument: the training partition length (sample in) of the backtesting is not valid. Please use a positive integer")
    } else if( train_method$sample.in < input_freq * 2){
      stop("Error on the 'train_method' argument: the training partition length (sample in) must have at least two cycles")
    }
    s1 <- w - train_method$sample.out - train_method$sample.in + 1
    s2 <- input_length - train_method$sample.in + 1
    w_range <- base::data.frame(start = c(s1, s2),
                                end = c(w, input_length),
                                type = c(base::rep("train", base::length(w)), "forecast"),
                                partition = c(base::paste0("partition_", 1:base::length(w), sep = ""), "final_partition"),
                                stringsAsFactors = FALSE)
  }
  # Checking the horizon argument
  # Note: the numeric check must come first, otherwise 'horizon %% 1' fails
  # with a cryptic error for non-numeric input before the intended message
  if(!base::is.numeric(horizon) || horizon %% 1 != 0 || horizon <= 0){
    stop("Error on the 'horizon' argument: the 'horizon' is not valid, please make sure using positive integer")
  }
  # Checking the xreg argument
  if(!base::is.null(xreg)){
    if(!all(c("train", "forecast") %in% base::names(xreg))){
      stop("Error on the 'xreg' argument: the 'xreg' list is not valid, please make sure setting the correspinding regressor",
           " inputs for the 'input' argument (train) and for the forecast horizon (forecast)")
    } else if(base::nrow(xreg$train) != base::length(input)){
      stop("Error on the 'xreg' argument: the length of the xreg train input is not aligned with the length of the input series")
    } else if(base::nrow(xreg$forecast) != horizon){
      stop("Error on the 'xreg' argument: the length of the xreg forecast input is not aligned with the forecast horizon")
    }
  }
  # Creating grid of all the modeling combinations (train partitions + final forecast)
  grid_df <- base::expand.grid(models_df$model_id, s1, train_method$sample.out, stringsAsFactors = FALSE) %>%
    stats::setNames(c("model_id", "start", "horizon")) %>%
    dplyr::left_join(models_df, by = c("model_id")) %>%
    dplyr::mutate(type = "train") %>% dplyr::bind_rows(
      base::expand.grid(models_df$model_id, s2, horizon, stringsAsFactors = FALSE) %>%
        stats::setNames(c("model_id", "start", "horizon")) %>%
        dplyr::left_join(models_df, by = c("model_id")) %>%
        dplyr::mutate(type = "forecast")
    ) %>%
    dplyr::left_join(w_range, by = c("start", "type"))
  # Fitting and forecasting every model / partition combination
  fc_output <- lapply(base::seq_along(grid_df$model_id), function(i){
    ts.obj <- train <- test <- md <- fc <- arg <- NULL
    ts.obj <- stats::window(input,
                            start = stats::time(input)[grid_df$start[i]],
                            end = stats::time(input)[grid_df$end[i]])
    if(grid_df$type[i] == "train"){
      ts_partitions <- TSstudio::ts_split(ts.obj = ts.obj, sample.out = train_method$sample.out)
      train <- ts_partitions$train
      test <- ts_partitions$test
      if(!base::is.null(xreg)){
        # Aligning the regressors with the training and testing partitions
        xreg_base <- xreg$train[grid_df$start[i]:grid_df$end[i],]
        xreg_train <- xreg_base[1:base::length(train),]
        xreg_test <- xreg_base[(base::length(train) + 1):nrow(xreg_base),]
      }
      if(grid_df$methods_selected[i] == "arima"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(stats::arima,c(base::list(train), arg_xreg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_test[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(stats::arima,c(base::list(train), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "HoltWinters"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        md <- do.call(stats::HoltWinters,c(base::list(train), arg))
        fc <- forecast::forecast(md,
                                 h = grid_df$horizon[i],
                                 level = level)
      }
      if(grid_df$methods_selected[i] == "auto.arima"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(forecast::auto.arima,c(base::list(train), arg_xreg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_test[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(forecast::auto.arima,c(base::list(train), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "ets"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        md <- do.call(forecast::ets,c(base::list(train), arg))
        fc <- forecast::forecast(md,
                                 h = grid_df$horizon[i],
                                 level = level)
      }
      if(grid_df$methods_selected[i] == "nnetar"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(forecast::nnetar,c(base::list(train), arg_xreg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_test[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(forecast::nnetar,c(base::list(train), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "tslm"){
        # tslm model must have formula argument
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
          # Validate the formula
          if(!"formula" %in% base::names(arg)){
            stop("Error on the 'train_method' argument: cannot run 'tslm' model without the 'formula' argument")
          }
          f <- base::Reduce(base::paste, base::deparse(arg$formula))
          tilde <- base::regexpr("~", f) %>% base::as.numeric()
          # If the tilde is missing return error
          if(tilde == -1){
            stop("Error on the 'train_method' argument: cannot run 'tslm' model without the 'formula' argument")
          }
          # If the formula is good, customize the xreg argument
          # Parsing the formula - extracting the right-hand-side variables
          f1 <- base::substr(f, tilde + 1, base::nchar(f))
          f2 <- base::gsub('[\\+]' , "", base::gsub('\"', "", f1))
          f3 <- base::unlist(base::strsplit(x = f2, split = " "))
          f4 <- f3[base::which(f3 != "")]
          # Checking for external variables (anything besides trend/season)
          if(any(!f4 %in% c("trend", "season"))){
            if(!f4[which(!f4 %in% c("trend", "season"))] %in% base::names(xreg$train)){
              stop(base::paste("Error on the tslm model formula: the ",
                               f4[which(!f4 %in% c("trend", "season"))],
                               "variables could not be found on the xreg input",
                               sep = " "))
            }
            arg$data <- xreg_train
            arg$formula <- stats::as.formula(base::paste("train ~ ",
                                                         base::paste0(f4, collapse = "+"),
                                                         sep = ""))
            md <- do.call(forecast::tslm, arg)
            fc <- forecast::forecast(md,
                                     h = grid_df$horizon[i],
                                     newdata = xreg_test,
                                     level = level)
          } else {
            arg$formula <- stats::as.formula(base::paste("train ~ ",
                                                         base::paste0(f4, collapse = "+"),
                                                         sep = ""))
            md <- do.call(forecast::tslm, arg)
            fc <- forecast::forecast(md,
                                     h = grid_df$horizon[i],
                                     level = level)
          }
        } else {
          stop("Error on the 'train_method' argument: cannot run 'tslm' model without the function's arguments")
        }
      }
    } else if(grid_df$type[i] == "forecast"){
      if(!base::is.null(xreg)){
        xreg_forecast <- xreg_train <- NULL
        xreg_train <- xreg$train[grid_df$start[i]:grid_df$end[i],]
        xreg_forecast <- xreg$forecast
      }
      if(grid_df$methods_selected[i] == "arima"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(stats::arima,c(base::list(ts.obj), arg_xreg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_forecast[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(stats::arima,c(base::list(ts.obj), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "HoltWinters"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        md <- do.call(stats::HoltWinters,c(base::list(ts.obj), arg))
        fc <- forecast::forecast(md,
                                 h = grid_df$horizon[i],
                                 level = level)
      }
      if(grid_df$methods_selected[i] == "auto.arima"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(forecast::auto.arima,c(base::list(ts.obj), arg_xreg))
          # Bug fix: use the forecast horizon regressors ('xreg_forecast');
          # 'xreg_test' is only defined in the training branch and is
          # unbound here
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_forecast[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(forecast::auto.arima,c(base::list(ts.obj), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "ets"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        md <- do.call(forecast::ets,c(base::list(ts.obj), arg))
        fc <- forecast::forecast(md,
                                 h = grid_df$horizon[i],
                                 level = level)
      }
      if(grid_df$methods_selected[i] == "nnetar"){
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
        }
        if("xreg" %in% base::names(arg) && !base::is.null(xreg)){
          arg_xreg <- arg
          arg_xreg$xreg <- xreg_train[,arg$xreg]
          md <- do.call(forecast::nnetar,c(base::list(ts.obj), arg_xreg))
          # Bug fix: use the forecast horizon regressors ('xreg_forecast');
          # 'xreg_test' is only defined in the training branch and is
          # unbound here
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   xreg = xreg_forecast[,arg$xreg],
                                   level = level)
        } else {
          md <- do.call(forecast::nnetar,c(base::list(ts.obj), arg))
          fc <- forecast::forecast(md,
                                   h = grid_df$horizon[i],
                                   level = level)
        }
      }
      if(grid_df$methods_selected[i] == "tslm"){
        # tslm model must have formula argument
        if(!base::is.null(methods[[grid_df$model_id[i]]]$method_arg)){
          arg <- methods[[grid_df$model_id[i]]]$method_arg
          # Validate the formula
          if(!"formula" %in% base::names(arg)){
            stop("Error on the 'train_method' argument: cannot run 'tslm' model without the 'formula' argument")
          }
          f <- base::Reduce(base::paste, base::deparse(arg$formula))
          tilde <- base::regexpr("~", f) %>% base::as.numeric()
          # If the tilde is missing return error
          if(tilde == -1){
            stop("Error on the 'train_method' argument: cannot run 'tslm' model without the 'formula' argument")
          }
          # If the formula is good, customize the xreg argument
          # Parsing the formula - extracting the right-hand-side variables
          f1 <- base::substr(f, tilde + 1, base::nchar(f))
          f2 <- base::gsub('[\\+]' , "", base::gsub('\"', "", f1))
          f3 <- base::unlist(base::strsplit(x = f2, split = " "))
          f4 <- f3[base::which(f3 != "")]
          # Checking for external variables (anything besides trend/season)
          if(any(!f4 %in% c("trend", "season"))){
            if(!f4[which(!f4 %in% c("trend", "season"))] %in% base::names(xreg$train)){
              stop(base::paste("Error on the tslm model formula: the ",
                               f4[which(!f4 %in% c("trend", "season"))],
                               "variables could not be found on the xreg input",
                               sep = " "))
            }
            arg$data <- xreg_train
            arg$formula <- stats::as.formula(base::paste("ts.obj ~ ",
                                                         base::paste0(f4, collapse = "+"),
                                                         sep = ""))
            md <- do.call(forecast::tslm, arg)
            fc <- forecast::forecast(md,
                                     h = grid_df$horizon[i],
                                     newdata = xreg_forecast,
                                     level = level)
          } else {
            arg$formula <- stats::as.formula(base::paste("ts.obj ~ ",
                                                         base::paste0(f4, collapse = "+"),
                                                         sep = ""))
            md <- do.call(forecast::tslm, arg)
            fc <- forecast::forecast(md,
                                     h = grid_df$horizon[i],
                                     level = level)
          }
        } else {
          stop("Error on the 'train_method' argument: cannot run 'tslm' model without the function's arguments")
        }
      }
    }
    output <- list(model = md,
                   forecast = fc,
                   parameters = base::list(
                     type = grid_df$type[i],
                     model_id = grid_df$model_id[i],
                     method = grid_df$methods_selected[i],
                     horizon = grid_df$horizon[i],
                     partition = grid_df$partition[i]))
    return(output)
  })
  # Reorganizing the flat output by partition - training partitions first
  input_window <- grid_df %>% dplyr::select(start, end, horizon, partition) %>% dplyr::distinct()
  t <- base::which(fc_output %>% purrr::map("parameters") %>% purrr::map_chr("type") == "train")
  p1 <- fc_output[t] %>% purrr::map("parameters") %>% purrr::map_chr("partition") %>% base::unique()
  training <- lapply(base::seq_along(p1), function(i1){
    l <- NULL
    l <- base::which(fc_output[t] %>% purrr::map("parameters") %>% purrr::map_chr("partition") == p1[i1])
    md_id <- fc_output[l] %>% purrr::map("parameters") %>% purrr::map_chr("model_id")
    ts.obj <- ts_partitions <- train <- test <- NULL
    ts.obj <- stats::window(input,
                            start = stats::time(input)[input_window$start[which(input_window$partition == p1[i1])]],
                            end = stats::time(input)[input_window$end[which(input_window$partition == p1[i1])]])
    ts_partitions <- TSstudio::ts_split(ts.obj = ts.obj, sample.out = input_window$horizon[which(input_window$partition == p1[i1])])
    partition_output <- lapply(l, function(i2){
      x <- fc_output[[i2]]
      y <- base::list()
      y[[x$parameters$model_id]] <- list(model = x$model, forecast = x$forecast, parameters = x$parameters)
    }) %>% stats::setNames(md_id)
    partition_output$train <- ts_partitions$train
    partition_output$test <- ts_partitions$test
    return(partition_output)
  }) %>% stats::setNames(p1)
  # Then the final (forecast) partition
  f <- base::which(fc_output %>% purrr::map("parameters") %>% purrr::map_chr("type") == "forecast")
  p2 <- fc_output[f] %>% purrr::map("parameters") %>% purrr::map_chr("partition") %>% base::unique()
  forecast <- lapply(base::seq_along(p2), function(i1){
    l <- NULL
    l <- base::which(fc_output[f] %>% purrr::map("parameters") %>% purrr::map_chr("partition") == p2[i1])
    md_id <- fc_output[l] %>% purrr::map("parameters") %>% purrr::map_chr("model_id")
    partition_output <- lapply(l, function(i2){
      x <- fc_output[[i2]]
      y <- base::list()
      y[[x$parameters$model_id]] <- list(model = x$model, forecast = x$forecast, parameters = x$parameters)
    }) %>% stats::setNames(md_id)
    ts.obj <- NULL
    ts.obj <- stats::window(input,
                            start = stats::time(input)[input_window$start[which(input_window$partition == p2[i1])]],
                            end = stats::time(input)[input_window$end[which(input_window$partition == p2[i1])]])
    partition_output$train <- ts.obj
    return(partition_output)
  }) %>% stats::setNames(p2)
  # Scoring every model on the testing partitions - MAPE, RMSE, and the
  # prediction intervals coverage rate per confidence level
  error_summary <- lapply(models_df$model_id, function(m){
    f <- training[p1] %>% purrr::map(m) %>% purrr::map("forecast") %>% purrr::map("mean")
    p <- f %>% base::names()
    a <- training[p1] %>% purrr::map("test")
    u <- training[p1] %>% purrr::map(m) %>% purrr::map("forecast") %>% purrr::map("upper")
    l <- training[p1] %>% purrr::map(m) %>% purrr::map("forecast") %>% purrr::map("lower")
    levels <- training[p1] %>% purrr::map(m) %>% purrr::map("forecast") %>% purrr::map("level")
    error_df <- lapply(base::seq_along(p),function(n){
      df <- coverage_df <- NULL
      # When a single confidence level is used, the intervals may come back
      # as plain vectors - coercing to a matrix for a consistent interface
      if(base::is.null(base::colnames(u[[p[n]]]))){
        if(base::is.null(base::dim(u[[p[n]]]))){
          u[[p[n]]] <- u[[p[n]]] %>% as.matrix()
          l[[p[n]]] <- l[[p[n]]] %>% as.matrix()
        }
      }
      base::colnames(u[[p[n]]]) <- base::paste0(levels[[p[n]]], "%")
      base::colnames(l[[p[n]]]) <- base::paste0(levels[[p[n]]], "%")
      # Share of actuals that fall inside the prediction interval
      coverage_df <- lapply(base::colnames(u[[p[n]]]), function(i){
        df <- base::data.frame(coverage = base::sum(ifelse(u[[p[n]]][, i] >= a[[p[n]]] & l[[p[n]]][, i] <= a[[p[n]]], 1, 0)) / base::length(u[[p[n]]][, i]))
        return(df)
      }) %>% dplyr::bind_rows() %>%
        base::t() %>%
        base::as.data.frame() %>%
        stats::setNames(base::paste0("coverage_", base::colnames(u[[p[n]]])))
      df <- base::cbind(base::data.frame(partition = n,
                                         model_id = m,
                                         mape = base::mean(base::abs(f[[p[n]]] - a[[p[n]]]) / a[[p[n]]]),
                                         rmse = (base::mean((a[[p[n]]] - f[[p[n]]]) ^ 2)) ^ 0.5,
                                         stringsAsFactors = FALSE),
                        coverage_df)
      return(df)
    }) %>% dplyr::bind_rows()
    return(error_df)
  }) %>% stats::setNames(models_df$model_id)
  # Averaging the error metrics across partitions into the leaderboard
  leaderboard <- error_summary %>% dplyr::bind_rows() %>%
    dplyr::group_by(model_id) %>%
    dplyr::summarise_all(~mean(.)) %>% dplyr::select(-partition) %>%
    dplyr::left_join(models_df %>%
                       dplyr::select(model_id, model = methods_selected, notes),
                     by = "model_id") %>%
    dplyr::select(model_id, model, notes, dplyr::everything())
  base::names(leaderboard) <- c("model_id",
                                "model",
                                "notes",
                                base::paste0("avg_", base::names(leaderboard)[4:base::ncol(leaderboard)]))
  # Sorting the leaderboard by the selected error metric
  if(error == "MAPE"){
    leaderboard <- leaderboard %>% dplyr::arrange(avg_mape)
  } else if(error == "RMSE"){
    leaderboard <- leaderboard %>% dplyr::arrange(avg_rmse)
  }
  output <- base::list(train = training,
                       forecast = forecast$final_partition,
                       input = input,
                       error_summary = error_summary,
                       leaderboard = leaderboard,
                       parameters = list(methods = methods,
                                         train_method = train_method,
                                         horizon = horizon,
                                         xreg = xreg,
                                         error_metric = error,
                                         level = level))
  print(leaderboard)
  class(output) <- "train_model"
  return(output)
}
#' Build the \code{\link[TSstudio]{train_model}} Function's Components
#' @description Add, edit, or remove the components of the \code{\link[TSstudio]{train_model}} function
#' @export
#' @param model.obj The train_model skeleton, created by the create_model
#' function or edited by add_input, add_methods, remove_methods, add_train_method or add_horizon
#' @param input A univariate time series object (ts class)
#' @param methods A list, defines the models to use for training and forecasting the series.
#' The list must include a sub list with the model type, and the model's arguments (when applicable) and notes about the model.
#' The sub-list name will be used as the model ID. Possible models:
#'
#' \code{\link[stats]{arima}} - model from the stats package
#'
#' \code{\link[forecast]{auto.arima}} - model from the forecast package
#'
#' \code{\link[forecast]{ets}} - model from the forecast package
#'
#' \code{\link[stats]{HoltWinters}} - model from the stats package
#'
#' \code{\link[forecast]{nnetar}} - model from the forecast package
#'
#' \code{\link[forecast]{tslm}} - model from the forecast package (note that the 'tslm' model must have the formula argument in the 'method_arg' argument)
#'
#' @param train_method A list, defines the train approach, either using a single testing partition (sample out)
#' or use multiple testing partitions (backtesting). The list should include the training method argument, (please see 'details' for the structure of the argument)
#' @param method_ids A character, defines the IDs of the model methods to be removed with the remove_methods function
#' @param horizon An integer, defines the forecast horizon
#' @param xreg Optional, a list with two vectors (e.g., data.frame or matrix) of external regressors,
#' one vector corresponding to the input series and second to the forecast itself
#' (e.g., must have the same length as the input and forecast horizon, respectively)
#' @param error A character, defines the error metrics to be used to sort the models leaderboard. Possible metric - "MAPE" or "RMSE"
#' @param level An integer, set the confidence level of the prediction intervals
#' @examples
#'
#' ### Building train_model function by adding its different components
#' # Create a skeleton model
#' md <- create_model()
#'
#' class(md)
#'
#' # Add input
#' data(USgas)
#' md <- add_input(model.obj = md, input = USgas)
#'
#' # Add methods
#' methods <- list(ets1 = list(method = "ets",
#' method_arg = list(opt.crit = "lik"),
#' notes = "ETS model with opt.crit = lik"),
#' ets2 = list(method = "ets",
#' method_arg = list(opt.crit = "amse"),
#' notes = "ETS model with opt.crit = amse"),
#' arima1 = list(method = "arima",
#' method_arg = list(order = c(1,1,1),
#' seasonal = list(order = c(1,0,1))),
#' notes = "SARIMA(1,1,1)(1,0,1)"))
#'
#' md <- add_methods(model.obj = md, methods = methods)
#'
#' # Add additional methods
#' methods2 <- list(arima2 = list(method = "arima",
#' method_arg = list(order = c(2,1,2),
#' seasonal = list(order = c(1,1,1))),
#' notes = "SARIMA(2,1,2)(1,1,1)"),
#' hw = list(method = "HoltWinters",
#' method_arg = NULL,
#' notes = "HoltWinters Model"),
#' tslm = list(method = "tslm",
#' method_arg = list(formula = input ~ trend + season),
#' notes = "tslm model with trend and seasonal components"))
#'
#' md <- add_methods(model.obj = md, methods = methods2)
#'
#' # Remove methods
#' md <- remove_methods(model.obj = md, method_ids = c("ets2", "auto_arima"))
#'
#' # Add train method
#' md <- add_train_method(model.obj = md, train_method = list(partitions = 6,
#' sample.out = 12,
#' space = 3))
#'
#'
#' # Set the forecast horizon
#' md <- add_horizon(model.obj = md, horizon = 12)
#'
#' # Add the forecast prediction intervals confidence level
#' md <- add_level(model.obj = md, level = c(90, 95))
#'
#' ### Alternatively, pipe the function with the magrittr package
#'
#' library(magrittr)
#'
#' md <- create_model() %>%
#' add_input(input = USgas) %>%
#' add_methods(methods = methods) %>%
#' add_methods(methods = methods2) %>%
#' add_train_method(train_method = list(partitions = 6,
#' sample.out = 12,
#' space = 3)) %>%
#' add_horizon(horizon = 12) %>%
#' add_level(level = c(90, 95))
#'
#' # Run the model
#' fc <- md %>% build_model()
create_model <- function(){
  # Build an empty "train_model" skeleton; each component starts as NULL and
  # is populated later by the add_* helper functions (add_input, add_methods,
  # add_train_method, add_horizon).
  base::structure(
    base::list(input = NULL,
               methods = NULL,
               train_method = NULL,
               horizon = NULL),
    class = "train_model"
  )
}
#' @export
#' @rdname create_model
#'
add_input <- function(model.obj, input){
  # Attach a univariate 'ts' input series to a 'train_model' skeleton.
  # If an input already exists, the user is asked (interactively) whether to
  # overwrite it. Returns the (possibly updated) model object.
  # Error handling
  # Checking the input object
  if(!stats::is.ts(input)){
    stop("The input argument is not a valid 'ts' object")
  } else if(stats::is.mts(input)){
    stop("Cannot use multiple time series object as an input")
  }
  # Checking the model.obj
  # Note: using inherits() rather than class() == "...", since class() may
  # return a vector of length > 1
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  } else if("input" %in% base::names(model.obj) && base::is.null(model.obj$input)){
    model.obj$input <- input
  } else if("input" %in% base::names(model.obj) && !base::is.null(model.obj$input)){
    # An input already exists - confirm before overwriting
    q <- base::tolower(base::readline("The 'model.obj' already has input object, do you want to overwrite it? yes/no "))
    if(q == "y" || q == "yes"){
      model.obj$input <- input
    } else if( q == "n" || q == "no"){
      warning("The 'input' was not added to the model object")
    } else {
      stop("Invalid input...")
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
add_methods <- function(model.obj, methods){
  # Add (or merge) forecasting methods into a 'train_model' skeleton.
  # 'methods' is a named list of sub-lists, each with a 'method' element
  # (and optionally 'method_arg' and 'notes'); the sub-list names are the
  # model IDs. Existing IDs trigger an interactive overwrite prompt.
  method_list <- models_df <- NULL
  # Whenever updating, need to update the train_model function as well
  method_list <- list("arima", "auto.arima", "ets", "HoltWinters", "nnetar", "tslm")
  # Error handling
  # Checking the model.obj class (inherits() is safer than class() == "...")
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Validating the methods object
  if(!base::is.list(methods)){
    stop("Error on the 'methods' argument: the argument is not a list")
  } else if(base::is.null(base::names(methods))){
    stop("Error on the 'methods' argument: could not find the models IDs")
  } else if(base::any(vapply(methods, function(m) base::is.null(m[["method"]]), logical(1)))){
    stop("Error on the 'methods' argument: at least one of the methods is missing the 'method' argument")
  }
  if(!base::all(vapply(methods, function(m) base::as.character(m[["method"]]), character(1)) %in% method_list)){
    stop("Error on the 'methods' argument: at least one of the models methods is not valid")
  }
  # Adding the methods to the model.obj object
  if(("methods" %in% base::names(model.obj) && base::is.null(model.obj$methods))|| !"methods" %in% base::names(model.obj)){
    model.obj$methods <- methods
    # In case the object has existing methods
  } else if("methods" %in% base::names(model.obj) && !base::is.null(model.obj$methods)) {
    # Validating that the object does not exist already
    for(i in base::names(methods)){
      if(i %in% base::names(model.obj$methods)){
        # ID collision - confirm before overwriting
        q <- base::tolower(base::readline(base::paste("The", i, "method already exists in the model object, do you wish to overwrite it? yes/no ", sep = " ")))
        if(q == "y" || q == "yes"){
          model.obj$methods[[i]] <- methods[[i]]
        } else{
          warning(base::paste("Method", i, "were not added", sep = " "))
        }
      } else {
        model.obj$methods[[i]] <- methods[[i]]
      }
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
remove_methods <- function(model.obj, method_ids){
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Checking the method_ids argument - a character vector of method IDs
  if(!base::is.character(method_ids)){
    stop("The 'method_ids' argument is not valid input")
  }
  if(!"methods" %in% base::names(model.obj) || base::is.null(model.obj$methods)){
    stop("The input model object does not have any available method")
  }
  # Drop each requested ID; warn (do not fail) on unknown IDs
  for(i in method_ids){
    if(i %in% base::names(model.obj$methods)){
      model.obj$methods[[i]] <- NULL
    } else {
      warning(base::paste("The", i, "does not exist on the model object"))
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
add_train_method <- function(model.obj, train_method){
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Checking the train_method argument - a list with positive integer
  # 'partitions', 'sample.out' and 'space' components
  if(!base::is.list(train_method)){
    stop("Error on the 'train_method' argument: the argument is not a list")
  } else if(!"partitions" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'partitions' argument is missing")
  } else if(!"space" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'space' argument is missing")
  } else if(!"sample.out" %in% base::names(train_method)){
    stop("Error on the 'train_method' argument: the 'sample.out' argument is missing")
  } else if(!base::is.numeric(train_method$sample.out) ||
            train_method$sample.out < 1 ||
            train_method$sample.out %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'sample.out' argument is not valid, please use a positive integer")
  } else if(!base::is.numeric(train_method$partitions) ||
            train_method$partitions < 1 ||
            train_method$partitions %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'partitions' argument is not valid, please use a positive integer")
  } else if(!base::is.numeric(train_method$space) ||
            train_method$space < 1 ||
            train_method$space %% 1 != 0){
    stop("Error on the 'train_method' argument: the 'space' argument is not valid, please use a positive integer")
  }
  # Adding the train_method object, prompting before overwriting an existing one
  if(!"train_method" %in% base::names(model.obj) || base::is.null(model.obj$train_method)){
    model.obj$train_method <- train_method
  } else if(!base::is.null(model.obj$train_method)){
    q <- base::tolower(base::readline("The model object already has train method, do you wish to overwrite it? (yes) "))
    if(q == "y" || q == "yes" || q == ""){
      model.obj$train_method <- train_method
    } else{
      warning("Did not update the train method")
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
add_horizon <- function(model.obj, horizon){
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Validating the horizon argument (consistent with the train_method checks)
  if(!base::is.numeric(horizon) || base::length(horizon) != 1 ||
     horizon < 1 || horizon %% 1 != 0){
    stop("Error on the 'horizon' argument: the argument is not valid, please use a positive integer")
  }
  # Adding the horizon, prompting before overwriting an existing value
  if(!"horizon" %in% base::names(model.obj) || base::is.null(model.obj$horizon)){
    model.obj$horizon <- horizon
  } else if(!base::is.null(model.obj$horizon)){
    q <- base::tolower(base::readline("The model object already has horizon, do you wish to overwrite it? yes/no "))
    if(q == "y" || q == "yes"){
      model.obj$horizon <- horizon
    } else{
      warning("No change had made on the model 'horizon' argument")
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
build_model <- function(model.obj){
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # All mandatory components must be set before training can start
  if(!"horizon" %in% base::names(model.obj) || base::is.null(model.obj$horizon)){
    stop("Cannot build a model, the 'horizon' argument is missing")
  }
  if(!"methods" %in% base::names(model.obj) || base::is.null(model.obj$methods)){
    stop("Cannot build a model, the 'methods' argument is missing")
  }
  if(!"train_method" %in% base::names(model.obj) || base::is.null(model.obj$train_method)){
    stop("Cannot build a model, the 'train_method' argument is missing")
  }
  if(!"input" %in% base::names(model.obj) || base::is.null(model.obj$input)){
    stop("Cannot build a model, the 'input' argument is missing")
  }
  # Fill optional components with the train_model defaults
  if(!"error" %in% base::names(model.obj)){
    model.obj$error <- "MAPE"
  }
  if(!"level" %in% base::names(model.obj)){
    model.obj$level <- c(80, 95)
  }
  if(!"xreg" %in% base::names(model.obj)){
    model.obj$xreg <- NULL
  }
  # Delegate the actual training to TSstudio::train_model
  output <- TSstudio::train_model(input = model.obj$input,
                                  methods = model.obj$methods,
                                  train_method = model.obj$train_method,
                                  horizon = model.obj$horizon,
                                  xreg = model.obj$xreg,
                                  error = model.obj$error,
                                  level = model.obj$level)
  return(output)
}
#' @export
#' @rdname create_model
#'
set_error <- function(model.obj, error){
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Check the error argument - a single string, either "MAPE" or "RMSE"
  if(base::is.null(error) || !base::is.character(error) || base::length(error) !=1){
    stop("Error on the 'error' argument: the input is not valid, please use either 'RMSE' or 'MAPE'")
  } else if( error != "MAPE" && error != "RMSE"){
    stop("Error on the 'error' argument: the input is not valid, please use either 'RMSE' or 'MAPE'")
  }
  # Setting the error metric, prompting before overwriting an existing one
  if(!"error" %in% base::names(model.obj) ||
     ("error" %in% base::names(model.obj) && base::is.null(model.obj$error))){
    model.obj$error <- error
  } else if("error" %in% base::names(model.obj) && !base::is.null(model.obj$error)){
    q <- base::tolower(base::readline("The model object already has 'error' argument, do you wish to overwrite it? (yes) "))
    if(q == "y" || q == "yes" || q == ""){
      model.obj$error <- error
    } else{
      warning("No change had made on the model 'error' argument")
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
add_xreg <- function(model.obj, xreg){
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Checking the xreg argument - when provided, it must be a list with both
  # 'train' (regressors for the input series) and 'forecast' (regressors for
  # the forecast horizon) components
  if(!base::is.null(xreg)){
    if(!all(c("train", "forecast") %in% base::names(xreg))){
      stop("Error on the 'xreg' argument: the 'xreg' list is not valid, please make sure setting the corresponding regressor",
           " inputs for the 'input' argument (train) and for the forecast horizon (forecast)")
    }
  }
  # Setting the xreg component, prompting before overwriting an existing one
  if(!"xreg" %in% base::names(model.obj) ||
     ("xreg" %in% base::names(model.obj) && base::is.null(model.obj$xreg))){
    model.obj$xreg <- xreg
  } else if("xreg" %in% base::names(model.obj) && !base::is.null(model.obj$xreg)){
    q <- base::tolower(base::readline("The model object already has 'xreg' argument, do you wish to overwrite it? (yes) "))
    if(q == "y" || q == "yes" || q == ""){
      model.obj$xreg <- xreg
    } else{
      warning("No change had made on the model 'xreg' argument")
    }
  }
  return(model.obj)
}
#' @export
#' @rdname create_model
#'
add_level <- function(model.obj, level){
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Check the level argument - whole numbers within the range (0, 100]
  if(!base::is.numeric(level) ||
     base::any(level %% 1 != 0) ||
     base::any(level <= 0 | level > 100)){
    stop("Error on the 'level' argument: the argument is out of range (0,100]")
  }
  # Setting the confidence levels, prompting before overwriting existing ones
  if(!"level" %in% base::names(model.obj) || base::is.null(model.obj$level)){
    model.obj$level <- level
  } else if(!base::is.null(model.obj$level)){
    q <- base::tolower(base::readline("The model object already has 'level', do you wish to overwrite it? (yes) "))
    if(q == "y" || q == "yes" || q == ""){
      model.obj$level <- level
    } else{
      warning("No change had made on the model 'level' argument")
    }
  }
  return(model.obj)
}
#' Plot the Models Performance on the Testing Partitions
#' @export
#' @details The plot_model function provides a visualization of the models' performance on the testing partitions of the train_model function output
#' @param model.obj A train_model object
#' @param model_ids A character, defines the trained models to plot, if set to NULL (default), will plot all the models
#' @return Animation of models forecast on the testing partitions compared to the actuals
#' @examples
#' # Defining the models and their arguments
#' methods <- list(ets1 = list(method = "ets",
#' method_arg = list(opt.crit = "lik"),
#' notes = "ETS model with opt.crit = lik"),
#' ets2 = list(method = "ets",
#' method_arg = list(opt.crit = "amse"),
#' notes = "ETS model with opt.crit = amse"),
#' arima1 = list(method = "arima",
#' method_arg = list(order = c(2,1,0)),
#' notes = "ARIMA(2,1,0)"),
#' arima2 = list(method = "arima",
#' method_arg = list(order = c(2,1,2),
#' seasonal = list(order = c(1,1,1))),
#' notes = "SARIMA(2,1,2)(1,1,1)"),
#' hw = list(method = "HoltWinters",
#' method_arg = NULL,
#' notes = "HoltWinters Model"),
#' tslm = list(method = "tslm",
#' method_arg = list(formula = input ~ trend + season),
#' notes = "tslm model with trend and seasonal components"))
#' # Training the models with backtesting
#' md <- train_model(input = USgas,
#' methods = methods,
#' train_method = list(partitions = 6,
#' sample.out = 12,
#' space = 3),
#' horizon = 12,
#' error = "MAPE")
#' # Plot the models performance on the testing partitions
#' plot_model(model.obj = md)
#'
#' # Plot only the ETS models
#' plot_model(model.obj = md , model_ids = c("ets1", "ets2"))
#'
plot_model <- function(model.obj, model_ids = NULL){
  `%>%` <- magrittr::`%>%`
  m <- p <- ac_df <- fc_df <- df <- output <- obj_name <- NULL
  # Capture the name of the input object for the plot title
  obj_name <- base::deparse(base::substitute(model.obj))
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  m <- model.obj$parameters$methods %>% base::names()
  if(base::is.null(m)){
    stop("Error on the 'model.obj' argument: cannot find any method in the 'model.obj' argument")
  }
  if(!base::is.null(model_ids)){
    if(!base::all(model_ids %in% m)){
      stop("Error on the 'model_ids' argument: cannot find some (or all) of the model ids in the 'model.obj' object")
    }
    m <- model_ids
  }
  p <- model.obj$parameters$train_method$partitions
  # The actual series is replicated once per partition so that every animation
  # frame (partition) can show it next to that partition's forecasts
  ac_df <- base::data.frame(y = rep(base::as.numeric(model.obj$input), p),
                            time = base::rep(base::as.numeric(stats::time(model.obj$input)), p),
                            partition = base::rep(1:p, each = base::length(model.obj$input)),
                            type = "actual")
  # Collect the point forecasts of every selected model on every testing partition
  fc_df <- lapply(m, function(i){
    df1 <- df2 <- NULL
    df1 <- model.obj$train %>%
      purrr::map(~.x[[i]]) %>%
      purrr::map(~.x[["forecast"]]) %>%
      purrr::map(~.x[["mean"]]) %>%
      dplyr::bind_cols()
    # One column per partition; stack them into a long data frame
    # (loop variable renamed from 'c' to avoid shadowing base::c)
    for(j in base::seq_len(base::ncol(df1))){
      temp <- df3 <- NULL
      temp <- df1[, j] %>%
        as.data.frame() %>%
        stats::setNames("y")
      df3 <- base::data.frame(y = base::as.numeric(temp$y),
                              time = base::as.numeric(stats::time(temp$y)),
                              partition = j,
                              type = i,
                              stringsAsFactors = FALSE)
      df2 <- dplyr::bind_rows(df2, df3)
    }
    return(df2)
  }) %>% dplyr::bind_rows()
  df <- rbind(fc_df, ac_df)
  output <- plotly::plot_ly(data = df,
                            x = ~ time,
                            y = ~ y,
                            split = ~ type,
                            frame = ~ partition,
                            type = 'scatter',
                            mode = 'lines',
                            line = list(simplify = FALSE))%>%
    plotly::layout(title = base::paste(obj_name, "Models Performance by Testing Partitions", sep = " "),
                   margin = 50,
                   xaxis = list(
                     title = "Date",
                     zeroline = FALSE),
                   yaxis = list(
                     title = "",
                     zeroline = FALSE
                   ),
                   font = list(color = "black"),
                   plot_bgcolor = "white",
                   paper_bgcolor = "white"
    ) %>%
    plotly::animation_opts(
      frame = 500,
      transition = 0,
      redraw = FALSE
    ) %>%
    plotly::animation_slider(
      hide = FALSE
    ) %>%
    plotly::animation_button(
      x = 1, xanchor = "right", y = 0, yanchor = "bottom"
    )
  return(output)
}
#' Plot the Models Error Metric on the Testing Partitions
#' @export
#' @details The plot_error function provides a visualization of the models' error rates on the testing partitions of the train_model function output
#' @param model.obj A train_model object
#' @param error A character, defines the type of error metrics to plot, possible metric - "MAPE" or "RMSE"
#' @param palette A character, defines the color palette to be used on the plot, use row.names(RColorBrewer::brewer.pal.info) to view possible color palettes
#' @return A plot with a summary of the models' error rate by testing partition
#' @examples
#' # Defining the models and their arguments
#' methods <- list(ets1 = list(method = "ets",
#' method_arg = list(opt.crit = "lik"),
#' notes = "ETS model with opt.crit = lik"),
#' ets2 = list(method = "ets",
#' method_arg = list(opt.crit = "amse"),
#' notes = "ETS model with opt.crit = amse"),
#' arima1 = list(method = "arima",
#' method_arg = list(order = c(2,1,0)),
#' notes = "ARIMA(2,1,0)"),
#' arima2 = list(method = "arima",
#' method_arg = list(order = c(2,1,2),
#' seasonal = list(order = c(1,1,1))),
#' notes = "SARIMA(2,1,2)(1,1,1)"),
#' hw = list(method = "HoltWinters",
#' method_arg = NULL,
#' notes = "HoltWinters Model"),
#' tslm = list(method = "tslm",
#' method_arg = list(formula = input ~ trend + season),
#' notes = "tslm model with trend and seasonal components"))
#' # Training the models with backtesting
#' md <- train_model(input = USgas,
#' methods = methods,
#' train_method = list(partitions = 6,
#' sample.out = 12,
#' space = 3),
#' horizon = 12,
#' error = "MAPE")
#'
#' # Plot the models performance on the testing partitions
#' plot_error(model.obj = md)
#'
plot_error <- function(model.obj, error = "MAPE", palette = "Set1"){
  `%>%` <- magrittr::`%>%`
  m <- n_colors <- colors_list <- p1 <- p2 <- output <- error_df <- model_id <- NULL
  # Convert a hex color to an "r,g,b" string for building rgba() fill colors
  hex_to_rgb <- function(hex){
    rgb <- base::paste0(as.numeric(grDevices::col2rgb(hex) %>% base::t()), collapse = ",")
    return(rgb)
  }
  # Error handling
  # Checking the model.obj class
  if(!base::inherits(model.obj, "train_model")){
    stop("The 'model.obj' is not valid 'train_model' object")
  }
  # Checking the error argument
  if(error != "MAPE" && error != "RMSE"){
    stop("Error on the 'error' argument: invalid error metric, can use either 'MAPE' or 'RMSE'")
  }
  error_df <- model.obj$error_summary %>% dplyr::bind_rows()
  m <- unique(error_df$model_id)
  palette_list <- base::row.names(RColorBrewer::brewer.pal.info)
  if(base::length(palette) != 1 || !palette %in% palette_list){
    stop("Error on the 'palette' argument: cannot find the color palette on the RColorBrewer palettes list, ",
         "use row.names(RColorBrewer::brewer.pal.info) to view possible color palettes")
  }
  # Interpolate the chosen palette so there is one distinct color per model
  n_colors <- RColorBrewer::brewer.pal.info$maxcolors[row.names(RColorBrewer::brewer.pal.info) == palette]
  colors_list <- grDevices::colorRampPalette(RColorBrewer::brewer.pal(n_colors, palette))(base::length(m))
  # Axis settings depend only on the selected metric (MAPE is shown in percent)
  if(error == "MAPE"){
    y_axis <- list(title = "MAPE", ticksuffix = '%')
  } else {
    y_axis <- list(title = "RMSE")
  }
  p1 <- plotly::plot_ly()
  p2 <- plotly::plot_ly()
  # One line trace (by partition) and one box trace per model, sharing a legend group
  for(i in base::seq_along(m)){
    df <- NULL
    df <- error_df %>% dplyr::filter(model_id == m[i])
    vals <- if(error == "MAPE") df$mape * 100 else df$rmse
    p1 <- p1 %>% plotly::add_lines(x = df$partition, y = vals, name = m[i],
                                   showlegend = TRUE,
                                   legendgroup = m[i],
                                   line = list(color = colors_list[i]))
    p2 <- p2 %>% plotly::add_trace(y = vals, name = m[i],
                                   type = "box",
                                   fillcolor = base::paste("rgba(", hex_to_rgb(colors_list[i]), ", 0.5)", sep = ""),
                                   line = list(color = colors_list[i]),
                                   marker = list(color = colors_list[i]),
                                   boxpoints = "all",
                                   jitter = 0.3,
                                   pointpos = -1.8,
                                   showlegend = FALSE,
                                   legendgroup = m[i])
  }
  p1 <- p1 %>% plotly::layout(yaxis = y_axis,
                              xaxis = list(title = "Partition"))
  p2 <- p2 %>% plotly::layout(yaxis = y_axis,
                              xaxis = list(title = "Partition"))
  output <- plotly::subplot(p1, p2, nrows = 1, shareY = TRUE) %>%
    plotly::layout(title = base::paste("Model Performance by Testing Partition -", error, sep = " "))
  return(output)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workspaces_service.R
\name{workspaces}
\alias{workspaces}
\title{Amazon WorkSpaces}
\usage{
workspaces(
config = list(),
credentials = list(),
endpoint = NULL,
region = NULL
)
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{credentials}:} {\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
}}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e. \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
\item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
}}
\item{credentials}{Optional credentials shorthand for the config parameter
\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
}}
\item{endpoint}{Optional shorthand for complete URL to use for the constructed client.}
\item{region}{Optional shorthand for AWS Region used in instantiating the client.}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
Amazon WorkSpaces Service
Amazon WorkSpaces enables you to provision virtual, cloud-based
Microsoft Windows or Amazon Linux desktops for your users, known as
\emph{WorkSpaces}. WorkSpaces eliminates the need to procure and deploy
hardware or install complex software. You can quickly add or remove
users as your needs change. Users can access their virtual desktops from
multiple devices or web browsers.
This API Reference provides detailed information about the actions, data
types, parameters, and errors of the WorkSpaces service. For more
information about the supported Amazon Web Services Regions, endpoints,
and service quotas of the Amazon WorkSpaces service, see \href{https://docs.aws.amazon.com/general/latest/gr/wsp.html}{WorkSpaces endpoints and quotas} in the
\emph{Amazon Web Services General Reference}.
You can also manage your WorkSpaces resources using the WorkSpaces
console, Command Line Interface (CLI), and SDKs. For more information
about administering WorkSpaces, see the \href{https://docs.aws.amazon.com/workspaces/latest/adminguide/}{Amazon WorkSpaces Administration Guide}. For
more information about using the Amazon WorkSpaces client application or
web browser to access provisioned WorkSpaces, see the \href{https://docs.aws.amazon.com/workspaces/latest/userguide/}{Amazon WorkSpaces User Guide}.
For more information about using the CLI to manage your WorkSpaces
resources, see the \href{https://docs.aws.amazon.com/cli/latest/reference/workspaces/index.html}{WorkSpaces section of the CLI Reference}.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- workspaces(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical",
sts_regional_endpoint = "string"
),
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string"
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[=workspaces_associate_connection_alias]{associate_connection_alias} \tab Associates the specified connection alias with the specified directory to enable cross-Region redirection\cr
\link[=workspaces_associate_ip_groups]{associate_ip_groups} \tab Associates the specified IP access control group with the specified directory\cr
\link[=workspaces_authorize_ip_rules]{authorize_ip_rules} \tab Adds one or more rules to the specified IP access control group\cr
\link[=workspaces_copy_workspace_image]{copy_workspace_image} \tab Copies the specified image from the specified Region to the current Region\cr
\link[=workspaces_create_connect_client_add_in]{create_connect_client_add_in} \tab Creates a client-add-in for Amazon Connect within a directory\cr
\link[=workspaces_create_connection_alias]{create_connection_alias} \tab Creates the specified connection alias for use with cross-Region redirection\cr
\link[=workspaces_create_ip_group]{create_ip_group} \tab Creates an IP access control group\cr
\link[=workspaces_create_standby_workspaces]{create_standby_workspaces} \tab Creates a standby WorkSpace in a secondary Region\cr
\link[=workspaces_create_tags]{create_tags} \tab Creates the specified tags for the specified WorkSpaces resource\cr
\link[=workspaces_create_updated_workspace_image]{create_updated_workspace_image} \tab Creates a new updated WorkSpace image based on the specified source image\cr
\link[=workspaces_create_workspace_bundle]{create_workspace_bundle} \tab Creates the specified WorkSpace bundle\cr
\link[=workspaces_create_workspace_image]{create_workspace_image} \tab Creates a new WorkSpace image from an existing WorkSpace\cr
\link[=workspaces_create_workspaces]{create_workspaces} \tab Creates one or more WorkSpaces\cr
\link[=workspaces_delete_client_branding]{delete_client_branding} \tab Deletes customized client branding\cr
\link[=workspaces_delete_connect_client_add_in]{delete_connect_client_add_in} \tab Deletes a client-add-in for Amazon Connect that is configured within a directory\cr
\link[=workspaces_delete_connection_alias]{delete_connection_alias} \tab Deletes the specified connection alias\cr
\link[=workspaces_delete_ip_group]{delete_ip_group} \tab Deletes the specified IP access control group\cr
\link[=workspaces_delete_tags]{delete_tags} \tab Deletes the specified tags from the specified WorkSpaces resource\cr
\link[=workspaces_delete_workspace_bundle]{delete_workspace_bundle} \tab Deletes the specified WorkSpace bundle\cr
\link[=workspaces_delete_workspace_image]{delete_workspace_image} \tab Deletes the specified image from your account\cr
\link[=workspaces_deregister_workspace_directory]{deregister_workspace_directory} \tab Deregisters the specified directory\cr
\link[=workspaces_describe_account]{describe_account} \tab Retrieves a list that describes the configuration of Bring Your Own License (BYOL) for the specified account\cr
\link[=workspaces_describe_account_modifications]{describe_account_modifications} \tab Retrieves a list that describes modifications to the configuration of Bring Your Own License (BYOL) for the specified account\cr
\link[=workspaces_describe_client_branding]{describe_client_branding} \tab Describes the specified client branding\cr
\link[=workspaces_describe_client_properties]{describe_client_properties} \tab Retrieves a list that describes one or more specified Amazon WorkSpaces clients\cr
\link[=workspaces_describe_connect_client_add_ins]{describe_connect_client_add_ins} \tab Retrieves a list of Amazon Connect client add-ins that have been created\cr
\link[=workspaces_describe_connection_aliases]{describe_connection_aliases} \tab Retrieves a list that describes the connection aliases used for cross-Region redirection\cr
\link[=workspaces_describe_connection_alias_permissions]{describe_connection_alias_permissions} \tab Describes the permissions that the owner of a connection alias has granted to another Amazon Web Services account for the specified connection alias\cr
\link[=workspaces_describe_ip_groups]{describe_ip_groups} \tab Describes one or more of your IP access control groups\cr
\link[=workspaces_describe_tags]{describe_tags} \tab Describes the specified tags for the specified WorkSpaces resource\cr
\link[=workspaces_describe_workspace_bundles]{describe_workspace_bundles} \tab Retrieves a list that describes the available WorkSpace bundles\cr
\link[=workspaces_describe_workspace_directories]{describe_workspace_directories} \tab Describes the available directories that are registered with Amazon WorkSpaces\cr
\link[=workspaces_describe_workspace_image_permissions]{describe_workspace_image_permissions} \tab Describes the permissions that the owner of an image has granted to other Amazon Web Services accounts for an image\cr
\link[=workspaces_describe_workspace_images]{describe_workspace_images} \tab Retrieves a list that describes one or more specified images, if the image identifiers are provided\cr
\link[=workspaces_describe_workspaces]{describe_workspaces} \tab Describes the specified WorkSpaces\cr
\link[=workspaces_describe_workspaces_connection_status]{describe_workspaces_connection_status} \tab Describes the connection status of the specified WorkSpaces\cr
\link[=workspaces_describe_workspace_snapshots]{describe_workspace_snapshots} \tab Describes the snapshots for the specified WorkSpace\cr
\link[=workspaces_disassociate_connection_alias]{disassociate_connection_alias} \tab Disassociates a connection alias from a directory\cr
\link[=workspaces_disassociate_ip_groups]{disassociate_ip_groups} \tab Disassociates the specified IP access control group from the specified directory\cr
\link[=workspaces_import_client_branding]{import_client_branding} \tab Imports client branding\cr
\link[=workspaces_import_workspace_image]{import_workspace_image} \tab Imports the specified Windows 10 or 11 Bring Your Own License (BYOL) image into Amazon WorkSpaces\cr
\link[=workspaces_list_available_management_cidr_ranges]{list_available_management_cidr_ranges} \tab Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you can use for the network management interface when you enable Bring Your Own License (BYOL)\cr
\link[=workspaces_migrate_workspace]{migrate_workspace} \tab Migrates a WorkSpace from one operating system or bundle type to another, while retaining the data on the user volume\cr
\link[=workspaces_modify_account]{modify_account} \tab Modifies the configuration of Bring Your Own License (BYOL) for the specified account\cr
\link[=workspaces_modify_certificate_based_auth_properties]{modify_certificate_based_auth_properties} \tab Modifies the properties of the certificate-based authentication you want to use with your WorkSpaces\cr
\link[=workspaces_modify_client_properties]{modify_client_properties} \tab Modifies the properties of the specified Amazon WorkSpaces clients\cr
\link[=workspaces_modify_saml_properties]{modify_saml_properties} \tab Modifies multiple properties related to SAML 2\cr
\link[=workspaces_modify_selfservice_permissions]{modify_selfservice_permissions} \tab Modifies the self-service WorkSpace management capabilities for your users\cr
\link[=workspaces_modify_workspace_access_properties]{modify_workspace_access_properties} \tab Specifies which devices and operating systems users can use to access their WorkSpaces\cr
\link[=workspaces_modify_workspace_creation_properties]{modify_workspace_creation_properties} \tab Modify the default properties used to create WorkSpaces\cr
\link[=workspaces_modify_workspace_properties]{modify_workspace_properties} \tab Modifies the specified WorkSpace properties\cr
\link[=workspaces_modify_workspace_state]{modify_workspace_state} \tab Sets the state of the specified WorkSpace\cr
\link[=workspaces_reboot_workspaces]{reboot_workspaces} \tab Reboots the specified WorkSpaces\cr
\link[=workspaces_rebuild_workspaces]{rebuild_workspaces} \tab Rebuilds the specified WorkSpace\cr
\link[=workspaces_register_workspace_directory]{register_workspace_directory} \tab Registers the specified directory\cr
\link[=workspaces_restore_workspace]{restore_workspace} \tab Restores the specified WorkSpace to its last known healthy state\cr
\link[=workspaces_revoke_ip_rules]{revoke_ip_rules} \tab Removes one or more rules from the specified IP access control group\cr
\link[=workspaces_start_workspaces]{start_workspaces} \tab Starts the specified WorkSpaces\cr
\link[=workspaces_stop_workspaces]{stop_workspaces} \tab Stops the specified WorkSpaces\cr
\link[=workspaces_terminate_workspaces]{terminate_workspaces} \tab Terminates the specified WorkSpaces\cr
\link[=workspaces_update_connect_client_add_in]{update_connect_client_add_in} \tab Updates a Amazon Connect client add-in\cr
\link[=workspaces_update_connection_alias_permission]{update_connection_alias_permission} \tab Shares or unshares a connection alias with one account by specifying whether that account has permission to associate the connection alias with a directory\cr
\link[=workspaces_update_rules_of_ip_group]{update_rules_of_ip_group} \tab Replaces the current rules of the specified IP access control group with the specified rules\cr
\link[=workspaces_update_workspace_bundle]{update_workspace_bundle} \tab Updates a WorkSpace bundle with a new image\cr
\link[=workspaces_update_workspace_image_permission]{update_workspace_image_permission} \tab Shares or unshares an image with one account in the same Amazon Web Services Region by specifying whether that account has permission to copy the image
}
}
\examples{
\dontrun{
svc <- workspaces()
svc$associate_connection_alias(
Foo = 123
)
}
}
|
/cran/paws.end.user.computing/man/workspaces.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 14,398
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workspaces_service.R
\name{workspaces}
\alias{workspaces}
\title{Amazon WorkSpaces}
\usage{
workspaces(
config = list(),
credentials = list(),
endpoint = NULL,
region = NULL
)
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{credentials}:} {\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
}}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e. \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
\item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
}}
\item{credentials}{Optional credentials shorthand for the config parameter
\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
}}
\item{endpoint}{Optional shorthand for complete URL to use for the constructed client.}
\item{region}{Optional shorthand for AWS Region used in instantiating the client.}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
Amazon WorkSpaces Service
Amazon WorkSpaces enables you to provision virtual, cloud-based
Microsoft Windows or Amazon Linux desktops for your users, known as
\emph{WorkSpaces}. WorkSpaces eliminates the need to procure and deploy
hardware or install complex software. You can quickly add or remove
users as your needs change. Users can access their virtual desktops from
multiple devices or web browsers.
This API Reference provides detailed information about the actions, data
types, parameters, and errors of the WorkSpaces service. For more
information about the supported Amazon Web Services Regions, endpoints,
and service quotas of the Amazon WorkSpaces service, see \href{https://docs.aws.amazon.com/general/latest/gr/wsp.html}{WorkSpaces endpoints and quotas} in the
\emph{Amazon Web Services General Reference}.
You can also manage your WorkSpaces resources using the WorkSpaces
console, Command Line Interface (CLI), and SDKs. For more information
about administering WorkSpaces, see the \href{https://docs.aws.amazon.com/workspaces/latest/adminguide/}{Amazon WorkSpaces Administration Guide}. For
more information about using the Amazon WorkSpaces client application or
web browser to access provisioned WorkSpaces, see the \href{https://docs.aws.amazon.com/workspaces/latest/userguide/}{Amazon WorkSpaces User Guide}.
For more information about using the CLI to manage your WorkSpaces
resources, see the \href{https://docs.aws.amazon.com/cli/latest/reference/workspaces/index.html}{WorkSpaces section of the CLI Reference}.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- workspaces(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical",
sts_regional_endpoint = "string"
),
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string"
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[=workspaces_associate_connection_alias]{associate_connection_alias} \tab Associates the specified connection alias with the specified directory to enable cross-Region redirection\cr
\link[=workspaces_associate_ip_groups]{associate_ip_groups} \tab Associates the specified IP access control group with the specified directory\cr
\link[=workspaces_authorize_ip_rules]{authorize_ip_rules} \tab Adds one or more rules to the specified IP access control group\cr
\link[=workspaces_copy_workspace_image]{copy_workspace_image} \tab Copies the specified image from the specified Region to the current Region\cr
\link[=workspaces_create_connect_client_add_in]{create_connect_client_add_in} \tab Creates a client-add-in for Amazon Connect within a directory\cr
\link[=workspaces_create_connection_alias]{create_connection_alias} \tab Creates the specified connection alias for use with cross-Region redirection\cr
\link[=workspaces_create_ip_group]{create_ip_group} \tab Creates an IP access control group\cr
\link[=workspaces_create_standby_workspaces]{create_standby_workspaces} \tab Creates a standby WorkSpace in a secondary Region\cr
\link[=workspaces_create_tags]{create_tags} \tab Creates the specified tags for the specified WorkSpaces resource\cr
\link[=workspaces_create_updated_workspace_image]{create_updated_workspace_image} \tab Creates a new updated WorkSpace image based on the specified source image\cr
\link[=workspaces_create_workspace_bundle]{create_workspace_bundle} \tab Creates the specified WorkSpace bundle\cr
\link[=workspaces_create_workspace_image]{create_workspace_image} \tab Creates a new WorkSpace image from an existing WorkSpace\cr
\link[=workspaces_create_workspaces]{create_workspaces} \tab Creates one or more WorkSpaces\cr
\link[=workspaces_delete_client_branding]{delete_client_branding} \tab Deletes customized client branding\cr
\link[=workspaces_delete_connect_client_add_in]{delete_connect_client_add_in} \tab Deletes a client-add-in for Amazon Connect that is configured within a directory\cr
\link[=workspaces_delete_connection_alias]{delete_connection_alias} \tab Deletes the specified connection alias\cr
\link[=workspaces_delete_ip_group]{delete_ip_group} \tab Deletes the specified IP access control group\cr
\link[=workspaces_delete_tags]{delete_tags} \tab Deletes the specified tags from the specified WorkSpaces resource\cr
\link[=workspaces_delete_workspace_bundle]{delete_workspace_bundle} \tab Deletes the specified WorkSpace bundle\cr
\link[=workspaces_delete_workspace_image]{delete_workspace_image} \tab Deletes the specified image from your account\cr
\link[=workspaces_deregister_workspace_directory]{deregister_workspace_directory} \tab Deregisters the specified directory\cr
\link[=workspaces_describe_account]{describe_account} \tab Retrieves a list that describes the configuration of Bring Your Own License (BYOL) for the specified account\cr
\link[=workspaces_describe_account_modifications]{describe_account_modifications} \tab Retrieves a list that describes modifications to the configuration of Bring Your Own License (BYOL) for the specified account\cr
\link[=workspaces_describe_client_branding]{describe_client_branding} \tab Describes the specified client branding\cr
\link[=workspaces_describe_client_properties]{describe_client_properties} \tab Retrieves a list that describes one or more specified Amazon WorkSpaces clients\cr
\link[=workspaces_describe_connect_client_add_ins]{describe_connect_client_add_ins} \tab Retrieves a list of Amazon Connect client add-ins that have been created\cr
\link[=workspaces_describe_connection_aliases]{describe_connection_aliases} \tab Retrieves a list that describes the connection aliases used for cross-Region redirection\cr
\link[=workspaces_describe_connection_alias_permissions]{describe_connection_alias_permissions} \tab Describes the permissions that the owner of a connection alias has granted to another Amazon Web Services account for the specified connection alias\cr
\link[=workspaces_describe_ip_groups]{describe_ip_groups} \tab Describes one or more of your IP access control groups\cr
\link[=workspaces_describe_tags]{describe_tags} \tab Describes the specified tags for the specified WorkSpaces resource\cr
\link[=workspaces_describe_workspace_bundles]{describe_workspace_bundles} \tab Retrieves a list that describes the available WorkSpace bundles\cr
\link[=workspaces_describe_workspace_directories]{describe_workspace_directories} \tab Describes the available directories that are registered with Amazon WorkSpaces\cr
\link[=workspaces_describe_workspace_image_permissions]{describe_workspace_image_permissions} \tab Describes the permissions that the owner of an image has granted to other Amazon Web Services accounts for an image\cr
\link[=workspaces_describe_workspace_images]{describe_workspace_images} \tab Retrieves a list that describes one or more specified images, if the image identifiers are provided\cr
\link[=workspaces_describe_workspaces]{describe_workspaces} \tab Describes the specified WorkSpaces\cr
\link[=workspaces_describe_workspaces_connection_status]{describe_workspaces_connection_status} \tab Describes the connection status of the specified WorkSpaces\cr
\link[=workspaces_describe_workspace_snapshots]{describe_workspace_snapshots} \tab Describes the snapshots for the specified WorkSpace\cr
\link[=workspaces_disassociate_connection_alias]{disassociate_connection_alias} \tab Disassociates a connection alias from a directory\cr
\link[=workspaces_disassociate_ip_groups]{disassociate_ip_groups} \tab Disassociates the specified IP access control group from the specified directory\cr
\link[=workspaces_import_client_branding]{import_client_branding} \tab Imports client branding\cr
\link[=workspaces_import_workspace_image]{import_workspace_image} \tab Imports the specified Windows 10 or 11 Bring Your Own License (BYOL) image into Amazon WorkSpaces\cr
\link[=workspaces_list_available_management_cidr_ranges]{list_available_management_cidr_ranges} \tab Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you can use for the network management interface when you enable Bring Your Own License (BYOL)\cr
\link[=workspaces_migrate_workspace]{migrate_workspace} \tab Migrates a WorkSpace from one operating system or bundle type to another, while retaining the data on the user volume\cr
\link[=workspaces_modify_account]{modify_account} \tab Modifies the configuration of Bring Your Own License (BYOL) for the specified account\cr
\link[=workspaces_modify_certificate_based_auth_properties]{modify_certificate_based_auth_properties} \tab Modifies the properties of the certificate-based authentication you want to use with your WorkSpaces\cr
\link[=workspaces_modify_client_properties]{modify_client_properties} \tab Modifies the properties of the specified Amazon WorkSpaces clients\cr
\link[=workspaces_modify_saml_properties]{modify_saml_properties} \tab Modifies multiple properties related to SAML 2\cr
\link[=workspaces_modify_selfservice_permissions]{modify_selfservice_permissions} \tab Modifies the self-service WorkSpace management capabilities for your users\cr
\link[=workspaces_modify_workspace_access_properties]{modify_workspace_access_properties} \tab Specifies which devices and operating systems users can use to access their WorkSpaces\cr
\link[=workspaces_modify_workspace_creation_properties]{modify_workspace_creation_properties} \tab Modify the default properties used to create WorkSpaces\cr
\link[=workspaces_modify_workspace_properties]{modify_workspace_properties} \tab Modifies the specified WorkSpace properties\cr
\link[=workspaces_modify_workspace_state]{modify_workspace_state} \tab Sets the state of the specified WorkSpace\cr
\link[=workspaces_reboot_workspaces]{reboot_workspaces} \tab Reboots the specified WorkSpaces\cr
\link[=workspaces_rebuild_workspaces]{rebuild_workspaces} \tab Rebuilds the specified WorkSpace\cr
\link[=workspaces_register_workspace_directory]{register_workspace_directory} \tab Registers the specified directory\cr
\link[=workspaces_restore_workspace]{restore_workspace} \tab Restores the specified WorkSpace to its last known healthy state\cr
\link[=workspaces_revoke_ip_rules]{revoke_ip_rules} \tab Removes one or more rules from the specified IP access control group\cr
\link[=workspaces_start_workspaces]{start_workspaces} \tab Starts the specified WorkSpaces\cr
\link[=workspaces_stop_workspaces]{stop_workspaces} \tab Stops the specified WorkSpaces\cr
\link[=workspaces_terminate_workspaces]{terminate_workspaces} \tab Terminates the specified WorkSpaces\cr
\link[=workspaces_update_connect_client_add_in]{update_connect_client_add_in} \tab Updates an Amazon Connect client add-in\cr
\link[=workspaces_update_connection_alias_permission]{update_connection_alias_permission} \tab Shares or unshares a connection alias with one account by specifying whether that account has permission to associate the connection alias with a directory\cr
\link[=workspaces_update_rules_of_ip_group]{update_rules_of_ip_group} \tab Replaces the current rules of the specified IP access control group with the specified rules\cr
\link[=workspaces_update_workspace_bundle]{update_workspace_bundle} \tab Updates a WorkSpace bundle with a new image\cr
\link[=workspaces_update_workspace_image_permission]{update_workspace_image_permission} \tab Shares or unshares an image with one account in the same Amazon Web Services Region by specifying whether that account has permission to copy the image
}
}
\examples{
\dontrun{
svc <- workspaces()
svc$associate_connection_alias(
Foo = 123
)
}
}
|
# Fit a 2-feature logistic regression classifier by IRLS (iteratively
# reweighted least squares, i.e. Newton-Raphson on the log-likelihood).
#
# train: list/data frame providing y (0/1 labels) and features x1, x2.
# Returns a list with
#   layers:    a ggplot2 geom_abline drawing the decision boundary
#   posterior: function(x) giving P(y = 1 | x) for one feature pair x
LOG = function (train) with(train,
{
  n = length(y)
  x = cbind(rep(1, n), x1, x2)        # design matrix with intercept column
  logistic = function(x) 1/(1 + exp(-x))
  posterior = function(x) logistic(t(theta) %*% c(1, x))
  theta = matrix(0, 3)
  losses = numeric(20)                # preallocated instead of grown with c()
  for(i in 1:20)
  {
    eta = x %*% theta
    mu = logistic(eta)
    # IRLS weights kept as a plain vector: the original built and inverted
    # the n x n matrix diag(mu*(1-mu)), which is O(n^2) memory and O(n^3)
    # time; elementwise ops below are mathematically identical.
    w = c(mu * (1 - mu))
    z = eta + (y - mu) / w            # working response: eta + W^{-1} (y - mu)
    xw = x * w                        # scales row i by w[i], so t(xw) %*% x == t(x) W x
    theta = solve(t(xw) %*% x) %*% t(xw) %*% z
    j = t(x) %*% (y - mu)             # score (gradient of the log-likelihood)
    losses[i] = c(t(j) %*% j)         # squared gradient norm, for convergence monitoring
  }
  # Decision boundary: posterior = 0.5 exactly when theta . (1, x1, x2) = 0,
  # hence intercept -theta[1]/theta[3] with no extra 0.5 offset.
  layers = c(geom_abline(slope = -theta[2]/theta[3], intercept = -(theta[1])/theta[3]))
  list(layers = layers, posterior = posterior)
})
|
/linear_logistic_lda_qda/log.r
|
no_license
|
vadimkantorov/mva
|
R
| false
| false
| 656
|
r
|
# Fit a 2-feature logistic regression classifier by IRLS (iteratively
# reweighted least squares, i.e. Newton-Raphson on the log-likelihood).
#
# train: list/data frame providing y (0/1 labels) and features x1, x2.
# Returns a list with
#   layers:    a ggplot2 geom_abline drawing the decision boundary
#   posterior: function(x) giving P(y = 1 | x) for one feature pair x
LOG = function (train) with(train,
{
  n = length(y)
  x = cbind(rep(1, n), x1, x2)        # design matrix with intercept column
  logistic = function(x) 1/(1 + exp(-x))
  posterior = function(x) logistic(t(theta) %*% c(1, x))
  theta = matrix(0, 3)
  losses = numeric(20)                # preallocated instead of grown with c()
  for(i in 1:20)
  {
    eta = x %*% theta
    mu = logistic(eta)
    # IRLS weights kept as a plain vector: the original built and inverted
    # the n x n matrix diag(mu*(1-mu)), which is O(n^2) memory and O(n^3)
    # time; elementwise ops below are mathematically identical.
    w = c(mu * (1 - mu))
    z = eta + (y - mu) / w            # working response: eta + W^{-1} (y - mu)
    xw = x * w                        # scales row i by w[i], so t(xw) %*% x == t(x) W x
    theta = solve(t(xw) %*% x) %*% t(xw) %*% z
    j = t(x) %*% (y - mu)             # score (gradient of the log-likelihood)
    losses[i] = c(t(j) %*% j)         # squared gradient norm, for convergence monitoring
  }
  # Decision boundary: posterior = 0.5 exactly when theta . (1, x1, x2) = 0,
  # hence intercept -theta[1]/theta[3] with no extra 0.5 offset.
  layers = c(geom_abline(slope = -theta[2]/theta[3], intercept = -(theta[1])/theta[3]))
  list(layers = layers, posterior = posterior)
})
|
# Fuzzer-generated regression input for the compiled routine
# myTAI:::cpp_bootMatrix (AFL/valgrind harness capture; the extreme double
# values and large permutation count are intentional stress inputs).
# ExpressionMatrix is a 5 x 7 numeric matrix; AgeVector has 36 entries.
# NOTE(review): the dimensions the native routine expects are not visible
# here -- confirm against the myTAI source if the call errors.
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 2.79437906543198e-115, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
# Run the native routine on the recorded arguments; completing without a
# crash/valgrind error is the point of the test.  str() prints the result.
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result)
|
/myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615764589-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 1,804
|
r
|
# Fuzzer-generated regression input for the compiled routine
# myTAI:::cpp_bootMatrix (AFL/valgrind harness capture; the extreme double
# values and large permutation count are intentional stress inputs).
# ExpressionMatrix is a 5 x 7 numeric matrix; AgeVector has 36 entries.
# NOTE(review): the dimensions the native routine expects are not visible
# here -- confirm against the myTAI source if the call errors.
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 2.79437906543198e-115, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
# Run the native routine on the recorded arguments; completing without a
# crash/valgrind error is the point of the test.  str() prints the result.
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result)
|
# Gibbs sampler for the mean vector and covariance matrix of the blue crab
# measurements (semiconjugate normal / inverse-Wishart model; data from
# Hoff, "A First Course in Bayesian Statistical Methods").
library(MCMCpack)
library(mvtnorm)
mydata1 = read.table("http://www.stat.washington.edu/hoff/Book/Data/hwdata/bluecrab.dat")
mydata2 = read.table("http://www.stat.washington.edu/hoff/Book/Data/hwdata/orangecrab.dat")
V1 <- mydata1[,1]
V2 <- mydata1[,2]
m1 <- mean(V1)
m2 <- mean(V2)
# Prior: mean centred at the sample mean; prior covariance L0 and Wishart
# scale S0 both set to the sample covariance matrix; nu0 = 4 d.o.f.
mu0<-c(m1,m2)
L0<- matrix(c(var(V1),cov(V1,V2),cov(V1,V2),var(V2)),nrow=2,ncol=2)
nu0<-4
S0<-matrix(c(var(V1),cov(V1,V2),cov(V1,V2),var(V2)),nrow=2,ncol=2)
n <- dim(mydata1)[1]
ybar <- apply(mydata1,2,mean)
Sigma <- cov(mydata1)   # initial covariance value for the chain
N <- 10000
# Storage.  Fixes in this revision:
#  - THETA/SIGMA are matrices: each draw has length 2 / 4, and assigning it
#    into a single scalar slot truncated the draw (with a warning).
#  - cor_blue is preallocated: it was previously grown with rbind() from an
#    object that was never initialised, which errors on the first iteration.
#  - the unused BlueCov vector was removed.
THETA <- matrix(0, N, 2)
SIGMA <- matrix(0, N, 4)
cor_blue <- rep(0, N)
ss1 <- rep(0,N)
ss2 <- rep(0,N)
ss3 <- rep(0,N)
ss4 <- rep(0,N)
for (i in 1:N){
  ### update theta | Sigma (multivariate normal full conditional)
  Ln <- solve(solve(L0) + n*solve(Sigma))
  mun <- Ln%*%(solve(L0)%*%mu0 + n*solve(Sigma)%*%ybar)
  theta <- rmvnorm(1,mun,Ln)
  ### update Sigma | theta (inverse-Wishart full conditional)
  Sn <- S0 + (t(mydata1) - c(theta))%*% t(t(mydata1) - c(theta))
  sigma <- solve(rwish(nu0+n,solve(Sn)))
  # Feed the current draw back into the next theta update: the original
  # kept using the fixed sample covariance, so the chain never mixed over
  # Sigma at all.
  Sigma <- sigma
  ss1[i] <- sigma[1,1]
  ss2[i] <- sigma[2,1]
  ss3[i] <- sigma[1,2]
  ss4[i] <- sigma[2,2]
  ### save results
  THETA[i,] <- theta
  SIGMA[i,] <- c(sigma)
  ## correlation of the two measurements implied by this draw
  cor_blue[i] <- sigma[1,2]/sqrt(sigma[1,1]*sigma[2,2])
}
plot(THETA)               # joint scatter of the posterior mean draws
plot(density(cor_blue))   # posterior of the correlation (was: undefined BLUE)
plot(density(ss1))
plot(density(ss2))
plot(density(ss3))
plot(density(ss4))
|
/lab7.R
|
no_license
|
xiechengen/BayesianStat
|
R
| false
| false
| 1,327
|
r
|
# Gibbs sampler for the mean vector and covariance matrix of the blue crab
# measurements (semiconjugate normal / inverse-Wishart model; data from
# Hoff, "A First Course in Bayesian Statistical Methods").
library(MCMCpack)
library(mvtnorm)
mydata1 = read.table("http://www.stat.washington.edu/hoff/Book/Data/hwdata/bluecrab.dat")
mydata2 = read.table("http://www.stat.washington.edu/hoff/Book/Data/hwdata/orangecrab.dat")
V1 <- mydata1[,1]
V2 <- mydata1[,2]
m1 <- mean(V1)
m2 <- mean(V2)
# Prior: mean centred at the sample mean; prior covariance L0 and Wishart
# scale S0 both set to the sample covariance matrix; nu0 = 4 d.o.f.
mu0<-c(m1,m2)
L0<- matrix(c(var(V1),cov(V1,V2),cov(V1,V2),var(V2)),nrow=2,ncol=2)
nu0<-4
S0<-matrix(c(var(V1),cov(V1,V2),cov(V1,V2),var(V2)),nrow=2,ncol=2)
n <- dim(mydata1)[1]
ybar <- apply(mydata1,2,mean)
Sigma <- cov(mydata1)   # initial covariance value for the chain
N <- 10000
# Storage.  Fixes in this revision:
#  - THETA/SIGMA are matrices: each draw has length 2 / 4, and assigning it
#    into a single scalar slot truncated the draw (with a warning).
#  - cor_blue is preallocated: it was previously grown with rbind() from an
#    object that was never initialised, which errors on the first iteration.
#  - the unused BlueCov vector was removed.
THETA <- matrix(0, N, 2)
SIGMA <- matrix(0, N, 4)
cor_blue <- rep(0, N)
ss1 <- rep(0,N)
ss2 <- rep(0,N)
ss3 <- rep(0,N)
ss4 <- rep(0,N)
for (i in 1:N){
  ### update theta | Sigma (multivariate normal full conditional)
  Ln <- solve(solve(L0) + n*solve(Sigma))
  mun <- Ln%*%(solve(L0)%*%mu0 + n*solve(Sigma)%*%ybar)
  theta <- rmvnorm(1,mun,Ln)
  ### update Sigma | theta (inverse-Wishart full conditional)
  Sn <- S0 + (t(mydata1) - c(theta))%*% t(t(mydata1) - c(theta))
  sigma <- solve(rwish(nu0+n,solve(Sn)))
  # Feed the current draw back into the next theta update: the original
  # kept using the fixed sample covariance, so the chain never mixed over
  # Sigma at all.
  Sigma <- sigma
  ss1[i] <- sigma[1,1]
  ss2[i] <- sigma[2,1]
  ss3[i] <- sigma[1,2]
  ss4[i] <- sigma[2,2]
  ### save results
  THETA[i,] <- theta
  SIGMA[i,] <- c(sigma)
  ## correlation of the two measurements implied by this draw
  cor_blue[i] <- sigma[1,2]/sqrt(sigma[1,1]*sigma[2,2])
}
plot(THETA)               # joint scatter of the posterior mean draws
plot(density(cor_blue))   # posterior of the correlation (was: undefined BLUE)
plot(density(ss1))
plot(density(ss2))
plot(density(ss3))
plot(density(ss4))
|
# Fit an eps-regression SVM to the monthly banknote outflow series for
# Surabaya (Jan 1994 - Dec 2017) and plot the fit against the observations.
library(forecast)
library(fable)
library(forecastHybrid)
library(nnet)
library(readxl)
library(e1071)
library(Metrics)
Dataset_Surabaya <- read_excel("C:/Users/asus/OneDrive - Institut Teknologi Sepuluh Nopember/Kuliah/Thesis/Dataset_Surabaya.xlsx")
# NOTE(review): the variable name says 10000 but the column read is K5000 --
# confirm which denomination is intended.
data_outflow_10000<-data.frame(y=Dataset_Surabaya[["K5000"]])
myts <- ts(data_outflow_10000,start=c(1994, 1), end=c(2017, 12), frequency=12)
components.ts = decompose(myts)    # classical trend/seasonal decomposition
plot(components.ts)
# Time index derived from the series length instead of the hard-coded 288;
# the unused `months <- 1:288` variable was removed.
df.myts<-data.frame(x=seq_along(myts),y=as.numeric(myts))
svmodel <- svm(y ~ x,data=df.myts, type="eps-regression",kernel="radial",cost=10000, gamma=10)
rmse(df.myts$y,svmodel$fitted)     # in-sample fit quality
plot(df.myts$y, col="green", type="o")
points(svmodel$fitted, col="red", pch="*")
|
/svm_only_bank.R
|
no_license
|
vcarlsberg/arimar
|
R
| false
| false
| 720
|
r
|
# Fit an eps-regression SVM to the monthly banknote outflow series for
# Surabaya (Jan 1994 - Dec 2017) and plot the fit against the observations.
library(forecast)
library(fable)
library(forecastHybrid)
library(nnet)
library(readxl)
library(e1071)
library(Metrics)
Dataset_Surabaya <- read_excel("C:/Users/asus/OneDrive - Institut Teknologi Sepuluh Nopember/Kuliah/Thesis/Dataset_Surabaya.xlsx")
# NOTE(review): the variable name says 10000 but the column read is K5000 --
# confirm which denomination is intended.
data_outflow_10000<-data.frame(y=Dataset_Surabaya[["K5000"]])
myts <- ts(data_outflow_10000,start=c(1994, 1), end=c(2017, 12), frequency=12)
components.ts = decompose(myts)    # classical trend/seasonal decomposition
plot(components.ts)
# Time index derived from the series length instead of the hard-coded 288;
# the unused `months <- 1:288` variable was removed.
df.myts<-data.frame(x=seq_along(myts),y=as.numeric(myts))
svmodel <- svm(y ~ x,data=df.myts, type="eps-regression",kernel="radial",cost=10000, gamma=10)
rmse(df.myts$y,svmodel$fitted)     # in-sample fit quality
plot(df.myts$y, col="green", type="o")
points(svmodel$fitted, col="red", pch="*")
|
# Return the index of the constraint (face) with the largest slack at x,
# i.e. the row i of constr$constr maximising a_i . x - b_i.
findFace <- function(x, constr) {
  A <- constr$constr
  stopifnot(ncol(A) == length(x))
  slack <- drop(A %*% x) - constr$rhs
  which.max(slack)
}
# Validate and normalise a polytope description for the samplers.
#
# The polytope is {x : constr$constr %*% x <= constr$rhs}.  x0 may be NULL
# when the caller does not need a starting point (e.g. bbReject).
#
# Returns a list with:
#   n         dimension of the sampling space (reduced by 1 if homogeneous)
#   m         number of constraints
#   x0        starting point in the (possibly reduced) space
#   constr    constraints in the (possibly reduced) space
#   transform function mapping a matrix of raw samples back to the caller's
#             coordinates (applies `transform` when given)
#   xN        function extracting the final raw sample, so a chain can be
#             restarted where it left off
checkPolytope <- function(x0, constr, homogeneous, transform) {
n <- if (!is.null(x0)) length(x0) else ncol(constr$constr)
m <- nrow(constr$constr)
# Verify preconditions
stopifnot(n > homogeneous)
stopifnot(n == ncol(constr$constr))
stopifnot(m == length(constr$rhs))
stopifnot(constr$dir == "<=")
if (homogeneous) { # Change to homogeneous coordinates
# Last coordinate must be the homogeneous 1; it is stripped off below by
# folding the last constraint column into the right-hand side.
# (With x0 = NULL this stopifnot checks logical(0), which passes.)
stopifnot(x0[n] == 1.0)
list(n = n - 1,
m = m,
x0 = if (is.null(x0)) x0 else x0[1:(n - 1)],
constr = list(constr = constr$constr[ , 1:(n - 1), drop=FALSE],
rhs = constr$rhs - constr$constr[ , n, drop=TRUE],
dir = constr$dir),
transform = function(samples) {
# Affine map back to caller coordinates; without an explicit transform,
# just restore the homogeneous 1 as an extra column.
if (!is.null(transform)) {
mat <- samples %*% t(transform[ , 1:(n - 1), drop=FALSE])
t(t(mat) + transform[ , n, drop=TRUE])
} else {
cbind(samples, 1)
}
},
xN = function(samples) {
# Last raw sample, with the homogeneous coordinate restored
c(samples[nrow(samples), , drop=TRUE], 1)
})
} else {
list(n = n,
m = m,
x0 = x0,
constr = constr,
transform = function(samples) {
# Linear map back to caller coordinates, or identity
if (!is.null(transform)) {
samples %*% t(transform)
} else {
samples
}
},
xN = function(samples) {
samples[nrow(samples), , drop=TRUE]
})
}
}
# Hit-and-run sampler over the interior of the polytope.
#
# x0: starting point inside the polytope; constr: "<=" constraints;
# N: iterations; thin: keep every thin-th sample (N must be a multiple of
# thin); homogeneous/transform: see checkPolytope.
# Returns list(samples, xN); xN can seed a continuation run.
har <- function(x0, constr, N, thin=1, homogeneous=FALSE, transform=NULL) {
stopifnot(N %% thin == 0)
args <- checkPolytope(x0, constr, homogeneous, transform)
# The sampling itself is done in compiled code
rval <- .Call(hitandrun_har, args$x0, args$constr$constr, args$constr$rhs, N, thin)
list(samples=args$transform(rval),
xN=args$xN(rval))
}
# Shake-and-bake sampler over the boundary of the polytope.
#
# x0: point on the boundary; i0: index of the face it lies on; remaining
# arguments as in har().  Returns list(samples, xN, faces, iN) where faces
# gives the face index of each sample and iN/xN seed a continuation run.
sab <- function(x0, i0, constr, N, thin=1, homogeneous=FALSE, transform=NULL) {
  stopifnot(N %% thin == 0)
  args <- checkPolytope(x0, constr, homogeneous, transform)
  constr <- args$constr
  # Normalize the constraint rows to unit length (required for
  # shake-and-bake).  All rows at once via rowSums + recycling; this also
  # fixes the previous `for (i in 1:args$m)` loop, which iterated over
  # c(1, 0) when there were zero constraints.
  norms <- sqrt(rowSums(constr$constr^2))
  constr$constr <- constr$constr / norms   # divides row i by norms[i]
  constr$rhs <- constr$rhs / norms
  rval <- .Call(hitandrun_sab, args$x0, i0, constr$constr, constr$rhs, N, thin)
  list(samples=args$transform(rval[[1]]),
       xN=args$xN(rval[[1]]),
       faces=rval[[2]],
       iN=rval[[2]][length(rval[[2]])])
}
# Rejection sampler: draw uniformly from the bounding box [lb, ub] and keep
# the points that satisfy the constraints (done in compiled code).
# Returns the accepted samples (mapped through `transform` when given) and
# the observed rejection rate.
bbReject <- function(lb, ub, constr, N, homogeneous=FALSE, transform=NULL) {
args <- checkPolytope(NULL, constr, homogeneous, transform)
stopifnot(args$n == length(lb))
stopifnot(args$n == length(ub))
rval <- .Call(hitandrun_bbReject, lb, ub, args$constr$constr, args$constr$rhs, N)
list(samples=args$transform(rval[[1]]),
rejectionRate=rval[[2]])
}
# Draw N points from the unit simplex in n dimensions via the compiled
# routine; when sort is TRUE the coordinates of each point are sorted.
# (Presumably uniform sampling -- confirm against the package docs.)
simplex.sample <- function(n, N, sort=FALSE) {
samples <- .Call(hitandrun_simplexSample, n, sort, N);
list(samples=samples)
}
# Draw N points from the unit hypersphere in n dimensions via the compiled
# routine.  (Presumably uniform on the surface -- confirm in package docs.)
hypersphere.sample <- function(n, N) {
.Call(hitandrun_hypersphereSample, n, N)
}
|
/hitandrun/R/sample.R
|
no_license
|
gertvv/hitandrun
|
R
| false
| false
| 3,103
|
r
|
# Return the index of the constraint (face) with the largest slack at x,
# i.e. the row i of constr$constr maximising a_i . x - b_i.
findFace <- function(x, constr) {
  A <- constr$constr
  stopifnot(ncol(A) == length(x))
  slack <- drop(A %*% x) - constr$rhs
  which.max(slack)
}
# Validate and normalise a polytope description for the samplers.
#
# The polytope is {x : constr$constr %*% x <= constr$rhs}.  x0 may be NULL
# when the caller does not need a starting point (e.g. bbReject).
#
# Returns a list with:
#   n         dimension of the sampling space (reduced by 1 if homogeneous)
#   m         number of constraints
#   x0        starting point in the (possibly reduced) space
#   constr    constraints in the (possibly reduced) space
#   transform function mapping a matrix of raw samples back to the caller's
#             coordinates (applies `transform` when given)
#   xN        function extracting the final raw sample, so a chain can be
#             restarted where it left off
checkPolytope <- function(x0, constr, homogeneous, transform) {
n <- if (!is.null(x0)) length(x0) else ncol(constr$constr)
m <- nrow(constr$constr)
# Verify preconditions
stopifnot(n > homogeneous)
stopifnot(n == ncol(constr$constr))
stopifnot(m == length(constr$rhs))
stopifnot(constr$dir == "<=")
if (homogeneous) { # Change to homogeneous coordinates
# Last coordinate must be the homogeneous 1; it is stripped off below by
# folding the last constraint column into the right-hand side.
# (With x0 = NULL this stopifnot checks logical(0), which passes.)
stopifnot(x0[n] == 1.0)
list(n = n - 1,
m = m,
x0 = if (is.null(x0)) x0 else x0[1:(n - 1)],
constr = list(constr = constr$constr[ , 1:(n - 1), drop=FALSE],
rhs = constr$rhs - constr$constr[ , n, drop=TRUE],
dir = constr$dir),
transform = function(samples) {
# Affine map back to caller coordinates; without an explicit transform,
# just restore the homogeneous 1 as an extra column.
if (!is.null(transform)) {
mat <- samples %*% t(transform[ , 1:(n - 1), drop=FALSE])
t(t(mat) + transform[ , n, drop=TRUE])
} else {
cbind(samples, 1)
}
},
xN = function(samples) {
# Last raw sample, with the homogeneous coordinate restored
c(samples[nrow(samples), , drop=TRUE], 1)
})
} else {
list(n = n,
m = m,
x0 = x0,
constr = constr,
transform = function(samples) {
# Linear map back to caller coordinates, or identity
if (!is.null(transform)) {
samples %*% t(transform)
} else {
samples
}
},
xN = function(samples) {
samples[nrow(samples), , drop=TRUE]
})
}
}
# Hit-and-run sampler over the interior of the polytope.
#
# x0: starting point inside the polytope; constr: "<=" constraints;
# N: iterations; thin: keep every thin-th sample (N must be a multiple of
# thin); homogeneous/transform: see checkPolytope.
# Returns list(samples, xN); xN can seed a continuation run.
har <- function(x0, constr, N, thin=1, homogeneous=FALSE, transform=NULL) {
stopifnot(N %% thin == 0)
args <- checkPolytope(x0, constr, homogeneous, transform)
# The sampling itself is done in compiled code
rval <- .Call(hitandrun_har, args$x0, args$constr$constr, args$constr$rhs, N, thin)
list(samples=args$transform(rval),
xN=args$xN(rval))
}
# Shake-and-bake sampler over the boundary of the polytope.
#
# x0: point on the boundary; i0: index of the face it lies on; remaining
# arguments as in har().  Returns list(samples, xN, faces, iN) where faces
# gives the face index of each sample and iN/xN seed a continuation run.
sab <- function(x0, i0, constr, N, thin=1, homogeneous=FALSE, transform=NULL) {
  stopifnot(N %% thin == 0)
  args <- checkPolytope(x0, constr, homogeneous, transform)
  constr <- args$constr
  # Normalize the constraint rows to unit length (required for
  # shake-and-bake).  All rows at once via rowSums + recycling; this also
  # fixes the previous `for (i in 1:args$m)` loop, which iterated over
  # c(1, 0) when there were zero constraints.
  norms <- sqrt(rowSums(constr$constr^2))
  constr$constr <- constr$constr / norms   # divides row i by norms[i]
  constr$rhs <- constr$rhs / norms
  rval <- .Call(hitandrun_sab, args$x0, i0, constr$constr, constr$rhs, N, thin)
  list(samples=args$transform(rval[[1]]),
       xN=args$xN(rval[[1]]),
       faces=rval[[2]],
       iN=rval[[2]][length(rval[[2]])])
}
# Rejection sampler: draw uniformly from the bounding box [lb, ub] and keep
# the points that satisfy the constraints (done in compiled code).
# Returns the accepted samples (mapped through `transform` when given) and
# the observed rejection rate.
bbReject <- function(lb, ub, constr, N, homogeneous=FALSE, transform=NULL) {
args <- checkPolytope(NULL, constr, homogeneous, transform)
stopifnot(args$n == length(lb))
stopifnot(args$n == length(ub))
rval <- .Call(hitandrun_bbReject, lb, ub, args$constr$constr, args$constr$rhs, N)
list(samples=args$transform(rval[[1]]),
rejectionRate=rval[[2]])
}
# Draw N points from the unit simplex in n dimensions via the compiled
# routine; when sort is TRUE the coordinates of each point are sorted.
# (Presumably uniform sampling -- confirm against the package docs.)
simplex.sample <- function(n, N, sort=FALSE) {
samples <- .Call(hitandrun_simplexSample, n, sort, N);
list(samples=samples)
}
# Draw N points from the unit hypersphere in n dimensions via the compiled
# routine.  (Presumably uniform on the surface -- confirm in package docs.)
hypersphere.sample <- function(n, N) {
.Call(hitandrun_hypersphereSample, n, N)
}
|
# Plot 3 of the Electric Power Consumption assignment: the three energy
# sub-metering channels for 1-2 Feb 2007, overlaid, saved to plot3.png.
setwd("C:/Users/FM/Documents/Personal/Data_Science_Coursera/Course-4/ExData_Plotting1-master")
file<-read.table(file = "household_power_consumption.txt",sep = ";")
# The first row holds the column names: promote it, drop it, then convert
# every column from character to its natural type.
# NOTE(review): if the raw data uses "?" for missing values, type.convert
# will leave such columns non-numeric -- verify against the source file.
names(file)<-as.matrix(file[1,])
file<-file[-1,]
file[]<-lapply(file,function(x) type.convert(as.character(x)))
subset1<-subset(file, file$Date=="1/2/2007"|file$Date=="2/2/2007")
datetime<-as.POSIXct(paste(as.Date(subset1$Date, "%d/%m/%Y"), subset1$Time))
plot(datetime, as.numeric(subset1$Sub_metering_1), type = "l", xlab = "", ylab = "Energy Sub Metering", col="black")
# lines() does not take xlab/ylab, so those arguments were dropped from the
# original calls; the axis labels come from the initial plot().
lines(datetime, as.numeric(subset1$Sub_metering_2), type = "l", col="red")
lines(datetime, as.numeric(subset1$Sub_metering_3), type = "l", col="blue")
# lty = 1 (not the recycled 1:2 of the original): all three drawn lines are
# solid, so the legend keys must be solid too.
legend("topleft", legend = c("Sub_Metering_1", "Sub_Metering_2", "Sub_Metering_3"), col = c("black","red","blue"), lty = 1, cex=0.8)
dev.copy(png,'plot3.png')   # copy the on-screen plot to a PNG device
dev.off()
|
/plot3.R
|
no_license
|
dikshajain/ExData_Plotting1
|
R
| false
| false
| 968
|
r
|
# Plot 3 of the Electric Power Consumption assignment: the three energy
# sub-metering channels for 1-2 Feb 2007, overlaid, saved to plot3.png.
setwd("C:/Users/FM/Documents/Personal/Data_Science_Coursera/Course-4/ExData_Plotting1-master")
file<-read.table(file = "household_power_consumption.txt",sep = ";")
# The first row holds the column names: promote it, drop it, then convert
# every column from character to its natural type.
# NOTE(review): if the raw data uses "?" for missing values, type.convert
# will leave such columns non-numeric -- verify against the source file.
names(file)<-as.matrix(file[1,])
file<-file[-1,]
file[]<-lapply(file,function(x) type.convert(as.character(x)))
subset1<-subset(file, file$Date=="1/2/2007"|file$Date=="2/2/2007")
datetime<-as.POSIXct(paste(as.Date(subset1$Date, "%d/%m/%Y"), subset1$Time))
plot(datetime, as.numeric(subset1$Sub_metering_1), type = "l", xlab = "", ylab = "Energy Sub Metering", col="black")
# lines() does not take xlab/ylab, so those arguments were dropped from the
# original calls; the axis labels come from the initial plot().
lines(datetime, as.numeric(subset1$Sub_metering_2), type = "l", col="red")
lines(datetime, as.numeric(subset1$Sub_metering_3), type = "l", col="blue")
# lty = 1 (not the recycled 1:2 of the original): all three drawn lines are
# solid, so the legend keys must be solid too.
legend("topleft", legend = c("Sub_Metering_1", "Sub_Metering_2", "Sub_Metering_3"), col = c("black","red","blue"), lty = 1, cex=0.8)
dev.copy(png,'plot3.png')   # copy the on-screen plot to a PNG device
dev.off()
|
#!/usr/bin/env Rscript
# Mosaic cropped Proba-V chunk GeoTIFFs into a single file without running
# out of RAM: build mosaics in batches of 100 chunks, then mosaic those.
library(optparse)
library(gdalUtils)
parser = OptionParser()
parser = add_option(parser, c("-t", "--tile"), type="character", default="X16Y06",
    help="Proba-V tile to process. (Default: %default)", metavar="tile")
parser = add_option(parser, c("-v", "--vegetation-index"), type="character", default="NDMI",
    help="Vegetation index to process. Case sensitive to input files. (Default: %default)", metavar="VI")
args = parse_args(parser)
OutputDir = file.path("/data/users/Public/greatemerald/modis/breaks", args[["vegetation-index"]], args[["tile"]])
OutputFile = file.path(OutputDir, "breaks-order3.tif")
ChunkNames = list.files(OutputDir, pattern=glob2rx("Output_Chunk*.tif"), full.names=TRUE)
# Guard both ends of the supported range: zero chunks previously fell
# through to a `1:0` loop that indexed nonexistent files.
if (length(ChunkNames) == 0)
    stop("No Output_Chunk*.tif files found in ", OutputDir)
if (length(ChunkNames) > 100*100)
    stop("Mosaicking over 10k files is not implemented.")
# Mosaic in steps of 100 chunks
NumBatches = ceiling(length(ChunkNames) / 100)
MosaicNames = character(NumBatches)   # preallocated instead of grown with c()
for (Centenary in seq_len(NumBatches))
{
    StartIndex = (Centenary-1)*100+1
    StopIndex = min(Centenary*100, length(ChunkNames))
    VrtFilename = file.path(OutputDir, paste0("Mosaic_Chunk_", Centenary, ".vrt"))
    gdalbuildvrt(ChunkNames[StartIndex:StopIndex], VrtFilename)
    TiffFilename = file.path(OutputDir, paste0("Mosaic_Chunk_", Centenary, ".tif"))
    gdalwarp(VrtFilename, TiffFilename, wm=6000)   # wm = warp memory limit
    MosaicNames[Centenary] = TiffFilename
}
# Mosaic the per-batch mosaics into the final output
gdalwarp(MosaicNames, OutputFile, wm=6000)
|
/src/mosaic-chunks.r
|
no_license
|
GreatEmerald/cglops-change-detection
|
R
| false
| false
| 1,557
|
r
|
#!/usr/bin/env Rscript
# Mosaic cropped chunk GeoTIFFs into a single output file without running out
# of RAM: build intermediate mosaics of at most 100 chunks each, then mosaic
# the intermediates.
library(optparse)
library(gdalUtils)

parser = OptionParser()
parser = add_option(parser, c("-t", "--tile"), type="character", default="X16Y06",
    help="Proba-V tile to process. (Default: %default)", metavar="tile")
parser = add_option(parser, c("-v", "--vegetation-index"), type="character", default="NDMI",
    help="Vegetation index to process. Case sensitive to input files. (Default: %default)", metavar="VI")
args = parse_args(parser)

OutputDir = file.path("/data/users/Public/greatemerald/modis/breaks", args[["vegetation-index"]], args[["tile"]])
OutputFile = file.path(OutputDir, "breaks-order3.tif")
ChunkNames = list.files(OutputDir, pattern=glob2rx("Output_Chunk*.tif"), full.names=TRUE)

# Fail fast on degenerate inputs: no chunks found at all, or more than the
# 100 x 100 limit the two-level mosaicking scheme supports.
if (length(ChunkNames) == 0)
    stop("No Output_Chunk*.tif files found in ", OutputDir)
if (length(ChunkNames) > 100*100)
    stop("Mosaicking over 10k files is not implemented.")

# Mosaic in steps of 100 chunks. seq_len() avoids the 1:0 trap that
# `1:ceiling(n/100)` would hit for n == 0.
MosaicNames = NULL
for (Centenary in seq_len(ceiling(length(ChunkNames) / 100)))
{
    StartIndex = (Centenary-1)*100+1
    StopIndex = min(Centenary*100, length(ChunkNames))
    # A VRT is a cheap virtual mosaic; gdalwarp then materialises it as TIFF.
    VrtFilename = file.path(OutputDir, paste0("Mosaic_Chunk_", Centenary, ".vrt"))
    gdalbuildvrt(ChunkNames[StartIndex:StopIndex], VrtFilename)
    TiffFilename = file.path(OutputDir, paste0("Mosaic_Chunk_", Centenary, ".tif"))
    gdalwarp(VrtFilename, TiffFilename, wm=6000)  # wm: warp memory cap (MB)
    MosaicNames = c(MosaicNames, TiffFilename)
}
# Mosaic the intermediate mosaics into the final output.
gdalwarp(MosaicNames, OutputFile, wm=6000)
|
#' Filter New Concepts
#' @description Keep only the rows in which at least one column contains an
#'   instance of the string "NEW ".
#' @import rubix
#' @export
filterAnyNewConcept <-
  function(.input) {
    # Reject an empty table before delegating to rubix.
    if (nrow(.input) == 0) {
      stop("input is empty")
    }
    # Direct call instead of a magrittr pipe; behaviour is identical.
    rubix::filter_all_grepl_any(.input, grepl_phrase = "NEW ")
  }
|
/R/filterAnyNewConcept.R
|
no_license
|
meerapatelmd/HemOncExt
|
R
| false
| false
| 427
|
r
|
#' Filter New Concepts
#' @description Keep only the rows in which at least one column contains an
#'   instance of the string "NEW ".
#' @import rubix
#' @export
filterAnyNewConcept <-
  function(.input) {
    # Reject an empty table before delegating to rubix.
    if (nrow(.input) == 0) {
      stop("input is empty")
    }
    # Direct call instead of a magrittr pipe; behaviour is identical.
    rubix::filter_all_grepl_any(.input, grepl_phrase = "NEW ")
  }
|
# Auto-generated fuzzing (AFL/valgrind) regression input for the CNull
# package: replays a fixed 5x7 matrix (with m = NULL and repetitions = 0L)
# against the internal sampling routine and prints the result structure.
# NOTE(review): the extreme values (~1e+295) are intentional fuzzer data,
# not meaningful community abundances.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108106e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615770006-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 362
|
r
|
# Auto-generated fuzzing (AFL/valgrind) regression input for the CNull
# package: replays a fixed 5x7 matrix (with m = NULL and repetitions = 0L)
# against the internal sampling routine and prints the result structure.
# NOTE(review): the extreme values (~1e+295) are intentional fuzzer data,
# not meaningful community abundances.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108106e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_users.R
\name{get_users}
\alias{get_users}
\title{Get All Users}
\usage{
get_users(users_desired = 1e+08, api_token)
}
\arguments{
\item{users_desired}{Number of user records to return}
\item{api_token}{Your personalized token provided by 'Skilljar'}
}
\value{
A data frame with users and user data
}
\description{
Returns a data.frame with all users from a particular domain. If you have
many users and do not want to return them all at once, you may request
fewer users to save time while doing development. The API returns up to 10,000
users at a time-- if you request more than 10,000, it will return in full
page increments (a multiple of 10,000).
}
\details{
Utilizing the API requires a token. This must be obtained by logging in
at dashboard.skilljar.com and going to Organization -> API Credentials.
There are different strategies for storing api tokens securely. It is
an unnecessary risk to store the token in the script!
}
\examples{
\dontrun{
# Retrieve 1000 users
my_users <- get_users(users_desired = 1000,
api_token = "my-token")
}
}
\seealso{
See \url{https://api.skilljar.com/docs/} for documentation on
the 'Skilljar' API.
}
|
/man/get_users.Rd
|
permissive
|
chrisumphlett/skilljaR
|
R
| false
| true
| 1,226
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_users.R
\name{get_users}
\alias{get_users}
\title{Get All Users}
\usage{
get_users(users_desired = 1e+08, api_token)
}
\arguments{
\item{users_desired}{Number of user records to return}
\item{api_token}{Your personalized token provided by 'Skilljar'}
}
\value{
A data frame with users and user data
}
\description{
Returns a data.frame with all users from a particular domain. If you have
many users and do not want to return them all at once, you may request
fewer users to save time while doing development. The API returns up to 10,000
users at a time-- if you request more than 10,000, it will return in full
page increments (a multiple of 10,000).
}
\details{
Utilizing the API requires a token. This must be obtained by logging in
at dashboard.skilljar.com and going to Organization -> API Credentials.
There are different strategies for storing api tokens securely. It is
an unnecessary risk to store the token in the script!
}
\examples{
\dontrun{
# Retrieve 1000 users
my_users <- get_users(users_desired = 1000,
api_token = "my-token")
}
}
\seealso{
See \url{https://api.skilljar.com/docs/} for documentation on
the 'Skilljar' API.
}
|
# Import data first using: psql taxi -f kaggledata/import.sql
# Open lazy dplyr table handles into the local "taxi" PostgreSQL database;
# no rows are pulled until a handle is collected downstream.
# NOTE(review): assumes dplyr (src_postgres/tbl) is attached by the caller;
# table meanings below are inferred from their names — verify in import.sql.
pg = src_postgres(dbname="taxi", host="127.0.0.1")
TEST_RAW = tbl(pg, "test_raw")      # presumably the Kaggle test set as imported
TRAIN_RAW = tbl(pg, "train_raw")    # presumably the Kaggle training set as imported
GHINFO = tbl(pg, "ghinfo")
DATA = tbl(pg, "data")
TRIP_DETAILS = tbl(pg, "trip_details")
|
/data/00_load_data.R
|
no_license
|
rbdixon/taxi
|
R
| false
| false
| 267
|
r
|
# Import data first using: psql taxi -f kaggledata/import.sql
# Open lazy dplyr table handles into the local "taxi" PostgreSQL database;
# no rows are pulled until a handle is collected downstream.
# NOTE(review): assumes dplyr (src_postgres/tbl) is attached by the caller;
# table meanings below are inferred from their names — verify in import.sql.
pg = src_postgres(dbname="taxi", host="127.0.0.1")
TEST_RAW = tbl(pg, "test_raw")      # presumably the Kaggle test set as imported
TRAIN_RAW = tbl(pg, "train_raw")    # presumably the Kaggle training set as imported
GHINFO = tbl(pg, "ghinfo")
DATA = tbl(pg, "data")
TRIP_DETAILS = tbl(pg, "trip_details")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isolate_branches.r
\name{get_branch_id}
\alias{get_branch_id}
\title{Add the branch id as a column to the tokenindex}
\usage{
get_branch_id(tokens)
}
\arguments{
\item{tokens}{A tokenindex}
}
\value{
the tokenindex
}
\description{
Adds the branch id as a column to the tokenindex, after trees have been split into branches.
}
\examples{
tokens = tokens_spacy[tokens_spacy$doc_id == 'text4',]
tokens = as_tokenindex(tokens)
\donttest{
tokens2 = isolate_branch(tokens, relation = 'relcl', copy_parent = TRUE)
get_branch_id(tokens2)
}
}
|
/man/get_branch_id.Rd
|
no_license
|
vanatteveldt/rsyntax
|
R
| false
| true
| 556
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isolate_branches.r
\name{get_branch_id}
\alias{get_branch_id}
\title{Add the branch id as a column to the tokenindex}
\usage{
get_branch_id(tokens)
}
\arguments{
\item{tokens}{A tokenindex}
}
\value{
the tokenindex
}
\description{
Adds the branch id as a column to the tokenindex, after trees have been split into branches.
}
\examples{
tokens = tokens_spacy[tokens_spacy$doc_id == 'text4',]
tokens = as_tokenindex(tokens)
\donttest{
tokens2 = isolate_branch(tokens, relation = 'relcl', copy_parent = TRUE)
get_branch_id(tokens2)
}
}
|
#' Saves Graphical Output
#'
#' Closes all graphical devices with \code{dev.off()} and saves the output
#' only if both \code{fileName} and \code{type} are provided.
#'
#' @param fileName name of file to be created in directory
#'   \code{fileDirectory} without file extension ".\code{type}".
#' @param type see \code{Cairo()}.
#' @param fileDirectory path of directory where the graphic is stored.
#'   Defaults to the current working directory.
#' @param oldfile old file of the same name to be overwritten.
#'
#' @return \code{NULL} if \code{fileName} or \code{type} is missing,
#'   otherwise (invisibly) \code{TRUE} if the graphic file was created.
#' @examples
#' # very simple KDE (adapted from example in \code{Cairo()})
#' openGraphCairo(type = "png", fileDirectory = tempdir())
#' plot(rnorm(4000),rnorm(4000),col="#ff000018",pch=19,cex=2)
#' #save file "norm.png" in directory specified in \code{fileDirectory}
#' saveGraphVisstat("norm",type = "png",fileDirectory=tempdir())
#' file.remove(file.path(tempdir(),"norm.png")) # remove file "norm.png" from \code{fileDirectory}.
#'
#' @export saveGraphVisstat
#'
saveGraphVisstat <- function(fileName = NULL, type = NULL,
                             fileDirectory = getwd(), oldfile = NULL) {
  # Both a target name and a graphics type are required; otherwise do nothing.
  if (is.null(fileName) || is.null(type)) {
    return(NULL)
  }
  # Default source file as produced by openGraphCairo().
  if (is.null(oldfile)) {
    oldfile <- file.path(fileDirectory, paste0("visstat_plot.", type))
  }
  while (!is.null(dev.list())) dev.off() # close all devices, flushing output
  # Sanitise the file name: replace EVERY non-alphanumeric character with an
  # underscore (the previous comment wrongly claimed only digits and '^' were
  # replaced), then collapse runs of underscores into a single one.
  safeName <- gsub("_{2,}", "_", gsub("[^[:alnum:]]", "_", fileName))
  newFileName <- paste0(safeName, ".", type)
  Cairofile <- file.path(fileDirectory, newFileName)
  # Move the temporary plot file to its final name (copy, then remove);
  # overwrite = TRUE replaces the unsafe literal `T`.
  copied <- file.copy(oldfile, Cairofile, overwrite = TRUE)
  if (file.exists(oldfile)) {
    file.remove(oldfile)
  }
  invisible(copied)
}
|
/R/saveGraphVisstat.R
|
no_license
|
cran/visStatistics
|
R
| false
| false
| 2,030
|
r
|
#' Saves Graphical Output
#'
#' Closes all graphical devices with \code{dev.off()} and saves the output
#' only if both \code{fileName} and \code{type} are provided.
#'
#' @param fileName name of file to be created in directory
#'   \code{fileDirectory} without file extension ".\code{type}".
#' @param type see \code{Cairo()}.
#' @param fileDirectory path of directory where the graphic is stored.
#'   Defaults to the current working directory.
#' @param oldfile old file of the same name to be overwritten.
#'
#' @return \code{NULL} if \code{fileName} or \code{type} is missing,
#'   otherwise (invisibly) \code{TRUE} if the graphic file was created.
#' @examples
#' # very simple KDE (adapted from example in \code{Cairo()})
#' openGraphCairo(type = "png", fileDirectory = tempdir())
#' plot(rnorm(4000),rnorm(4000),col="#ff000018",pch=19,cex=2)
#' #save file "norm.png" in directory specified in \code{fileDirectory}
#' saveGraphVisstat("norm",type = "png",fileDirectory=tempdir())
#' file.remove(file.path(tempdir(),"norm.png")) # remove file "norm.png" from \code{fileDirectory}.
#'
#' @export saveGraphVisstat
#'
saveGraphVisstat <- function(fileName = NULL, type = NULL,
                             fileDirectory = getwd(), oldfile = NULL) {
  # Both a target name and a graphics type are required; otherwise do nothing.
  if (is.null(fileName) || is.null(type)) {
    return(NULL)
  }
  # Default source file as produced by openGraphCairo().
  if (is.null(oldfile)) {
    oldfile <- file.path(fileDirectory, paste0("visstat_plot.", type))
  }
  while (!is.null(dev.list())) dev.off() # close all devices, flushing output
  # Sanitise the file name: replace EVERY non-alphanumeric character with an
  # underscore (the previous comment wrongly claimed only digits and '^' were
  # replaced), then collapse runs of underscores into a single one.
  safeName <- gsub("_{2,}", "_", gsub("[^[:alnum:]]", "_", fileName))
  newFileName <- paste0(safeName, ".", type)
  Cairofile <- file.path(fileDirectory, newFileName)
  # Move the temporary plot file to its final name (copy, then remove);
  # overwrite = TRUE replaces the unsafe literal `T`.
  copied <- file.copy(oldfile, Cairofile, overwrite = TRUE)
  if (file.exists(oldfile)) {
    file.remove(oldfile)
  }
  invisible(copied)
}
|
# Read parcel/truck simulation outputs for the selected scenarios and build a
# per-scenario, per-vehicle summary table (vehicle count, distance, CO2, NOx,
# operating time, delivered weight and parcel counts), restricted to the
# POST_PACKET commodity.
#
# Args:
#   upper_folder: path prefix containing one sub-folder per scenario.
#   scenario_names, scenario_folders, distribution_centers: parallel lists
#     describing all known scenarios (indexed via match()).
#   selected_scenarios: subset of scenario_names to read.
# Returns: a data.frame with factor columns `scenario` (levels =
#   scenario_names) and `vehicle` ("Van", "Feeder", "Cargo bike").
# NOTE(review): requires data.table (fread) and dplyr to be attached by the
#   caller; both scale factors are currently fixed at 1.0.
read_model_results = function(upper_folder, scenario_names, scenario_folders, selected_scenarios, distribution_centers){
  numberOfScenarios = length(selected_scenarios)
  summary = data.frame()
  scaleFactorTrucks = 1.0
  scaleFactorParcels = 1.0
  for (i in 1:numberOfScenarios){
    # Resolve this scenario's folder and distribution center by position.
    scenario = selected_scenarios[[i]]
    scenario_index = match(x = scenario, table = scenario_names)
    selected_DC = distribution_centers[[scenario_index]]
    folder = paste(upper_folder, scenario_folders[[scenario_index]], "/", sep = "")
    parcels = fread(paste(folder, "parcels.csv", sep = ""))
    #ld_trucks = fread(paste(folder, "ld_trucks.csv", sep = ""))
    #ld_trucks = ld_trucks %>% filter(destinationDistributionCenter == selected_DC)
    # sd_trucks is read but not used below — kept for parity with the
    # commented-out long-distance truck analysis.
    sd_trucks = fread(paste(folder, "sd_trucks.csv", sep = ""))
    vehicle_emissions = fread(paste(folder, "vehicleWarmEmissionFile.csv", sep = ""))
    # Force emission columns to numeric (fread may read them as character).
    vehicle_emissions$CO = as.numeric( vehicle_emissions$CO)
    vehicle_emissions$CO2 = as.numeric( vehicle_emissions$CO2)
    vehicle_emissions$HC = as.numeric( vehicle_emissions$HC)
    vehicle_emissions$PM = as.numeric( vehicle_emissions$PM)
    vehicle_emissions$NOx = as.numeric( vehicle_emissions$NOx)
    vehicle_emissions = vehicle_emissions %>% filter(distance != 0)
    #ld_trucks_assigned = ld_trucks %>% filter(assigned == T)
    #trucks_with_emissions = left_join(ld_trucks_assigned, vehicle_emissions, by = "id")
    # Delivered weight/parcel counts, split by distribution channel:
    # motorized van, cargo bike, and parcel-shop drop-off.
    total_weight = parcels %>%
      filter(assigned, toDestination) %>%
      summarize(weight_kg = sum(weight_kg), n = n())
    delivered_weight_van = parcels %>%
      filter(assigned, toDestination, transaction != "PARCEL_SHOP", distributionType == "MOTORIZED") %>%
      summarize(weight_kg = sum(weight_kg), n = n())
    if(nrow(delivered_weight_van) == 0){
      w_van = 0
      p_van = 0
    } else {
      w_van = sum(delivered_weight_van$weight_kg)
      p_van = sum(delivered_weight_van$n)
    }
    delivered_weight_cargo_bike = parcels %>%
      filter(assigned, toDestination, transaction != "PARCEL_SHOP", distributionType == "CARGO_BIKE") %>%
      summarize(weight_kg = sum(weight_kg), n = n())
    if(nrow(delivered_weight_cargo_bike) == 0){
      w_cb = 0
      p_cb = 0
    } else {
      w_cb = sum(delivered_weight_cargo_bike$weight_kg)
      p_cb = sum(delivered_weight_cargo_bike$n)
    }
    delivered_weight_to_shop = parcels %>%
      filter(assigned, toDestination, transaction == "PARCEL_SHOP") %>% group_by(distributionType) %>%
      summarize(weight_kg = sum(weight_kg), n = n())
    if(nrow(delivered_weight_to_shop) == 0){
      w_shop = 0
      p_shop = 0
    } else {
      w_shop = sum(delivered_weight_to_shop$weight_kg)
      p_shop = sum(delivered_weight_to_shop$n)
    }
    # Aggregate emissions per vehicle class, classified by the id string:
    # "van" (but not "feeder"), "feeder", and "cargoBike".
    # NOTE(review): rowwise() before a whole-table summarize() looks like a
    # no-op here — confirm it is not relied upon.
    summary_vans = vehicle_emissions %>%
      rowwise() %>%
      filter(grepl("van", id) & !grepl("feeder", id)) %>%
      mutate(id = "all") %>%
      group_by() %>% summarize(n = n()/scaleFactorParcels, distance = sum(distance)/scaleFactorParcels,
                               CO2 = sum(CO2)/scaleFactorParcels, NOx = sum(NOx)/scaleFactorParcels,
                               operatingTime = sum(operatingTime)/scaleFactorParcels)
    summary_vans$commodity = "POST_PACKET"
    summary_vans$vehicle = "Van"
    summary_feeder = vehicle_emissions %>%
      rowwise() %>%
      filter(grepl("feeder",id)) %>%
      mutate(id = "all") %>%
      group_by() %>% summarize(n = n()/scaleFactorParcels, distance = sum(distance)/scaleFactorParcels,
                               CO2 = sum(CO2)/scaleFactorParcels, NOx = sum(NOx)/scaleFactorParcels,
                               operatingTime = sum(operatingTime)/scaleFactorParcels)
    summary_feeder$commodity = "POST_PACKET"
    summary_feeder$vehicle = "Feeder"
    #summary_ld_trucks$vehicle = "Truck"
    summary_cargo_bike = vehicle_emissions %>%
      rowwise() %>%
      filter(grepl("cargoBike", id)) %>%
      mutate(id = "all") %>%
      group_by() %>% summarize(n = n()/scaleFactorParcels, distance = sum(distance)/scaleFactorParcels,
                               CO2 = sum(CO2)/scaleFactorParcels, NOx = sum(NOx)/scaleFactorParcels,
                               operatingTime = sum(operatingTime)/scaleFactorParcels)
    summary_cargo_bike$commodity = "POST_PACKET"
    summary_cargo_bike$vehicle = "Cargo bike"
    # Attach delivered weights/parcels; feeders carry cargo-bike + shop loads.
    summary_cargo_bike$weight_tn = w_cb
    summary_feeder$weight_tn = w_cb + w_shop
    summary_vans$weight_tn = w_van
    summary_cargo_bike$parcels = p_cb
    summary_feeder$parcels = p_cb + p_shop
    summary_vans$parcels = p_van
    #this_summary = rbind(summary_vans, summary_ld_trucks)
    this_summary = rbind(summary_vans, summary_cargo_bike)
    this_summary = rbind(this_summary, summary_feeder)
    this_summary$scenario = scenario
    summary = rbind(summary, this_summary)
  }
  #summary_ld_trucks$scenario = "All (inter-urban)"
  #summary = rbind(summary, summary_ld_trucks)
  summary = summary %>% filter(commodity == "POST_PACKET")
  #summary$parcels = delivered_weight$n
  # Fix factor level order so plots show scenarios and vehicles consistently.
  factor_levels = c(scenario_names)
  summary$scenario = factor(summary$scenario, levels = factor_levels)
  summary$vehicle = factor(summary$vehicle, levels = c("Van", "Feeder", "Cargo bike"))
  return(summary)
}
|
/simulation_results_analysis/dashboard/read_data_fun.R
|
no_license
|
cllorca1/freightFlowsAnalyses
|
R
| false
| false
| 5,473
|
r
|
# Read parcel/truck simulation outputs for the selected scenarios and build a
# per-scenario, per-vehicle summary table (vehicle count, distance, CO2, NOx,
# operating time, delivered weight and parcel counts), restricted to the
# POST_PACKET commodity.
#
# Args:
#   upper_folder: path prefix containing one sub-folder per scenario.
#   scenario_names, scenario_folders, distribution_centers: parallel lists
#     describing all known scenarios (indexed via match()).
#   selected_scenarios: subset of scenario_names to read.
# Returns: a data.frame with factor columns `scenario` (levels =
#   scenario_names) and `vehicle` ("Van", "Feeder", "Cargo bike").
# NOTE(review): requires data.table (fread) and dplyr to be attached by the
#   caller; both scale factors are currently fixed at 1.0.
read_model_results = function(upper_folder, scenario_names, scenario_folders, selected_scenarios, distribution_centers){
  numberOfScenarios = length(selected_scenarios)
  summary = data.frame()
  scaleFactorTrucks = 1.0
  scaleFactorParcels = 1.0
  for (i in 1:numberOfScenarios){
    # Resolve this scenario's folder and distribution center by position.
    scenario = selected_scenarios[[i]]
    scenario_index = match(x = scenario, table = scenario_names)
    selected_DC = distribution_centers[[scenario_index]]
    folder = paste(upper_folder, scenario_folders[[scenario_index]], "/", sep = "")
    parcels = fread(paste(folder, "parcels.csv", sep = ""))
    #ld_trucks = fread(paste(folder, "ld_trucks.csv", sep = ""))
    #ld_trucks = ld_trucks %>% filter(destinationDistributionCenter == selected_DC)
    # sd_trucks is read but not used below — kept for parity with the
    # commented-out long-distance truck analysis.
    sd_trucks = fread(paste(folder, "sd_trucks.csv", sep = ""))
    vehicle_emissions = fread(paste(folder, "vehicleWarmEmissionFile.csv", sep = ""))
    # Force emission columns to numeric (fread may read them as character).
    vehicle_emissions$CO = as.numeric( vehicle_emissions$CO)
    vehicle_emissions$CO2 = as.numeric( vehicle_emissions$CO2)
    vehicle_emissions$HC = as.numeric( vehicle_emissions$HC)
    vehicle_emissions$PM = as.numeric( vehicle_emissions$PM)
    vehicle_emissions$NOx = as.numeric( vehicle_emissions$NOx)
    vehicle_emissions = vehicle_emissions %>% filter(distance != 0)
    #ld_trucks_assigned = ld_trucks %>% filter(assigned == T)
    #trucks_with_emissions = left_join(ld_trucks_assigned, vehicle_emissions, by = "id")
    # Delivered weight/parcel counts, split by distribution channel:
    # motorized van, cargo bike, and parcel-shop drop-off.
    total_weight = parcels %>%
      filter(assigned, toDestination) %>%
      summarize(weight_kg = sum(weight_kg), n = n())
    delivered_weight_van = parcels %>%
      filter(assigned, toDestination, transaction != "PARCEL_SHOP", distributionType == "MOTORIZED") %>%
      summarize(weight_kg = sum(weight_kg), n = n())
    if(nrow(delivered_weight_van) == 0){
      w_van = 0
      p_van = 0
    } else {
      w_van = sum(delivered_weight_van$weight_kg)
      p_van = sum(delivered_weight_van$n)
    }
    delivered_weight_cargo_bike = parcels %>%
      filter(assigned, toDestination, transaction != "PARCEL_SHOP", distributionType == "CARGO_BIKE") %>%
      summarize(weight_kg = sum(weight_kg), n = n())
    if(nrow(delivered_weight_cargo_bike) == 0){
      w_cb = 0
      p_cb = 0
    } else {
      w_cb = sum(delivered_weight_cargo_bike$weight_kg)
      p_cb = sum(delivered_weight_cargo_bike$n)
    }
    delivered_weight_to_shop = parcels %>%
      filter(assigned, toDestination, transaction == "PARCEL_SHOP") %>% group_by(distributionType) %>%
      summarize(weight_kg = sum(weight_kg), n = n())
    if(nrow(delivered_weight_to_shop) == 0){
      w_shop = 0
      p_shop = 0
    } else {
      w_shop = sum(delivered_weight_to_shop$weight_kg)
      p_shop = sum(delivered_weight_to_shop$n)
    }
    # Aggregate emissions per vehicle class, classified by the id string:
    # "van" (but not "feeder"), "feeder", and "cargoBike".
    # NOTE(review): rowwise() before a whole-table summarize() looks like a
    # no-op here — confirm it is not relied upon.
    summary_vans = vehicle_emissions %>%
      rowwise() %>%
      filter(grepl("van", id) & !grepl("feeder", id)) %>%
      mutate(id = "all") %>%
      group_by() %>% summarize(n = n()/scaleFactorParcels, distance = sum(distance)/scaleFactorParcels,
                               CO2 = sum(CO2)/scaleFactorParcels, NOx = sum(NOx)/scaleFactorParcels,
                               operatingTime = sum(operatingTime)/scaleFactorParcels)
    summary_vans$commodity = "POST_PACKET"
    summary_vans$vehicle = "Van"
    summary_feeder = vehicle_emissions %>%
      rowwise() %>%
      filter(grepl("feeder",id)) %>%
      mutate(id = "all") %>%
      group_by() %>% summarize(n = n()/scaleFactorParcels, distance = sum(distance)/scaleFactorParcels,
                               CO2 = sum(CO2)/scaleFactorParcels, NOx = sum(NOx)/scaleFactorParcels,
                               operatingTime = sum(operatingTime)/scaleFactorParcels)
    summary_feeder$commodity = "POST_PACKET"
    summary_feeder$vehicle = "Feeder"
    #summary_ld_trucks$vehicle = "Truck"
    summary_cargo_bike = vehicle_emissions %>%
      rowwise() %>%
      filter(grepl("cargoBike", id)) %>%
      mutate(id = "all") %>%
      group_by() %>% summarize(n = n()/scaleFactorParcels, distance = sum(distance)/scaleFactorParcels,
                               CO2 = sum(CO2)/scaleFactorParcels, NOx = sum(NOx)/scaleFactorParcels,
                               operatingTime = sum(operatingTime)/scaleFactorParcels)
    summary_cargo_bike$commodity = "POST_PACKET"
    summary_cargo_bike$vehicle = "Cargo bike"
    # Attach delivered weights/parcels; feeders carry cargo-bike + shop loads.
    summary_cargo_bike$weight_tn = w_cb
    summary_feeder$weight_tn = w_cb + w_shop
    summary_vans$weight_tn = w_van
    summary_cargo_bike$parcels = p_cb
    summary_feeder$parcels = p_cb + p_shop
    summary_vans$parcels = p_van
    #this_summary = rbind(summary_vans, summary_ld_trucks)
    this_summary = rbind(summary_vans, summary_cargo_bike)
    this_summary = rbind(this_summary, summary_feeder)
    this_summary$scenario = scenario
    summary = rbind(summary, this_summary)
  }
  #summary_ld_trucks$scenario = "All (inter-urban)"
  #summary = rbind(summary, summary_ld_trucks)
  summary = summary %>% filter(commodity == "POST_PACKET")
  #summary$parcels = delivered_weight$n
  # Fix factor level order so plots show scenarios and vehicles consistently.
  factor_levels = c(scenario_names)
  summary$scenario = factor(summary$scenario, levels = factor_levels)
  summary$vehicle = factor(summary$vehicle, levels = c("Van", "Feeder", "Cargo bike"))
  return(summary)
}
|
# Per-motif Fisher's exact test of TF-motif hit enrichment in eeSNPs versus
# control SNPs.
#
# Args:
#   eeSNP.file, control.file: motif-scan tables readable by fread(); column
#     V2 is the motif/matrix id, V6 the match score (threshold default -8.5;
#     presumably a log-scale score, more negative = stronger — confirm),
#     V10 an attribute string whose 4th space-separated token is the motif
#     name.
#   eeSNPnum, controlnum: total numbers of eeSNP / control sites, used for
#     the "negative" cells of each 2x2 contingency table.
#   threshold: only hits with V6 < threshold are counted.
# Returns: a data.table keyed by the one-sided "greater" p-value, with
#   per-motif counts, Fisher p-values (greater/less/two.sided) and fdrtool
#   q-values from the two-sided p-values.
tfFisher <- function(eeSNP.file, control.file, eeSNPnum, controlnum, threshold=-8.5)
{
  require(data.table)
  require(fdrtool)
  # Load both scans and keep only hits passing the score threshold.
  tf = fread(eeSNP.file)
  tf=tf[V6 < threshold]
  #tf1 = tf[, list(value=max(V6)), by=list(V1,V2,V10)]
  tf.control = fread(control.file)
  tf.control=tf.control[V6 < threshold]
  #tf1.control = tf.control[, list(value=V6, V1,V2,V10)]
  # Count hits per motif id (V2); the control table also derives a motif
  # name from V10 (note: strsplit(...)[[1]] uses the group's first row).
  tf.grp = tf[, list( eeSNP=length(V6)) , by=V2]
  tf.control.grp = tf.control[, list( motif=(strsplit(V10, split=" ")[[1]][4]),control=length(V6)), by=V2]
  # Outer merge so motifs present in only one set get 0 in the other.
  tf.merge = merge(tf.grp, tf.control.grp, by="V2", all=T)
  tf.merge[is.na(tf.merge)] <- 0
  tf.merge$eeSNPneg <- eeSNPnum - tf.merge$eeSNP
  tf.merge$controlneg <- controlnum - tf.merge$control
  # One 2x2 Fisher test per motif, in all three alternative directions.
  # NOTE(review): "conrol" is a typo in the output column name; kept as-is
  # because downstream code may already reference it.
  eeSNP.fisher =tf.merge[, list( motif=motif, eeSNP=eeSNP, eeSNPneg=eeSNPneg, conrol = control, controlneg = controlneg,
                                greater = fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="greater")$p.value,
                                less= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="less")$p.value,
                                two.sided= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="two.sided")$p.value),
                         by=V2]
  # Multiple-testing correction on the two-sided p-values.
  eeSNP.fisher$qval = fdrtool(eeSNP.fisher$two.sided, statistic="pvalue", plot=F)$qval
  setkey(eeSNP.fisher, greater)
  eeSNP.fisher
}
# Variant of tfFisher() that first deduplicates hits: only the maximum score
# per (V1, V2, V10) group is kept, so each site/motif pair counts once.
# Note: unlike tfFisher(), no score threshold is applied.
# Args and return value are otherwise as in tfFisher() above.
tfFisher.grp <- function(eeSNP.file, control.file, eeSNPnum, controlnum)
{
  require(data.table)
  require(fdrtool)
  # Collapse each (V1, V2, V10) group to its best (maximum) score.
  tf = fread(eeSNP.file)
  tf1 = tf[, list(value=max(V6)), by=list(V1,V2,V10)]
  tf.control = fread(control.file)
  tf1.control = tf.control[, list(value=max(V6)), by=list(V1,V2,V10)]
  # Count deduplicated hits per motif id (V2); the control side also derives
  # a motif name from V10 (first row of each group).
  tf.grp = tf1[, list( eeSNP=length(value)) , by=V2]
  tf.control.grp = tf1.control[, list( motif=(strsplit(V10, split=" ")[[1]][4]),control=length(value)), by=V2]
  # Outer merge so motifs present in only one set get 0 in the other.
  tf.merge = merge(tf.grp, tf.control.grp, by="V2", all=T)
  tf.merge[is.na(tf.merge)] <- 0
  tf.merge$eeSNPneg <- eeSNPnum - tf.merge$eeSNP
  tf.merge$controlneg <- controlnum - tf.merge$control
  # One 2x2 Fisher test per motif; "conrol" typo kept for compatibility.
  eeSNP.fisher =tf.merge[, list( motif=motif, eeSNP=eeSNP, eeSNPneg=eeSNPneg, conrol = control, controlneg = controlneg,
                                greater = fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="greater")$p.value,
                                less= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="less")$p.value,
                                two.sided= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="two.sided")$p.value),
                         by=V2]
  eeSNP.fisher$qval = fdrtool(eeSNP.fisher$two.sided, statistic="pvalue", plot=F)$qval
  setkey(eeSNP.fisher, greater)
  eeSNP.fisher
}
# Allele-aware variant: for each site the major- and minor-allele motif
# scores are compared, and a hit counts as "disrupting" when exactly one
# allele passes the score threshold AND the absolute score change exceeds 1.
#
# Args:
#   eeSNP, control: file-name prefixes; "<prefix>.major.gff" and
#     "<prefix>.minor.gff" are read (rows assumed to be in matching order).
#   eeSNPnum, controlnum, threshold: as in tfFisher().
# Returns: as in tfFisher().
tfFisher.majorminor <- function(eeSNP, control, eeSNPnum, controlnum, threshold=-8.5)
{
  eeSNP.major=paste(eeSNP, ".major.gff", sep="")
  eeSNP.minor=paste(eeSNP, ".minor.gff", sep="")
  control.major=paste(control, ".major.gff", sep="")
  control.minor=paste(control, ".minor.gff", sep="")
  require(data.table)
  require(fdrtool)
  # Pair each major-allele score with its minor-allele counterpart and flag
  # sites whose motif match is disrupted by the allele change.
  tf = fread(eeSNP.major)
  setnames(tf, "V6", "major")
  tf$minor <- fread(eeSNP.minor)$V6
  tf[,change:=abs(major - minor)]
  tf[,sig:=xor((major<threshold), (minor<threshold))]
  tf[,disrupts:= ifelse(sig & (change > 1), 1, 0) ]
  tf.control = fread(control.major)
  setnames(tf.control, "V6", "major")
  tf.control$minor <- fread(control.minor)$V6
  tf.control[,change:=abs(major - minor)]
  tf.control[,sig:=xor((major<threshold), (minor<threshold))]
  tf.control[,disrupts:= ifelse(sig & (change > 1), 1, 0) ]
  #tf.control=tf.control[V6 < threshold]
  # eeSNP side counts disrupting hits; control side counts ALL rows.
  # NOTE(review): the asymmetry (sum(disrupts) vs length(V6)) may be
  # unintended — verify whether controls should also use `disrupts`.
  tf.grp = tf[, list( motif=(strsplit(V10, split=" ")[[1]][4]), eeSNP=sum(disrupts)) , by=V2]
  tf.control.grp = tf.control[, list( control=length(V6)), by=V2]
  # Outer merge so motifs present in only one set get 0 in the other.
  tf.merge = merge(tf.grp, tf.control.grp, by="V2", all=T)
  tf.merge[is.na(tf.merge)] <- 0
  tf.merge$eeSNPneg <- eeSNPnum - tf.merge$eeSNP
  tf.merge$controlneg <- controlnum - tf.merge$control
  # One 2x2 Fisher test per motif; "conrol" typo kept for compatibility.
  eeSNP.fisher =tf.merge[, list( motif=motif, eeSNP=eeSNP, eeSNPneg=eeSNPneg, conrol = control, controlneg = controlneg,
                                greater = fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="greater")$p.value,
                                less= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="less")$p.value,
                                two.sided= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="two.sided")$p.value),
                         by=V2]
  eeSNP.fisher$qval = fdrtool(eeSNP.fisher$two.sided, statistic="pvalue", plot=F)$qval
  setkey(eeSNP.fisher, greater)
  eeSNP.fisher
}
# Wilcoxon rank-sum alternative to the Fisher-based variants: compares the
# absolute allele score changes of eeSNPs vs. controls per motif/matrix.
#
# Args:
#   eeSNP, control: either file paths (read with scan()) or numeric vectors
#     of score changes, laid out column-wise with one column per matrix.
#   mat2motif: path to a matrix-id -> motif-name table (fread-able).
#   threshold: NOTE(review) — accepted but never used in this function.
# Returns: the mat2motif table augmented with per-matrix mean |change| for
#   both groups and Wilcoxon p-values (greater/less/two.sided; 1 when a
#   group mean is NaN, i.e. no data for that matrix).
tfFisher.majorminor.wilcox <- function(eeSNP, control, mat2motif,threshold=-8.5)
{
  #write.table(file="mat2motif",x = temp1 , row.names = F,
  #col.names =T, sep="\t", quote=F )
  require(data.table)
  require(fdrtool)
  mat2motif <- fread(mat2motif)
  nMat <- nrow(mat2motif)
  # Accept either file names or pre-loaded numeric vectors.
  if(is.character(eeSNP))
    eeSNP <- scan(eeSNP, sep="\n")
  if(is.character(control))
    control <- scan(control, sep="\n")
  # Reshape into one column of |score change| per matrix.
  tf <- matrix(abs(eeSNP), ncol=nMat)
  tf.control <- matrix(abs(control), ncol=nMat)
  mat2motif$delta <- colMeans(tf, na.rm=T)
  mat2motif$delta.control <- colMeans(tf.control, na.rm=T)
  # Default p-value of 1 for matrices without data in either group.
  mat2motif$greater <- 1
  mat2motif$less <- 1
  mat2motif$two.sided <- 1
  for(ii in seq(nMat)){
    a1 <- tf[,ii]
    a2 <- tf.control[,ii]
    if(! (is.nan(mat2motif$delta[ii]) | is.nan(mat2motif$delta.control[ii]))){
      mat2motif$greater[ii]<- (wilcox.test(a1, a2, alternative="greater", exact=T))$p.value
      mat2motif$less[ii]<- (wilcox.test(a1, a2, alternative="less", exact=T))$p.value
      mat2motif$two.sided[ii]<- (wilcox.test(a1, a2, alternative="two.sided", exact=T))$p.value
    }
  }
  #mat2motif$qval = fdrtool(mat2motif$two.sided, statistic="pvalue", plot=F)$qval
  mat2motif
}
|
/restore/R/tfFisher.R
|
no_license
|
vinash85/avinash
|
R
| false
| false
| 5,656
|
r
|
# Per-motif Fisher's exact test of TF-motif hit enrichment in eeSNPs versus
# control SNPs.
#
# Args:
#   eeSNP.file, control.file: motif-scan tables readable by fread(); column
#     V2 is the motif/matrix id, V6 the match score (threshold default -8.5;
#     presumably a log-scale score, more negative = stronger — confirm),
#     V10 an attribute string whose 4th space-separated token is the motif
#     name.
#   eeSNPnum, controlnum: total numbers of eeSNP / control sites, used for
#     the "negative" cells of each 2x2 contingency table.
#   threshold: only hits with V6 < threshold are counted.
# Returns: a data.table keyed by the one-sided "greater" p-value, with
#   per-motif counts, Fisher p-values (greater/less/two.sided) and fdrtool
#   q-values from the two-sided p-values.
tfFisher <- function(eeSNP.file, control.file, eeSNPnum, controlnum, threshold=-8.5)
{
  require(data.table)
  require(fdrtool)
  # Load both scans and keep only hits passing the score threshold.
  tf = fread(eeSNP.file)
  tf=tf[V6 < threshold]
  #tf1 = tf[, list(value=max(V6)), by=list(V1,V2,V10)]
  tf.control = fread(control.file)
  tf.control=tf.control[V6 < threshold]
  #tf1.control = tf.control[, list(value=V6, V1,V2,V10)]
  # Count hits per motif id (V2); the control table also derives a motif
  # name from V10 (note: strsplit(...)[[1]] uses the group's first row).
  tf.grp = tf[, list( eeSNP=length(V6)) , by=V2]
  tf.control.grp = tf.control[, list( motif=(strsplit(V10, split=" ")[[1]][4]),control=length(V6)), by=V2]
  # Outer merge so motifs present in only one set get 0 in the other.
  tf.merge = merge(tf.grp, tf.control.grp, by="V2", all=T)
  tf.merge[is.na(tf.merge)] <- 0
  tf.merge$eeSNPneg <- eeSNPnum - tf.merge$eeSNP
  tf.merge$controlneg <- controlnum - tf.merge$control
  # One 2x2 Fisher test per motif, in all three alternative directions.
  # NOTE(review): "conrol" is a typo in the output column name; kept as-is
  # because downstream code may already reference it.
  eeSNP.fisher =tf.merge[, list( motif=motif, eeSNP=eeSNP, eeSNPneg=eeSNPneg, conrol = control, controlneg = controlneg,
                                greater = fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="greater")$p.value,
                                less= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="less")$p.value,
                                two.sided= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="two.sided")$p.value),
                         by=V2]
  # Multiple-testing correction on the two-sided p-values.
  eeSNP.fisher$qval = fdrtool(eeSNP.fisher$two.sided, statistic="pvalue", plot=F)$qval
  setkey(eeSNP.fisher, greater)
  eeSNP.fisher
}
# Variant of tfFisher() that first deduplicates hits: only the maximum score
# per (V1, V2, V10) group is kept, so each site/motif pair counts once.
# Note: unlike tfFisher(), no score threshold is applied.
# Args and return value are otherwise as in tfFisher() above.
tfFisher.grp <- function(eeSNP.file, control.file, eeSNPnum, controlnum)
{
  require(data.table)
  require(fdrtool)
  # Collapse each (V1, V2, V10) group to its best (maximum) score.
  tf = fread(eeSNP.file)
  tf1 = tf[, list(value=max(V6)), by=list(V1,V2,V10)]
  tf.control = fread(control.file)
  tf1.control = tf.control[, list(value=max(V6)), by=list(V1,V2,V10)]
  # Count deduplicated hits per motif id (V2); the control side also derives
  # a motif name from V10 (first row of each group).
  tf.grp = tf1[, list( eeSNP=length(value)) , by=V2]
  tf.control.grp = tf1.control[, list( motif=(strsplit(V10, split=" ")[[1]][4]),control=length(value)), by=V2]
  # Outer merge so motifs present in only one set get 0 in the other.
  tf.merge = merge(tf.grp, tf.control.grp, by="V2", all=T)
  tf.merge[is.na(tf.merge)] <- 0
  tf.merge$eeSNPneg <- eeSNPnum - tf.merge$eeSNP
  tf.merge$controlneg <- controlnum - tf.merge$control
  # One 2x2 Fisher test per motif; "conrol" typo kept for compatibility.
  eeSNP.fisher =tf.merge[, list( motif=motif, eeSNP=eeSNP, eeSNPneg=eeSNPneg, conrol = control, controlneg = controlneg,
                                greater = fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="greater")$p.value,
                                less= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="less")$p.value,
                                two.sided= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="two.sided")$p.value),
                         by=V2]
  eeSNP.fisher$qval = fdrtool(eeSNP.fisher$two.sided, statistic="pvalue", plot=F)$qval
  setkey(eeSNP.fisher, greater)
  eeSNP.fisher
}
# Allele-aware variant: for each site the major- and minor-allele motif
# scores are compared, and a hit counts as "disrupting" when exactly one
# allele passes the score threshold AND the absolute score change exceeds 1.
#
# Args:
#   eeSNP, control: file-name prefixes; "<prefix>.major.gff" and
#     "<prefix>.minor.gff" are read (rows assumed to be in matching order).
#   eeSNPnum, controlnum, threshold: as in tfFisher().
# Returns: as in tfFisher().
tfFisher.majorminor <- function(eeSNP, control, eeSNPnum, controlnum, threshold=-8.5)
{
  eeSNP.major=paste(eeSNP, ".major.gff", sep="")
  eeSNP.minor=paste(eeSNP, ".minor.gff", sep="")
  control.major=paste(control, ".major.gff", sep="")
  control.minor=paste(control, ".minor.gff", sep="")
  require(data.table)
  require(fdrtool)
  # Pair each major-allele score with its minor-allele counterpart and flag
  # sites whose motif match is disrupted by the allele change.
  tf = fread(eeSNP.major)
  setnames(tf, "V6", "major")
  tf$minor <- fread(eeSNP.minor)$V6
  tf[,change:=abs(major - minor)]
  tf[,sig:=xor((major<threshold), (minor<threshold))]
  tf[,disrupts:= ifelse(sig & (change > 1), 1, 0) ]
  tf.control = fread(control.major)
  setnames(tf.control, "V6", "major")
  tf.control$minor <- fread(control.minor)$V6
  tf.control[,change:=abs(major - minor)]
  tf.control[,sig:=xor((major<threshold), (minor<threshold))]
  tf.control[,disrupts:= ifelse(sig & (change > 1), 1, 0) ]
  #tf.control=tf.control[V6 < threshold]
  # eeSNP side counts disrupting hits; control side counts ALL rows.
  # NOTE(review): the asymmetry (sum(disrupts) vs length(V6)) may be
  # unintended — verify whether controls should also use `disrupts`.
  tf.grp = tf[, list( motif=(strsplit(V10, split=" ")[[1]][4]), eeSNP=sum(disrupts)) , by=V2]
  tf.control.grp = tf.control[, list( control=length(V6)), by=V2]
  # Outer merge so motifs present in only one set get 0 in the other.
  tf.merge = merge(tf.grp, tf.control.grp, by="V2", all=T)
  tf.merge[is.na(tf.merge)] <- 0
  tf.merge$eeSNPneg <- eeSNPnum - tf.merge$eeSNP
  tf.merge$controlneg <- controlnum - tf.merge$control
  # One 2x2 Fisher test per motif; "conrol" typo kept for compatibility.
  eeSNP.fisher =tf.merge[, list( motif=motif, eeSNP=eeSNP, eeSNPneg=eeSNPneg, conrol = control, controlneg = controlneg,
                                greater = fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="greater")$p.value,
                                less= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="less")$p.value,
                                two.sided= fisher.test(matrix(c(eeSNP , eeSNPneg, control, controlneg),2,2),alternative="two.sided")$p.value),
                         by=V2]
  eeSNP.fisher$qval = fdrtool(eeSNP.fisher$two.sided, statistic="pvalue", plot=F)$qval
  setkey(eeSNP.fisher, greater)
  eeSNP.fisher
}
# Wilcoxon rank-sum alternative to the Fisher-based variants: compares the
# absolute allele score changes of eeSNPs vs. controls per motif/matrix.
#
# Args:
#   eeSNP, control: either file paths (read with scan()) or numeric vectors
#     of score changes, laid out column-wise with one column per matrix.
#   mat2motif: path to a matrix-id -> motif-name table (fread-able).
#   threshold: NOTE(review) — accepted but never used in this function.
# Returns: the mat2motif table augmented with per-matrix mean |change| for
#   both groups and Wilcoxon p-values (greater/less/two.sided; 1 when a
#   group mean is NaN, i.e. no data for that matrix).
tfFisher.majorminor.wilcox <- function(eeSNP, control, mat2motif,threshold=-8.5)
{
  #write.table(file="mat2motif",x = temp1 , row.names = F,
  #col.names =T, sep="\t", quote=F )
  require(data.table)
  require(fdrtool)
  mat2motif <- fread(mat2motif)
  nMat <- nrow(mat2motif)
  # Accept either file names or pre-loaded numeric vectors.
  if(is.character(eeSNP))
    eeSNP <- scan(eeSNP, sep="\n")
  if(is.character(control))
    control <- scan(control, sep="\n")
  # Reshape into one column of |score change| per matrix.
  tf <- matrix(abs(eeSNP), ncol=nMat)
  tf.control <- matrix(abs(control), ncol=nMat)
  mat2motif$delta <- colMeans(tf, na.rm=T)
  mat2motif$delta.control <- colMeans(tf.control, na.rm=T)
  # Default p-value of 1 for matrices without data in either group.
  mat2motif$greater <- 1
  mat2motif$less <- 1
  mat2motif$two.sided <- 1
  for(ii in seq(nMat)){
    a1 <- tf[,ii]
    a2 <- tf.control[,ii]
    if(! (is.nan(mat2motif$delta[ii]) | is.nan(mat2motif$delta.control[ii]))){
      mat2motif$greater[ii]<- (wilcox.test(a1, a2, alternative="greater", exact=T))$p.value
      mat2motif$less[ii]<- (wilcox.test(a1, a2, alternative="less", exact=T))$p.value
      mat2motif$two.sided[ii]<- (wilcox.test(a1, a2, alternative="two.sided", exact=T))$p.value
    }
  }
  #mat2motif$qval = fdrtool(mat2motif$two.sided, statistic="pvalue", plot=F)$qval
  mat2motif
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R
\docType{class}
\name{dbFrame-class}
\alias{dbFrame-class}
\alias{dbFrame}
\title{Debarcoding frame class}
\description{
This class represents the data returned by and used throughout debarcoding.
}
\details{
Objects of class \code{dbFrame} hold all data required for debarcoding:
\enumerate{
\item as the initial step of single-cell deconvolution,
\code{\link{assignPrelim}} will return a \code{dbFrame} containing
the input measurement data, barcoding scheme, and preliminary assignments.
\item assignments will be made final by \code{\link{applyCutoffs}}.
Optionally, population-specific separation cutoffs may be estimated
by running \code{\link{estCutoffs}} prior to this.
\item \code{\link{plotYields}}, \code{\link{plotEvents}} and
\code{\link{plotMahal}} aim to guide deconvolution parameter selection,
and to give a sense of the resulting barcode assignment quality.}
\code{show(dbFrame)} will display \itemize{
\item the dimensionality of the measurement data and number of barcodes
\item current assignments in order of decreasing population size
\item current separation cutoffs
\item the mean & per-population yield that'll be achieved upon debarcoding}
}
\section{Slots}{
\describe{
\item{\code{exprs}}{a matrix containing raw intensities of the input flowFrame.}
\item{\code{bc_key}}{binary barcoding scheme with numeric masses as column names and
samples names as row names OR a numeric vector of barcode masses.}
\item{\code{bc_ids}}{vector of barcode IDs. If a barcoding scheme is supplied, the respective
binary code's row name, else, the mass of the respective barcode channel.}
\item{\code{deltas}}{numeric vector of separations between positive and negative
barcode populations computed from normalized barcode intensities.}
\item{\code{normed_bcs}}{matrix containing normalized barcode intensities.}
\item{\code{mhl_dists}}{mahalanobis distances.}
\item{\code{sep_cutoffs}}{numeric vector of distance separation cutoffs between positive and negative
barcode populations above which events will be unassigned.}
\item{\code{mhl_cutoff}}{non-negative and non-zero numeric value specifying the
Mahalanobis distance below which events will be unassigned.}
\item{\code{counts}}{matrix of dimension (# barcodes)x(101) where each row contains the number
of events within a barcode for which positive and negative populations
are separated by a distance between in [0,0.01), ..., [0.99,1], respectively.}
\item{\code{yields}}{a matrix of dimension (# barcodes)x(101) where each row contains the
percentage of events within a barcode that will be obtained after applying
a separation cutoff of 0, 0.01, ..., 1, respectively.}
}}
\author{
Helena Lucia Crowell \email{crowellh@student.ethz.ch}
}
|
/man/dbFrame-class.Rd
|
no_license
|
lmweber/CATALYST
|
R
| false
| true
| 2,831
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R
\docType{class}
\name{dbFrame-class}
\alias{dbFrame-class}
\alias{dbFrame}
\title{Debarcoding frame class}
\description{
This class represents the data returned by and used throughout debarcoding.
}
\details{
Objects of class \code{dbFrame} hold all data required for debarcoding:
\enumerate{
\item as the initial step of single-cell deconvolution,
\code{\link{assignPrelim}} will return a \code{dbFrame} containing
the input measurement data, barcoding scheme, and preliminary assignments.
\item assignments will be made final by \code{\link{applyCutoffs}}.
Optionally, population-specific separation cutoffs may be estimated
by running \code{\link{estCutoffs}} prior to this.
\item \code{\link{plotYields}}, \code{\link{plotEvents}} and
\code{\link{plotMahal}} aim to guide deconvolution parameter selection,
and to give a sense of the resulting barcode assignment quality.}
\code{show(dbFrame)} will display \itemize{
\item the dimensionality of the measurement data and number of barcodes
\item current assignments in order of decreasing population size
\item current separation cutoffs
\item the mean & per-population yield that'll be achieved upon debarcoding}
}
\section{Slots}{
\describe{
\item{\code{exprs}}{a matrix containing raw intensities of the input flowFrame.}
\item{\code{bc_key}}{binary barcoding scheme with numeric masses as column names and
samples names as row names OR a numeric vector of barcode masses.}
\item{\code{bc_ids}}{vector of barcode IDs. If a barcoding scheme is supplied, the respective
binary code's row name, else, the mass of the respective barcode channel.}
\item{\code{deltas}}{numeric vector of separations between positive and negative
barcode populations computed from normalized barcode intensities.}
\item{\code{normed_bcs}}{matrix containing normalized barcode intensities.}
\item{\code{mhl_dists}}{mahalanobis distances.}
\item{\code{sep_cutoffs}}{numeric vector of distance separation cutoffs between positive and negative
barcode populations above which events will be unassigned.}
\item{\code{mhl_cutoff}}{non-negative and non-zero numeric value specifying the
Mahalanobis distance below which events will be unassigned.}
\item{\code{counts}}{matrix of dimension (# barcodes)x(101) where each row contains the number
of events within a barcode for which positive and negative populations
are separated by a distance between in [0,0.01), ..., [0.99,1], respectively.}
\item{\code{yields}}{a matrix of dimension (# barcodes)x(101) where each row contains the
percentage of events within a barcode that will be obtained after applying
a separation cutoff of 0, 0.01, ..., 1, respectively.}
}}
\author{
Helena Lucia Crowell \email{crowellh@student.ethz.ch}
}
|
create_plot3 <- function()
{
  # Plot the three energy sub-metering series for 2007-02-01..2007-02-02
  # and save the figure as plot3.png (480 x 480 pixels).
  #
  # Reads "household_power_consumption.txt" (semicolon-separated) from the
  # working directory; returns nothing, called for its side effect.
  dtPower <- read.table("household_power_consumption.txt", header=TRUE, sep=";" , stringsAsFactors = FALSE)
  # Parse the Date column once instead of four times inside subset().
  obsDate <- as.Date(as.character(dtPower$Date), "%d/%m/%Y")
  # Keep only observations between 2007-02-01 and 2007-02-02 (inclusive).
  dtPowerSs <- subset(dtPower, obsDate >= as.Date("2007-02-01") & obsDate <= as.Date("2007-02-02"))
  # Combine date and time into a single timestamp column for the x axis.
  dtPowerSs$DateTime <- strptime(paste(dtPowerSs$Date, dtPowerSs$Time), format = "%d/%m/%Y %H:%M:%S")
  # Open the PNG device with the required dimensions.
  png(file="plot3.png", width=480, height=480)
  # Sub_metering_1 as a black line; x-axis label suppressed.
  plot(dtPowerSs$DateTime, dtPowerSs$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering", col = "black")
  # Overlay Sub_metering_2 (red) and Sub_metering_3 (blue).
  lines(dtPowerSs$DateTime, dtPowerSs$Sub_metering_2, col="red")
  lines(dtPowerSs$DateTime, dtPowerSs$Sub_metering_3, col="blue")
  # Legend matching the three series.
  legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), cex = 1, lty = c(1,1,1))
  dev.off()
}
|
/plot3.r
|
no_license
|
sanchal/ExData_Plotting1
|
R
| false
| false
| 1,377
|
r
|
create_plot3 <- function()
{
  # Plot the three energy sub-metering series for 2007-02-01..2007-02-02
  # and save the figure as plot3.png (480 x 480 pixels).
  #
  # Reads "household_power_consumption.txt" (semicolon-separated) from the
  # working directory; returns nothing, called for its side effect.
  dtPower <- read.table("household_power_consumption.txt", header=TRUE, sep=";" , stringsAsFactors = FALSE)
  # Parse the Date column once instead of four times inside subset().
  obsDate <- as.Date(as.character(dtPower$Date), "%d/%m/%Y")
  # Keep only observations between 2007-02-01 and 2007-02-02 (inclusive).
  dtPowerSs <- subset(dtPower, obsDate >= as.Date("2007-02-01") & obsDate <= as.Date("2007-02-02"))
  # Combine date and time into a single timestamp column for the x axis.
  dtPowerSs$DateTime <- strptime(paste(dtPowerSs$Date, dtPowerSs$Time), format = "%d/%m/%Y %H:%M:%S")
  # Open the PNG device with the required dimensions.
  png(file="plot3.png", width=480, height=480)
  # Sub_metering_1 as a black line; x-axis label suppressed.
  plot(dtPowerSs$DateTime, dtPowerSs$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering", col = "black")
  # Overlay Sub_metering_2 (red) and Sub_metering_3 (blue).
  lines(dtPowerSs$DateTime, dtPowerSs$Sub_metering_2, col="red")
  lines(dtPowerSs$DateTime, dtPowerSs$Sub_metering_3, col="blue")
  # Legend matching the three series.
  legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), cex = 1, lty = c(1,1,1))
  dev.off()
}
|
library(jsonlite)
# Parse the exported Google location history file.
history <- fromJSON("location.json")
# Pull out the locations data frame.
loc <- history$locations
# Timestamps arrive as POSIX milliseconds; convert to POSIXct.
loc$time <- as.POSIXct(as.numeric(history$locations$timestampMs) / 1000, origin = "1970-01-01")
# Coordinates are stored as integers in E7 format; scale to decimal degrees.
loc$lat <- loc$latitudeE7 / 1e7
loc$lon <- loc$longitudeE7 / 1e7
library(ggplot2)
library(ggmap)
# Fetch a toner-style basemap and overlay the recorded positions.
basemap <- get_map(c(-122.414191, 37.776366), zoom = 15, source = 'stamen', maptype = "toner")
ggmap(basemap) + geom_point(data = loc, aes(x = lon, y = lat), alpha = 0.5, color = "red") +
  theme(legend.position = "right") +
  labs(
    x = "Longitude",
    y = "Latitude",
    title = "Location history data points",
    caption = "\nA simple point plot shows recorded positions.")
|
/heatmap.R
|
no_license
|
tejasrr19/location-history
|
R
| false
| false
| 809
|
r
|
library(jsonlite)
# Parse the exported Google location history file.
history <- fromJSON("location.json")
# Pull out the locations data frame.
loc <- history$locations
# Timestamps arrive as POSIX milliseconds; convert to POSIXct.
loc$time <- as.POSIXct(as.numeric(history$locations$timestampMs) / 1000, origin = "1970-01-01")
# Coordinates are stored as integers in E7 format; scale to decimal degrees.
loc$lat <- loc$latitudeE7 / 1e7
loc$lon <- loc$longitudeE7 / 1e7
library(ggplot2)
library(ggmap)
# Fetch a toner-style basemap and overlay the recorded positions.
basemap <- get_map(c(-122.414191, 37.776366), zoom = 15, source = 'stamen', maptype = "toner")
ggmap(basemap) + geom_point(data = loc, aes(x = lon, y = lat), alpha = 0.5, color = "red") +
  theme(legend.position = "right") +
  labs(
    x = "Longitude",
    y = "Latitude",
    title = "Location history data points",
    caption = "\nA simple point plot shows recorded positions.")
|
maximizeInterpolant <- function( x, y )
# maximizeInterpolant: written by Aaron Lun
#
# Takes an ordered set of spline points `x` and a likelihood matrix `y` where
# each row corresponds to a tag and each column to a spline point. Returns,
# for each row, the position at which the maximum interpolated likelihood
# occurs, found by solving the derivative of the spline in compiled code.
{
    if (is.vector(y)) {
        y <- rbind(y)
        # Fixed typo in the original message ("coverting").
        warning("converting vector of likelihoods to matrix format for interpolation")
    }
    if (length(x) != ncol(y)) {
        stop("number of columns must equal number of spline points")
    } else if (is.unsorted(x) || anyDuplicated(x)) {
        stop("spline points must be unique and sorted")
    }
    # The C routine expects double storage for both inputs.
    if (!is.double(x)) storage.mode(x) <- "double"
    if (!is.double(y)) storage.mode(y) <- "double"
    # Transpose so each tag's likelihoods are contiguous in memory.
    out <- .Call("R_maximize_interpolant", x, t(y), PACKAGE="edgeR")
    # The C code signals failure by returning an error string.
    if (is.character(out)) { stop(out) }
    return(out)
}
|
/R/maximizeInterpolant.R
|
no_license
|
genome-vendor/r-bioc-edger
|
R
| false
| false
| 1,002
|
r
|
maximizeInterpolant <- function( x, y )
# maximizeInterpolant: written by Aaron Lun
#
# Takes an ordered set of spline points `x` and a likelihood matrix `y` where
# each row corresponds to a tag and each column to a spline point. Returns,
# for each row, the position at which the maximum interpolated likelihood
# occurs, found by solving the derivative of the spline in compiled code.
{
    if (is.vector(y)) {
        y <- rbind(y)
        # Fixed typo in the original message ("coverting").
        warning("converting vector of likelihoods to matrix format for interpolation")
    }
    if (length(x) != ncol(y)) {
        stop("number of columns must equal number of spline points")
    } else if (is.unsorted(x) || anyDuplicated(x)) {
        stop("spline points must be unique and sorted")
    }
    # The C routine expects double storage for both inputs.
    if (!is.double(x)) storage.mode(x) <- "double"
    if (!is.double(y)) storage.mode(y) <- "double"
    # Transpose so each tag's likelihoods are contiguous in memory.
    out <- .Call("R_maximize_interpolant", x, t(y), PACKAGE="edgeR")
    # The C code signals failure by returning an error string.
    if (is.character(out)) { stop(out) }
    return(out)
}
|
#loading partial data
#install.packages("sqldf")
#install.packages("downloader")
library(sqldf)
library(downloader)
# Download the zipped data set and unzip it into the working directory.
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download(url, dest="dataset.zip", mode="wb")
unzip("dataset.zip", exdir=".")
# Use sqldf to read only the two days of interest straight from the file
# (dates are matched as character strings in d/m/yyyy form).
fi <- file("household_power_consumption.txt")
df <- sqldf("select * from fi where Date in ('1/2/2007','2/2/2007')", file.format = list(header = TRUE, sep = ";"))#treating Date as characters
close(fi)
# Draw the histogram on screen, then copy it to plot1.png.
windows()  # Windows-only device; use x11()/quartz() on other platforms
hist(df$Global_active_power, col="red", main="Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.copy(png, file="plot1.png")
# BUG FIX: the original referenced `dev.off` without parentheses, which only
# prints the function and never closes the PNG device, leaving the file
# incomplete.
dev.off()
|
/plot1.R
|
no_license
|
lzcheng/ExData_Plotting1
|
R
| false
| false
| 792
|
r
|
#loading partial data
#install.packages("sqldf")
#install.packages("downloader")
library(sqldf)
library(downloader)
# Download the zipped data set and unzip it into the working directory.
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download(url, dest="dataset.zip", mode="wb")
unzip("dataset.zip", exdir=".")
# Use sqldf to read only the two days of interest straight from the file
# (dates are matched as character strings in d/m/yyyy form).
fi <- file("household_power_consumption.txt")
df <- sqldf("select * from fi where Date in ('1/2/2007','2/2/2007')", file.format = list(header = TRUE, sep = ";"))#treating Date as characters
close(fi)
# Draw the histogram on screen, then copy it to plot1.png.
windows()  # Windows-only device; use x11()/quartz() on other platforms
hist(df$Global_active_power, col="red", main="Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.copy(png, file="plot1.png")
# BUG FIX: the original referenced `dev.off` without parentheses, which only
# prints the function and never closes the PNG device, leaving the file
# incomplete.
dev.off()
|
# Generate KMZ overlays (Google Earth) for the global species-richness and
# gap-richness rasters. library() is used instead of require() so a missing
# package fails loudly instead of returning FALSE.
library(plotKML); library(raster)
out_dir <- "/mnt/workspace_cluster_6/KML"
src.dir <- paste("/curie_data/ncastaneda/gap-analysis/gap_rice/_scripts", sep="") # !!! change accordingly !!!
SP_RICH_DIR <- "/mnt/workspace_cluster_6/tiles/_models4web/global_species_richness"
GAP_RICH_DIR <- "/mnt/workspace_cluster_6/tiles/_models4web/global_gap_richness"
# Species richness: mask zero/negative cells, refresh min/max, set WGS84 CRS.
sp_rich <- raster(paste0(SP_RICH_DIR, "/", "global_species_richness.asc"))
sp_rich[which(sp_rich[] == 0)] <- NA
sp_rich[which(sp_rich[] < 0)] <- NA
sp_rich <- setMinMax(sp_rich)
proj4string(sp_rich) <- CRS("+proj=longlat +datum=WGS84")
# Gap richness: same cleaning steps.
gap_rich <- raster(paste0(GAP_RICH_DIR, "/", "global_gap_richness.asc"))
gap_rich[which(gap_rich[] == 0)] <- NA
gap_rich[which(gap_rich[] < 0)] <- NA
gap_rich <- setMinMax(gap_rich)
proj4string(gap_rich) <- CRS("+proj=longlat +datum=WGS84")
# Output folders (created on first run).
SP_RICH_OUT_PATH <- paste0(out_dir, "/", "global_species_richness"); if (!file.exists(SP_RICH_OUT_PATH)) {dir.create(SP_RICH_OUT_PATH)}
GAP_RICH_OUT_PATH <- paste0(out_dir, "/", "global_gap_richness"); if (!file.exists(GAP_RICH_OUT_PATH)) {dir.create(GAP_RICH_OUT_PATH)}
# Colour scales: 6 classes for gap richness, 7 for species richness.
F1 <- c("#ED801D","#5AED1D","#1DED80","#1D5AED","#B01DED","#ED1D5A") #6
G1 <- c("#ED801D","#77ED1D","#1DED75","#1D83ED","#391DED","#EC1DED","#ED1D3C") #7
# Write KML + PNG, bundle into a KMZ, then remove the intermediate files.
setwd(SP_RICH_OUT_PATH)
kml(sp_rich, file.name="global_species_richness.kml", raster_name="global_species_richness.png", colour_scale=G1, plot.legend=TRUE, kmz=FALSE, layer.name="Collecting hotspots map", png.width=7640, png.height=1600)
zip("global_species_richness.kmz", files=c("global_species_richness.png","global_species_richness.kml","global_species_richness_legend.png"))
file.remove(c("global_species_richness.png","global_species_richness.kml","global_species_richness_legend.png"))
setwd(GAP_RICH_OUT_PATH)
kml(gap_rich, file.name="global_gap_richness.kml", raster_name="global_gap_richness.png", colour_scale=F1, plot.legend=TRUE, kmz=FALSE, layer.name="Collecting hotspots map", png.width=7640, png.height=1600)
zip("global_gap_richness.kmz", files=c("global_gap_richness.png","global_gap_richness.kml","global_gap_richness_legend.png"))
file.remove(c("global_gap_richness.png","global_gap_richness.kml","global_gap_richness_legend.png"))
rm(list=ls(all=TRUE)); quit("yes")
|
/KML_GLOBAL.R
|
no_license
|
ccsosa/GIS_ANALYSIS
|
R
| false
| false
| 2,181
|
r
|
# Generate KMZ overlays (Google Earth) for the global species-richness and
# gap-richness rasters. library() is used instead of require() so a missing
# package fails loudly instead of returning FALSE.
library(plotKML); library(raster)
out_dir <- "/mnt/workspace_cluster_6/KML"
src.dir <- paste("/curie_data/ncastaneda/gap-analysis/gap_rice/_scripts", sep="") # !!! change accordingly !!!
SP_RICH_DIR <- "/mnt/workspace_cluster_6/tiles/_models4web/global_species_richness"
GAP_RICH_DIR <- "/mnt/workspace_cluster_6/tiles/_models4web/global_gap_richness"
# Species richness: mask zero/negative cells, refresh min/max, set WGS84 CRS.
sp_rich <- raster(paste0(SP_RICH_DIR, "/", "global_species_richness.asc"))
sp_rich[which(sp_rich[] == 0)] <- NA
sp_rich[which(sp_rich[] < 0)] <- NA
sp_rich <- setMinMax(sp_rich)
proj4string(sp_rich) <- CRS("+proj=longlat +datum=WGS84")
# Gap richness: same cleaning steps.
gap_rich <- raster(paste0(GAP_RICH_DIR, "/", "global_gap_richness.asc"))
gap_rich[which(gap_rich[] == 0)] <- NA
gap_rich[which(gap_rich[] < 0)] <- NA
gap_rich <- setMinMax(gap_rich)
proj4string(gap_rich) <- CRS("+proj=longlat +datum=WGS84")
# Output folders (created on first run).
SP_RICH_OUT_PATH <- paste0(out_dir, "/", "global_species_richness"); if (!file.exists(SP_RICH_OUT_PATH)) {dir.create(SP_RICH_OUT_PATH)}
GAP_RICH_OUT_PATH <- paste0(out_dir, "/", "global_gap_richness"); if (!file.exists(GAP_RICH_OUT_PATH)) {dir.create(GAP_RICH_OUT_PATH)}
# Colour scales: 6 classes for gap richness, 7 for species richness.
F1 <- c("#ED801D","#5AED1D","#1DED80","#1D5AED","#B01DED","#ED1D5A") #6
G1 <- c("#ED801D","#77ED1D","#1DED75","#1D83ED","#391DED","#EC1DED","#ED1D3C") #7
# Write KML + PNG, bundle into a KMZ, then remove the intermediate files.
setwd(SP_RICH_OUT_PATH)
kml(sp_rich, file.name="global_species_richness.kml", raster_name="global_species_richness.png", colour_scale=G1, plot.legend=TRUE, kmz=FALSE, layer.name="Collecting hotspots map", png.width=7640, png.height=1600)
zip("global_species_richness.kmz", files=c("global_species_richness.png","global_species_richness.kml","global_species_richness_legend.png"))
file.remove(c("global_species_richness.png","global_species_richness.kml","global_species_richness_legend.png"))
setwd(GAP_RICH_OUT_PATH)
kml(gap_rich, file.name="global_gap_richness.kml", raster_name="global_gap_richness.png", colour_scale=F1, plot.legend=TRUE, kmz=FALSE, layer.name="Collecting hotspots map", png.width=7640, png.height=1600)
zip("global_gap_richness.kmz", files=c("global_gap_richness.png","global_gap_richness.kml","global_gap_richness_legend.png"))
file.remove(c("global_gap_richness.png","global_gap_richness.kml","global_gap_richness_legend.png"))
rm(list=ls(all=TRUE)); quit("yes")
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.7) predicting the
# first column of the urinary-tract training set from the feature columns,
# and append the fitted path summary to the model log.
library(glmnet)
# BUG FIX: the original used `head=T`, which only works through partial
# argument matching of read.table's `header` argument; spelled out here.
mydata <- read.table("./TrainingSet/LassoBIC/urinary_tract.csv", header=TRUE, sep=",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds=10, type.measure="mae", alpha=0.7, family="gaussian", standardize=TRUE)
# NOTE(review): the output file name says 075 but alpha is 0.7 — confirm intended.
sink('./Model/EN/Lasso/urinary_tract/urinary_tract_075.txt', append=TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Lasso/urinary_tract/urinary_tract_075.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 371
|
r
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.7) predicting the
# first column of the urinary-tract training set from the feature columns,
# and append the fitted path summary to the model log.
library(glmnet)
# BUG FIX: the original used `head=T`, which only works through partial
# argument matching of read.table's `header` argument; spelled out here.
mydata <- read.table("./TrainingSet/LassoBIC/urinary_tract.csv", header=TRUE, sep=",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds=10, type.measure="mae", alpha=0.7, family="gaussian", standardize=TRUE)
# NOTE(review): the output file name says 075 but alpha is 0.7 — confirm intended.
sink('./Model/EN/Lasso/urinary_tract/urinary_tract_075.txt', append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chunk-embeddings.R
\name{nlp_chunk_embeddings}
\alias{nlp_chunk_embeddings}
\title{Spark NLP ChunkEmbeddings}
\usage{
nlp_chunk_embeddings(x, input_cols, output_col, pooling_strategy = NULL,
uid = random_string("chunk_embeddings_"))
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{input_cols}{Input columns. String array.}
\item{output_col}{Output column. String.}
\item{pooling_strategy}{Choose how you would like to aggregate Word Embeddings to Sentence Embeddings: AVERAGE or SUM}
\item{uid}{A character string used to uniquely identify the ML estimator.}
\item{...}{Optional arguments, see Details.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns an instance of a \code{ml_estimator} object. The object contains a pointer to
a Spark \code{Estimator} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the NLP estimator appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, an estimator is constructed then
immediately fit with the input \code{tbl_spark}, returning an NLP model.
}
}
\description{
Spark ML transformer that utilizes WordEmbeddings or BertEmbeddings to generate chunk embeddings from either Chunker,
NGramGenerator, or NerConverter outputs.
See \url{https://nlp.johnsnowlabs.com/docs/en/annotators#chunkembeddings}
}
|
/man/nlp_chunk_embeddings.Rd
|
permissive
|
mstei4176/sparknlp
|
R
| false
| true
| 1,666
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chunk-embeddings.R
\name{nlp_chunk_embeddings}
\alias{nlp_chunk_embeddings}
\title{Spark NLP ChunkEmbeddings}
\usage{
nlp_chunk_embeddings(x, input_cols, output_col, pooling_strategy = NULL,
uid = random_string("chunk_embeddings_"))
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{input_cols}{Input columns. String array.}
\item{output_col}{Output column. String.}
\item{pooling_strategy}{Choose how you would like to aggregate Word Embeddings to Sentence Embeddings: AVERAGE or SUM}
\item{uid}{A character string used to uniquely identify the ML estimator.}
\item{...}{Optional arguments, see Details.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns an instance of a \code{ml_estimator} object. The object contains a pointer to
a Spark \code{Estimator} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the NLP estimator appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, an estimator is constructed then
immediately fit with the input \code{tbl_spark}, returning an NLP model.
}
}
\description{
Spark ML transformer that utilizes WordEmbeddings or BertEmbeddings to generate chunk embeddings from either Chunker,
NGramGenerator, or NerConverter outputs.
See \url{https://nlp.johnsnowlabs.com/docs/en/annotators#chunkembeddings}
}
|
# library -----------------------------------------------------------------
library(tidyr)
library(dplyr)
library(lsa)
library(purrr)
library(magrittr)
library(tibble)
library(gplots)
# loading data ------------------------------------------------------------
# load() brings the objects stored in total_tidy.rda into this environment;
# object names are not visible here -- TODO confirm what the file provides.
load("data/crypto/total_tidy.rda")
# Years processed by the relation-extraction loop below.
years <- 2007:2018
pmax.abs <- function(x, y) {
  # Element-wise pick of whichever value has the larger absolute magnitude.
  # Ties keep the value from `y`, matching the original behaviour.
  picked <- y
  from_x <- abs(x) > abs(y)
  picked[from_x] <- x[from_x]
  return(picked)
}
cos_vectors <- function(x1,x2,y1,y2){
  # Returns the 2-D dot product (x1, y1) . (x2, y2), vectorised over all
  # four arguments. NOTE(review): despite the name this is only the cosine
  # of the angle when the inputs are unit-normalised -- confirm with callers.
  x1 * x2 + y1 * y2
}
calculate_cosine <- function(x, y){
# Build the full Ticker x Ticker cross of `x` and attach, for each ordered
# pair, the normalised volume components from `x` (first ticker) and `y`
# (second ticker), then score each pair with cos_vectors().
#
# Args:
#   x, y: data frames with columns Ticker, vol_x_normal, vol_y_normal
#         -- assumed from the columns referenced below; TODO confirm.
# Returns: a tibble with one row per ordered ticker pair (self-pairs
#   included) and a cos_vecs score column.
# purrr::cross2 yields every ordered pair of tickers.
res <- cross2(x$Ticker, x$Ticker) %>%
do.call(what = rbind) %>% set_colnames(c("Ticker", "Ticker2")) %>%
as_tibble() %>% mutate(Ticker = as.character(Ticker), Ticker2 = as.character(Ticker2)) %>%
# Attach the first ticker's columns from x ...
left_join(x, by = "Ticker") %>%
# ... and look up the second ticker's normalised volumes in y via match().
mutate(vol_y_normal_2 = y$vol_y_normal[match(Ticker2, y$Ticker)]) %>%
mutate(vol_x_normal_2 = y$vol_x_normal[match(Ticker2, y$Ticker)]) %>%
mutate(cos_vecs = cos_vectors(vol_x_normal, vol_x_normal_2, vol_y_normal, vol_y_normal_2))
return(res)
}
# For each year: load the precomputed daily multilag cosine results, count
# how often each ordered ticker pair exhibits each (first, second) relation,
# keep only relations observed on more than half of the pair's open days,
# and save the result to disk.
for (year in years) {
load_dir <- paste0("data/crypto/multilag/daily_cosinus_multilag3_", year, ".rda")
# load() introduces `daily_res_multilag` (name inferred from the usage on
# the next line -- a list of per-day data frames; TODO confirm).
load(load_dir)
multilags_dt <- bind_rows(daily_res_multilag)
# Count occurrences of each concatenated relation code per ticker pair.
relation_counts <- multilags_dt %>% select(Ticker, Ticker2, first, second) %>%
mutate(relation = paste0(first, second)) %>%
select(Ticker, Ticker2, relation) %>%
group_by(Ticker, Ticker2, relation) %>%
summarise(coun_relation = n())
# A relation is "valid" when it holds on a majority of the days the pair
# was observed; self-pairs are excluded.
valid_relations <- relation_counts %>%
group_by(Ticker, Ticker2) %>%
filter(Ticker != Ticker2) %>%
mutate(open_days = sum(coun_relation)) %>%
filter(coun_relation > open_days / 2)
save_dir <- paste0("data/crypto/multilag/valid_relations3_", year, ".rda")
save(valid_relations, file = save_dir)
}
|
/R/crypto/8-multilag-adjacancy.R
|
no_license
|
baharAfshari/networkS-P500
|
R
| false
| false
| 1,877
|
r
|
# library -----------------------------------------------------------------
library(tidyr)
library(dplyr)
library(lsa)
library(purrr)
library(magrittr)
library(tibble)
library(gplots)
# loading data ------------------------------------------------------------
# load() brings the objects stored in total_tidy.rda into this environment;
# object names are not visible here -- TODO confirm what the file provides.
load("data/crypto/total_tidy.rda")
# Years processed by the relation-extraction loop below.
years <- 2007:2018
pmax.abs <- function(x, y) {
  # Element-wise pick of whichever value has the larger absolute magnitude.
  # Ties keep the value from `y`, matching the original behaviour.
  picked <- y
  from_x <- abs(x) > abs(y)
  picked[from_x] <- x[from_x]
  return(picked)
}
cos_vectors <- function(x1,x2,y1,y2){
  # Returns the 2-D dot product (x1, y1) . (x2, y2), vectorised over all
  # four arguments. NOTE(review): despite the name this is only the cosine
  # of the angle when the inputs are unit-normalised -- confirm with callers.
  x1 * x2 + y1 * y2
}
calculate_cosine <- function(x, y){
# Build the full Ticker x Ticker cross of `x` and attach, for each ordered
# pair, the normalised volume components from `x` (first ticker) and `y`
# (second ticker), then score each pair with cos_vectors().
#
# Args:
#   x, y: data frames with columns Ticker, vol_x_normal, vol_y_normal
#         -- assumed from the columns referenced below; TODO confirm.
# Returns: a tibble with one row per ordered ticker pair (self-pairs
#   included) and a cos_vecs score column.
# purrr::cross2 yields every ordered pair of tickers.
res <- cross2(x$Ticker, x$Ticker) %>%
do.call(what = rbind) %>% set_colnames(c("Ticker", "Ticker2")) %>%
as_tibble() %>% mutate(Ticker = as.character(Ticker), Ticker2 = as.character(Ticker2)) %>%
# Attach the first ticker's columns from x ...
left_join(x, by = "Ticker") %>%
# ... and look up the second ticker's normalised volumes in y via match().
mutate(vol_y_normal_2 = y$vol_y_normal[match(Ticker2, y$Ticker)]) %>%
mutate(vol_x_normal_2 = y$vol_x_normal[match(Ticker2, y$Ticker)]) %>%
mutate(cos_vecs = cos_vectors(vol_x_normal, vol_x_normal_2, vol_y_normal, vol_y_normal_2))
return(res)
}
# For each year: load the precomputed daily multilag cosine results, count
# how often each ordered ticker pair exhibits each (first, second) relation,
# keep only relations observed on more than half of the pair's open days,
# and save the result to disk.
for (year in years) {
load_dir <- paste0("data/crypto/multilag/daily_cosinus_multilag3_", year, ".rda")
# load() introduces `daily_res_multilag` (name inferred from the usage on
# the next line -- a list of per-day data frames; TODO confirm).
load(load_dir)
multilags_dt <- bind_rows(daily_res_multilag)
# Count occurrences of each concatenated relation code per ticker pair.
relation_counts <- multilags_dt %>% select(Ticker, Ticker2, first, second) %>%
mutate(relation = paste0(first, second)) %>%
select(Ticker, Ticker2, relation) %>%
group_by(Ticker, Ticker2, relation) %>%
summarise(coun_relation = n())
# A relation is "valid" when it holds on a majority of the days the pair
# was observed; self-pairs are excluded.
valid_relations <- relation_counts %>%
group_by(Ticker, Ticker2) %>%
filter(Ticker != Ticker2) %>%
mutate(open_days = sum(coun_relation)) %>%
filter(coun_relation > open_days / 2)
save_dir <- paste0("data/crypto/multilag/valid_relations3_", year, ".rda")
save(valid_relations, file = save_dir)
}
|
getNBPData <- function(year=2019){
  # Fetch the NBP (National Bank of Poland) table-A exchange-rate archive for
  # `year` (data available from 2013 onwards). Results are cached in a local
  # CSV; the cache is reused only when it was written today.
  #
  # Returns: a data.frame of daily rates (empty data.frame on failure or for
  #   years before 2013).
  ret <- data.frame()
  if(year>=2013){
    fileName <- paste0(year,"_NBP_data.csv")
    # Best-effort cache read: any error just falls through to a download.
    try({
      if(file.exists(fileName)){
        if(as.Date(file.info(fileName)$mtime)==Sys.Date()){
          cat(paste("Reading data from local file\n"))
          # BUG FIX: the original passed `stringsAsFactor=F`, a misspelling
          # of `stringsAsFactors` that only works via partial matching.
          ret<-read.table(file=fileName,sep=";",dec=",",header=TRUE,stringsAsFactors=FALSE)
          # NOTE(review): gsub removes every "X" in the names, not just the
          # prefix added by read.table -- confirm no name contains an X.
          colnames(ret) <- gsub("X","",colnames(ret))
          return(ret)
        }
      }
    })
    cat(paste("Downloading data\n"))
    res <- try({
      d <- readLines(paste0("https://www.nbp.pl/kursy/Archiwum/archiwum_tab_a_",year,".csv"))
      # Drop the second header line and the four footer lines.
      d <- d[-2]
      d <- d[-c((length(d)-3):length(d))]
      # Column names come from the first line; the last two fields are junk.
      tmpColnames <- strsplit(d[1],";",useBytes=TRUE)[[1]]
      tmpColnames <- tmpColnames[-c((length(tmpColnames)-1):length(tmpColnames))]
      # Parse each remaining line: drop the trailing two fields, convert
      # decimal commas to dots, and bind the rows into a numeric matrix.
      d <- do.call("rbind",
                   lapply(strsplit(d[-1],";"),
                          function(x){
                            matrix(as.numeric(gsub(",",".",x[-c((length(x)-1):length(x))])),nrow=1)
                          })
      )
      colnames(d) <- tmpColnames
      d <- as.data.frame(d)
      d$data <- as.Date(as.character(d$data),format="%Y%m%d")
      ret <- d
      # Refresh the local cache.
      write.table(ret,file=fileName,sep=";",dec=",",row.names=FALSE)
    },silent=TRUE)
    if(inherits(res,"try-error")){
      cat(paste("An error occurred while downloading data!!!\n"))
    }
  }
  return(ret)
}
#--------------------------------------
#ret <- getNBPData(2020)
|
/projekt zaliczeniowy/pobieranie_danych.r
|
no_license
|
mwasielewski15/NBP_app
|
R
| false
| false
| 1,533
|
r
|
getNBPData <- function(year=2019){
  # Fetch the NBP (National Bank of Poland) table-A exchange-rate archive for
  # `year` (data available from 2013 onwards). Results are cached in a local
  # CSV; the cache is reused only when it was written today.
  #
  # Returns: a data.frame of daily rates (empty data.frame on failure or for
  #   years before 2013).
  ret <- data.frame()
  if(year>=2013){
    fileName <- paste0(year,"_NBP_data.csv")
    # Best-effort cache read: any error just falls through to a download.
    try({
      if(file.exists(fileName)){
        if(as.Date(file.info(fileName)$mtime)==Sys.Date()){
          cat(paste("Reading data from local file\n"))
          # BUG FIX: the original passed `stringsAsFactor=F`, a misspelling
          # of `stringsAsFactors` that only works via partial matching.
          ret<-read.table(file=fileName,sep=";",dec=",",header=TRUE,stringsAsFactors=FALSE)
          # NOTE(review): gsub removes every "X" in the names, not just the
          # prefix added by read.table -- confirm no name contains an X.
          colnames(ret) <- gsub("X","",colnames(ret))
          return(ret)
        }
      }
    })
    cat(paste("Downloading data\n"))
    res <- try({
      d <- readLines(paste0("https://www.nbp.pl/kursy/Archiwum/archiwum_tab_a_",year,".csv"))
      # Drop the second header line and the four footer lines.
      d <- d[-2]
      d <- d[-c((length(d)-3):length(d))]
      # Column names come from the first line; the last two fields are junk.
      tmpColnames <- strsplit(d[1],";",useBytes=TRUE)[[1]]
      tmpColnames <- tmpColnames[-c((length(tmpColnames)-1):length(tmpColnames))]
      # Parse each remaining line: drop the trailing two fields, convert
      # decimal commas to dots, and bind the rows into a numeric matrix.
      d <- do.call("rbind",
                   lapply(strsplit(d[-1],";"),
                          function(x){
                            matrix(as.numeric(gsub(",",".",x[-c((length(x)-1):length(x))])),nrow=1)
                          })
      )
      colnames(d) <- tmpColnames
      d <- as.data.frame(d)
      d$data <- as.Date(as.character(d$data),format="%Y%m%d")
      ret <- d
      # Refresh the local cache.
      write.table(ret,file=fileName,sep=";",dec=",",row.names=FALSE)
    },silent=TRUE)
    if(inherits(res,"try-error")){
      cat(paste("An error occurred while downloading data!!!\n"))
    }
  }
  return(ret)
}
#--------------------------------------
#ret <- getNBPData(2020)
|
\name{filling.d}
\alias{filling.d}
\title{Fill in missing values}
\description{
Filling in missing values of a daily timeSeries object
}
\usage{
filling.d(x)
}
\arguments{
\item{x}{daily timeSeries object}
}
\details{See McLeod, Hipel and Camacho (1983) for further details.}
\value{daily timeSeries object}
\references{
McLeod, A.I., Hipel, K.W. and Camacho, F. (1983),
Trend assessment of water quality time series, Water Resources Bulletin,
19 537-547.
}
\author{
Hyukjun Gweon and A.I. McLeod
}
|
/man/filling.d.Rd
|
no_license
|
cran/sltl
|
R
| false
| false
| 526
|
rd
|
\name{filling.d}
\alias{filling.d}
\title{Fill in missing values}
\description{
Filling in missing values of a daily timeSeries object
}
\usage{
filling.d(x)
}
\arguments{
\item{x}{daily timeSeries object}
}
\details{See McLeod, Hipel and Camacho (1983) for further details.}
\value{daily timeSeries object}
\references{
McLeod, A.I., Hipel, K.W. and Camacho, F. (1983),
Trend assessment of water quality time series, Water Resources Bulletin,
19 537-547.
}
\author{
Hyukjun Gweon and A.I. McLeod
}
|
create_covars <- function(endTime="2020-04-27",predTime=NULL) {
  # Build a daily covariate table (time index, number of tests performed)
  # from Ontario COVID-19 testing data, optionally extended with a constant
  # testing rate out to `predTime` for forecasting.
  #
  # Args:
  #   endTime:  last observed date to include ("yyyy-mm-dd" string or Date).
  #   predTime: if non-NULL, extend the table to this date assuming 10,000
  #             tests per day.
  # Returns: data.frame with columns `time` (days since 2019-12-31) and
  #   `tests` (estimated daily test counts).
  t0 <- as.Date("2019-12-31")
  tf <- as.Date(endTime)
  dat <- read.csv("../samples/covidtesting.csv", header=TRUE)
  dat$Reported.Date <- as.Date(dat$Reported.Date)
  dat <- subset(dat, Reported.Date>as.Date("2020-02-03") & Reported.Date<=tf)
  # Express dates as integer day offsets from t0.
  dat$Reported.Date <- as.numeric(as.Date(dat$Reported.Date))-as.numeric(t0)
  dat <- subset(dat, select=c("Reported.Date",
                              "Total.patients.approved.for.testing.as.of.Reporting.Date",
                              "Under.Investigation"))
  colnames(dat) <- c("Date","TotalTests","Pending")
  time <- seq(1,dat$Date[length(dat$Date)])
  # Interpolate cumulative test counts onto a complete daily grid.
  total_tests <- approx(dat$Date,dat$TotalTests,xout=time,method="linear")$y
  total_tests <- ceiling(total_tests)
  pending <- rep(0,length(time))
  pending[dat$Date] <- dat$Pending
  # Daily tests = non-negative first difference of the cumulative counts;
  # the first day is unknown and stays NA (vectorized form of the original
  # index loop).
  tests <- c(NA, pmax(diff(total_tests), 0))
  # Two-point moving average. BUG FIX: qualified as stats::filter so the
  # call does not break when dplyr masks filter() in the calling session.
  tests <- ceiling(stats::filter(tests, rep(1/2,2)))
  # The smoother leaves the last value NA; carry the previous day forward.
  tests[length(tests)] <- tests[length(tests)-1]
  data <- data.frame(time,ceiling(as.numeric(tests)))
  colnames(data) <- c("time","tests")
  if (!is.null(predTime)) {
    predTime <- as.Date(predTime)
    # Future days get a constant 10,000 tests/day assumption.
    add_time <- seq(as.numeric(tf)+1,as.numeric(predTime))-as.numeric(t0)
    add_tests <- rep(10e3,length(add_time))
    add_data <- cbind(add_time,add_tests)
    colnames(add_data) <- c("time","tests")
    data <- rbind(data,add_data)
    data <- as.data.frame(data)
  }
  return(data)
}
|
/R/CreateCovars.R
|
no_license
|
17nak1/pomp
|
R
| false
| false
| 1,550
|
r
|
create_covars <- function(endTime="2020-04-27",predTime=NULL) {
  # Build a daily covariate table (time index, number of tests performed)
  # from Ontario COVID-19 testing data, optionally extended with a constant
  # testing rate out to `predTime` for forecasting.
  #
  # Args:
  #   endTime:  last observed date to include ("yyyy-mm-dd" string or Date).
  #   predTime: if non-NULL, extend the table to this date assuming 10,000
  #             tests per day.
  # Returns: data.frame with columns `time` (days since 2019-12-31) and
  #   `tests` (estimated daily test counts).
  t0 <- as.Date("2019-12-31")
  tf <- as.Date(endTime)
  dat <- read.csv("../samples/covidtesting.csv", header=TRUE)
  dat$Reported.Date <- as.Date(dat$Reported.Date)
  dat <- subset(dat, Reported.Date>as.Date("2020-02-03") & Reported.Date<=tf)
  # Express dates as integer day offsets from t0.
  dat$Reported.Date <- as.numeric(as.Date(dat$Reported.Date))-as.numeric(t0)
  dat <- subset(dat, select=c("Reported.Date",
                              "Total.patients.approved.for.testing.as.of.Reporting.Date",
                              "Under.Investigation"))
  colnames(dat) <- c("Date","TotalTests","Pending")
  time <- seq(1,dat$Date[length(dat$Date)])
  # Interpolate cumulative test counts onto a complete daily grid.
  total_tests <- approx(dat$Date,dat$TotalTests,xout=time,method="linear")$y
  total_tests <- ceiling(total_tests)
  pending <- rep(0,length(time))
  pending[dat$Date] <- dat$Pending
  # Daily tests = non-negative first difference of the cumulative counts;
  # the first day is unknown and stays NA (vectorized form of the original
  # index loop).
  tests <- c(NA, pmax(diff(total_tests), 0))
  # Two-point moving average. BUG FIX: qualified as stats::filter so the
  # call does not break when dplyr masks filter() in the calling session.
  tests <- ceiling(stats::filter(tests, rep(1/2,2)))
  # The smoother leaves the last value NA; carry the previous day forward.
  tests[length(tests)] <- tests[length(tests)-1]
  data <- data.frame(time,ceiling(as.numeric(tests)))
  colnames(data) <- c("time","tests")
  if (!is.null(predTime)) {
    predTime <- as.Date(predTime)
    # Future days get a constant 10,000 tests/day assumption.
    add_time <- seq(as.numeric(tf)+1,as.numeric(predTime))-as.numeric(t0)
    add_tests <- rep(10e3,length(add_time))
    add_data <- cbind(add_time,add_tests)
    colnames(add_data) <- c("time","tests")
    data <- rbind(data,add_data)
    data <- as.data.frame(data)
  }
  return(data)
}
|
data = scan()
M = matrix(data, ncol=5, byrow=T)
y = M[,2]
x1 = M[,3]
x2 = M[,3]
x1 = M[,3]
x2 = M[,4]
x3 = M[,5]
model = lm(y~x1+x2+x3)
model
Call:
lm(formula = y ~ x1 + x2 + x3)
Coefficients:
(Intercept) x1 x2 x3
158.491 -1.142 -0.442 -13.470
Y = 158.491-1.142 x1 -0.442x2 -13.470 x3
b2 interpretation
If x2 is incremented by 1 unit, then Y is decreased by 0.442
summary(model)
Call:
lm(formula = y ~ x1 + x2 + x3)
Residuals:
Min 1Q Median 3Q Max
-18.3524 -6.4230 0.5196 8.3715 17.1601
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 158.4913 18.1259 8.744 5.26e-11 ***
x1 -1.1416 0.2148 -5.315 3.81e-06 ***
x2 -0.4420 0.4920 -0.898 0.3741
x3 -13.4702 7.0997 -1.897 0.0647 .
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 10.06 on 42 degrees of freedom
Multiple R-squared: 0.6822, Adjusted R-squared: 0.6595
F-statistic: 30.05 on 3 and 42 DF, p-value: 1.542e-10
#coefficient of multiple determination indicates that 68% of the variation in Y
#can be explained using this model.
|
/code.R
|
no_license
|
shahrukh-ak/Determining-the-relationship-between-patient-satisfaction-age-severity-of-illness-anxiety-level
|
R
| false
| false
| 1,111
|
r
|
data = scan()
M = matrix(data, ncol=5, byrow=T)
y = M[,2]
x1 = M[,3]
x2 = M[,3]
x1 = M[,3]
x2 = M[,4]
x3 = M[,5]
model = lm(y~x1+x2+x3)
model
Call:
lm(formula = y ~ x1 + x2 + x3)
Coefficients:
(Intercept) x1 x2 x3
158.491 -1.142 -0.442 -13.470
Y = 158.491-1.142 x1 -0.442x2 -13.470 x3
b2 interpretation
If x2 is increased by 1 unit, holding x1 and x3 constant, then Y is expected to decrease by 0.442
summary(model)
Call:
lm(formula = y ~ x1 + x2 + x3)
Residuals:
Min 1Q Median 3Q Max
-18.3524 -6.4230 0.5196 8.3715 17.1601
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 158.4913 18.1259 8.744 5.26e-11 ***
x1 -1.1416 0.2148 -5.315 3.81e-06 ***
x2 -0.4420 0.4920 -0.898 0.3741
x3 -13.4702 7.0997 -1.897 0.0647 .
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 10.06 on 42 degrees of freedom
Multiple R-squared: 0.6822, Adjusted R-squared: 0.6595
F-statistic: 30.05 on 3 and 42 DF, p-value: 1.542e-10
#coefficient of multiple determination indicates that 68% of the variation in Y
#can be explained using this model.
|
#' @title Wind Resource Shiny UI
#'
#' @description
#' Shiny UI definition for the Wind Resource application: a sidebar with
#' controls for the plot type, the grouping variable and the anemometers to
#' include, plus a main panel with "Plot" and "Tables" tabs.
#' (FIX: the previous roxygen header was copied from an unrelated
#' "Complete numbers" function and did not describe this file.)
#'
#' @author Valeria Gogni, Mariano Bonoli, Ruben Bufanio, Diego Edwards
shinyUI(pageWithSidebar(
  # Application title
  headerPanel("Wind Resource"),
  # Sidebar with controls to select the plot type, the grouping variable
  # and which anemometers to include
  sidebarPanel(
    selectInput("type", "Type:",
                list(
                  "Histogram" = "histogram",
                  "Rose" = "rose",
                  "Boxplot" = "boxplot",
                  "Correlation" = "correlation",
                  "Turbulence" = "turbulence",
                  "Fit" = "fit")),
    selectInput("by", "By:",
                list("None" = "none",
                     "Month" = "month",
                     "Hour" = "hour")),
    # htmlOutput("selectUI"),
    # Use TRUE rather than the reassignable shorthand T
    checkboxInput("Ane1", "Ane1", TRUE),
    checkboxInput("Ane2", "Ane2", TRUE),
    submitButton("Update View"),
    downloadButton("downloadData", "Download")
  ),
  # Main panel: overall caption plus tabs holding the plots and tables
  mainPanel(
    h3(textOutput("caption")),
    tabsetPanel(
      tabPanel("Plot",
               h4(textOutput("captionP1")),
               plotOutput("plot1"),
               h4(textOutput("captionP2")),
               plotOutput("plot2")
      ),
      tabPanel("Tables",
               h4(textOutput("captionT1")),
               htmlOutput("table1"),
               h4(textOutput("captionT2")),
               htmlOutput("table2"),
               h4(textOutput("captionT3")),
               htmlOutput("table3")
      )
    )
  )))
|
/inst/shiny/ui.R
|
no_license
|
dedwardsmolina/WindResource
|
R
| false
| false
| 1,800
|
r
|
#' @title Wind Resource Shiny UI
#'
#' @description
#' Shiny UI definition for the Wind Resource application: a sidebar with
#' controls for the plot type, the grouping variable and the anemometers to
#' include, plus a main panel with "Plot" and "Tables" tabs.
#' (FIX: the previous roxygen header was copied from an unrelated
#' "Complete numbers" function and did not describe this file.)
#'
#' @author Valeria Gogni, Mariano Bonoli, Ruben Bufanio, Diego Edwards
shinyUI(pageWithSidebar(
  # Application title
  headerPanel("Wind Resource"),
  # Sidebar with controls to select the plot type, the grouping variable
  # and which anemometers to include
  sidebarPanel(
    selectInput("type", "Type:",
                list(
                  "Histogram" = "histogram",
                  "Rose" = "rose",
                  "Boxplot" = "boxplot",
                  "Correlation" = "correlation",
                  "Turbulence" = "turbulence",
                  "Fit" = "fit")),
    selectInput("by", "By:",
                list("None" = "none",
                     "Month" = "month",
                     "Hour" = "hour")),
    # htmlOutput("selectUI"),
    # Use TRUE rather than the reassignable shorthand T
    checkboxInput("Ane1", "Ane1", TRUE),
    checkboxInput("Ane2", "Ane2", TRUE),
    submitButton("Update View"),
    downloadButton("downloadData", "Download")
  ),
  # Main panel: overall caption plus tabs holding the plots and tables
  mainPanel(
    h3(textOutput("caption")),
    tabsetPanel(
      tabPanel("Plot",
               h4(textOutput("captionP1")),
               plotOutput("plot1"),
               h4(textOutput("captionP2")),
               plotOutput("plot2")
      ),
      tabPanel("Tables",
               h4(textOutput("captionT1")),
               htmlOutput("table1"),
               h4(textOutput("captionT2")),
               htmlOutput("table2"),
               h4(textOutput("captionT3")),
               htmlOutput("table3")
      )
    )
  )))
|
#---------------------------------------
#
# Compute monthly and yearly mean cumulative precipitation
# from monthly ARCIS grids (one .asc raster per year/month)
#
#---------------------------------------
# libraries
#-------------------------
library(raster)
library(sp)
library(rgdal)
#--------------------------
# monthly files, one per year, under ../PREC/mensili_ARCIS
#--------------------------
cartella <- "../PREC/mensili_ARCIS/"
print(cartella)
# Years included in the average. BUG FIX: the original summed the rasters for
# 2002 plus 2003:2019 (18 years) but divided by a hard-coded 17; the divisor
# is now derived from this vector so sum and divisor always agree.
anni <- 2002:2019
for (mese in c("01","02","03","04","05","06","07","08","09","10","11","12")) {
  # Sum this month's raster over all years, then divide by the year count.
  PRECI <- raster(paste(cartella, "/PRECI_", anni[1], mese, ".asc", sep = ""))
  for (anno in anni[-1]) {
    PRECInext <- raster(paste(cartella, "/PRECI_", anno, mese, ".asc", sep = ""))
    PRECI <- sum(PRECI, PRECInext)
  }
  PRECI <- PRECI / length(anni)
  # write the monthly mean cumulative precipitation
  writeRaster(PRECI,
              file = paste("../PREC/medie/CUMULATA_MEDIA_", mese, ".asc", sep = ""),
              format = "ascii", overwrite = TRUE)
} # end month loop
# mean annual cumulative precipitation = sum of the 12 monthly means
PRECI <- raster("../PREC/medie/CUMULATA_MEDIA_01.asc")
for (mese in c("02","03","04","05","06","07","08","09","10","11","12")) {
  PRECInext <- raster(paste("../PREC/medie/CUMULATA_MEDIA_", mese, ".asc", sep = ""))
  PRECI <- sum(PRECI, PRECInext)
}
writeRaster(PRECI, file = "../PREC/medie/CUMULATA_MEDIA_ANNUA.asc",
            format = "ascii", overwrite = TRUE)
# mean cumulative precipitation April-September
PRECI <- raster("../PREC/medie/CUMULATA_MEDIA_04.asc")
for (mese in c("05","06","07","08","09")) {
  PRECInext <- raster(paste("../PREC/medie/CUMULATA_MEDIA_", mese, ".asc", sep = ""))
  PRECI <- sum(PRECI, PRECInext)
}
writeRaster(PRECI, file = "../PREC/medie/CUMULATA_MEDIA_AMGLAS.asc",
            format = "ascii", overwrite = TRUE)
q()
|
/scriptR/media_mese_PREC.R
|
no_license
|
ARPASMR/nocciolo
|
R
| false
| false
| 2,153
|
r
|
#---------------------------------------
#
# Compute monthly and yearly mean cumulative precipitation
# from monthly ARCIS grids (one .asc raster per year/month)
#
#---------------------------------------
# libraries
#-------------------------
library(raster)
library(sp)
library(rgdal)
#--------------------------
# monthly files, one per year, under ../PREC/mensili_ARCIS
#--------------------------
cartella <- "../PREC/mensili_ARCIS/"
print(cartella)
# Years included in the average. BUG FIX: the original summed the rasters for
# 2002 plus 2003:2019 (18 years) but divided by a hard-coded 17; the divisor
# is now derived from this vector so sum and divisor always agree.
anni <- 2002:2019
for (mese in c("01","02","03","04","05","06","07","08","09","10","11","12")) {
  # Sum this month's raster over all years, then divide by the year count.
  PRECI <- raster(paste(cartella, "/PRECI_", anni[1], mese, ".asc", sep = ""))
  for (anno in anni[-1]) {
    PRECInext <- raster(paste(cartella, "/PRECI_", anno, mese, ".asc", sep = ""))
    PRECI <- sum(PRECI, PRECInext)
  }
  PRECI <- PRECI / length(anni)
  # write the monthly mean cumulative precipitation
  writeRaster(PRECI,
              file = paste("../PREC/medie/CUMULATA_MEDIA_", mese, ".asc", sep = ""),
              format = "ascii", overwrite = TRUE)
} # end month loop
# mean annual cumulative precipitation = sum of the 12 monthly means
PRECI <- raster("../PREC/medie/CUMULATA_MEDIA_01.asc")
for (mese in c("02","03","04","05","06","07","08","09","10","11","12")) {
  PRECInext <- raster(paste("../PREC/medie/CUMULATA_MEDIA_", mese, ".asc", sep = ""))
  PRECI <- sum(PRECI, PRECInext)
}
writeRaster(PRECI, file = "../PREC/medie/CUMULATA_MEDIA_ANNUA.asc",
            format = "ascii", overwrite = TRUE)
# mean cumulative precipitation April-September
PRECI <- raster("../PREC/medie/CUMULATA_MEDIA_04.asc")
for (mese in c("05","06","07","08","09")) {
  PRECInext <- raster(paste("../PREC/medie/CUMULATA_MEDIA_", mese, ".asc", sep = ""))
  PRECI <- sum(PRECI, PRECInext)
}
writeRaster(PRECI, file = "../PREC/medie/CUMULATA_MEDIA_AMGLAS.asc",
            format = "ascii", overwrite = TRUE)
q()
|
# Example from the plsRglm package: fit a plsR model on the Cornell data
# and print its coefficients.
library(plsRglm)
### Name: print.coef.plsRmodel
### Title: Print method for plsR models
### Aliases: print.coef.plsRmodel
### Keywords: methods print
### ** Examples
data(Cornell)
predictors <- Cornell[, 1:7]
response <- Cornell[, 8]
fitted_pls <- plsRglm(response, predictors, 3, modele = "pls")
class(fitted_pls)
print(coef(fitted_pls))
# Tidy up the example objects
rm(list = c("predictors", "response", "fitted_pls"))
|
/data/genthat_extracted_code/plsRglm/examples/print.coef.plsRmodel.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 362
|
r
|
# Example from the plsRglm package: fit a plsR model on the Cornell data
# and print its coefficients.
library(plsRglm)
### Name: print.coef.plsRmodel
### Title: Print method for plsR models
### Aliases: print.coef.plsRmodel
### Keywords: methods print
### ** Examples
data(Cornell)
predictors <- Cornell[, 1:7]
response <- Cornell[, 8]
fitted_pls <- plsRglm(response, predictors, 3, modele = "pls")
class(fitted_pls)
print(coef(fitted_pls))
# Tidy up the example objects
rm(list = c("predictors", "response", "fitted_pls"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BorrowAction.R
\name{BorrowAction}
\alias{BorrowAction}
\title{BorrowAction}
\usage{
BorrowAction(id = NULL, target = NULL, startTime = NULL, result = NULL,
participant = NULL, object = NULL, location = NULL, instrument = NULL,
error = NULL, endTime = NULL, agent = NULL, actionStatus = NULL,
lender = NULL, url = NULL, sameAs = NULL, potentialAction = NULL,
name = NULL, mainEntityOfPage = NULL, image = NULL, identifier = NULL,
disambiguatingDescription = NULL, description = NULL,
alternateName = NULL, additionalType = NULL, toLocation = NULL,
fromLocation = NULL)
}
\arguments{
\item{id}{identifier for the object (URI)}
\item{target}{(EntryPoint type.) Indicates a target EntryPoint for an Action.}
\item{startTime}{(DateTime or DateTime type.) The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to start. For actions that span a period of time, when the action was performed. e.g. John wrote a book from *January* to December.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{result}{(Thing type.) The result produced in the action. e.g. John wrote *a book*.}
\item{participant}{(Person or Organization type.) Other co-agents that participated in the action indirectly. e.g. John wrote a book with *Steve*.}
\item{object}{(Thing type.) The object upon which the action is carried out, whose state is kept intact or changed. Also known as the semantic roles patient, affected or undergoer (which change their state) or theme (which doesn't). e.g. John read *a book*.}
\item{location}{(Text or PostalAddress or Place or Text or PostalAddress or Place or Text or PostalAddress or Place type.) The location of for example where the event is happening, an organization is located, or where an action takes place.}
\item{instrument}{(Thing type.) The object that helped the agent perform the action. e.g. John wrote a book with *a pen*.}
\item{error}{(Thing type.) For failed actions, more information on the cause of the failure.}
\item{endTime}{(DateTime or DateTime type.) The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to end. For actions that span a period of time, when the action was performed. e.g. John wrote a book from January to *December*.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{agent}{(Person or Organization type.) The direct performer or driver of the action (animate or inanimate). e.g. *John* wrote a book.}
\item{actionStatus}{(ActionStatusType type.) Indicates the current disposition of the Action.}
\item{lender}{(Person or Organization type.) A sub property of participant. The person that lends the object being borrowed.}
\item{url}{(URL type.) URL of the item.}
\item{sameAs}{(URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.}
\item{potentialAction}{(Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.}
\item{name}{(Text type.) The name of the item.}
\item{mainEntityOfPage}{(URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.}
\item{image}{(URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].}
\item{identifier}{(URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.}
\item{disambiguatingDescription}{(Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.}
\item{description}{(Text type.) A description of the item.}
\item{alternateName}{(Text type.) An alias for the item.}
\item{additionalType}{(URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.}
\item{toLocation}{(Place or Place or Place or Place type.) A sub property of location. The final location of the object or the agent after the action.}
\item{fromLocation}{(Place or Place or Place type.) A sub property of location. The original location of the object or the agent before the action.}
}
\value{
a list object corresponding to a schema:BorrowAction
}
\description{
The act of obtaining an object under an agreement to return it at a later date. Reciprocal of LendAction.Related actions:* [[LendAction]]: Reciprocal of BorrowAction.
}
|
/man/BorrowAction.Rd
|
no_license
|
cboettig/schemar
|
R
| false
| true
| 5,623
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BorrowAction.R
\name{BorrowAction}
\alias{BorrowAction}
\title{BorrowAction}
\usage{
BorrowAction(id = NULL, target = NULL, startTime = NULL, result = NULL,
participant = NULL, object = NULL, location = NULL, instrument = NULL,
error = NULL, endTime = NULL, agent = NULL, actionStatus = NULL,
lender = NULL, url = NULL, sameAs = NULL, potentialAction = NULL,
name = NULL, mainEntityOfPage = NULL, image = NULL, identifier = NULL,
disambiguatingDescription = NULL, description = NULL,
alternateName = NULL, additionalType = NULL, toLocation = NULL,
fromLocation = NULL)
}
\arguments{
\item{id}{identifier for the object (URI)}
\item{target}{(EntryPoint type.) Indicates a target EntryPoint for an Action.}
\item{startTime}{(DateTime or DateTime type.) The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to start. For actions that span a period of time, when the action was performed. e.g. John wrote a book from *January* to December.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{result}{(Thing type.) The result produced in the action. e.g. John wrote *a book*.}
\item{participant}{(Person or Organization type.) Other co-agents that participated in the action indirectly. e.g. John wrote a book with *Steve*.}
\item{object}{(Thing type.) The object upon which the action is carried out, whose state is kept intact or changed. Also known as the semantic roles patient, affected or undergoer (which change their state) or theme (which doesn't). e.g. John read *a book*.}
\item{location}{(Text or PostalAddress or Place or Text or PostalAddress or Place or Text or PostalAddress or Place type.) The location of for example where the event is happening, an organization is located, or where an action takes place.}
\item{instrument}{(Thing type.) The object that helped the agent perform the action. e.g. John wrote a book with *a pen*.}
\item{error}{(Thing type.) For failed actions, more information on the cause of the failure.}
\item{endTime}{(DateTime or DateTime type.) The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to end. For actions that span a period of time, when the action was performed. e.g. John wrote a book from January to *December*.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{agent}{(Person or Organization type.) The direct performer or driver of the action (animate or inanimate). e.g. *John* wrote a book.}
\item{actionStatus}{(ActionStatusType type.) Indicates the current disposition of the Action.}
\item{lender}{(Person or Organization type.) A sub property of participant. The person that lends the object being borrowed.}
\item{url}{(URL type.) URL of the item.}
\item{sameAs}{(URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.}
\item{potentialAction}{(Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.}
\item{name}{(Text type.) The name of the item.}
\item{mainEntityOfPage}{(URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.}
\item{image}{(URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].}
\item{identifier}{(URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.}
\item{disambiguatingDescription}{(Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.}
\item{description}{(Text type.) A description of the item.}
\item{alternateName}{(Text type.) An alias for the item.}
\item{additionalType}{(URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.}
\item{toLocation}{(Place or Place or Place or Place type.) A sub property of location. The final location of the object or the agent after the action.}
\item{fromLocation}{(Place or Place or Place type.) A sub property of location. The original location of the object or the agent before the action.}
}
\value{
a list object corresponding to a schema:BorrowAction
}
\description{
The act of obtaining an object under an agreement to return it at a later date. Reciprocal of LendAction.Related actions:* [[LendAction]]: Reciprocal of BorrowAction.
}
|
#' Calculate Weighted Standard Deviation
#'
#' Function to calculate the (unbiased, "reliability"-weighted) standard
#' deviation of a set of observations.
#'
#' @param x The observations to calculate the standard deviation from.
#' @param w The weights associated with each observation.
#' @param na.rm If \code{TRUE}, observations with an \code{NA} value or an
#'   \code{NA} weight are removed before the calculation.
weighted.sd <- function(x, w, na.rm = FALSE){
  # BUG FIX: the final sum() previously ignored na.rm, so a single NA in x
  # returned NA even with na.rm = TRUE (and the partial sums disagreed on
  # which elements they covered). Drop incomplete pairs up front instead.
  if(na.rm){
    keep <- !(is.na(x) | is.na(w))
    x <- x[keep]
    w <- w[keep]
  }
  sum.w <- sum(w)
  sum.w2 <- sum(w^2)
  mean.w <- sum(x * w) / sum.w
  # Unbiased weighted variance: sum.w / (sum.w^2 - sum(w^2)) * sum(w*(x-mu)^2)
  x.sd.w <- sqrt((sum.w / (sum.w^2 - sum.w2)) * sum(w * (x - mean.w)^2))
  return(x.sd.w)
}
#' Wilcox Location Parameter
#'
#' Hodges-Lehmann style location estimate: the median of the observations
#' together with all pairwise averages (the Walsh averages).
wilcox.loc <- function(vec, na.rm = FALSE){
  # With fewer than three observations the pseudo-median reduces to the mean.
  if(length(vec) <= 2){
    return(mean(vec, na.rm = na.rm))
  }
  # All pairwise averages plus the observations themselves.
  pair_means <- combn(vec, 2, function(p) mean(p, na.rm = na.rm))
  walsh <- sort(c(vec, pair_means))
  median(walsh, na.rm = na.rm)
}
#' Cohen's d
#'
#' Function to calculate Cohen's d effect size for the difference between the
#' means of two samples, using a pooled standard deviation.
#'
#' @param x,y Numeric vectors of observations for the two groups.
#' @param na.rm If \code{TRUE}, \code{NA} values are removed first.
cohens_d <- function(x, y, na.rm = TRUE) {
  if(na.rm){
    x <- x[!is.na(x)]
    y <- y[!is.na(y)]
  }
  n.x <- length(x) - 1
  n.y <- length(y) - 1
  mean.diff <- abs(mean(x) - mean(y))
  # BUG FIX: scalar if() conditions should use the short-circuit && operator,
  # not the elementwise & (which errors for length > 1 inputs in recent R).
  if(n.x == 0 && n.y > 0) {
    # Only y carries spread information; n.y * var(y) / n.y == var(y).
    common.sd <- sqrt(n.y * var(y)/n.y)
  } else if (n.x > 0 && n.y == 0){
    common.sd <- sqrt(n.x * var(x)/n.x)
  } else if (n.x > 0 && n.y > 0) {
    # Pooled standard deviation of both samples.
    common.sd <- sqrt((n.x * var(x) + n.y * var(y))/(n.x + n.y))
  } else {
    # One observation per group: fall back to half the sd of the pair.
    common.sd <- sd(c(x, y)) / 2
  }
  return(mean.diff/common.sd)
}
#' Default Weights for Projection Sources
#'
#' These are the weights that are used for each source when calculating weighted
#' averages and standard deviations if no weights are specified. A source with
#' weight 0 is ignored in weighted calculations unless it is a player's only
#' source, in which case its weight is forced to 1 (see \code{points_sd}).
#' \code{c(CBS = 0.344, Yahoo = 0.400, ESPN = 0.329, NFL = 0.329,
#' FFToday = 0.379, NumberFire = 0.322, FantasyPros = 0.000,
#' FantasySharks = 0.327, FantasyFootballNerd = 0.000,
#' Walterfootball = 0.281, RTSports = 0.330,
#' FantasyData = 0.428, Fleaflicker = 0.428)}
default_weights <- c(CBS = 0.344, Yahoo = 0.400, ESPN = 0.329, NFL = 0.329,
                     FFToday = 0.379, NumberFire = 0.322, FantasyPros = 0.000,
                     FantasySharks= 0.327, FantasyFootballNerd = 0.000,
                     Walterfootball = 0.281, RTSports= 0.330,
                     FantasyData= 0.428, Fleaflicker = 0.428)
# Helper functions to calculate the quantiles and standard deviations for the
# source points. Used in the points_sd and confidence_interval functions.
# Each list holds one function per averaging type (average / robust /
# weighted); invoke_map pairs them POSITIONALLY with the argument lists below,
# so the two lists must stay in the same order.
# NOTE(review): Hmisc::wtd.quantile is wrapped in purrr::possibly so a failure
# yields NaN floor/ceiling values instead of an error -- presumably to survive
# degenerate weight vectors; confirm against callers.
quant_funcs <- list(average = quantile, robust = quantile,
                    weighted = purrr::possibly(Hmisc::wtd.quantile, c(`5%` = NaN, `95%` = NaN)))
# Matching argument lists: the 5% and 95% quantiles; the weighted variant
# additionally requests Hmisc's "i/n" quantile type.
quant_args <- list(list(probs = c(0.05, 0.95)), list(probs = c(0.05, 0.95)),
                   list(probs = c(0.05, 0.95), type = "i/n"))
# Apply all three quantile functions to a points vector pts with weights wt.
get_quant <- function(pts, wt)invoke_map(quant_funcs, quant_args, x = pts, na.rm = TRUE, weights = wt)
# Standard deviation per averaging type: plain sd, MAD for the robust case,
# and the weighted.sd helper defined above (weights ignored by the first two).
sd_funcs <- list(average = function(x, w, na.rm)sd(x, na.rm = na.rm),
                 robust = function(x, w, na.rm)mad(x, na.rm = na.rm),
                 weighted = weighted.sd)
sd_args <- list(list(na.rm = TRUE), list(na.rm = TRUE), list(na.rm = TRUE))
get_sd <- function(pts, wt)invoke_map(sd_funcs, sd_args, x = pts, w = wt)
#' Calculate Source Points
#'
#' Function to calculate the projected points for each source.
#'
#' @param data_result An output from the \link{scrape_data} function.
#' @param scoring_rules The scoring rules to be used.
#' @return A tibble with one row per position / data source / player id and
#'   the total projected points for that combination.
source_points <- function(data_result, scoring_rules){
  scoring_tbl <- make_scoring_tbl(scoring_rules)
  # Long format: one row per player / source / stat column.
  long_result <- data_result %>%
    stats_by_category() %>%
    map(gather, "data_col", "stat_value", -c(id, data_src, pos)) %>%
    bind_rows()
  dst_pt_allow <- NULL
  if("dst" %in% names(scoring_rules))
    dst_pt_allow <- scoring_rules[[c("dst", "dst_pts_allowed")]]
  # DST points-allowed is scored via brackets when no explicit rule exists.
  dst_bracket <- is.null(dst_pt_allow) & !is.null(scoring_rules$pts_bracket)
  dst_src <- long_result %>% slice(0) %>% add_column(points = 0)
  if(dst_bracket){
    # BUG FIX: this previously referenced the undefined global `scoring`
    # instead of the `scoring_rules` argument.
    dst_src <- long_result %>% filter(data_col == "dst_pts_allowed") %>%
      mutate(points = ffanalytics:::dst_points(stat_value, scoring_rules$pts_bracket))
  }
  long_result %>%
    inner_join(scoring_tbl, by = c("pos", "data_col")) %>%
    mutate(points = stat_value * points) %>%
    bind_rows(dst_src) %>%
    group_by(pos, data_src, id) %>%
    summarise(points = sum(points, na.rm = TRUE)) %>% ungroup()
}
# Generate a (data_src, weight) lookup table from a source points table.
# Falls back to default_weights, restricted to the sources present in
# src_pts, when no weights are supplied.
# NOTE(review): relies on the deprecated as.tibble() keeping the vector's
# names available for rownames_to_column() to pick up -- confirm this still
# holds with current tibble versions before upgrading dependencies.
weights_from_src <- function(src_pts, weights = NULL){
  if(is.null(weights)){
    weights <- default_weights[unique(src_pts$data_src)]
  }
  weights %>% as.tibble() %>%
    `names<-`("weight") %>% rownames_to_column('data_src')
}
#' Calculate Standard Deviations for Projected Points
#'
#' This function calculates the standard deviation for projected points from
#' different sources, once per averaging type (average / robust / weighted).
#'
#' @param src_pts An output from the \link{source_points} function.
#' @param weights A named vector with the weights from each source.
#' See \link{default_weights}
#' @return A long tibble with columns pos, id, avg_type and sd_pts.
points_sd <- function(src_pts, weights = NULL){
  weight_tbl <- weights_from_src(src_pts, weights)
  src_pts %>% inner_join(weight_tbl, by = "data_src") %>%
    group_by(id) %>%
    # A player with a single zero-weighted source would otherwise have an
    # all-zero weight vector, so that lone weight is forced to 1.
    mutate(n_obs = n(),
           weight = if_else(n_obs == 1 & weight == 0, 1, weight)) %>%
    ungroup() %>% select(-n_obs) %>%
    # Split by position, then by player id, and compute the three standard
    # deviations from the per-source points via get_sd.
    # NOTE(review): split() uses src_pts$pos from the pre-join table -- this
    # assumes the inner_join preserves row order and drops no rows; verify.
    split(src_pts$pos) %>% map(~ split(.x, .x$id)) %>%
    modify_depth(2, ~ get_sd(.x$points, .x$weight)) %>% modify_depth(2, as.tibble) %>%
    modify_depth(1, bind_rows, .id = "id") %>% bind_rows(.id = "pos") %>%
    gather("avg_type", "sd_pts", -id, -pos)
}
#' Calculate the Upper and Lower Limits for Projected Points
#'
#' This function calculates the ceiling and floor for projected points from
#' different sources based on the 5% and 95% quantiles.
#'
#' @param src_pts An output from the \link{source_points} function.
#' @param weights A named vector with the weights from each source.
#' See \link{default_weights}
#' @return A tibble with columns pos, id, avg_type, floor and ceiling.
confidence_interval <- function(src_pts, weights = NULL){
  weight_tbl <- weights_from_src(src_pts, weights)
  src_pts %>% inner_join(weight_tbl, by = "data_src") %>%
    group_by(id) %>%
    # Single zero-weighted source: force that weight to 1 (same rule as
    # points_sd).
    mutate(n_obs = n(),
           weight = if_else(n_obs == 1 & weight == 0, 1, weight)) %>%
    ungroup() %>% select(-n_obs) %>%
    # Per position and player, compute the 5%/95% quantiles for each
    # averaging type via get_quant, then reshape back into one tibble.
    split(src_pts$pos) %>% map(~ split(.x, .x$id)) %>%
    modify_depth(2, ~ get_quant(.x$points, .x$weight)) %>% modify_depth(3, t) %>%
    modify_depth(3, as.tibble) %>% modify_depth(2, bind_rows, .id = "avg_type") %>%
    modify_depth(1, bind_rows, .id = "id") %>% bind_rows(.id = "pos") %>%
    # The weighted quantile column can come back named " 5%" (leading space);
    # fold it into the plain "5%" column before renaming.
    mutate(`5%` = ifelse(is.na(`5%`),` 5%`, `5%`)) %>% select(-` 5%`) %>%
    rename(floor = "5%", ceiling = "95%")
}
#' Aggregate Projected Stats
#'
#' This function aggregates the projected stats collected from each source with
#' the \link{scrape_data} function, producing a robust (Wilcox location),
#' a plain average and a weighted average value for every stat.
#'
#' @param data_result An output from the \link{scrape_data} function.
#' @param src_weights A named vector with the weights from each source.
#' See \link{default_weights}
#' @return A long tibble with columns pos, id, data_col, avg_type, stat_value.
#' @export
aggregate_stats <- function(data_result, src_weights = NULL){
  if(is.null(src_weights)){
    # Default weights restricted to the sources actually present in the data.
    data_src <- data_result %>% map(`[[`, "data_src") %>% reduce(union)
    src_weights <- default_weights[data_src]
  }
  # NOTE(review): same deprecated as.tibble()/rownames pattern as
  # weights_from_src -- verify names survive the conversion.
  weight_tbl <- src_weights %>% as.tibble() %>%
    `names<-`("weight") %>% rownames_to_column('data_src')
  data_result %>% stats_by_category() %>%
    map(inner_join, weight_tbl, by = "data_src") %>%
    map(gather, "data_col", "stat_value",
        -c(id, data_src, pos, weight)) %>%
    bind_rows() %>% group_by(pos, id, data_col) %>%
    # Three aggregates per stat; reshaped to long with avg_type below.
    summarise(robust = wilcox.loc(stat_value, na.rm = TRUE),
              average = mean(stat_value, na.rm = TRUE ),
              weighted = weighted.mean(stat_value, w = weight, na.rm = TRUE)) %>%
    gather("avg_type", "stat_value", -c(id, pos, data_col))
}
#' Calculate Projected Points
#'
#' This function calculates the projected points for each player based on the
#' aggregated stats from the \link{aggregate_stats} function. The resulting table
#' contains the projected points, the position rank and the points drop-off for
#' each player.
#'
#' @param agg_stats An output from the \link{aggregate_stats} function.
#' @param scoring_rules The scoring rules to be used.
projected_points <- function(agg_stats, scoring_rules){
  scoring_tbl <- make_scoring_tbl(scoring_rules)
  dst_pt_allow <- NULL
  if("dst" %in% names(scoring_rules))
    dst_pt_allow <- scoring_rules[[c("dst", "dst_pts_allowed")]]
  # DST points-allowed is scored via brackets when there is no explicit rule.
  dst_bracket <- is.null(dst_pt_allow) & !is.null(scoring_rules$pts_bracket)
  # CLEANUP: the original built an identical, never-used `dst_src` table
  # before `dst_agg`; the dead duplicate has been removed.
  dst_agg <- agg_stats %>% slice(0) %>% add_column(points = 0)
  if(dst_bracket){
    dst_agg <- agg_stats %>% filter(data_col == "dst_pts_allowed") %>%
      mutate(points = ffanalytics:::dst_points(stat_value, scoring_rules$pts_bracket))
  }
  agg_stats %>%
    inner_join(scoring_tbl, by = c("pos", "data_col")) %>%
    mutate(points = stat_value * points) %>%
    bind_rows(dst_agg) %>%
    group_by(pos, avg_type, id) %>%
    # NA when every stat is missing; otherwise the NA-stripped sum.
    summarise(points = if_else(all(is.na(points)), NA_real_, sum(points, na.rm = TRUE))) %>%
    # Rank within pos/avg_type; drop-off vs the mean of the next two players.
    mutate(pos_rank = dense_rank(-points),
           drop_off = points - (lead(points, order_by = pos_rank) +
                                  lead(points, 2, order_by = pos_rank)) / 2) %>%
    ungroup()
}
#' Default VOR Baseline
#'
#' This is the default baseline that is used if not otherwise specified when
#' calculating VOR. Each value is the positional rank treated as the
#' replacement level by \code{set_vor}:
#' \code{c(QB = 13, RB = 35, WR = 36, TE = 13, K = 8, DST = 3, DL = 10, LB = 10, DB = 10)}
default_baseline <- c(QB = 13, RB = 35, WR = 36, TE = 13, K = 8, DST = 3, DL = 10, LB = 10, DB = 10)
#' Calculate VOR
#'
#' This function calculates the VOR based on an output from the
#' \link{projected_points} function, with floor and ceiling columns added
#' from the \link{confidence_interval} function when floor or ceiling VOR
#' is requested.
#'
#' @param points_table An output from the \link{projected_points} function,
#' merged with the output from the \link{confidence_interval} function if
#' floor or ceiling VOR is requested.
#' @param vor_baseline The VOR baseline to be used. If omitted then the
#' \link{default_baseline} will be used.
#' @param vor_var One of \code{c("points", "floor", "ceiling")} indicating which
#' basis is used for the VOR calculation.
set_vor <- function(points_table, vor_baseline = NULL, vor_var = c("points", "floor", "ceiling")){
  if(is.null(vor_baseline))
    vor_baseline <- default_baseline
  vor_var <- match.arg(vor_var)
  # Per-position base value: the mean of vor_var over the players ranked at
  # the baseline rank +/- 1; VOR is each player's value minus that base.
  vor_tbl <- select(points_table, "id", "pos", vor_var) %>%
    rename(vor_var = !!vor_var) %>% group_by(pos) %>%
    mutate(vor_rank = dense_rank(-vor_var), vor_base = vor_baseline[pos]) %>%
    filter(vor_rank >= vor_base - 1 & vor_rank <= vor_base + 1) %>%
    summarise(vor_base = mean(vor_var)) %>% ungroup() %>%
    select(pos, vor_base) %>% inner_join(points_table, by = c("pos")) %>%
    rename(vor_var = !!vor_var) %>%
    mutate(vor = vor_var - vor_base,
           rank = dense_rank(-vor), !!vor_var := vor_var) %>%
    # Prefix the numeric output columns with the basis, e.g. points_vor,
    # points_rank.
    select(id, pos, vor, rank) %>% rename_if(is.numeric, funs(paste(vor_var, ., sep = "_"))) %>%
    ungroup()
  return(vor_tbl)
}
#' Calculate VOR for Points, Ceiling and Floor
#'
#' This function calculates VOR for projected points as well as for the floor
#' and ceiling values, joining all three sets of VOR columns onto the table.
#'
#' @param tbl The output from the \link{projected_points} function that has
#' been merged with the output from the \link{confidence_interval} function.
#' @param vor_baseline The VOR baseline values to be used. If omitted then the
#' \link{default_baseline} will be used.
add_vor <- function(tbl, vor_baseline = NULL){
  # CLEANUP: was accumulate(...)[[4]], which materialises every intermediate
  # table and extracts the last by a magic index; reduce() returns the final
  # accumulated value directly and is otherwise equivalent.
  reduce(c("points", "floor", "ceiling"),
         ~ inner_join(.x, set_vor(.x, vor_baseline, vor_var = .y),
                      by = c("id", "pos")),
         .init = tbl)
}
#' Default Threshold Values for Tiers
#'
#' These are the default threshold values used when applying Cohen's d values
#' to determine tiers. In \code{set_tiers} a new tier starts when the effect
#' size between the current tier's top player and the next player exceeds the
#' position's threshold:
#' \code{c(QB = 1, RB = 1, WR = 1, TE = 1, K = 1, DST = 0.1, DL = 1, DB = 1, LB = 1)}
default_threshold <- c(QB = 1, RB = 1, WR = 1, TE = 1, K = 1, DST = 0.1, DL = 1, DB = 1, LB = 1)
#' Determine Tiers by Position
#'
#' This function determines tiers for each position by applying Cohen's D effect
#' size
#' @param data_tbl An output from the \link{projected_points} function
#' @param d_threshold THe thresholds to use when applying Cohens'd D function to
#' determine the tiers. If omitted then the \link{default_threshold} will be used.
#' @param src_points An output from the \link{source_points} function
# Assign tiers within each position by comparing the current tier's top player
# against the best not-yet-tiered player using Cohen's D effect size.
# data_tbl: output from projected_points (needs pos, id, points, pos_rank)
# d_threshold: named per-position effect-size thresholds (default_threshold if NULL)
# src_points: output from source_points; supplies per-source point samples for cohens_d
set_tiers <- function(data_tbl, d_threshold = NULL, src_points){
  if(is.null(d_threshold))
    d_threshold <- default_threshold
  # Seed: the top-ranked player at each position starts tier 1; all others NA
  tier_tbl <- data_tbl %>% filter(pos %in% names(d_threshold)) %>%
    mutate(dthres = d_threshold[pos], tier = ifelse(pos_rank == 1, 1L, NA))
  repeat{
    before_na <- sum(is.na(tier_tbl$tier))
    tier_tbl <-
      # Representative (highest-points) player of the current tier per position
      tier_tbl %>% group_by(pos) %>% filter(tier == tier[which.max(tier)]) %>%
      summarise(tier_id = first(id, order_by = -points),
                cur_tier = as.integer(max(tier, na.rm = TRUE)),
                dthres= max(dthres, na.rm = TRUE)) %>%
      # Best player at the position with no tier assigned yet
      inner_join(tier_tbl %>% group_by(pos) %>% filter(is.na(tier)) %>%
                   summarise(max_id = first(id, order_by = -points)), by = "pos") %>%
      group_by(pos) %>%
      # Effect size above the threshold starts a new tier; otherwise the player
      # joins the current tier.
      mutate(d_val = cohens_d(src_points[src_points$id == tier_id,]$points,
                              src_points[src_points$id == max_id,]$points),
             tier = ifelse(d_val > dthres, cur_tier + 1L, cur_tier)) %>%
      select(pos, id = max_id, new_tier = tier) %>% right_join(tier_tbl, by = c("pos", "id")) %>%
      mutate(tier = ifelse(is.na(tier) & !is.na(new_tier), new_tier, tier)) %>%
      select(-new_tier)
    after_na <- sum(is.na(tier_tbl$tier))
    # Stop when an iteration makes no progress or everyone has a tier
    if(before_na == after_na | after_na == 0)
      break
  }
  tier_tbl %>% select(-dthres) %>% ungroup()
}
#' Create a Projections Table
#'
#' This function creates the projections table based on the scraped data from the
#' \link{scrape_data} function. The output is a table containing the projected
#' points, confidence intervals, standard deviation for points, and, for seasonal
#' data, the VOR values.
#' @param data_result An output from the \link{scrape_data} function
#' @param scoring_rules The scoring rules to be used for calculations. See
#' \code{vignette("scoring_settings")} on how to define custom scoring settings.
#' If omitted then default \link{scoring} settings will be used.
#' @param src_weights A named vector defining the weights for each source to be
#' used in calculations. If omitted then \link{default_weights} will be used.
#' @param vor_baseline A named vector defining the baseline to use for VOR
#' calculations. If omitted then the \link{default_baseline} will be used.
#' @param tier_thresholds The threshold values to be used when determining tiers.
#' If omitted then the \link{default_threshold} will be used.
#' @export
projections_table <- function(data_result, scoring_rules = NULL, src_weights = NULL,
                              vor_baseline = NULL, tier_thresholds = NULL){
  if(is.null(scoring_rules))
    scoring_rules <- scoring
  # Determine league type (PPR / Half / Std) per position from the points
  # awarded per reception.
  if(scoring_rules$rec$all_pos){
    lg_type <- scoring_rules$rec$rec %>% rep(length(data_result)) %>%
      `names<-`(names(data_result)) %>%
      map_chr(~ case_when(.x > 0.5 ~ "PPR", .x > 0 ~ "Half", TRUE ~ "Std"))
  } else {
    lg_type <- map(scoring_rules$rec[-which(names(scoring_rules$rec) == "all_pos")], `[[`, "rec") %>%
      keep(~ !is.null(.x)) %>%
      map_chr(~ case_when(.x > 0.5 ~ "PPR", .x > 0 ~ "Half", TRUE ~ "Std"))
    # Bug fix: was `<` (a comparison whose result was discarded) instead of
    # `<-`, so positions without reception scoring never got a league type.
    lg_type[setdiff(names(data_result), names(lg_type))] <- "Std"
  }
  # Per-source points and weighted aggregate stats, computed in one pass
  data_list <- invoke_map(list(src_pts = source_points, agg_stats = aggregate_stats),
                          list(list(data_result = data_result, scoring_rules = scoring_rules),
                               list(data_result = data_result, src_weights = src_weights)))
  # Standard deviation plus floor/ceiling quantiles per player and avg_type
  pts_uncertainty <- invoke_map(list(points_sd, confidence_interval),
                                src_pts = data_list$src_pts, weights = src_weights) %>%
    reduce(inner_join, by = c("pos", "id","avg_type"))
  out_df <- data_list$agg_stats %>%
    projected_points(scoring_rules) %>%
    inner_join(pts_uncertainty, by = c("pos", "id","avg_type")) %>%
    group_by(avg_type) %>%
    set_tiers(tier_thresholds, data_list$src_pts) %>%
    ungroup()
  # VOR only applies to seasonal (week == 0) data
  if(attr(data_result, "week") == 0){
    out_df <- out_df %>% split(.$avg_type) %>%
      map(add_vor, vor_baseline = vor_baseline) %>% bind_rows() %>%
      rename(rank = points_rank)
  }
  out_df %>%
    `attr<-`(which = "season", attr(data_result, "season")) %>%
    `attr<-`(which = "week", attr(data_result, "week")) %>%
    `attr<-`(which = "lg_type", lg_type)
}
#' Add ECR to the Projection Table
#'
#' This function will add the ECR values to the projetions table generated from
#' the \link{projections_table} function. It will add the positional ECR, the
#' standard deviation for the positional ECR, and if seasonal data also the
#' overal ECR value
#' @param projection_table An output from the \link{projections_table} function.
#' @export
# Add ECR (expert consensus rank) columns to a projections table: positional
# ECR and its sd always; overall ECR only for seasonal (week == 0) data.
add_ecr <- function(projection_table){
  # Preserve the table attributes; the joins below drop them
  lg_type <- attr(projection_table, "lg_type")
  season <- attr(projection_table, "season")
  week <- attr(projection_table, "week")
  # Positional ECR: one scrape per position, using that position's league type
  ecr_pos <- lg_type %>%
    imap(~ scrape_ecr(rank_period = ifelse(week == 0, "draft", "week"),
                      position = .y, rank_type = .x)) %>%
    map(select, id, pos_ecr = avg, sd_ecr = std_dev) %>% bind_rows()
  projection_table <- left_join(projection_table, ecr_pos, by = "id")
  if(week == 0){
    # Overall ECR uses the "richest" league type present (PPR > Half > Std)
    lg_ov <- ifelse(any(lg_type == "PPR"), "PPR", ifelse(any(lg_type == "Half"), "Half", "Std"))
    ecr_overall <- scrape_ecr(rank_period = "draft", rank_type = lg_ov) %>%
      select(id, ecr = avg)
    projection_table <- left_join(projection_table, ecr_overall, by = "id")
  }
  # Restore attributes on the returned table
  projection_table %>%
    `attr<-`(which = "season", season) %>%
    `attr<-`(which = "week", week) %>%
    `attr<-`(which = "lg_type", lg_type)
}
#' Add ADP to the Projections Table
#'
#' This function will add the ADP data to the projections table from the
#' \link{projections_table} function. It will add the average ADP from the
#' sources specified, and the difference between the overall rank and ADP.
#' @param projection_table An output from the \link{projections_table} function
#' @param sources Which ADP sources should be added. Should be one or more of
#' \code{c("RTS", "CBS", "ESPN", "Yahoo", "NFL", "FFC")}
#' @export
add_adp <- function(projection_table,
                    sources = c("RTS", "CBS", "ESPN", "Yahoo", "NFL", "FFC")){
  sources <- match.arg(sources, several.ok = TRUE)
  # Preserve the table attributes; the join below drops them
  lg_type <- attr(projection_table, "lg_type")
  season <- attr(projection_table, "season")
  week <- attr(projection_table, "week")
  if (week != 0){
    warning("ADP data is not available for weekly data", call. = FALSE)
    return(projection_table)
  }
  # Keep the id column and the last column (the averaged ADP value)
  adp_tbl <- get_adp(sources, type = "ADP") %>% select(1, length(.))
  # Bug fix: rename_at was given `~ function(x) return("adp")`, a formula that
  # yields a function object rather than the name "adp", so the column was
  # never renamed and `adp` below was unresolved. Rename it directly.
  names(adp_tbl)[2] <- "adp"
  projection_table <- left_join(projection_table, adp_tbl, by = "id") %>%
    mutate(adp_diff = rank - adp)
  # Restore attributes on the returned table
  projection_table %>%
    `attr<-`(which = "season", season) %>%
    `attr<-`(which = "week", week) %>%
    `attr<-`(which = "lg_type", lg_type)
}
#' Add AAV to the Projections Table
#'
#' This function will add the AAV (average auction value) data to the
#' projections table from the \link{projections_table} function.
#' @param projection_table An output from the \link{projections_table} function
#' @param sources Which AAV sources should be added. Should be one or more of
#' \code{c("RTS", "ESPN", "Yahoo", "NFL")}
#' @export
add_aav <- function(projection_table,
                    sources = c("RTS", "ESPN", "Yahoo", "NFL")){
  sources <- match.arg(sources, several.ok = TRUE)
  # Preserve the table attributes; the join below drops them
  lg_type <- attr(projection_table, "lg_type")
  season <- attr(projection_table, "season")
  week <- attr(projection_table, "week")
  if (week != 0){
    warning("AAV data is not available for weekly data", call. = FALSE)
    return(projection_table)
  }
  # Keep the id column and the last column (the averaged AAV value)
  aav_tbl <- get_adp(sources, type = "AAV") %>% select(1, length(.))
  # Bug fix: the rename_at call passed a formula returning a function object
  # instead of the name "aav"; rename the column directly.
  names(aav_tbl)[2] <- "aav"
  # Bug fix: the AAV table was computed but never joined, so this function
  # previously returned the input table without any AAV column.
  left_join(projection_table, aav_tbl, by = "id") %>%
    `attr<-`(which = "season", season) %>%
    `attr<-`(which = "week", week) %>%
    `attr<-`(which = "lg_type", lg_type)
}
#' Risk calculation based on two variables
#'
#' Calculation of risk is done by scaling the standard deviation variables
#' passed and averaging them before returning a measure with mean 5 and standard
#' deviation of 2
# Combine two spread measures into a single risk score: each input is
# standardized, missing values are filled from the other measure, the two
# z-scores are averaged row-wise, and the average is rescaled so the result
# has mean 5 and standard deviation 2.
calculate_risk <- function(var1, var2){
  z1 <- scale(as.numeric(var1))
  z2 <- scale(as.numeric(var2))
  # Fill gaps in each standardized vector from the other one
  z1[is.na(z1)] <- z2[is.na(z1)]
  z2[is.na(z2)] <- z1[is.na(z2)]
  combined <- rowMeans(data.frame(z1, z2), na.rm = TRUE)
  2 * scale(combined) + 5
}
#' Add calculated risk to the table
#'
#' Calculation of risk is done by scaling the standard deviation variables
#' passed and averaging them before returning a measure with mean 5 and standard
#' deviation of 2
#' @export
# Add a per-position risk measure derived from the points and ECR standard
# deviations (see calculate_risk); within each position the risk has mean 5
# and standard deviation 2.
add_risk <- function(projection_table){
  # Preserve the table attributes; grouping/mutating below drops them
  lg_type <- attr(projection_table, "lg_type")
  season <- attr(projection_table, "season")
  week <- attr(projection_table, "week")
  projection_table %>%
    group_by(pos) %>%
    # Calculate Risk values
    mutate(risk = calculate_risk(sd_pts, sd_ecr)) %>%
    ungroup() %>%
    `attr<-`(which = "season", season) %>%
    `attr<-`(which = "week", week) %>%
    `attr<-`(which = "lg_type", lg_type)
}
#' Add player information to the table
#'
#' Adds player information to the projections table
#' @export
# Join player metadata (name, team, position, age, experience) from the
# package's player_table onto the projections table, restoring its attributes.
add_player_info <- function(projection_table){
  # Preserve the table attributes; the join below drops them
  lg_type <- attr(projection_table, "lg_type")
  season <- attr(projection_table, "season")
  week <- attr(projection_table, "week")
  # inner_join keeps only players present in player_table
  select(player_table, id, first_name, last_name, team, position, age, exp) %>%
    inner_join(projection_table, by = "id") %>%
    `attr<-`(which = "season", season) %>%
    `attr<-`(which = "week", week) %>%
    `attr<-`(which = "lg_type", lg_type)
}
|
/R/calc_projections.R
|
no_license
|
jeremiahpatrick/ffanalytics
|
R
| false
| false
| 22,524
|
r
|
#' Calculate Weighted Standard Deviation
#'
#' Function to calculate the unbiased weighted standard deviation of a set of
#' observations.
#' @param x The observations to calculate the standard deviation from
#' @param w The weights associated with each observation.
#' @param na.rm If \code{TRUE}, then NA values will be removed.
#' @return The weighted standard deviation of \code{x}.
weighted.sd <- function(x, w, na.rm = FALSE){
  sum.w <- sum(w, na.rm = na.rm)
  sum.w2 <- sum(w^2, na.rm = na.rm)
  # Weighted mean (reuses sum.w rather than recomputing the weight total)
  mean.w <- sum(x * w, na.rm = na.rm) / sum.w
  # Bug fix: the squared-deviation sum previously ignored na.rm, so a single
  # NA observation made the result NA even when na.rm = TRUE.
  x.sd.w <- sqrt((sum.w / (sum.w^2 - sum.w2)) * sum(w * (x - mean.w)^2, na.rm = na.rm))
  return(x.sd.w)
}
#' Wilcox Location Parameter
#'
#' Modified function to calculate Wilcox' Location paramenter
# Hodges-Lehmann style location estimate: the median of all pairwise averages
# combined with the observations themselves. Falls back to the plain mean
# when fewer than three observations are available.
wilcox.loc <- function(vec, na.rm = FALSE){
  if(length(vec) <= 2){
    return(mean(vec, na.rm = na.rm))
  }
  # Averages of every pair of observations
  pair_means <- combn(vec, 2, function(p) mean(p, na.rm = na.rm))
  median(sort(c(vec, pair_means)), na.rm = na.rm)
}
#' Cohen's d
#'
#' Function to calculate Cohen's D value when testing effect size
# Cohen's D effect size: absolute mean difference divided by the pooled
# standard deviation, with fallbacks when one or both groups are singletons.
cohens_d <- function(x, y, na.rm = TRUE) {
  if (na.rm) {
    x <- x[!is.na(x)]
    y <- y[!is.na(y)]
  }
  df_x <- length(x) - 1
  df_y <- length(y) - 1
  effect <- abs(mean(x) - mean(y))
  # Pooled standard deviation over the available degrees of freedom
  if (df_x == 0 & df_y > 0) {
    pooled_sd <- sqrt(df_y * var(y) / df_y)
  } else if (df_x > 0 & df_y == 0) {
    pooled_sd <- sqrt(df_x * var(x) / df_x)
  } else if (df_x > 0 & df_y > 0) {
    pooled_sd <- sqrt((df_x * var(x) + df_y * var(y)) / (df_x + df_y))
  } else {
    # Both groups are singletons: approximate with half the combined sd
    pooled_sd <- sd(c(x, y)) / 2
  }
  effect / pooled_sd
}
#' Default Weights for Projection Sources
#'
#' These are the weights that are used for each source when calculating weighted
#' averages and standard deviations if no weights are specified.
#' \code{c(CBS = 0.344, Yahoo = 0.400, ESPN = 0.329, NFL = 0.329,
#' FFToday = 0.379, NumberFire = 0.322, FantasyPros = 0.000,
#' FantasySharks = 0.327, FantasyFootballNerd = 0.000,
#' Walterfootball = 0.281, RTSports = 0.330,
#' FantasyData = 0.428, Fleaflicker = 0.428)}
# Named by data source; subset by source name, e.g. default_weights[data_src]
default_weights <- c(CBS = 0.344, Yahoo = 0.400, ESPN = 0.329, NFL = 0.329,
                     FFToday = 0.379, NumberFire = 0.322, FantasyPros = 0.000,
                     FantasySharks= 0.327, FantasyFootballNerd = 0.000,
                     Walterfootball = 0.281, RTSports= 0.330,
                     FantasyData= 0.428, Fleaflicker = 0.428)
# Helper functions to calculate the quantiles and standard deviations for the
# source points. Used in the points_sd and confidence_interval functions.
# One entry per aggregation type (average/robust/weighted); wtd.quantile is
# wrapped in possibly() so a failure yields NaN limits instead of an error.
quant_funcs <- list(average = quantile, robust = quantile,
                    weighted = purrr::possibly(Hmisc::wtd.quantile, c(`5%` = NaN, `95%` = NaN)))
# Arguments matched by position to quant_funcs above (5th and 95th percentile)
quant_args <- list(list(probs = c(0.05, 0.95)), list(probs = c(0.05, 0.95)),
                   list(probs = c(0.05, 0.95), type = "i/n"))
# Apply all three quantile functions to one player's points and weights
get_quant <- function(pts, wt)invoke_map(quant_funcs, quant_args, x = pts, na.rm = TRUE, weights = wt)
# Spread measures per aggregation type: sd, MAD (robust) and weighted sd
sd_funcs <- list(average = function(x, w, na.rm)sd(x, na.rm = na.rm),
                 robust = function(x, w, na.rm)mad(x, na.rm = na.rm),
                 weighted = weighted.sd)
sd_args <- list(list(na.rm = TRUE), list(na.rm = TRUE), list(na.rm = TRUE))
# Apply all three spread functions to one player's points and weights
get_sd <- function(pts, wt)invoke_map(sd_funcs, sd_args, x = pts, w = wt)
#' Calculate Source Points
#'
#' Function to calculate the projected points for each source.
#' @param data_result An output from the \link{scrape_data} function.
#' @param scoring_rules The scoring rules to be used.
#' @return A tibble with one row per pos/data_src/id and the summed points.
source_points <- function(data_result, scoring_rules){
  scoring_tbl <- make_scoring_tbl(scoring_rules)
  # Reshape all scraped stats to long form: one row per id/source/stat
  long_result <- data_result %>%
    stats_by_category() %>%
    map(gather, "data_col", "stat_value", -c(id, data_src, pos)) %>%
    bind_rows()
  dst_pt_allow <- NULL
  if("dst" %in% names(scoring_rules))
    dst_pt_allow <- scoring_rules[[c("dst", "dst_pts_allowed")]]
  # Bracket scoring applies to DST points allowed when no per-point rule exists
  dst_bracket <- is.null(dst_pt_allow) & !is.null(scoring_rules$pts_bracket)
  dst_src <- long_result %>% slice(0) %>% add_column(points = 0)
  if(dst_bracket){
    dst_src <- long_result %>% filter(data_col == "dst_pts_allowed") %>%
      # Bug fix: previously referenced the global default `scoring` object
      # instead of the `scoring_rules` argument, so custom bracket settings
      # were silently ignored.
      mutate(points = ffanalytics:::dst_points(stat_value, scoring_rules$pts_bracket))
  }
  long_result %>%
    inner_join(scoring_tbl, by = c("pos", "data_col")) %>%
    mutate(points = stat_value * points) %>%
    bind_rows(dst_src) %>%
    group_by(pos, data_src, id) %>%
    summarise(points = sum(points, na.rm = TRUE)) %>% ungroup()
}
# Generate a (data_src, weight) lookup table from a source points table.
# Falls back to default_weights for the sources present when none are given.
weights_from_src <- function(src_pts, weights = NULL){
  if(is.null(weights)){
    weights <- default_weights[unique(src_pts$data_src)]
  }
  # NOTE(review): relies on as.tibble() keeping the vector names as row names
  # so rownames_to_column() can recover the source names -- deprecated tibble
  # behavior; verify against the tibble version in use.
  weights %>% as.tibble() %>%
    `names<-`("weight") %>% rownames_to_column('data_src')
}
#' Calculate Standard Deviations for Projected Points
#'
#' This function calculates the standard deviaion for projected points from
#' different sources
#' @param src_pts An output from the \link{source_points} function
#' @param weights A named vector with the weights from each source.
#' See \link{default_weights}
# Calculate the per-player standard deviation of source points for each
# aggregation type (average/robust/weighted).
# src_pts: output from source_points; weights: named per-source weights
# (default_weights are used when NULL).
points_sd <- function(src_pts, weights = NULL){
  weight_tbl <- weights_from_src(src_pts, weights)
  src_pts %>% inner_join(weight_tbl, by = "data_src") %>%
    group_by(id) %>%
    # A single zero-weight observation would make the weighted sd undefined;
    # give it full weight instead.
    mutate(n_obs = n(),
           weight = if_else(n_obs == 1 & weight == 0, 1, weight)) %>%
    ungroup() %>% select(-n_obs) %>%
    # Nest by position then player, compute the three sd measures per player,
    # then flatten back to a long pos/id/avg_type/sd_pts table.
    split(src_pts$pos) %>% map(~ split(.x, .x$id)) %>%
    modify_depth(2, ~ get_sd(.x$points, .x$weight)) %>% modify_depth(2, as.tibble) %>%
    modify_depth(1, bind_rows, .id = "id") %>% bind_rows(.id = "pos") %>%
    gather("avg_type", "sd_pts", -id, -pos)
}
#' Calculate the Upper and Lower Limits for Projected Points
#'
#' This function calculates the ceiling and floor for projected points from
#' different sources based on quantiles
#' @param src_pts An output from the \link{source_points} function
#' @param weights A named vector with the weights from each source.
#' See \link{default_weights}
# Calculate floor (5th percentile) and ceiling (95th percentile) of source
# points per player for each aggregation type.
# src_pts: output from source_points; weights: named per-source weights
# (default_weights are used when NULL).
confidence_interval <- function(src_pts, weights = NULL){
  weight_tbl <- weights_from_src(src_pts, weights)
  src_pts %>% inner_join(weight_tbl, by = "data_src") %>%
    group_by(id) %>%
    # A single zero-weight observation would break the weighted quantile;
    # give it full weight instead.
    mutate(n_obs = n(),
           weight = if_else(n_obs == 1 & weight == 0, 1, weight)) %>%
    ungroup() %>% select(-n_obs) %>%
    # Nest by position then player, compute quantiles, then flatten back out
    split(src_pts$pos) %>% map(~ split(.x, .x$id)) %>%
    modify_depth(2, ~ get_quant(.x$points, .x$weight)) %>% modify_depth(3, t) %>%
    modify_depth(3, as.tibble) %>% modify_depth(2, bind_rows, .id = "avg_type") %>%
    modify_depth(1, bind_rows, .id = "id") %>% bind_rows(.id = "pos") %>%
    # The weighted quantile can name its column ` 5%` (leading space); merge
    # that column into the standard `5%` column before renaming.
    mutate(`5%` = ifelse(is.na(`5%`),` 5%`, `5%`)) %>% select(-` 5%`) %>%
    rename(floor = "5%", ceiling = "95%")
}
#' Aggregate Projected Stats
#'
#' This function aggregates the projected stats collected from each source with
#' the \link{scrape_data} function.
#' @param data_result An output from the \link{scrape_data} function.
#' @param src_weights A named vector with the weights from each source.
#' See \link{default_weights}
#' @export
# Aggregate scraped stats across sources into three location estimates per
# pos/id/stat: robust (Wilcox), plain mean, and weighted mean.
aggregate_stats <- function(data_result, src_weights = NULL){
  if(is.null(src_weights)){
    # Use default weights for the union of sources present in the scrape
    data_src <- data_result %>% map(`[[`, "data_src") %>% reduce(union)
    src_weights <- default_weights[data_src]
  }
  # NOTE(review): relies on as.tibble() preserving vector names as row names
  # (deprecated tibble behavior), same pattern as weights_from_src -- verify.
  weight_tbl <- src_weights %>% as.tibble() %>%
    `names<-`("weight") %>% rownames_to_column('data_src')
  # Long format (one row per position/player/stat), then summarise per stat
  data_result %>% stats_by_category() %>%
    map(inner_join, weight_tbl, by = "data_src") %>%
    map(gather, "data_col", "stat_value",
        -c(id, data_src, pos, weight)) %>%
    bind_rows() %>% group_by(pos, id, data_col) %>%
    summarise(robust = wilcox.loc(stat_value, na.rm = TRUE),
              average = mean(stat_value, na.rm = TRUE ),
              weighted = weighted.mean(stat_value, w = weight, na.rm = TRUE)) %>%
    gather("avg_type", "stat_value", -c(id, pos, data_col))
}
#' Calculate Projected Points
#'
#' This function calculates the projected points for each player based on the
#' aggregated stats from the \link{aggregate_stats} function. The resulting table
#' contains the projected points, the position rank and the points drop-off for
#' each player.
#' @param agg_stats An output from the \link{aggregate_stats} function
#' @param scoring_rules The scoring rules to be used.
projected_points <- function(agg_stats, scoring_rules){
  scoring_tbl <- make_scoring_tbl(scoring_rules)
  dst_pt_allow <- NULL
  if("dst" %in% names(scoring_rules))
    dst_pt_allow <- scoring_rules[[c("dst", "dst_pts_allowed")]]
  # Bracket scoring applies to DST points allowed when no per-point rule exists
  dst_bracket <- is.null(dst_pt_allow) & !is.null(scoring_rules$pts_bracket)
  # Cleanup: the original built an identical intermediate twice (dst_src was
  # computed, emptied, and recomputed as dst_agg); only one table is needed.
  dst_agg <- agg_stats %>% slice(0) %>% add_column(points = 0)
  if(dst_bracket){
    dst_agg <- agg_stats %>% filter(data_col == "dst_pts_allowed") %>%
      mutate(points = ffanalytics:::dst_points(stat_value, scoring_rules$pts_bracket))
  }
  agg_stats %>%
    inner_join(scoring_tbl, by = c("pos", "data_col")) %>%
    mutate(points = stat_value * points) %>%
    bind_rows(dst_agg) %>%
    group_by(pos, avg_type, id) %>%
    # All-NA groups stay NA instead of collapsing to 0 via sum(na.rm = TRUE)
    summarise(points = if_else(all(is.na(points)), NA_real_, sum(points, na.rm = TRUE))) %>%
    mutate(pos_rank = dense_rank(-points),
           # Drop-off: gap to the average of the next two players at position
           drop_off = points - (lead(points, order_by = pos_rank) +
                                  lead(points, 2, order_by = pos_rank)) /2 ) %>%
    ungroup()
}
#' Default VOR Baseline
#'
#' This is the default baseline that is used if not otherwise specified when
#' calculating VOR:
#' \code{c(QB = 13, RB = 35, WR = 36, TE = 13, K = 8, DST = 3, DL = 10, LB = 10, DB = 10)}
# Named by position; the value is the positional rank treated as replacement level
default_baseline <- c(QB = 13, RB = 35, WR = 36, TE = 13, K = 8, DST = 3, DL = 10, LB = 10, DB = 10)
#' Calculate VOR
#'
#' This function calculates the VOR based on an output from the \link{projected_points}
#' and if floor or ceiling VOR is requested with floor and ceiling added from the
#' \link{confidence_interval} function
#' @param points_table An output from the \link{projected_points} function and merged
#' with output from the the \link{projected_points} function and merged if floor or ceiling vor
#' is requested
#' @param vor_baseline The VOR Baseline to be used. If omitted then the
#' \link{default_baseline} will be used
#' @param vor_var One of \code{c("points", "floor", "ceiling")} indicating which
#' basis is used for the vor calculation
# Calculate value-over-replacement (VOR) for one basis variable.
# points_table: output from projected_points (merged with confidence_interval
#   output when floor/ceiling VOR is requested).
# vor_baseline: named vector of baseline positional ranks; default_baseline if NULL.
# vor_var: which column ("points", "floor" or "ceiling") VOR is based on.
set_vor <- function(points_table, vor_baseline = NULL, vor_var = c("points", "floor", "ceiling")){
  if(is.null(vor_baseline))
    vor_baseline <- default_baseline
  vor_var <- match.arg(vor_var)
  # Per position: rank players on the chosen variable, then average the values
  # of the players ranked within one spot of the baseline rank to get the
  # replacement-level value (vor_base).
  vor_tbl <- select(points_table, "id", "pos", vor_var) %>%
    rename(vor_var = !!vor_var) %>% group_by(pos) %>%
    mutate(vor_rank = dense_rank(-vor_var), vor_base = vor_baseline[pos]) %>%
    filter(vor_rank >= vor_base - 1 & vor_rank <= vor_base + 1) %>%
    summarise(vor_base = mean(vor_var)) %>% ungroup() %>%
    # Join the replacement value back onto the full table; VOR is the distance
    # from replacement, and rank is the overall ordering on that VOR.
    select(pos, vor_base) %>% inner_join(points_table, by = c("pos")) %>%
    rename(vor_var = !!vor_var) %>%
    mutate(vor = vor_var - vor_base,
           rank = dense_rank(-vor), !!vor_var := vor_var) %>%
    # Prefix numeric output columns with the basis name, e.g. points_vor.
    # NOTE(review): assumes id is non-numeric so only vor/rank get prefixed -- confirm.
    select(id, pos, vor, rank) %>% rename_if(is.numeric, funs(paste(vor_var, ., sep = "_"))) %>%
    ungroup()
  return(vor_tbl)
}
#' Calculate VOR for Points, Ceiling and Floor
#'
#' This function calculates VOR for projected points as well as the floor and
#' ceiling values.
#' @param tbl The output from the \link{projected_points} function that has
#' been merged with the output from he \link{confidence_interval} function
#' @param vor_baseline The VOR baseline values to be used. If omitted then the
#' \link{default_baseline} will be used
# Merge VOR columns for each basis (points, floor, ceiling) onto `tbl` by
# folding set_vor() over the three basis names; each step joins the new
# VOR/rank columns onto the accumulated table by id and pos.
add_vor <- function(tbl, vor_baseline = NULL){
  vor_bases <- c("points", "floor", "ceiling")
  reduce(vor_bases,
         function(acc, basis){
           inner_join(acc, set_vor(acc, vor_baseline, vor_var = basis),
                      by = c("id", "pos"))
         },
         .init = tbl)
}
#' Default Threshold Values for Tiers
#'
#' These are the default threshold values used when applying Cohen's D values
#' to determine tiers:
#' \code{c(QB = 1, RB = 1, WR = 1, TE = 1, K = 1, DST = 0.1, DL = 1, DB = 1, LB = 1)}
# Named by position; looked up via d_threshold[pos] in set_tiers()
default_threshold <- c(QB = 1, RB = 1, WR = 1, TE = 1, K = 1, DST = 0.1, DL = 1, DB = 1, LB = 1)
#' Determine Tiers by Position
#'
#' This function determines tiers for each position by applying Cohen's D effect
#' size
#' @param data_tbl An output from the \link{projected_points} function
#' @param d_threshold THe thresholds to use when applying Cohens'd D function to
#' determine the tiers. If omitted then the \link{default_threshold} will be used.
#' @param src_points An output from the \link{source_points} function
# Assign tiers within each position by comparing the current tier's top player
# against the best not-yet-tiered player using Cohen's D effect size.
# data_tbl: output from projected_points (needs pos, id, points, pos_rank)
# d_threshold: named per-position effect-size thresholds (default_threshold if NULL)
# src_points: output from source_points; supplies per-source point samples for cohens_d
set_tiers <- function(data_tbl, d_threshold = NULL, src_points){
  if(is.null(d_threshold))
    d_threshold <- default_threshold
  # Seed: the top-ranked player at each position starts tier 1; all others NA
  tier_tbl <- data_tbl %>% filter(pos %in% names(d_threshold)) %>%
    mutate(dthres = d_threshold[pos], tier = ifelse(pos_rank == 1, 1L, NA))
  repeat{
    before_na <- sum(is.na(tier_tbl$tier))
    tier_tbl <-
      # Representative (highest-points) player of the current tier per position
      tier_tbl %>% group_by(pos) %>% filter(tier == tier[which.max(tier)]) %>%
      summarise(tier_id = first(id, order_by = -points),
                cur_tier = as.integer(max(tier, na.rm = TRUE)),
                dthres= max(dthres, na.rm = TRUE)) %>%
      # Best player at the position with no tier assigned yet
      inner_join(tier_tbl %>% group_by(pos) %>% filter(is.na(tier)) %>%
                   summarise(max_id = first(id, order_by = -points)), by = "pos") %>%
      group_by(pos) %>%
      # Effect size above the threshold starts a new tier; otherwise the player
      # joins the current tier.
      mutate(d_val = cohens_d(src_points[src_points$id == tier_id,]$points,
                              src_points[src_points$id == max_id,]$points),
             tier = ifelse(d_val > dthres, cur_tier + 1L, cur_tier)) %>%
      select(pos, id = max_id, new_tier = tier) %>% right_join(tier_tbl, by = c("pos", "id")) %>%
      mutate(tier = ifelse(is.na(tier) & !is.na(new_tier), new_tier, tier)) %>%
      select(-new_tier)
    after_na <- sum(is.na(tier_tbl$tier))
    # Stop when an iteration makes no progress or everyone has a tier
    if(before_na == after_na | after_na == 0)
      break
  }
  tier_tbl %>% select(-dthres) %>% ungroup()
}
#' Create a Projections Table
#'
#' This function creates the projections table based on the scraped data from the
#' \link{scrape_data} function. The output is a table containing the projected
#' points, confidence intervals, standard deviation for points, and, for seasonal
#' data, the VOR values.
#' @param data_result An output from the \link{scrape_data} function
#' @param scoring_rules The scoring rules to be used for calculations. See
#' \code{vignette("scoring_settings")} on how to define custom scoring settings.
#' If omitted then default \link{scoring} settings will be used.
#' @param src_weights A named vector defining the weights for each source to be
#' used in calculations. If omitted then \link{default_weights} will be used.
#' @param vor_baseline A named vector defining the baseline to use for VOR
#' calculations. If omitted then the \link{default_baseline} will be used.
#' @param tier_thresholds The threshold values to be used when determining tiers.
#' If omitted then the \link{default_threshold} will be used.
#' @export
projections_table <- function(data_result, scoring_rules = NULL, src_weights = NULL,
                              vor_baseline = NULL, tier_thresholds = NULL){
  if(is.null(scoring_rules))
    scoring_rules <- scoring
  # Determine league type (PPR / Half / Std) per position from the points
  # awarded per reception.
  if(scoring_rules$rec$all_pos){
    lg_type <- scoring_rules$rec$rec %>% rep(length(data_result)) %>%
      `names<-`(names(data_result)) %>%
      map_chr(~ case_when(.x > 0.5 ~ "PPR", .x > 0 ~ "Half", TRUE ~ "Std"))
  } else {
    lg_type <- map(scoring_rules$rec[-which(names(scoring_rules$rec) == "all_pos")], `[[`, "rec") %>%
      keep(~ !is.null(.x)) %>%
      map_chr(~ case_when(.x > 0.5 ~ "PPR", .x > 0 ~ "Half", TRUE ~ "Std"))
    # Bug fix: was `<` (a comparison whose result was discarded) instead of
    # `<-`, so positions without reception scoring never got a league type.
    lg_type[setdiff(names(data_result), names(lg_type))] <- "Std"
  }
  # Per-source points and weighted aggregate stats, computed in one pass
  data_list <- invoke_map(list(src_pts = source_points, agg_stats = aggregate_stats),
                          list(list(data_result = data_result, scoring_rules = scoring_rules),
                               list(data_result = data_result, src_weights = src_weights)))
  # Standard deviation plus floor/ceiling quantiles per player and avg_type
  pts_uncertainty <- invoke_map(list(points_sd, confidence_interval),
                                src_pts = data_list$src_pts, weights = src_weights) %>%
    reduce(inner_join, by = c("pos", "id","avg_type"))
  out_df <- data_list$agg_stats %>%
    projected_points(scoring_rules) %>%
    inner_join(pts_uncertainty, by = c("pos", "id","avg_type")) %>%
    group_by(avg_type) %>%
    set_tiers(tier_thresholds, data_list$src_pts) %>%
    ungroup()
  # VOR only applies to seasonal (week == 0) data
  if(attr(data_result, "week") == 0){
    out_df <- out_df %>% split(.$avg_type) %>%
      map(add_vor, vor_baseline = vor_baseline) %>% bind_rows() %>%
      rename(rank = points_rank)
  }
  out_df %>%
    `attr<-`(which = "season", attr(data_result, "season")) %>%
    `attr<-`(which = "week", attr(data_result, "week")) %>%
    `attr<-`(which = "lg_type", lg_type)
}
#' Add ECR to the Projection Table
#'
#' This function will add the ECR values to the projetions table generated from
#' the \link{projections_table} function. It will add the positional ECR, the
#' standard deviation for the positional ECR, and if seasonal data also the
#' overal ECR value
#' @param projection_table An output from the \link{projections_table} function.
#' @export
# Add ECR (expert consensus rank) columns to a projections table: positional
# ECR and its sd always; overall ECR only for seasonal (week == 0) data.
add_ecr <- function(projection_table){
  # Preserve the table attributes; the joins below drop them
  lg_type <- attr(projection_table, "lg_type")
  season <- attr(projection_table, "season")
  week <- attr(projection_table, "week")
  # Positional ECR: one scrape per position, using that position's league type
  ecr_pos <- lg_type %>%
    imap(~ scrape_ecr(rank_period = ifelse(week == 0, "draft", "week"),
                      position = .y, rank_type = .x)) %>%
    map(select, id, pos_ecr = avg, sd_ecr = std_dev) %>% bind_rows()
  projection_table <- left_join(projection_table, ecr_pos, by = "id")
  if(week == 0){
    # Overall ECR uses the "richest" league type present (PPR > Half > Std)
    lg_ov <- ifelse(any(lg_type == "PPR"), "PPR", ifelse(any(lg_type == "Half"), "Half", "Std"))
    ecr_overall <- scrape_ecr(rank_period = "draft", rank_type = lg_ov) %>%
      select(id, ecr = avg)
    projection_table <- left_join(projection_table, ecr_overall, by = "id")
  }
  # Restore attributes on the returned table
  projection_table %>%
    `attr<-`(which = "season", season) %>%
    `attr<-`(which = "week", week) %>%
    `attr<-`(which = "lg_type", lg_type)
}
#' Add ADP to the Projections Table
#'
#' This function will add the ADP data to the projections table from the
#' \link{projections_table} function. It will add the average ADP from the
#' sources specified, and the difference between the overall rank and ADP.
#' @param projection_table An output from the \link{projections_table} function
#' @param sources Which ADP sources should be added. Should be one or more of
#' \code{c("RTS", "CBS", "ESPN", "Yahoo", "NFL", "FFC")}
#' @export
add_adp <- function(projection_table,
                    sources = c("RTS", "CBS", "ESPN", "Yahoo", "NFL", "FFC")){
  sources <- match.arg(sources, several.ok = TRUE)
  # Preserve the table attributes; the join below drops them
  lg_type <- attr(projection_table, "lg_type")
  season <- attr(projection_table, "season")
  week <- attr(projection_table, "week")
  if (week != 0){
    warning("ADP data is not available for weekly data", call. = FALSE)
    return(projection_table)
  }
  # Keep the id column and the last column (the averaged ADP value)
  adp_tbl <- get_adp(sources, type = "ADP") %>% select(1, length(.))
  # Bug fix: rename_at was given `~ function(x) return("adp")`, a formula that
  # yields a function object rather than the name "adp", so the column was
  # never renamed and `adp` below was unresolved. Rename it directly.
  names(adp_tbl)[2] <- "adp"
  projection_table <- left_join(projection_table, adp_tbl, by = "id") %>%
    mutate(adp_diff = rank - adp)
  # Restore attributes on the returned table
  projection_table %>%
    `attr<-`(which = "season", season) %>%
    `attr<-`(which = "week", week) %>%
    `attr<-`(which = "lg_type", lg_type)
}
#' Add AAV to the Projections Table
#'
#' This function will add the AAV (average auction value) data to the
#' projections table from the \link{projections_table} function.
#' @param projection_table An output from the \link{projections_table} function
#' @param sources Which AAV sources should be added. Should be one or more of
#' \code{c("RTS", "ESPN", "Yahoo", "NFL")}
#' @export
add_aav <- function(projection_table,
                    sources = c("RTS", "ESPN", "Yahoo", "NFL")){
  sources <- match.arg(sources, several.ok = TRUE)
  # Preserve the table attributes; the join below drops them
  lg_type <- attr(projection_table, "lg_type")
  season <- attr(projection_table, "season")
  week <- attr(projection_table, "week")
  if (week != 0){
    warning("AAV data is not available for weekly data", call. = FALSE)
    return(projection_table)
  }
  # Keep the id column and the last column (the averaged AAV value)
  aav_tbl <- get_adp(sources, type = "AAV") %>% select(1, length(.))
  # Bug fix: the rename_at call passed a formula returning a function object
  # instead of the name "aav"; rename the column directly.
  names(aav_tbl)[2] <- "aav"
  # Bug fix: the AAV table was computed but never joined, so this function
  # previously returned the input table without any AAV column.
  left_join(projection_table, aav_tbl, by = "id") %>%
    `attr<-`(which = "season", season) %>%
    `attr<-`(which = "week", week) %>%
    `attr<-`(which = "lg_type", lg_type)
}
#' Risk calculation based on two variables
#'
#' Calculation of risk is done by scaling the standard deviation variables
#' passed and averaging them before returning a measure with mean 5 and standard
#' deviation of 2
# Combine two spread measures into a single risk score: each input is
# standardized, missing values are filled from the other measure, the two
# z-scores are averaged row-wise, and the average is rescaled so the result
# has mean 5 and standard deviation 2.
calculate_risk <- function(var1, var2){
  z1 <- scale(as.numeric(var1))
  z2 <- scale(as.numeric(var2))
  # Fill gaps in each standardized vector from the other one
  z1[is.na(z1)] <- z2[is.na(z1)]
  z2[is.na(z2)] <- z1[is.na(z2)]
  combined <- rowMeans(data.frame(z1, z2), na.rm = TRUE)
  2 * scale(combined) + 5
}
#' Add calculated risk to the table
#'
#' Calculation of risk is done by scaling the standard deviation variables
#' passed and averaging them before returning a measure with mean 5 and standard
#' deviation of 2
#' @export
# Add a per-position risk measure derived from the points and ECR standard
# deviations (see calculate_risk); within each position the risk has mean 5
# and standard deviation 2.
add_risk <- function(projection_table){
  # Preserve the table attributes; grouping/mutating below drops them
  lg_type <- attr(projection_table, "lg_type")
  season <- attr(projection_table, "season")
  week <- attr(projection_table, "week")
  projection_table %>%
    group_by(pos) %>%
    # Calculate Risk values
    mutate(risk = calculate_risk(sd_pts, sd_ecr)) %>%
    ungroup() %>%
    `attr<-`(which = "season", season) %>%
    `attr<-`(which = "week", week) %>%
    `attr<-`(which = "lg_type", lg_type)
}
#' Add player information to the table
#'
#' Adds player information to the projections table
#' @export
# Join player metadata (name, team, position, age, experience) from the
# package's player_table onto the projections table, restoring its attributes.
add_player_info <- function(projection_table){
  # Preserve the table attributes; the join below drops them
  lg_type <- attr(projection_table, "lg_type")
  season <- attr(projection_table, "season")
  week <- attr(projection_table, "week")
  # inner_join keeps only players present in player_table
  select(player_table, id, first_name, last_name, team, position, age, exp) %>%
    inner_join(projection_table, by = "id") %>%
    `attr<-`(which = "season", season) %>%
    `attr<-`(which = "week", week) %>%
    `attr<-`(which = "lg_type", lg_type)
}
|
/*
This program cannot pass, since b is not mutable, therefore it cannot be mutable borrowed
*/
f :=: fnTy('a ;
ref('a,imm,own(i32)),
own(i32) ;
ref('a,imm,own(i32)))
fun f(x,y)
newlft
let a = new(i32) in {
let b = new(i32) in {
let c = & mut a in
{
c := & mut b;
let d = a in
{x}
} }}
endlft
|
/tests_all/tytests/simpTesT/t4.r
|
no_license
|
awhite37/thesis
|
R
| false
| false
| 329
|
r
|
/*
This program cannot pass, since b is not mutable, therefore it cannot be mutable borrowed
*/
f :=: fnTy('a ;
ref('a,imm,own(i32)),
own(i32) ;
ref('a,imm,own(i32)))
fun f(x,y)
newlft
let a = new(i32) in {
let b = new(i32) in {
let c = & mut a in
{
c := & mut b;
let d = a in
{x}
} }}
endlft
|
# 2015-12-08
# Jake Yeung
# site count analysis on gene body to do motif enrichment
#
# Workflow: load DHS signal and MotEvo sitecounts (RORA and ONECUT1,2),
# normalize the signal per tissue, binarize it with per-tissue mixture-model
# cutoffs, collapse peaks into "Liver" vs "Flat" classes, then run Fisher
# tests for motif enrichment.
library(ggplot2)
library(mixtools)
library(dplyr)
library(hash)
# Helpers (ReadSitecountsMotif, FindCutoffLong, CollapseDat, AssignSitecount,
# FisherTestSitecounts) are defined in these sourced scripts, not shown here.
source("scripts/functions/MixtureModelFunctions.R")
source("scripts/functions/FisherTestSitecounts.R")
source("scripts/functions/SitecountsFunctions.R")
# Functions ---------------------------------------------------------------
# Load --------------------------------------------------------------------
start <- Sys.time()
# N <- read.table("data/sitecounts/motevo_by_peaks_dhs_gene_bodies/merged.closest.bed", nrows = 10) # 30 GB
S <- read.table("/home/yeung/data/tissue_specificity/motevo_dhs/dhs_signal/dhs_signal_windows500.chr.sorted.closest.mat")
# Loads `fits.best` (per-gene model fits; used below to pick Liver genes).
load("Robjs/fits.best.max_3.collapsed_models.amp_cutoff_0.15.phase_sd_maxdiff_avg.Robj")
sitecounts.dir <- "/home/yeung/data/tissue_specificity/motevo_dhs/closest_bed"
sitecounts.path <- "/home/yeung/data/tissue_specificity/motevo_dhs/closest_bed/RORA.closest.bed"
N.RORA <- ReadSitecountsMotif(sitecounts.path)
N.ONECUT <- ReadSitecountsMotif(file.path(sitecounts.dir, "ONECUT1,2.closest.bed"), show.time = TRUE)
# N.RORA <- read.table(sitecounts.path)
# Add colnames ------------------------------------------------------------
tissues <- c("Cere", "Heart", "Kidney", "Liver", "Lung", "Mus")
cnames <- c("chromo", "start", "end", tissues, "chromo.gene", "start.gene", "end.gene", "gene", "blank", "strand", "dist")
colnames(S) <- cnames
# Normalized dat ----------------------------------------------------------
# Scale each tissue column to counts-per-million so tissues are comparable.
for (tiss in tissues){
S[[tiss]] <- 10^6 * S[[tiss]] / sum(S[[tiss]])
}
# Reshape the wide signal matrix into long format: one row per peak x tissue.
signal.vec <- unlist(S[, colnames(S) %in% tissues])
S.long <- data.frame(chromo = S$chromo, start = S$start, end = S$end,
peak = paste(paste(S$chromo, S$start, sep = ":"), S$end, sep = "-"), # chr1:7800234-7800734
tissue = rep(tissues, each = nrow(S)),
signal = signal.vec,
gene = S$gene, dist = S$dist)
# Regions of interest -----------------------------------------------------
# Genes whose best-fitting model is Liver-specific.
liver.genes <- subset(fits.best, model == "Liver")$gene
S.sub <- subset(S.long, gene %in% liver.genes)
# check for bias across tissues (signal density on a 1% random subsample)
ggplot(S.long[sample(x = 1:nrow(S.long), size = 0.01 * nrow(S.long)), ], aes(x = log10(signal))) + geom_density() + facet_wrap(~tissue)
# lets look at Celsr1 near chr15:85,959,964-85,964,941
# NOTE(review): jgene is actually set to "Ube2u", not Celsr1, and
# jchromo/startmin/endmin are only used by the commented-out subsets below
# -- confirm which gene was intended.
jgene <- "Ube2u"
jchromo <- "chr15"
startmin <- 85959964
endmin <- 85964941
s.subsub <- NULL  # (placeholder removed) -- see next line
S.subsub <- subset(S.sub, gene == jgene)
N.sub.onecut <- subset(N.ONECUT, gene == jgene)
# N.sub <- subset(N.RORA, gene == jgene & start > startmin)
# N.sub <- subset(N.RORA, chromo == jchromo & start > startmin - 10000 & end < endmin + 10000)
N.sub <- subset(N.RORA, gene == jgene)
# show peaks: DHS signal track (bottom), ONECUT sitecounts (middle), RORA (top)
ggplot(S.subsub, aes(xmin = start, xmax = end, ymin = -0.5, ymax = 0.5, alpha = signal)) + geom_rect() +
geom_rect(aes(xmin = start, xmax = end, ymin = 0.75, ymax = 1.75, alpha = sitecount), data = N.sub.onecut) +
geom_rect(aes(xmin = start, xmax = end, ymin = 2, ymax = 3, alpha = sitecount), data = N.sub)
ggplot(N.sub, aes(xmin = start, xmax = end, ymin = -0.5, ymax = 0.5)) + geom_rect() + geom_rect(aes(xmin = start, xmax = end, ymin = 0.75, ymax = 1.75), data = N.sub.onecut)
# show peaks
# Enrichment time ---------------------------------------------------------
# Find cutoff
pseudo <- 1e-2
cutoff <- -2
ggplot(S.long[sample(x = 1:nrow(S.long), size = 0.01 * nrow(S.long)), ], aes(x = log2(signal + pseudo))) + geom_density() + facet_wrap(~tissue) + geom_vline(xintercept = cutoff)
test <- subset(S.long, tissue == "Heart")
# Single-tissue sanity check of the mixture-model cutoff before running all.
jcut <- FindCutoffLong(test, jlambdas = c(0.7, 0.3), jmus = c(-4, 0), take.frac = 0.001, jshow.fig = TRUE)
print(Sys.time() - start)
# needs to be tissue-specific probably, try mixtools
S.tissuecutoff <- S.long %>%
group_by(tissue) %>%
do(FindCutoffLong(., jlambdas = c(0.7, 0.3), jmus = c(-4, 0), take.frac = 0.003))
# cool
ggplot(S.long[sample(x = 1:nrow(S.long), size = 0.01 * nrow(S.long)), ], aes(x = log2(signal + pseudo))) + geom_density() +
geom_vline(aes(xintercept = log2(cutoff)), data = S.tissuecutoff) + facet_wrap(~tissue)
# now do cutoffs: set DHS signals to 0 or 1
cutoffs.tiss <- hash(as.character(S.tissuecutoff$tissue), as.numeric(S.tissuecutoff$cutoff))
# Binarize: 1 if the peak's signal exceeds its tissue cutoff, else 0.
S.sub$signal.cut <- mapply(function(s, tiss){
cutoff.tiss <- cutoffs.tiss[[tiss]]
if (s >= cutoff.tiss){
return(1)
} else {
return(0)
}
}, S.sub$signal, as.character(S.sub$tissue))
# collapse into liver vs non-liver peaks
start <- Sys.time()
S.collapse <- S.sub %>%
group_by(gene, peak) %>%
do(CollapseDat(., indx = 4, tissue = "Liver", non.tissue = "Flat", flat.style = "all"))
print(Sys.time() - start)
# # add sitecount info
# N.RORA.sub <- subset(N.RORA, gene %in% liver.genes)
# N.ONECUT.sub <- subset(N.ONECUT, gene %in% liver.genes)
# collapse readcounts for RORA ONECUT
# Sum sitecounts per peak, restricted to peaks within 1 kb of a liver gene.
N.RORA.sub <- subset(N.RORA, gene %in% liver.genes & dist < 1000) %>%
group_by(motif, peak) %>%
summarise(sitecount = sum(sitecount))
N.ONECUT.sub <- subset(N.ONECUT, gene %in% liver.genes & dist < 1000) %>%
group_by(motif, peak) %>%
summarise(sitecount = sum(sitecount))
# Peak -> sitecount lookup tables (AssignSitecount presumably defaults
# missing peaks; defined in SitecountsFunctions.R -- confirm).
sitecounts.hash <- hash(as.character(N.RORA.sub$peak), N.RORA.sub$sitecount)
sitecounts.onecut.hash <- hash(as.character(N.ONECUT.sub$peak), N.ONECUT.sub$sitecount)
S.collapse$sitecount.rora <- sapply(S.collapse$peak, AssignSitecount, sitecounts.hash)
S.collapse$sitecount.onecut <- sapply(S.collapse$peak, AssignSitecount, sitecounts.onecut.hash)
# Motif enrichment of Liver vs Flat peaks at sitecount cutoff 0.5.
FisherTestSitecounts(dat = S.collapse, cutoff = 0.5, sitecount.col = "sitecount.rora", model.col = "peak.type", show.table=TRUE)
FisherTestSitecounts(dat = S.collapse, cutoff = 0.5, sitecount.col = "sitecount.onecut", model.col = "peak.type", show.table=TRUE)
# Do I get RORA if I collapse the peaks assigned to a gene? ---------------
flat.peaks <- subset(S.collapse, peak.type == "Flat")$peak
liver.peaks <- subset(S.collapse, peak.type == "Liver")$peak
# Re-aggregate sitecounts at the gene level, separately for Flat and Liver
# peak classes, then rerun the enrichment test per gene.
N.RORA.flat.gene <- subset(N.RORA, gene %in% liver.genes & dist < 1000 & peak %in% flat.peaks) %>%
group_by(motif, gene) %>%
summarise(sitecount = sum(sitecount))
N.RORA.flat.gene$peak.type <- "Flat"
N.RORA.liver.gene <- subset(N.RORA, gene %in% liver.genes & dist < 1000 & peak %in% liver.peaks) %>%
group_by(motif, gene) %>%
summarise(sitecount = sum(sitecount))
N.RORA.liver.gene$peak.type <- "Liver"
N.RORA.gene <- rbind(N.RORA.flat.gene, N.RORA.liver.gene)
N.ONECUT.flat.gene <- subset(N.ONECUT, gene %in% liver.genes & dist < 1000 & peak %in% flat.peaks) %>%
group_by(motif, gene) %>%
summarise(sitecount = sum(sitecount))
N.ONECUT.flat.gene$peak.type <- "Flat"
N.ONECUT.liver.gene <- subset(N.ONECUT, gene %in% liver.genes & dist < 1000 & peak %in% liver.peaks) %>%
group_by(motif, gene) %>%
summarise(sitecount = sum(sitecount))
N.ONECUT.liver.gene$peak.type <- "Liver"
N.ONECUT.gene <- rbind(N.ONECUT.flat.gene, N.ONECUT.liver.gene)
FisherTestSitecounts(dat = N.ONECUT.gene, cutoff = 1, sitecount.col = "sitecount", model.col = "peak.type", show.table=TRUE)
|
/scripts/sitecounts_analysis/sitecount_analysis_dhs_peak_gene_body.R
|
no_license
|
jakeyeung/Yeung_et_al_2018_TissueSpecificity
|
R
| false
| false
| 7,096
|
r
|
# 2015-12-08
# Jake Yeung
# site count analysis on gene body to do motif enrichment
library(ggplot2)
library(mixtools)
library(dplyr)
library(hash)
source("scripts/functions/MixtureModelFunctions.R")
source("scripts/functions/FisherTestSitecounts.R")
source("scripts/functions/SitecountsFunctions.R")
# Functions ---------------------------------------------------------------
# Load --------------------------------------------------------------------
start <- Sys.time()
# N <- read.table("data/sitecounts/motevo_by_peaks_dhs_gene_bodies/merged.closest.bed", nrows = 10) # 30 GB
S <- read.table("/home/yeung/data/tissue_specificity/motevo_dhs/dhs_signal/dhs_signal_windows500.chr.sorted.closest.mat")
load("Robjs/fits.best.max_3.collapsed_models.amp_cutoff_0.15.phase_sd_maxdiff_avg.Robj")
sitecounts.dir <- "/home/yeung/data/tissue_specificity/motevo_dhs/closest_bed"
sitecounts.path <- "/home/yeung/data/tissue_specificity/motevo_dhs/closest_bed/RORA.closest.bed"
N.RORA <- ReadSitecountsMotif(sitecounts.path)
N.ONECUT <- ReadSitecountsMotif(file.path(sitecounts.dir, "ONECUT1,2.closest.bed"), show.time = TRUE)
# N.RORA <- read.table(sitecounts.path)
# Add colnames ------------------------------------------------------------
tissues <- c("Cere", "Heart", "Kidney", "Liver", "Lung", "Mus")
cnames <- c("chromo", "start", "end", tissues, "chromo.gene", "start.gene", "end.gene", "gene", "blank", "strand", "dist")
colnames(S) <- cnames
# Normalized dat ----------------------------------------------------------
for (tiss in tissues){
S[[tiss]] <- 10^6 * S[[tiss]] / sum(S[[tiss]])
}
signal.vec <- unlist(S[, colnames(S) %in% tissues])
S.long <- data.frame(chromo = S$chromo, start = S$start, end = S$end,
peak = paste(paste(S$chromo, S$start, sep = ":"), S$end, sep = "-"), # chr1:7800234-7800734
tissue = rep(tissues, each = nrow(S)),
signal = signal.vec,
gene = S$gene, dist = S$dist)
# Regions of interest -----------------------------------------------------
liver.genes <- subset(fits.best, model == "Liver")$gene
S.sub <- subset(S.long, gene %in% liver.genes)
# check for bias across tissues
ggplot(S.long[sample(x = 1:nrow(S.long), size = 0.01 * nrow(S.long)), ], aes(x = log10(signal))) + geom_density() + facet_wrap(~tissue)
# lets look at Celsr1 near chr15:85,959,964-85,964,941
jgene <- "Ube2u"
jchromo <- "chr15"
startmin <- 85959964
endmin <- 85964941
S.subsub <- subset(S.sub, gene == jgene)
N.sub.onecut <- subset(N.ONECUT, gene == jgene)
# N.sub <- subset(N.RORA, gene == jgene & start > startmin)
# N.sub <- subset(N.RORA, chromo == jchromo & start > startmin - 10000 & end < endmin + 10000)
N.sub <- subset(N.RORA, gene == jgene)
# show peaks
ggplot(S.subsub, aes(xmin = start, xmax = end, ymin = -0.5, ymax = 0.5, alpha = signal)) + geom_rect() +
geom_rect(aes(xmin = start, xmax = end, ymin = 0.75, ymax = 1.75, alpha = sitecount), data = N.sub.onecut) +
geom_rect(aes(xmin = start, xmax = end, ymin = 2, ymax = 3, alpha = sitecount), data = N.sub)
ggplot(N.sub, aes(xmin = start, xmax = end, ymin = -0.5, ymax = 0.5)) + geom_rect() + geom_rect(aes(xmin = start, xmax = end, ymin = 0.75, ymax = 1.75), data = N.sub.onecut)
# show peaks
# Enrichment time ---------------------------------------------------------
# Find cutoff
pseudo <- 1e-2
cutoff <- -2
ggplot(S.long[sample(x = 1:nrow(S.long), size = 0.01 * nrow(S.long)), ], aes(x = log2(signal + pseudo))) + geom_density() + facet_wrap(~tissue) + geom_vline(xintercept = cutoff)
test <- subset(S.long, tissue == "Heart")
jcut <- FindCutoffLong(test, jlambdas = c(0.7, 0.3), jmus = c(-4, 0), take.frac = 0.001, jshow.fig = TRUE)
print(Sys.time() - start)
# needs to be tissue-specific probably, try mixtools
S.tissuecutoff <- S.long %>%
group_by(tissue) %>%
do(FindCutoffLong(., jlambdas = c(0.7, 0.3), jmus = c(-4, 0), take.frac = 0.003))
# cool
ggplot(S.long[sample(x = 1:nrow(S.long), size = 0.01 * nrow(S.long)), ], aes(x = log2(signal + pseudo))) + geom_density() +
geom_vline(aes(xintercept = log2(cutoff)), data = S.tissuecutoff) + facet_wrap(~tissue)
# now do cutoffs: set DHS signals to 0 or 1
cutoffs.tiss <- hash(as.character(S.tissuecutoff$tissue), as.numeric(S.tissuecutoff$cutoff))
S.sub$signal.cut <- mapply(function(s, tiss){
cutoff.tiss <- cutoffs.tiss[[tiss]]
if (s >= cutoff.tiss){
return(1)
} else {
return(0)
}
}, S.sub$signal, as.character(S.sub$tissue))
# collapse into liver vs non-liver peaks
start <- Sys.time()
S.collapse <- S.sub %>%
group_by(gene, peak) %>%
do(CollapseDat(., indx = 4, tissue = "Liver", non.tissue = "Flat", flat.style = "all"))
print(Sys.time() - start)
# # add sitecount info
# N.RORA.sub <- subset(N.RORA, gene %in% liver.genes)
# N.ONECUT.sub <- subset(N.ONECUT, gene %in% liver.genes)
# collapse readcounts for RORA ONECUT
N.RORA.sub <- subset(N.RORA, gene %in% liver.genes & dist < 1000) %>%
group_by(motif, peak) %>%
summarise(sitecount = sum(sitecount))
N.ONECUT.sub <- subset(N.ONECUT, gene %in% liver.genes & dist < 1000) %>%
group_by(motif, peak) %>%
summarise(sitecount = sum(sitecount))
sitecounts.hash <- hash(as.character(N.RORA.sub$peak), N.RORA.sub$sitecount)
sitecounts.onecut.hash <- hash(as.character(N.ONECUT.sub$peak), N.ONECUT.sub$sitecount)
S.collapse$sitecount.rora <- sapply(S.collapse$peak, AssignSitecount, sitecounts.hash)
S.collapse$sitecount.onecut <- sapply(S.collapse$peak, AssignSitecount, sitecounts.onecut.hash)
FisherTestSitecounts(dat = S.collapse, cutoff = 0.5, sitecount.col = "sitecount.rora", model.col = "peak.type", show.table=TRUE)
FisherTestSitecounts(dat = S.collapse, cutoff = 0.5, sitecount.col = "sitecount.onecut", model.col = "peak.type", show.table=TRUE)
# Do I get RORA if I collapse the peaks assigned to a gene? ---------------
flat.peaks <- subset(S.collapse, peak.type == "Flat")$peak
liver.peaks <- subset(S.collapse, peak.type == "Liver")$peak
N.RORA.flat.gene <- subset(N.RORA, gene %in% liver.genes & dist < 1000 & peak %in% flat.peaks) %>%
group_by(motif, gene) %>%
summarise(sitecount = sum(sitecount))
N.RORA.flat.gene$peak.type <- "Flat"
N.RORA.liver.gene <- subset(N.RORA, gene %in% liver.genes & dist < 1000 & peak %in% liver.peaks) %>%
group_by(motif, gene) %>%
summarise(sitecount = sum(sitecount))
N.RORA.liver.gene$peak.type <- "Liver"
N.RORA.gene <- rbind(N.RORA.flat.gene, N.RORA.liver.gene)
N.ONECUT.flat.gene <- subset(N.ONECUT, gene %in% liver.genes & dist < 1000 & peak %in% flat.peaks) %>%
group_by(motif, gene) %>%
summarise(sitecount = sum(sitecount))
N.ONECUT.flat.gene$peak.type <- "Flat"
N.ONECUT.liver.gene <- subset(N.ONECUT, gene %in% liver.genes & dist < 1000 & peak %in% liver.peaks) %>%
group_by(motif, gene) %>%
summarise(sitecount = sum(sitecount))
N.ONECUT.liver.gene$peak.type <- "Liver"
N.ONECUT.gene <- rbind(N.ONECUT.flat.gene, N.ONECUT.liver.gene)
FisherTestSitecounts(dat = N.ONECUT.gene, cutoff = 1, sitecount.col = "sitecount", model.col = "peak.type", show.table=TRUE)
|
# Builds the static UI for the "k-Means" dashboard tab: a sidebar with
# Learn / Auto Run / Manual Run panels, and a main area with four summary
# boxes, two plots, and display controls. Purely declarative -- all server
# logic for the "kMeans_*" inputs/outputs referenced here lives elsewhere.
page_kMeans <- function()
{
tabItem(
tabName = "kMeans",
sidebarLayout(
# ----- __sidebar -----
sidebarPanel = sidebarPanel(
titlePanel(
h1("k-Means")
),
tabsetPanel(
id = "kMeans_panel1",
# ----- ____Learn -----
# Static explanation of the centroid clustering model.
tabPanel(
title = "Learn",
h4("Example of Centroid model"),
p("Assume that the elements of each cluster are",
"close from its centroid.")
),
# ----- ____Auto Run -----
# Describes the automatic silhouette-maximizing parameter search.
tabPanel(
title = "Auto Run",
h4("How many clusters ? Which seed ?"),
p("In this kMeans version, the algorithm maximize",
" the silhouette score by selecting the ideal configuration",
" with the number of expected clusters between 1 and 8",
" and the seed between 1 and 20.")
),
# ----- ____Manual Run -----
# Sliders for manually choosing k, the seed, and the iteration cap.
tabPanel(
title = "Manual Run",
h4("Manual exploring"),
sliderTextInput(
inputId = "kMeans_nbCenters",
label = "Expected clusters",
choices = 1:8,
selected = 3,
grid = TRUE
),
sliderTextInput(
inputId = "kMeans_seed",
label = "kMeans seed",
choices = 1:20,
selected = 1,
animate = TRUE,
grid = TRUE
),
sliderTextInput(
inputId = "kMeans_myIterMax",
label = "Maximum of iterations",
choices = 1:20,
selected = 10,
grid = TRUE
)
)
)
),
# ----- __main -----
mainPanel = mainPanel(
# Top row: four server-rendered summary boxes.
fluidRow(
id = "bodyTitle",
column(
width = 2,
uiOutput(outputId = "kMeans_info",
inline = TRUE)
),
column(
width = 2,
uiOutput(outputId = "kMeans_nbCenters")
),
column(
width = 2,
uiOutput(outputId = "kMeans_silhouette")
),
column(
width = 2,
uiOutput(outputId = "kMeans_seed")
)
),
# Middle row: main cluster plot (with brush-to-zoom) and secondary plot.
fluidRow(
column(
width = 6,
plotOutput(
height = "500px",
outputId = "kMeans_plot",
brush = brushOpts(id = "kMeans_brush", resetOnNew = FALSE)
)
),
column(
width = 6,
plotOutput(
height = "500px",
outputId = "kMeans_plot_2"
)
)
),
# Bottom row: overlay toggles and secondary-plot selector.
fluidRow(
column(
width = 2,
class = "myCheckbox square",
checkboxInput(
inputId = "kMeans_initCenters",
label = "Initial Centers",
value = TRUE
)
),
column(
width = 4,
class = "myCheckbox round",
checkboxInput(
inputId = "kMeans_finalCenters",
label = "Final Centers",
value = TRUE
)
),
column(
width = 6,
radioGroupButtons(
inputId = "kMeans_plotChoice",
label = "Plot Choice",
choices = c(
"Initial Clusters" = "init",
"Zoom" = "zoom",
"Index Heatmap" = "heatmap"
),
selected = "init",
checkIcon = list(
yes = tags$i(class = "fa fa-check-square",
style = "color: #d73925"),
no = tags$i(class = "fa fa-square",
style = "color: #d73925")
)
)
)
)
)
)
)
}
|
/Shiny/pages/kMeans.R
|
no_license
|
rvcoudert/ClusteringCatalog
|
R
| false
| false
| 3,927
|
r
|
page_kMeans <- function()
{
tabItem(
tabName = "kMeans",
sidebarLayout(
# ----- __sidebar -----
sidebarPanel = sidebarPanel(
titlePanel(
h1("k-Means")
),
tabsetPanel(
id = "kMeans_panel1",
# ----- ____Lean -----
tabPanel(
title = "Learn",
h4("Example of Centroid model"),
p("Assume that the elements of each cluster are",
"close from its centroid.")
),
# ----- ____Auto Run -----
tabPanel(
title = "Auto Run",
h4("How many clusters ? Which seed ?"),
p("In this kMeans version, the algorithm maximize",
" the silhouette score by selecting the ideal configuration",
" with the number of expected clusters between 1 and 8",
" and the seed between 1 and 20.")
),
# ----- ____Manual Run -----
tabPanel(
title = "Manual Run",
h4("Manual exploring"),
sliderTextInput(
inputId = "kMeans_nbCenters",
label = "Expected clusters",
choices = 1:8,
selected = 3,
grid = TRUE
),
sliderTextInput(
inputId = "kMeans_seed",
label = "kMeans seed",
choices = 1:20,
selected = 1,
animate = TRUE,
grid = TRUE
),
sliderTextInput(
inputId = "kMeans_myIterMax",
label = "Maximum of iterations",
choices = 1:20,
selected = 10,
grid = TRUE
)
)
)
),
# ----- __main -----
mainPanel = mainPanel(
fluidRow(
id = "bodyTitle",
column(
width = 2,
uiOutput(outputId = "kMeans_info",
inline = TRUE)
),
column(
width = 2,
uiOutput(outputId = "kMeans_nbCenters")
),
column(
width = 2,
uiOutput(outputId = "kMeans_silhouette")
),
column(
width = 2,
uiOutput(outputId = "kMeans_seed")
)
),
fluidRow(
column(
width = 6,
plotOutput(
height = "500px",
outputId = "kMeans_plot",
brush = brushOpts(id = "kMeans_brush", resetOnNew = FALSE)
)
),
column(
width = 6,
plotOutput(
height = "500px",
outputId = "kMeans_plot_2"
)
)
),
fluidRow(
column(
width = 2,
class = "myCheckbox square",
checkboxInput(
inputId = "kMeans_initCenters",
label = "Initial Centers",
value = TRUE
)
),
column(
width = 4,
class = "myCheckbox round",
checkboxInput(
inputId = "kMeans_finalCenters",
label = "Final Centers",
value = TRUE
)
),
column(
width = 6,
radioGroupButtons(
inputId = "kMeans_plotChoice",
label = "Plot Choice",
choices = c(
"Initial Clusters" = "init",
"Zoom" = "zoom",
"Index Heatmap" = "heatmap"
),
selected = "init",
checkIcon = list(
yes = tags$i(class = "fa fa-check-square",
style = "color: #d73925"),
no = tags$i(class = "fa fa-square",
style = "color: #d73925")
)
)
)
)
)
)
)
}
|
# Exports the data behind the dynbenchmark "funky cover" figure: color
# palettes, column/group/experiment layout metadata (with precomputed x/y
# positions), row metadata, and the top-10 methods' values, all written to
# the dyndocs folder as JSON/CSV for downstream rendering.
library(tidyverse)
library(dynbenchmark)
experiment("08-summary")
folder <- "../../dyndocs/funky_cover/data/"
# save all palettes
palettes <- tribble(
~palette, ~colours,
# blues palette
"overall", grDevices::colorRampPalette(rev(RColorBrewer::brewer.pal(9, "Greys")[-1]))(101),
"benchmark", grDevices::colorRampPalette(rev(RColorBrewer::brewer.pal(9, "Blues") %>% c("#011636")))(101),
"scaling", grDevices::colorRampPalette(rev(RColorBrewer::brewer.pal(9, "Reds")[-8:-9]))(101),
"stability", grDevices::colorRampPalette(rev(RColorBrewer::brewer.pal(9, "YlOrBr")[-7:-9]))(101),
"qc", grDevices::colorRampPalette(rev(RColorBrewer::brewer.pal(9, "Greens")[-1] %>% c("#00250f")))(101),
"white6black4", c(rep("white", 3), rep("black", 7)) %>% gplots::col2hex(),
"column_annotation", c(overall = "#555555", benchmark = "#4292c6", scaling = "#f6483a", stability = "#fe9929", qc = "#41ab5d")
)
jsonlite::write_json(palettes %>% deframe(), file.path(folder, "palettes.json"))
# save columns and groups
# These sourced scripts define `data`, `column_info` and `column_groups`.
source(scripts_file("2-main_figure.R"))
source(scripts_file("2a_columns_all.R"))
# Keep only the top 10 methods (rows are assumed pre-ranked -- confirm).
data_sel <- data %>%
slice(1:10)
# Select columns: keep only plottable geoms plus the method-name column.
column_infos_sel <- column_info %>%
filter(geom %in% c("bar", "funkyrect", "rect") | id == "method_name") %>%
select(-options, -name)
# create experiments from groups
group_infos_sel <- column_groups %>%
mutate(experiment = Experiment) %>%
mutate(color = map_chr(deframe(palettes)[palette], first)) %>%
select(-Experiment)
experiment_infos_sel <- group_infos_sel %>%
group_by(experiment) %>%
summarise(
palette = first(palette),
color = first(color)
)
column_infos_sel <- column_infos_sel %>%
left_join(group_infos_sel %>% select(experiment, group))
# precalculate column positions, and use those to precalculate positions of groups and experiments
# Bars are 4 units wide, everything else 1; one unit of padding is inserted
# wherever the group changes between adjacent columns.
column_infos_sel <- column_infos_sel %>%
mutate(
w = case_when(geom == "bar" ~ 4, TRUE ~ 1),
padding = as.integer(lag(group, default = first(group)) != group),
x = cumsum(lag(w, default = 0) + padding)
)
# Group/experiment spans are the bounding boxes of their member columns.
group_infos_sel <- column_infos_sel %>%
group_by(group) %>%
summarise(w = max(x + w) - min(x), x = min(x)) %>%
left_join(group_infos_sel) %>%
arrange(x)
experiment_infos_sel <- group_infos_sel %>%
group_by(experiment) %>%
summarise(w = max(x + w) - min(x), x = min(x)) %>%
left_join(experiment_infos_sel) %>%
arrange(x)
# determine row info
# Rows are grouped by trajectory type; y counts up then is flipped so the
# first row sits at the top, and z adds 5 extra units per group break.
row_infos_sel <- tibble(
id = data_sel$id,
group = data_sel$method_most_complex_trajectory_type,
padding = as.integer(lag(group, default = first(group)) != group),
height = 1
) %>%
mutate(
y = cumsum(lag(height, default = 0) + padding),
y = max(y) - y,
z = cumsum(padding * 5),
z = max(z) - z
)
rowgroup_infos_sel <- row_infos_sel %>%
group_by(group) %>%
summarise(
y = min(y),
z = min(z),
height = sum(height)
) %>%
arrange(y)
# Write all layout tables alongside the palette JSON.
column_infos_sel %>%
write_csv(file.path(folder, "column_infos.csv"))
group_infos_sel %>%
write_csv(file.path(folder, "group_infos.csv"))
experiment_infos_sel %>%
write_csv(file.path(folder, "experiment_infos.csv"))
row_infos_sel %>%
write_csv(file.path(folder, "row_infos.csv"))
rowgroup_infos_sel %>%
write_csv(file.path(folder, "rowgroup_infos.csv"))
# save actual data (only the columns that made it into the layout)
data_sel <- data_sel %>%
select(column_infos_sel$id)
data_sel %>%
write_csv(file.path(folder, "data.csv"))
|
/scripts/08-summary/3-funky_cover.R
|
permissive
|
dynverse/dynbenchmark
|
R
| false
| false
| 3,398
|
r
|
library(tidyverse)
library(dynbenchmark)
experiment("08-summary")
folder <- "../../dyndocs/funky_cover/data/"
# save all palettes
palettes <- tribble(
~palette, ~colours,
# blues palette
"overall", grDevices::colorRampPalette(rev(RColorBrewer::brewer.pal(9, "Greys")[-1]))(101),
"benchmark", grDevices::colorRampPalette(rev(RColorBrewer::brewer.pal(9, "Blues") %>% c("#011636")))(101),
"scaling", grDevices::colorRampPalette(rev(RColorBrewer::brewer.pal(9, "Reds")[-8:-9]))(101),
"stability", grDevices::colorRampPalette(rev(RColorBrewer::brewer.pal(9, "YlOrBr")[-7:-9]))(101),
"qc", grDevices::colorRampPalette(rev(RColorBrewer::brewer.pal(9, "Greens")[-1] %>% c("#00250f")))(101),
"white6black4", c(rep("white", 3), rep("black", 7)) %>% gplots::col2hex(),
"column_annotation", c(overall = "#555555", benchmark = "#4292c6", scaling = "#f6483a", stability = "#fe9929", qc = "#41ab5d")
)
jsonlite::write_json(palettes %>% deframe(), file.path(folder, "palettes.json"))
# save columns and groups
source(scripts_file("2-main_figure.R"))
source(scripts_file("2a_columns_all.R"))
data_sel <- data %>%
slice(1:10)
# select colums
column_infos_sel <- column_info %>%
filter(geom %in% c("bar", "funkyrect", "rect") | id == "method_name") %>%
select(-options, -name)
# create experiments from groups
group_infos_sel <- column_groups %>%
mutate(experiment = Experiment) %>%
mutate(color = map_chr(deframe(palettes)[palette], first)) %>%
select(-Experiment)
experiment_infos_sel <- group_infos_sel %>%
group_by(experiment) %>%
summarise(
palette = first(palette),
color = first(color)
)
column_infos_sel <- column_infos_sel %>%
left_join(group_infos_sel %>% select(experiment, group))
# precalculate column positions, and use those to precalculate positions of groups and experiments
column_infos_sel <- column_infos_sel %>%
mutate(
w = case_when(geom == "bar" ~ 4, TRUE ~ 1),
padding = as.integer(lag(group, default = first(group)) != group),
x = cumsum(lag(w, default = 0) + padding)
)
group_infos_sel <- column_infos_sel %>%
group_by(group) %>%
summarise(w = max(x + w) - min(x), x = min(x)) %>%
left_join(group_infos_sel) %>%
arrange(x)
experiment_infos_sel <- group_infos_sel %>%
group_by(experiment) %>%
summarise(w = max(x + w) - min(x), x = min(x)) %>%
left_join(experiment_infos_sel) %>%
arrange(x)
# determine row info
row_infos_sel <- tibble(
id = data_sel$id,
group = data_sel$method_most_complex_trajectory_type,
padding = as.integer(lag(group, default = first(group)) != group),
height = 1
) %>%
mutate(
y = cumsum(lag(height, default = 0) + padding),
y = max(y) - y,
z = cumsum(padding * 5),
z = max(z) - z
)
rowgroup_infos_sel <- row_infos_sel %>%
group_by(group) %>%
summarise(
y = min(y),
z = min(z),
height = sum(height)
) %>%
arrange(y)
column_infos_sel %>%
write_csv(file.path(folder, "column_infos.csv"))
group_infos_sel %>%
write_csv(file.path(folder, "group_infos.csv"))
experiment_infos_sel %>%
write_csv(file.path(folder, "experiment_infos.csv"))
row_infos_sel %>%
write_csv(file.path(folder, "row_infos.csv"))
rowgroup_infos_sel %>%
write_csv(file.path(folder, "rowgroup_infos.csv"))
# save actual data
data_sel <- data_sel %>%
select(column_infos_sel$id)
data_sel %>%
write_csv(file.path(folder, "data.csv"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggcyto_GatingSet.R
\name{ggcyto.GatingSet}
\alias{ggcyto.GatingHierarchy}
\alias{ggcyto.GatingSet}
\alias{ggcyto.GatingSetList}
\title{Create a new ggcyto plot from a GatingSet}
\usage{
\method{ggcyto}{GatingSet}(data, mapping, subset = "_parent_", ...)
\method{ggcyto}{GatingSetList}(data, ...)
\method{ggcyto}{GatingHierarchy}(data, ...)
}
\arguments{
\item{data}{GatingSet to plot}
\item{mapping}{default list of aesthetic mappings (these can be colour,
size, shape, line type -- see individual geom functions for more details)}
\item{subset}{character that specifies the node path or node name in the GatingSet.
Default is "_parent_", which will be substituted with the actual node name
based on the geom_gate layer to be added later.}
\item{...}{ignored}
}
\value{
a ggcyto_GatingSet object which is a subclass of ggcyto_flowSet class.
}
\description{
Create a new ggcyto plot from a GatingSet
}
\examples{
dataDir <- system.file("extdata",package="flowWorkspaceData")
gs <- load_gs(list.files(dataDir, pattern = "gs_manual",full = TRUE))
# 2d plot
ggcyto(gs, aes(x = CD4, y = CD8), subset = "CD3+") + geom_hex(bins = 64)
# 1d plot
ggcyto(gs, aes(x = CD4), subset = "CD3+") + geom_density()
}
|
/man/ggcyto.GatingSet.Rd
|
no_license
|
cristhomas/ggcyto
|
R
| false
| true
| 1,288
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggcyto_GatingSet.R
\name{ggcyto.GatingSet}
\alias{ggcyto.GatingHierarchy}
\alias{ggcyto.GatingSet}
\alias{ggcyto.GatingSetList}
\title{Create a new ggcyto plot from a GatingSet}
\usage{
\method{ggcyto}{GatingSet}(data, mapping, subset = "_parent_", ...)
\method{ggcyto}{GatingSetList}(data, ...)
\method{ggcyto}{GatingHierarchy}(data, ...)
}
\arguments{
\item{data}{GatingSet to plot}
\item{mapping}{default list of aesthetic mappings (these can be colour,
size, shape, line type -- see individual geom functions for more details)}
\item{subset}{character that specifies the node path or node name in the GatingSet.
Default is "_parent_", which will be substituted with the actual node name
based on the geom_gate layer to be added later.}
\item{...}{ignored}
}
\value{
a ggcyto_GatingSet object which is a subclass of ggcyto_flowSet class.
}
\description{
Create a new ggcyto plot from a GatingSet
}
\examples{
dataDir <- system.file("extdata",package="flowWorkspaceData")
gs <- load_gs(list.files(dataDir, pattern = "gs_manual",full = TRUE))
# 2d plot
ggcyto(gs, aes(x = CD4, y = CD8), subset = "CD3+") + geom_hex(bins = 64)
# 1d plot
ggcyto(gs, aes(x = CD4), subset = "CD3+") + geom_density()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Illig_data.R
\docType{data}
\name{Illig_data}
\alias{Illig_data}
\title{Illig_data}
\format{A dataframe with 187 rows and 9 columns.}
\usage{
data(Illig_data)
}
\description{
Illig_data contains 187 CpGs required to calculate the smoking score based on the Elliott et al. approach
}
\examples{
data(Illig_data)
head(Illig_data)
}
\references{
Elliott HR, Tillin T, McArdle WL, et al. Differences in smoking associated DNA methylation patterns in
South Asians and Europeans. Clinical Epigenetics. 2014;6(1):4.PMID: 24485148.
\href{https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3915234/}{PMC}
}
\keyword{datasets}
|
/man/Illig_data.Rd
|
no_license
|
spurthy111/EpiSmokEr
|
R
| false
| true
| 679
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Illig_data.R
\docType{data}
\name{Illig_data}
\alias{Illig_data}
\title{Illig_data}
\format{A dataframe with 187 rows and 9 columns.}
\usage{
data(Illig_data)
}
\description{
Illig_data contains 187 CpGs required to calculate the smoking score based on the Elliott et al. approach
}
\examples{
data(Illig_data)
head(Illig_data)
}
\references{
Elliott HR, Tillin T, McArdle WL, et al. Differences in smoking associated DNA methylation patterns in
South Asians and Europeans. Clinical Epigenetics. 2014;6(1):4.PMID: 24485148.
\href{https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3915234/}{PMC}
}
\keyword{datasets}
|
# NOTE: The functions in this class are just templates that are to be implemented for all subclasses of BayesianOutput. They are not functional.
#' Extracts the sample from a bayesianOutput
#' @author Florian Hartig
#' @param sampler an object of class mcmcSampler, mcmcSamplerList, smcSampler, smcSamplerList, mcmc, mcmc.list, double, numeric
#' @param parametersOnly if FALSE, likelihood, posterior and prior values are also provided in the output
#' @param coda works only for mcmc classes - provides output as a coda object. Note: if mcmcSamplerList contains mcmc samplers such as DE that have several chains, the internal chains will be collapsed. This may not be the desired behavior for all applications.
#' @param start for mcmc samplers start value in the chain. For SMC samplers, start particle
#' @param end for mcmc samplers end value in the chain. For SMC samplers, end particle
#' @param thin thinning parameter. Either an integer determining the thinning interval (default is 1) or "auto" for automatic thinning.
#' @param numSamples sample size (only used if thin = 1). If you want to use numSamples set thin to 1.
#' @param whichParameters possibility to select parameters by index
#' @param includesProbabilities applies only to getSample.matrix. logical, determining whether probabilities should be included in the result.
#' @param reportDiagnostics logical, determines whether settings should be included in the output
#' @param ... further arguments
#' @example /inst/examples/getSampleHelp.R
#' @details If thin is greater than the total number of samples in the sampler object the first and the last element (of each chain if a sampler with multiple chains is used) are sampled. If numSamples is greater than the total number of samples all samples are selected. In both cases a warning is displayed.
#' @details If thin and numSamples is passed, the function will use the thin argument if it is valid and greater than 1, else numSamples will be used.
#' @details This is an S3 generic; the actual extraction is implemented by the class-specific methods (e.g. \code{getSample.matrix}, \code{getSample.double}) dispatched on \code{class(sampler)}.
#' @export
getSample <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = 1, numSamples = NULL, whichParameters = NULL, includesProbabilities = F, reportDiagnostics = FALSE, ...) UseMethod("getSample")
# TODO: here we have to check many times if the object is a matrix to
# cover edge cases (single row/col). Maybe this should be restructured
#' @rdname getSample
#' @author Florian Hartig
#' @export
getSample.matrix <- function(sampler, parametersOnly = T, coda = F, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = F, reportDiagnostics = F, ...){

  # Default to the full chain when no explicit end row is given.
  if(is.null(end)) end = nrow(sampler)

  # When the matrix carries probability columns, the last 3 columns are the
  # Lposterior / Llikelihood / Lprior values; the rest are parameters.
  if(includesProbabilities) nPars = ncol(sampler) - 3 else nPars = ncol(sampler)

  if(parametersOnly == T | includesProbabilities == F) {
    out = sampler[start:end, 1:nPars]
    # Single-parameter case: "[" drops to a numeric vector, so re-promote the
    # SLICED data to a matrix. (Bug fix: this previously used
    # as.matrix(sampler), which silently discarded the start/end subset and,
    # when probabilities were present, re-introduced the probability columns.)
    if(class(out)[1] == "numeric") out = as.matrix(out)
  } else {
    # Keep parameter AND probability columns.
    # Bug fix: this branch previously read `out[start:end,]` before `out` was
    # ever assigned (an error); it must subset the input `sampler`.
    out = sampler[start:end, ]
    #if(!is.null(sampler$setup$names)) colnames(out) = c(sampler$setup$names, "Lposterior", "Llikelihood", "Lprior")
  }
  if (!is.matrix(out)) {
    out <- matrix(out, ncol = nPars)
  }

  ########################
  # THINNING
  nTotalSamples <- nrow(out)
  # correctThin resolves "auto"/invalid values to a usable integer interval.
  thin <- correctThin(nTotalSamples, thin = thin)

  if (thin == 1 && !is.null(numSamples)) {
    # numSamples is only honored when no (valid, > 1) thinning was requested.
    out <- sampleEquallySpaced(out, numSamples)
  } else {
    sel = seq(1, nTotalSamples, by = thin)
    out = out[sel,]
    # Subsetting can again collapse to a vector for a single parameter.
    if (!is.matrix(out)) out <- matrix(out, ncol = nPars)
  }
  #############

  if (!is.null(whichParameters)) out = out[,whichParameters]
  if(coda == T) out = makeObjectClassCodaMCMC(out, start = start, end = end, thin = thin)

  if(reportDiagnostics == T){
    # Also report the settings that were actually applied.
    return(list(chain = out, start = start, end = end, thin = thin))
  } else return(out)
}
#' @rdname getSample
#' @author Tankred Ott
#' @export
# Helper method used by getSample.mcmc: mcmc objects can wrap plain vectors,
# in which case a vector (instead of a matrix) should be returned. The
# remaining arguments are accepted for interface compatibility but are not
# used for plain vectors.
getSample.double <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  if (is.null(end)) end = length(sampler)
  out <- sampler[start:end]

  nTotalSamples <- length(out)
  # Validate / auto-select the thinning interval for the available samples.
  thin = correctThin(nTotalSamples, thin)

  if (thin == 1 && !is.null(numSamples)) {
    # numSamples is only honoured when no (valid) thinning was requested.
    out <- sampleEquallySpaced(out, numSamples)
  } else {
    out = out[seq(1, nTotalSamples, by = thin)]
  }
  return(out)
}
#' @rdname getSample
#' @author Tankred Ott
#' @export
# Helper method used by getSample.mcmc for integer vectors. Integer and
# double vectors are handled identically, so delegate to getSample.double
# instead of duplicating its body.
getSample.integer <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  getSample.double(sampler, parametersOnly = parametersOnly, coda = coda,
                   start = start, end = end, thin = thin,
                   numSamples = numSamples, whichParameters = whichParameters,
                   includesProbabilities = includesProbabilities,
                   reportDiagnostics = reportDiagnostics, ...)
}
#' @rdname getSample
#' @author Tankred Ott
#' @export
getSample.data.frame <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  # Convert to a matrix and dispatch to getSample.matrix.
  # BUG FIX: the original did not forward numSamples, so it was silently
  # ignored for data.frame input.
  getSample(as.matrix(sampler), parametersOnly = parametersOnly, coda = coda,
            start = start, end = end, thin = thin, numSamples = numSamples,
            whichParameters = whichParameters,
            includesProbabilities = includesProbabilities,
            reportDiagnostics = reportDiagnostics)
}
#' @rdname getSample
#' @author Tankred Ott
#' @export
getSample.list <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){

  # Split the requested sample size evenly across the chains in the list.
  if (!is.null(numSamples)) numSamples = ceiling(numSamples / length(sampler))

  if (coda == FALSE) {
    # Collect each chain's samples into a preallocated list, then collapse
    # them into one matrix (avoids the quadratic rbind-in-a-loop pattern).
    out <- vector("list", length(sampler))
    for (i in seq_along(sampler)) {
      out[[i]] <- getSample(sampler[[i]], parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics = FALSE)
    }
    out <- combineChains(out)
  }

  if (coda == TRUE) {
    out = list()
    for (i in seq_along(sampler)) {
      out[[i]] = getSample(sampler[[i]], parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics = FALSE)
    }
    # If the elements are themselves mcmc.lists, flatten one level before
    # re-classing the combined result. inherits() is used instead of
    # class(x) == "mcmc.list", which misbehaves for multi-class objects.
    if (inherits(out[[1]], "mcmc.list")) out = unlist(out, recursive = FALSE)
    class(out) = "mcmc.list"
  }
  return(out)
}
# The following two S3 implementations make getSample compatible with coda::mcmc and coda::mcmc.list
#' @rdname getSample
#' @author Tankred Ott
#' @export
getSample.mcmc <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  if (coda == TRUE) {
    # mcmc objects can contain matrices or vectors
    nTotalSamples <- if (is.matrix(sampler)) nrow(sampler) else length(sampler)
    if (is.null(end)) end = nTotalSamples
    # check/correct thin
    thin <- correctThin(nTotalSamples, thin)
    # coda's window() performs the start/end/thin subsetting while keeping
    # the mcmc class intact.
    return(window(sampler, start = start, end = end, thin = thin))
  } else {
    # Strip the mcmc class and dispatch to the matrix or vector method.
    if (is.matrix(sampler)) {
      stripped <- as.matrix(sampler)
    } else {
      stripped <- as.vector(sampler)
    }
    return(getSample(stripped, parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, includesProbabilities = includesProbabilities, reportDiagnostics = reportDiagnostics))
  }
}
#' @author Tankred Ott
#' @rdname getSample
#' @export
getSample.mcmc.list <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  # TODO: implement handling of wrong inputs?
  if (coda == TRUE) {
    # Chain length is taken from the first chain; mcmc objects can contain
    # matrices or vectors.
    nTotalSamples <- if (is.matrix(sampler[[1]])) nrow(sampler[[1]]) else length(sampler[[1]])
    if (is.null(end)) end = nTotalSamples
    # check/correct thin
    thin <- correctThin(nTotalSamples, thin)
    # coda's window() performs the start/end/thin subsetting for mcmc.list
    # objects.
    return(window(sampler, start = start, end = end, thin = thin))
  } else {
    # Collapse the chains into one object and dispatch on its type; vector
    # chains are returned as a plain vector, matching getSample.mcmc.
    out <- getSample(combineChains(sampler), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, includesProbabilities = includesProbabilities, reportDiagnostics = reportDiagnostics)
    if (is.matrix(sampler[[1]])) return(out) else return(as.vector(out))
  }
}
# getSample implementation for nimble objects of class MCMC
#' @rdname getSample
#' @author Tankred Ott
#' @export
getSample.MCMC <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  # The samples live in sampler$mvSamples; convert to a matrix and reuse
  # getSample.matrix.
  return(getSample(as.matrix(sampler$mvSamples), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, includesProbabilities = includesProbabilities, reportDiagnostics = reportDiagnostics))
}
#' @rdname getSample
#' @author Tankred Ott
#' @export
getSample.MCMC_refClass <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  # Same as getSample.MCMC: the samples live in sampler$mvSamples.
  return(getSample(as.matrix(sampler$mvSamples), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, includesProbabilities = includesProbabilities, reportDiagnostics = reportDiagnostics))
}
#' Merge Chains
#'
#' Merge a list of MCMCs or chains
#'
#' The function merges a list of MCMC objects. Requirement is that the list contains classes for which the getSample function works
#'
#' @param l the list with MCMC outputs
#' @param ... arguments to be passed on to getSample
#'
#' @return a matrix
#'
#' @author Florian Hartig
#'
#' @export
#'
mergeChains <- function(l, ...){
  # BUG FIX: the original looped over 2:length(l), which evaluates to
  # c(2, 1) for a single-element list and therefore errored. lapply +
  # do.call(rbind, ...) handles any list length and preserves row order.
  do.call(rbind, lapply(l, getSample, ...))
}
|
/BayesianTools/R/classBayesianOutput.R
|
no_license
|
akhikolla/ClusterTests
|
R
| false
| false
| 12,343
|
r
|
# NOTE: The functions in this class are just templates that are to be implemented for all subclasses of BayesianOutput. They are not functional.
#' Extracts the sample from a bayesianOutput
#' @author Florian Hartig
#' @param sampler an object of class mcmcSampler, mcmcSamplerList, smcSampler, smcSamplerList, mcmc, mcmc.list, double, numeric
#' @param parametersOnly if F, likelihood, posterior and prior values are also provided in the output
#' @param coda works only for mcmc classes - provides output as a coda object. Note: if mcmcSamplerList contains mcmc samplers such as DE that have several chains, the internal chains will be collapsed. This may not be the desired behavior for all applications.
#' @param start for mcmc samplers start value in the chain. For SMC samplers, start particle
#' @param end for mcmc samplers end value in the chain. For SMC samplers, end particle
#' @param thin thinning parameter. Either an integer determining the thinning intervall (default is 1) or "auto" for automatic thinning.
#' @param numSamples sample size (only used if thin = 1). If you want to use numSamples set thin to 1.
#' @param whichParameters possibility to select parameters by index
#' @param includesProbabilities applies only to getSample.Matrix. logical, determining whether probabilities should be included in the result.
#' @param reportDiagnostics logical, determines whether settings should be included in the output
#' @param ... further arguments
#' @example /inst/examples/getSampleHelp.R
#' @details If thin is greater than the total number of samples in the sampler object the first and the last element (of each chain if a sampler with multiple chains is used) are sampled. If numSamples is greater than the total number of samples all samples are selected. In both cases a warning is displayed.
#' @details If thin and numSamples is passed, the function will use the thin argument if it is valid and greater than 1, else numSamples will be used.
#' @export
getSample <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = 1, numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...) UseMethod("getSample")
# TODO: here we have to check many times if the object is a matrix to
# cover edge cases (single row/col). Maybe this should be restructured
#' @rdname getSample
#' @author Florian Hartig
#' @export
getSample.matrix <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){

  if (is.null(end)) end = nrow(sampler)

  # If the matrix carries probability columns, the last three columns
  # (posterior, likelihood, prior) are not parameters.
  if (includesProbabilities) nPars = ncol(sampler) - 3 else nPars = ncol(sampler)

  # Number of columns the result should have after row subsetting. The
  # original always rebuilt dropped vectors with ncol = nPars, which was
  # wrong for the "keep probabilities" branch below.
  nColsOut = if (parametersOnly == TRUE || includesProbabilities == FALSE) nPars else ncol(sampler)

  if (parametersOnly == TRUE || includesProbabilities == FALSE) {
    out = sampler[start:end, 1:nPars]
  } else {
    # BUG FIX: the original read 'out' before it was ever assigned in this
    # branch; subset the input 'sampler' instead.
    out = sampler[start:end, ]
    #if(!is.null(sampler$setup$names)) colnames(out) = c(sampler$setup$names, "Lposterior", "Llikelihood", "Lprior")
  }

  # Single-row / single-column subsets drop to a vector; restore matrix
  # shape. (BUG FIX: the original replaced a one-parameter result with
  # as.matrix(sampler), which ignored start/end and any probability columns.)
  if (!is.matrix(out)) out <- matrix(out, ncol = nColsOut)

  ########################
  # THINNING
  nTotalSamples <- nrow(out)
  # Validate / auto-select the thinning interval for the available rows.
  thin <- correctThin(nTotalSamples, thin = thin)

  if (thin == 1 && !is.null(numSamples)) {
    # numSamples is only honoured when no (valid) thinning was requested.
    out <- sampleEquallySpaced(out, numSamples)
  } else {
    sel = seq(1, nTotalSamples, by = thin)
    out = out[sel, , drop = FALSE]
  }
  #############

  # NOTE(review): selecting a single column here still drops to a vector,
  # as in the original - kept for backward compatibility with callers.
  if (!is.null(whichParameters)) out = out[, whichParameters]
  if (coda == TRUE) out = makeObjectClassCodaMCMC(out, start = start, end = end, thin = thin)

  if (reportDiagnostics == TRUE) {
    return(list(chain = out, start = start, end = end, thin = thin))
  } else {
    return(out)
  }
}
#' @rdname getSample
#' @author Tankred Ott
#' @export
# Helper method used by getSample.mcmc: mcmc objects can wrap plain vectors,
# in which case a vector (instead of a matrix) should be returned. The
# remaining arguments are accepted for interface compatibility but are not
# used for plain vectors.
getSample.double <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  if (is.null(end)) end = length(sampler)
  out <- sampler[start:end]

  nTotalSamples <- length(out)
  # Validate / auto-select the thinning interval for the available samples.
  thin = correctThin(nTotalSamples, thin)

  if (thin == 1 && !is.null(numSamples)) {
    # numSamples is only honoured when no (valid) thinning was requested.
    out <- sampleEquallySpaced(out, numSamples)
  } else {
    out = out[seq(1, nTotalSamples, by = thin)]
  }
  return(out)
}
#' @rdname getSample
#' @author Tankred Ott
#' @export
# Helper method used by getSample.mcmc for integer vectors. Integer and
# double vectors are handled identically, so delegate to getSample.double
# instead of duplicating its body.
getSample.integer <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  getSample.double(sampler, parametersOnly = parametersOnly, coda = coda,
                   start = start, end = end, thin = thin,
                   numSamples = numSamples, whichParameters = whichParameters,
                   includesProbabilities = includesProbabilities,
                   reportDiagnostics = reportDiagnostics, ...)
}
#' @rdname getSample
#' @author Tankred Ott
#' @export
getSample.data.frame <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  # Convert to a matrix and dispatch to getSample.matrix.
  # BUG FIX: the original did not forward numSamples, so it was silently
  # ignored for data.frame input.
  getSample(as.matrix(sampler), parametersOnly = parametersOnly, coda = coda,
            start = start, end = end, thin = thin, numSamples = numSamples,
            whichParameters = whichParameters,
            includesProbabilities = includesProbabilities,
            reportDiagnostics = reportDiagnostics)
}
#' @rdname getSample
#' @author Tankred Ott
#' @export
getSample.list <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){

  # Split the requested sample size evenly across the chains in the list.
  if (!is.null(numSamples)) numSamples = ceiling(numSamples / length(sampler))

  if (coda == FALSE) {
    # Collect each chain's samples into a preallocated list, then collapse
    # them into one matrix (avoids the quadratic rbind-in-a-loop pattern).
    out <- vector("list", length(sampler))
    for (i in seq_along(sampler)) {
      out[[i]] <- getSample(sampler[[i]], parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics = FALSE)
    }
    out <- combineChains(out)
  }

  if (coda == TRUE) {
    out = list()
    for (i in seq_along(sampler)) {
      out[[i]] = getSample(sampler[[i]], parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, reportDiagnostics = FALSE)
    }
    # If the elements are themselves mcmc.lists, flatten one level before
    # re-classing the combined result. inherits() is used instead of
    # class(x) == "mcmc.list", which misbehaves for multi-class objects.
    if (inherits(out[[1]], "mcmc.list")) out = unlist(out, recursive = FALSE)
    class(out) = "mcmc.list"
  }
  return(out)
}
# The following two S3 implementations make getSample compatible with coda::mcmc and coda::mcmc.list
#' @rdname getSample
#' @author Tankred Ott
#' @export
getSample.mcmc <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  if (coda == TRUE) {
    # mcmc objects can contain matrices or vectors
    nTotalSamples <- if (is.matrix(sampler)) nrow(sampler) else length(sampler)
    if (is.null(end)) end = nTotalSamples
    # check/correct thin
    thin <- correctThin(nTotalSamples, thin)
    # coda's window() performs the start/end/thin subsetting while keeping
    # the mcmc class intact.
    return(window(sampler, start = start, end = end, thin = thin))
  } else {
    # Strip the mcmc class and dispatch to the matrix or vector method.
    if (is.matrix(sampler)) {
      stripped <- as.matrix(sampler)
    } else {
      stripped <- as.vector(sampler)
    }
    return(getSample(stripped, parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, includesProbabilities = includesProbabilities, reportDiagnostics = reportDiagnostics))
  }
}
#' @author Tankred Ott
#' @rdname getSample
#' @export
getSample.mcmc.list <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  # TODO: implement handling of wrong inputs?
  if (coda == TRUE) {
    # Chain length is taken from the first chain; mcmc objects can contain
    # matrices or vectors.
    nTotalSamples <- if (is.matrix(sampler[[1]])) nrow(sampler[[1]]) else length(sampler[[1]])
    if (is.null(end)) end = nTotalSamples
    # check/correct thin
    thin <- correctThin(nTotalSamples, thin)
    # coda's window() performs the start/end/thin subsetting for mcmc.list
    # objects.
    return(window(sampler, start = start, end = end, thin = thin))
  } else {
    # Collapse the chains into one object and dispatch on its type; vector
    # chains are returned as a plain vector, matching getSample.mcmc.
    out <- getSample(combineChains(sampler), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, includesProbabilities = includesProbabilities, reportDiagnostics = reportDiagnostics)
    if (is.matrix(sampler[[1]])) return(out) else return(as.vector(out))
  }
}
# getSample implementation for nimble objects of class MCMC
#' @rdname getSample
#' @author Tankred Ott
#' @export
getSample.MCMC <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  # The samples live in sampler$mvSamples; convert to a matrix and reuse
  # getSample.matrix.
  return(getSample(as.matrix(sampler$mvSamples), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, includesProbabilities = includesProbabilities, reportDiagnostics = reportDiagnostics))
}
#' @rdname getSample
#' @author Tankred Ott
#' @export
getSample.MCMC_refClass <- function(sampler, parametersOnly = TRUE, coda = FALSE, start = 1, end = NULL, thin = "auto", numSamples = NULL, whichParameters = NULL, includesProbabilities = FALSE, reportDiagnostics = FALSE, ...){
  # Same as getSample.MCMC: the samples live in sampler$mvSamples.
  return(getSample(as.matrix(sampler$mvSamples), parametersOnly = parametersOnly, coda = coda, start = start, end = end, thin = thin, numSamples = numSamples, whichParameters = whichParameters, includesProbabilities = includesProbabilities, reportDiagnostics = reportDiagnostics))
}
#' Merge Chains
#'
#' Merge a list of MCMCs or chains
#'
#' The function merges a list of MCMC objects. Requirement is that the list contains classes for which the getSample function works
#'
#' @param l the list with MCMC outputs
#' @param ... arguments to be passed on to getSample
#'
#' @return a matrix
#'
#' @author Florian Hartig
#'
#' @export
#'
mergeChains <- function(l, ...){
  # BUG FIX: the original looped over 2:length(l), which evaluates to
  # c(2, 1) for a single-element list and therefore errored. lapply +
  # do.call(rbind, ...) handles any list length and preserves row order.
  do.call(rbind, lapply(l, getSample, ...))
}
|
setwd("C:/Users/Pippo/Documents/Master Geoinformation Beuth/Masterarbeit/Daten/R_Codes/R_Choropleth_Leaflet")
library(rgdal)
berlin <- readOGR("LOR-Planungsraeume.kml", #name of file
#if your browser adds a .txt after downloading the file
#you can add it here, too!
"LOR_Planungsraum", #name of layer
encoding="utf-8" #if our data contains german Umlauts like ä, ö and ü
)
plot(berlin)
Auslaender2007 <- read.csv("LOR_Auslaender_2007.csv", encoding="latin1", sep=",", dec=".")
Auslaender2008 <- read.csv("LOR_Auslaender_2008.csv", encoding="latin1", sep=",", dec=".")
library(leaflet)
palette <- colorBin(c('#fef0d9',
'#fdd49e',
'#fdbb84',
'#fc8d59',
'#e34a33',
'#b30000'),
Auslaender2008$ANTEIL, bins = 6, pretty=TRUE, alpha = TRUE)
popup2007 <- paste0("<strong>Auslaender 2007</strong></span>",
"<br><strong>LOR </strong></span>",
Auslaender2007$LORNAME,
"<br><strong> Relativer Auslaenderanteil </strong></span>",
Auslaender2007$ANTEIL
,"<br><strong>Absoluter Auslaenderanteil</strong></span>",
Auslaender2007$AUSLAENDER
)
# Popup for the 2008 layer.
# BUG FIX: the heading read "Auslaender 2007" (copy-paste from popup2007);
# it now correctly labels the 2008 data.
popup2008 <- paste0("<strong>Auslaender 2008</strong></span>",
                    "<br><strong>LOR </strong></span>",
                    Auslaender2008$LORNAME,
                    "<br><strong> Relativer Auslaenderanteil </strong></span>",
                    Auslaender2008$ANTEIL
                    ,"<br><strong>Absoluter Auslaenderanteil</strong></span>",
                    Auslaender2008$AUSLAENDER
)
mymap <- leaflet() %>%
addProviderTiles("Esri.WorldGrayCanvas",
options = tileOptions(minZoom=10, maxZoom=16)) %>% #"freeze" the mapwindow to max and min zoomlevel
addPolygons(data = berlin,
fillColor = ~palette(Auslaender2007$ANTEIL), ## we want the polygon filled with
## one of the palette-colors
## according to the value in student1$Anteil
fillOpacity = 1, ## how transparent do you want the polygon to be?
color = "darkgrey", ## color of borders between districts
weight = 1.5, ## width of borders
popup = popup2007, ## which popup?
group="<span style='font-size: 11pt'><strong>2007</strong></span>")%>%
## which group?
## the group's name has to be the same as later in "baseGroups", where we define
## the groups for the Layerscontrol. Because for this layer I wanted a specific
## color and size, the group name includes some font arguments.
## for the second layer we mix things up a little bit, so you'll see the difference in the map!
addPolygons(data = berlin,
fillColor = ~palette(Auslaender2008$ANTEIL),
fillOpacity = 1,
color = "darkgrey",
weight = 1.5,
popup = popup2008,
group="<span style='font-size: 11pt'><strong>2008</strong></span>")%>%
addLayersControl(
baseGroups = c("<span style='font-size: 11pt'><strong>2007</strong></span>", "<span style='font-size: 11pt'><strong>2008</strong></span>"
),
options = layersControlOptions(collapsed = FALSE))%>% ## we want our control to be seen right away
addLegend(position = 'topleft', pal = palette, values = Auslaender2008$ANTEIL, opacity = 1, title = "Relativer<br>Auslaenderanteil")
print(mymap)
|
/Choropleth_Leaflet.R
|
no_license
|
Pippo87/R-leaflet-choropleth
|
R
| false
| false
| 3,665
|
r
|
setwd("C:/Users/Pippo/Documents/Master Geoinformation Beuth/Masterarbeit/Daten/R_Codes/R_Choropleth_Leaflet")
library(rgdal)
berlin <- readOGR("LOR-Planungsraeume.kml", #name of file
#if your browser adds a .txt after downloading the file
#you can add it here, too!
"LOR_Planungsraum", #name of layer
encoding="utf-8" #if our data contains german Umlauts like ä, ö and ü
)
plot(berlin)
Auslaender2007 <- read.csv("LOR_Auslaender_2007.csv", encoding="latin1", sep=",", dec=".")
Auslaender2008 <- read.csv("LOR_Auslaender_2008.csv", encoding="latin1", sep=",", dec=".")
library(leaflet)
palette <- colorBin(c('#fef0d9',
'#fdd49e',
'#fdbb84',
'#fc8d59',
'#e34a33',
'#b30000'),
Auslaender2008$ANTEIL, bins = 6, pretty=TRUE, alpha = TRUE)
popup2007 <- paste0("<strong>Auslaender 2007</strong></span>",
"<br><strong>LOR </strong></span>",
Auslaender2007$LORNAME,
"<br><strong> Relativer Auslaenderanteil </strong></span>",
Auslaender2007$ANTEIL
,"<br><strong>Absoluter Auslaenderanteil</strong></span>",
Auslaender2007$AUSLAENDER
)
# Popup for the 2008 layer.
# BUG FIX: the heading read "Auslaender 2007" (copy-paste from popup2007);
# it now correctly labels the 2008 data.
popup2008 <- paste0("<strong>Auslaender 2008</strong></span>",
                    "<br><strong>LOR </strong></span>",
                    Auslaender2008$LORNAME,
                    "<br><strong> Relativer Auslaenderanteil </strong></span>",
                    Auslaender2008$ANTEIL
                    ,"<br><strong>Absoluter Auslaenderanteil</strong></span>",
                    Auslaender2008$AUSLAENDER
)
mymap <- leaflet() %>%
addProviderTiles("Esri.WorldGrayCanvas",
options = tileOptions(minZoom=10, maxZoom=16)) %>% #"freeze" the mapwindow to max and min zoomlevel
addPolygons(data = berlin,
fillColor = ~palette(Auslaender2007$ANTEIL), ## we want the polygon filled with
## one of the palette-colors
## according to the value in student1$Anteil
fillOpacity = 1, ## how transparent do you want the polygon to be?
color = "darkgrey", ## color of borders between districts
weight = 1.5, ## width of borders
popup = popup2007, ## which popup?
group="<span style='font-size: 11pt'><strong>2007</strong></span>")%>%
## which group?
## the group's name has to be the same as later in "baseGroups", where we define
## the groups for the Layerscontrol. Because for this layer I wanted a specific
## color and size, the group name includes some font arguments.
## for the second layer we mix things up a little bit, so you'll see the difference in the map!
addPolygons(data = berlin,
fillColor = ~palette(Auslaender2008$ANTEIL),
fillOpacity = 1,
color = "darkgrey",
weight = 1.5,
popup = popup2008,
group="<span style='font-size: 11pt'><strong>2008</strong></span>")%>%
addLayersControl(
baseGroups = c("<span style='font-size: 11pt'><strong>2007</strong></span>", "<span style='font-size: 11pt'><strong>2008</strong></span>"
),
options = layersControlOptions(collapsed = FALSE))%>% ## we want our control to be seen right away
addLegend(position = 'topleft', pal = palette, values = Auslaender2008$ANTEIL, opacity = 1, title = "Relativer<br>Auslaenderanteil")
print(mymap)
|
# Elephant seal analyses: load data and modelling / plotting packages.
Eseal_1981_2014 <- read.csv("C:/bbecker/Projects/eSeal/2018Analyses/Data/Eseal_1981_2014.csv")
# BUG FIX: the original called attach(eSealRain) / head(eSealRain), but no
# object named 'eSealRain' exists at this point - the CSV above is read into
# 'Eseal_1981_2014'. Inspect that object instead; attach() is dropped, as it
# silently masks variables and is discouraged in scripts.
head(Eseal_1981_2014)
library(lme4)
library(lattice)
library(effects)
library(ggplot2)
library(dplyr)
library(tidyr)
|
/Code/eSealCode.R
|
no_license
|
bbecker1000/eSeal_git
|
R
| false
| false
| 226
|
r
|
# Elephant seal analyses: load data and modelling / plotting packages.
Eseal_1981_2014 <- read.csv("C:/bbecker/Projects/eSeal/2018Analyses/Data/Eseal_1981_2014.csv")
# BUG FIX: the original called attach(eSealRain) / head(eSealRain), but no
# object named 'eSealRain' exists at this point - the CSV above is read into
# 'Eseal_1981_2014'. Inspect that object instead; attach() is dropped, as it
# silently masks variables and is discouraged in scripts.
head(Eseal_1981_2014)
library(lme4)
library(lattice)
library(effects)
library(ggplot2)
library(dplyr)
library(tidyr)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/massdefect.R
\name{getmassdiff}
\alias{getmassdiff}
\title{Isotope extraction for a single group of samples with a certain mass difference}
\usage{
getmassdiff(list, massdiff, rtwindow, mzwindow, ppm)
}
\arguments{
\item{list}{a list with mzrt profile, mz and rt}
\item{massdiff}{mass defect}
\item{rtwindow}{retention time range}
\item{mzwindow}{mass charge ratio window}
\item{ppm}{resolution of the mass spectrum}
}
\value{
dataframe with mass, retention time, scaled mass and scaled mass defect
}
\description{
Isotope extraction for a single group of samples with a certain mass difference
}
\seealso{
\code{\link{getmassdefect}},\code{\link{plotkms}}
}
|
/man/getmassdiff.Rd
|
no_license
|
zachcp/enviGCMS
|
R
| false
| true
| 721
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/massdefect.R
\name{getmassdiff}
\alias{getmassdiff}
\title{Isotope extraction for a single group of samples with a certain mass difference}
\usage{
getmassdiff(list, massdiff, rtwindow, mzwindow, ppm)
}
\arguments{
\item{list}{a list with mzrt profile, mz and rt}
\item{massdiff}{mass defect}
\item{rtwindow}{retention time range}
\item{mzwindow}{mass charge ratio window}
\item{ppm}{resolution of the mass spectrum}
}
\value{
dataframe with mass, retention time, scaled mass and scaled mass defect
}
\description{
Isotope extraction for a single group of samples with a certain mass difference
}
\seealso{
\code{\link{getmassdefect}},\code{\link{plotkms}}
}
|
# Here, we test accessing the Altmetrics API, and investigate the data it returns.
##---- Set up ----
# Add code to check for existing altmetric data. We will eventually need to build a separate dashboard
# for the publisher where they can easily add and retrieve data.
library(tidyverse) # for manipulating data easily
library(httr) # for accessing APIs within R
library(jsonlite) # for translating the JSON results into something readable
# Here's a quick function to access passwords without needing to put them in the script
# Read a credential (e.g. email or password) from a file under secrets/ so
# that no secret ever appears in the script itself.
my_secrets <- function(secret){
  secret_path <- paste0("secrets/", secret)
  # Fail loudly if the expected secret file is missing.
  if (!file.exists(secret_path)) {
    stop("Can't find secret file: '", secret_path, "'")
  }
  read_file(secret_path)
}
##---- Getting a token ----
url <- "https://altmetrics.ubiquity.press"
path <- "api/get_token"
raw.token <- GET(url = url,
path = path,
authenticate(user = my_secrets("email.csv"), password = my_secrets("password.csv"), type = "basic"))
JWT <- content(x = raw.token, as = "text")
##---- Calling the API multiple times ----
# Here's some books for which we have readership metrics but want alt-metrics
metrics_data <- read_csv("data/metrics.csv") %>%
as_tibble()
dois_of_interest <- metrics_data %>%
pull(work_uri) %>%
unique()
# Query the metrics API once per URI and combine the per-URI results into a
# single table. URIs that do not return HTTP 200 are skipped with a warning.
retrieve_altmetrics <- function(uris, key){
  # Preallocate one slot per URI so results can be filled in by index.
  calls_list <- vector("list", length(uris))
  # BUG FIX: seq_along() instead of 1:length(uris), which iterates over
  # c(1, 0) when 'uris' is empty.
  for(i in seq_along(uris)){
    uri <- uris[i]
    raw.result <- GET(url = "https://metrics.ubiquity.press",
                      path = "metrics",
                      add_headers(Authorization = key),
                      query = list(uri = uri))
    if(raw.result$status_code != 200){
      message(paste0(raw.result$status_code," "), appendLF = FALSE)
      warning(paste0(uri, " did not return valid results. Status code: ", raw.result$status_code),
              call. = FALSE)
      next()
    }
    this.content <- raw.result$content %>%
      rawToChar() %>%
      fromJSON()
    metrics <- this.content[[3]] %>%
      as_tibble()
    calls_list[[i]] <- metrics # don't forget the double [[]] when indexing a list, not a vector
    message("ok ", appendLF = FALSE) # progress marker, printed on one line
    Sys.sleep(time = 0) # placeholder for rate limiting; increase if the API throttles requests
  }
  combined_data <- bind_rows(calls_list)
  return(combined_data)
}
my_altmetrics <- retrieve_altmetrics(dois_of_interest, JWT)
# glimpse(my_altmetrics) # a check
if (file.exists("data/altmetrics.csv")){
warning("altmetrics.csv already exists. Overwriting it.")
}
write_csv(my_altmetrics, "data/altmetrics.csv")
# At this stage, the API is returning data in a non-standard format, and appears to be sparsely populated.
# Until the API is performing properly, we should take care with using it.
|
/get_altmetrics.R
|
no_license
|
jamesha95/HIRMEOS-dashboard
|
R
| false
| false
| 2,884
|
r
|
# Here, we test accessing the Altmetrics API, and investigate the data it returns.
##---- Set up ----
# Add code to check for existing altmetric data. We will eventually need to build a separate dashboard
# for the publisher where they can easily add and retrieve data.
library(tidyverse) # for manipulating data easily
library(httr) # for accessing APIs within R
library(jsonlite) # for translating the JSON results into something readable
# Here's a quick function to access passwords without needing to put them in the script
# Read a credential (e.g. email or password) from a file under secrets/ so
# that no secret ever appears in the script itself.
my_secrets <- function(secret){
  secret_path <- paste0("secrets/", secret)
  # Fail loudly if the expected secret file is missing.
  if (!file.exists(secret_path)) {
    stop("Can't find secret file: '", secret_path, "'")
  }
  read_file(secret_path)
}
##---- Getting a token ----
url <- "https://altmetrics.ubiquity.press"
path <- "api/get_token"
raw.token <- GET(url = url,
path = path,
authenticate(user = my_secrets("email.csv"), password = my_secrets("password.csv"), type = "basic"))
JWT <- content(x = raw.token, as = "text")
##---- Calling the API multiple times ----
# Here's some books for which we have readership metrics but want alt-metrics
metrics_data <- read_csv("data/metrics.csv") %>%
as_tibble()
dois_of_interest <- metrics_data %>%
pull(work_uri) %>%
unique()
# Query the metrics API once per URI and combine the per-URI results into a
# single table. URIs that do not return HTTP 200 are skipped with a warning.
retrieve_altmetrics <- function(uris, key){
  # Preallocate one slot per URI so results can be filled in by index.
  calls_list <- vector("list", length(uris))
  # BUG FIX: seq_along() instead of 1:length(uris), which iterates over
  # c(1, 0) when 'uris' is empty.
  for(i in seq_along(uris)){
    uri <- uris[i]
    raw.result <- GET(url = "https://metrics.ubiquity.press",
                      path = "metrics",
                      add_headers(Authorization = key),
                      query = list(uri = uri))
    if(raw.result$status_code != 200){
      message(paste0(raw.result$status_code," "), appendLF = FALSE)
      warning(paste0(uri, " did not return valid results. Status code: ", raw.result$status_code),
              call. = FALSE)
      next()
    }
    this.content <- raw.result$content %>%
      rawToChar() %>%
      fromJSON()
    metrics <- this.content[[3]] %>%
      as_tibble()
    calls_list[[i]] <- metrics # don't forget the double [[]] when indexing a list, not a vector
    message("ok ", appendLF = FALSE) # progress marker, printed on one line
    Sys.sleep(time = 0) # placeholder for rate limiting; increase if the API throttles requests
  }
  combined_data <- bind_rows(calls_list)
  return(combined_data)
}
my_altmetrics <- retrieve_altmetrics(dois_of_interest, JWT)
# glimpse(my_altmetrics) # a check
if (file.exists("data/altmetrics.csv")){
warning("altmetrics.csv already exists. Overwriting it.")
}
write_csv(my_altmetrics, "data/altmetrics.csv")
# At this stage, the API is returning data in a non-standard format, and appears to be sparsely populated.
# Until the API is performing properly, we should take care with using it.
|
# See ch7_R_simple_linear_regression.pdf
# ch7_R_simple_linear_regression.R
# Code from Chapter 7 of Applied Statistics
# Text and code used from:
# Applied Statistics with R
# 2021-07-23
# The license for Applied Statistics with R is given in the line below.
# This work is licensed under a Creative Commons Attribution- NonCommercial-ShareAlike 4.0 International License.
# The most current version of Applied Statistics with R should be available at:
# https://github.com/daviddalpiaz/appliedstats
# Chapter 7 Simple Linear Regression
#
# “All models are wrong, but some are useful.”
#
# — George E. P. Box
#
# After reading this chapter you will be able to:
#
# Understand the concept of a model.
# Describe two ways in which regression coefficients are derived.
# Estimate and visualize a regression model using R.
# Interpret regression coefficients and statistics in the context of real-world problems.
# Use a regression model to make predictions.
#
# 7.1 Modeling
#
# Let’s consider a simple example of how the speed of a car affects its stopping distance, that is, how far it travels before it comes to a stop. To examine this relationship, we will use the cars dataset which, is a default R dataset. Thus, we don’t need to load a package first; it is immediately available.
#
# To get a first look at the data you can use the View() function inside RStudio.
View(cars)
# We could also take a look at the variable names, the dimension of the data frame, and some sample observations with str().
str(cars)
## 'data.frame': 50 obs. of 2 variables:
## $ speed: num 4 4 7 7 8 9 10 10 10 11 ...
## $ dist : num 2 10 4 22 16 10 18 26 34 17 ...
# As we have seen before with data frames, there are a number of additional functions to access some of this information directly.
dim(cars)
## [1] 50 2
nrow(cars)
## [1] 50
ncol(cars)
## [1] 2
#Other than the two variable names and the number of observations, this data is still just a bunch of numbers, so we should probably obtain some context.
?cars
#Reading the documentation we learn that this is data gathered during the 1920s about the speed of cars and the resulting distance it takes for the car to come to a stop. The interesting task here is to determine how far a car travels before stopping, when traveling at a certain speed. So, we will first plot the stopping distance against the speed.
plot(dist ~ speed, data = cars,
xlab = "Speed (in Miles Per Hour)",
ylab = "Stopping Distance (in Feet)",
main = "Stopping Distance vs Speed",
pch = 20,
cex = 2,
col = "grey")
# 7.1.1 Simple Linear Regression Model
# 7.2 Least Squares Approach
# Extract the predictor (speed) and response (stopping distance)
# from the built-in cars data.
x = cars$speed
y = cars$dist
# Center each variable once, then build the three sums of squares
# from the centered vectors.
x_centered = x - mean(x)
y_centered = y - mean(y)
Sxy = sum(x_centered * y_centered)
Sxx = sum(x_centered ^ 2)
Syy = sum(y_centered ^ 2)
c(Sxy, Sxx, Syy)
## [1] 5387.40 1370.00 32538.98
# Closed-form least squares estimates: the slope is Sxy / Sxx, and the
# intercept forces the fitted line through the point of means.
beta_1_hat = Sxy / Sxx
beta_0_hat = mean(y) - beta_1_hat * mean(x)
c(beta_0_hat, beta_1_hat)
## [1] -17.579095 3.932409
# We can now use this line to make predictions. First, let’s see the possible
# x
# values in the cars dataset. Since some
# x
# values may appear more than once, we use the unique() to return each unique value only once.
unique(cars$speed)
## [1] 4 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25
# Let’s make a prediction for the stopping distance of a car traveling at 8 miles per hour.
beta_0_hat + beta_1_hat * 8
## [1] 13.88018
# This tells us that the estimated mean stopping distance of a car traveling at 8 miles per hour is
# 13.88
# .
#
# Now let’s make a prediction for the stopping distance of a car traveling at 21 miles per hour. This is considered interpolation as 21 is not an observed value of
# x
# . (But is in the data range.) We can use the special %in% operator to quickly verify this in R.
8 %in% unique(cars$speed)
## [1] TRUE
21 %in% unique(cars$speed)
## [1] FALSE
min(cars$speed) < 21 & 21 < max(cars$speed)
## [1] TRUE
beta_0_hat + beta_1_hat * 21
## [1] 65.00149
# Lastly, we can make a prediction for the stopping distance of a car traveling at 50 miles per hour. This is considered extrapolation as 50 is not an observed value of
# x
# and is outside data range. We should be less confident in predictions of this type.
range(cars$speed)
## [1] 4 25
range(cars$speed)[1] < 50 & 50 < range(cars$speed)[2]
## [1] FALSE
beta_0_hat + beta_1_hat * 50
## [1] 179.0413
# Cars travel 50 miles per hour rather easily today, but not in the 1920s!
# This is also an issue we saw when interpreting
# ^
# β
# 0
# =
# −
# 17.58
# , which is equivalent to making a prediction at
# x
# =
# 0
# We should not be confident in the estimated linear relationship outside of the range of data we have observed.
# 7.2.2 Residuals
# If we think of our model as “Response = Prediction + Error,” we can then write it as
#
# y
# =
# ^
# y
# +
# e
#
#
#
# We then define a residual to be the observed value minus the predicted value.
#
# e
# i
# =
# y
# i
# −
# ^
# y
# i
# Let’s calculate the residual for the prediction we made for a car traveling 8 miles per hour. First, we need to obtain the observed value of
which(cars$speed == 8)
## [1] 5
cars[5, ]
## speed dist
## 5 8 16
cars[which(cars$speed == 8), ]
## speed dist
## 5 8 16
# We can then calculate the residual.
16 - (beta_0_hat + beta_1_hat * 8)
## [1] 2.119825
#The positive residual value indicates that the observed stopping distance is actually 2.12 feet more than what was predicted.
# 7.2.3 Variance Estimation
# We’ll now use the residuals for each of the points to create an estimate for the variance
# Fitted values for every observation, from the estimated regression line.
y_hat = beta_0_hat + beta_1_hat * x
# Residuals: observed response minus fitted value.
e = y - y_hat
n = length(e)
# Estimate of the error variance; divide by n - 2 because two
# parameters (intercept and slope) were estimated from the data.
s2_e = sum(e^2) / (n - 2)
s2_e
## [1] 236.5317
# Just as with the univariate measure of variance, this value of 236.53 doesn’t have a practical interpretation in terms of stopping distance. Taking the square root, however, computes the standard deviation of the residuals, also known as residual standard error.
# The residual standard error is on the same scale (feet) as the response.
s_e = sqrt(s2_e)
s_e
## [1] 15.37959
# This tells us that our estimates of mean stopping distance are “typically” off by 15.38 feet.
# 7.3 Decomposition of Variation
# The quantity “Sum of Squares Error,”
# SSE
# , represents the unexplained variation of the observed
# y
# values. You will often see
# SSE
# written as
# RSS
# , or “Residual Sum of Squares.”
# Decompose the total variation in y: SST = SSReg + SSE.
# SST  - total sum of squares (variation about the mean of y)
# SSReg - variation explained by the regression line
# SSE  - residual (unexplained) variation
SST = sum((y - mean(y)) ^ 2)
SSReg = sum((y_hat - mean(y)) ^ 2)
SSE = sum((y - y_hat) ^ 2)
c(SST = SST, SSReg = SSReg, SSE = SSE)
## SST SSReg SSE
## 32538.98 21185.46 11353.52
# SSE / (n - 2) reproduces the variance estimate s2_e from above.
SSE / (n - 2)
## [1] 236.5317
# NOTE(review): `==` on doubles is fragile in general; it returns TRUE here
# only because both sides perform the identical arithmetic. Prefer
# isTRUE(all.equal(...)) for numeric comparison in real code.
s2_e == SSE / (n - 2)
## [1] TRUE
#These three measures also do not have an important practical interpretation individually. But together, they’re about to reveal a new statistic to help measure the strength of a SLR model.
# 7.3.1 Coefficient of Determination R2
#
# The coefficient of determination is interpreted as the proportion of observed variation in
# y that can be explained by the simple linear regression model.
R2 = SSReg / SST
R2
## [1] 0.6510794
# 7.4 The lm Function
#
# So far we have done regression by deriving the least squares estimates, then writing simple R commands to perform the necessary calculations. Since this is such a common task, this is functionality that is built directly into R via the lm() command.
#
# The lm() command is used to fit linear models which actually account for a broader class of models than simple linear regression, but we will use SLR as our first demonstration of lm(). The lm() function will be one of our most commonly used tools, so you may want to take a look at the documentation by using ?lm. You’ll notice there is a lot of information there, but we will start with just the very basics. This is documentation you will want to return to often.
#
# We’ll continue using the cars data, and essentially use the lm() function to check the work we had previously done.
stop_dist_model = lm(dist ~ speed, data = cars)
# This line of code fits our very first linear model. The syntax should look somewhat familiar. We use the dist ~ speed syntax to tell R we would like to model the response variable dist as a linear function of the predictor variable speed. In general, you should think of the syntax as response ~ predictor. The data = cars argument then tells R that that dist and speed variables are from the dataset cars. We then store this result in a variable stop_dist_model.
#
# The variable stop_dist_model now contains a wealth of information, and we will now see how to extract and use that information. The first thing we will do is simply output whatever is stored immediately in the variable stop_dist_model.
stop_dist_model
##
## Call:
## lm(formula = dist ~ speed, data = cars)
##
## Coefficients:
## (Intercept) speed
## -17.579 3.932
# We see that it first tells us the formula we input into R, that is lm(formula = dist ~ speed, data = cars). We also see the coefficients of the model. We can check that these are what we had calculated previously. (Minus some rounding that R is doing when displaying the results. They are stored with full precision.)
c(beta_0_hat, beta_1_hat)
## [1] -17.579095 3.932409
# Next, it would be nice to add the fitted line to the scatterplot. To do so we will use the abline() function.
plot(dist ~ speed, data = cars,
xlab = "Speed (in Miles Per Hour)",
ylab = "Stopping Distance (in Feet)",
main = "Stopping Distance vs Speed",
pch = 20,
cex = 2,
col = "grey")
abline(stop_dist_model, lwd = 3, col = "darkorange")
# The abline() function is used to add lines of the form
# a
# +
# b
# x
# to a plot. (Hence abline.) When we give it stop_dist_model as an argument, it automatically extracts the regression coefficient estimates (
#
# ^
# β
# 0
# and
# ^
# β
# 1
# ) and uses them as the slope and intercept of the line. Here we also use lwd to modify the width of the line, as well as col to modify the color of the line.
#
# The “thing” that is returned by the lm() function is actually an object of class lm which is a list. The exact details of this are unimportant unless you are seriously interested in the inner-workings of R, but know that we can determine the names of the elements of the list using the names() command.
names(stop_dist_model)
## [1] "coefficients" "residuals" "effects" "rank"
## [5] "fitted.values" "assign" "qr" "df.residual"
## [9] "xlevels" "call" "terms" "model"
# We can then use this information to, for example, access the residuals using the $ operator.
stop_dist_model$residuals
## 1 2 3 4 5 6 7
## 3.849460 11.849460 -5.947766 12.052234 2.119825 -7.812584 -3.744993
## 8 9 10 11 12 13 14
## 4.255007 12.255007 -8.677401 2.322599 -15.609810 -9.609810 -5.609810
## 15 16 17 18 19 20 21
## -1.609810 -7.542219 0.457781 0.457781 12.457781 -11.474628 -1.474628
## 22 23 24 25 26 27 28
## 22.525372 42.525372 -21.407036 -15.407036 12.592964 -13.339445 -5.339445
## 29 30 31 32 33 34 35
## -17.271854 -9.271854 0.728146 -11.204263 2.795737 22.795737 30.795737
## 36 37 38 39 40 41 42
## -21.136672 -11.136672 10.863328 -29.069080 -13.069080 -9.069080 -5.069080
## 43 44 45 46 47 48 49
## 2.930920 -2.933898 -18.866307 -6.798715 15.201285 16.201285 43.201285
## 50
## 4.268876
#Another way to access stored information in stop_dist_model are the coef(), resid(), and fitted() functions. These return the coefficients, residuals, and fitted values, respectively.
coef(stop_dist_model)
## (Intercept) speed
## -17.579095 3.932409
resid(stop_dist_model)
## 1 2 3 4 5 6 7
## 3.849460 11.849460 -5.947766 12.052234 2.119825 -7.812584 -3.744993
## 8 9 10 11 12 13 14
## 4.255007 12.255007 -8.677401 2.322599 -15.609810 -9.609810 -5.609810
## 15 16 17 18 19 20 21
## -1.609810 -7.542219 0.457781 0.457781 12.457781 -11.474628 -1.474628
## 22 23 24 25 26 27 28
## 22.525372 42.525372 -21.407036 -15.407036 12.592964 -13.339445 -5.339445
## 29 30 31 32 33 34 35
## -17.271854 -9.271854 0.728146 -11.204263 2.795737 22.795737 30.795737
## 36 37 38 39 40 41 42
## -21.136672 -11.136672 10.863328 -29.069080 -13.069080 -9.069080 -5.069080
## 43 44 45 46 47 48 49
## 2.930920 -2.933898 -18.866307 -6.798715 15.201285 16.201285 43.201285
## 50
## 4.268876
fitted(stop_dist_model)
## 1 2 3 4 5 6 7 8
## -1.849460 -1.849460 9.947766 9.947766 13.880175 17.812584 21.744993 21.744993
## 9 10 11 12 13 14 15 16
## 21.744993 25.677401 25.677401 29.609810 29.609810 29.609810 29.609810 33.542219
## 17 18 19 20 21 22 23 24
## 33.542219 33.542219 33.542219 37.474628 37.474628 37.474628 37.474628 41.407036
## 25 26 27 28 29 30 31 32
## 41.407036 41.407036 45.339445 45.339445 49.271854 49.271854 49.271854 53.204263
## 33 34 35 36 37 38 39 40
## 53.204263 53.204263 53.204263 57.136672 57.136672 57.136672 61.069080 61.069080
## 41 42 43 44 45 46 47 48
## 61.069080 61.069080 61.069080 68.933898 72.866307 76.798715 76.798715 76.798715
## 49 50
## 76.798715 80.731124
# An R function that is useful in many situations is summary(). We see that when it is called on our model, it returns a good deal of information. By the end of the course, you will know what every value here is used for. For now, you should immediately notice the coefficient estimates, and you may recognize the
# R
# 2
# value we saw earlier.
summary(stop_dist_model)
##
## Call:
## lm(formula = dist ~ speed, data = cars)
##
## Residuals:
## Min 1Q Median 3Q Max
## -29.069 -9.525 -2.272 9.215 43.201
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -17.5791 6.7584 -2.601 0.0123 *
## speed 3.9324 0.4155 9.464 1.49e-12 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 15.38 on 48 degrees of freedom
## Multiple R-squared: 0.6511, Adjusted R-squared: 0.6438
## F-statistic: 89.57 on 1 and 48 DF, p-value: 1.49e-12
# The summary() command also returns a list, and we can again use names() to learn about the elements of this list.
names(summary(stop_dist_model))
## [1] "call" "terms" "residuals" "coefficients"
## [5] "aliased" "sigma" "df" "r.squared"
## [9] "adj.r.squared" "fstatistic" "cov.unscaled"
# So, for example, if we wanted to directly access the value of
# R
# 2
# , instead of copy and pasting it out of the printed statement from summary(), we could do so.
summary(stop_dist_model)$r.squared
## [1] 0.6510794
# Another value we may want to access
summary(stop_dist_model)$sigma
## [1] 15.37959
# Note that this is the same result seen earlier as s_e. You may also notice that this value was displayed above as a result of the summary() command, which R labeled the “Residual Standard Error.”
# Often it is useful to talk about
# s
# e
# (or RSE) instead of
# s
# 2
# e
# because of their units. The units of
# s
# e
# in the cars example is feet, while the units of
# s
# 2
# e
# is feet-squared.
#
# Another useful function, which we will use almost as often as lm() is the predict() function.
predict(stop_dist_model, newdata = data.frame(speed = 8))
## 1
## 13.88018
# The above code reads “predict the stopping distance of a car traveling 8 miles per hour using the stop_dist_model.” Importantly, the second argument to predict() is a data frame that we make in place. We do this so that we can specify that 8 is a value of speed, so that predict knows how to use it with the model stored in stop_dist_model. We see that this result is what we had calculated “by hand” previously.
#
# We could also predict multiple values at once.
predict(stop_dist_model, newdata = data.frame(speed = c(8, 21, 50)))
## 1 2 3
## 13.88018 65.00149 179.04134
# Or we could calculate the fitted value for each of the original data points. We can simply supply the original data frame, cars, since it contains a variable called speed which has the values we would like to predict at.
predict(stop_dist_model, newdata = cars)
## 1 2 3 4 5 6 7 8
## -1.849460 -1.849460 9.947766 9.947766 13.880175 17.812584 21.744993 21.744993
## 9 10 11 12 13 14 15 16
## 21.744993 25.677401 25.677401 29.609810 29.609810 29.609810 29.609810 33.542219
## 17 18 19 20 21 22 23 24
## 33.542219 33.542219 33.542219 37.474628 37.474628 37.474628 37.474628 41.407036
## 25 26 27 28 29 30 31 32
## 41.407036 41.407036 45.339445 45.339445 49.271854 49.271854 49.271854 53.204263
## 33 34 35 36 37 38 39 40
## 53.204263 53.204263 53.204263 57.136672 57.136672 57.136672 61.069080 61.069080
## 41 42 43 44 45 46 47 48
## 61.069080 61.069080 61.069080 68.933898 72.866307 76.798715 76.798715 76.798715
## 49 50
## 76.798715 80.731124
# predict(stop_dist_model, newdata = data.frame(speed = cars$speed))
# This is actually equivalent to simply calling predict() on stop_dist_model without a second argument.
predict(stop_dist_model)
## 1 2 3 4 5 6 7 8
## -1.849460 -1.849460 9.947766 9.947766 13.880175 17.812584 21.744993 21.744993
## 9 10 11 12 13 14 15 16
## 21.744993 25.677401 25.677401 29.609810 29.609810 29.609810 29.609810 33.542219
## 17 18 19 20 21 22 23 24
## 33.542219 33.542219 33.542219 37.474628 37.474628 37.474628 37.474628 41.407036
## 25 26 27 28 29 30 31 32
## 41.407036 41.407036 45.339445 45.339445 49.271854 49.271854 49.271854 53.204263
## 33 34 35 36 37 38 39 40
## 53.204263 53.204263 53.204263 57.136672 57.136672 57.136672 61.069080 61.069080
## 41 42 43 44 45 46 47 48
## 61.069080 61.069080 61.069080 68.933898 72.866307 76.798715 76.798715 76.798715
## 49 50
## 76.798715 80.731124
# Note that then in this case, this is the same as using fitted().
fitted(stop_dist_model)
## 1 2 3 4 5 6 7 8
## -1.849460 -1.849460 9.947766 9.947766 13.880175 17.812584 21.744993 21.744993
## 9 10 11 12 13 14 15 16
## 21.744993 25.677401 25.677401 29.609810 29.609810 29.609810 29.609810 33.542219
## 17 18 19 20 21 22 23 24
## 33.542219 33.542219 33.542219 37.474628 37.474628 37.474628 37.474628 41.407036
## 25 26 27 28 29 30 31 32
## 41.407036 41.407036 45.339445 45.339445 49.271854 49.271854 49.271854 53.204263
## 33 34 35 36 37 38 39 40
## 53.204263 53.204263 53.204263 57.136672 57.136672 57.136672 61.069080 61.069080
## 41 42 43 44 45 46 47 48
## 61.069080 61.069080 61.069080 68.933898 72.866307 76.798715 76.798715 76.798715
## 49 50
## 76.798715 80.731124
#7.5 Maximum Likelihood Estimation (MLE) Approach
# Our goal is to find values of
# β
# 0
# ,
# β
# 1
# , and
# σ
# 2
# which maximize this function, which is a straightforward multivariate calculus problem.
# Note that we use
# log
# to mean the natural logarithm.
# 7.6 Simulating SLR
#
# We return again to more examples of simulation. This will be a common theme!
#
# In practice you will almost never have a true model, and you will use data to attempt to recover information about the unknown true model.
# With simulation, we decide the true model and simulate data from it. Then, we apply a method to the data, in this case least squares.
# Now, since we know the true model, we can assess how well it did.
# For this example, we will simulate
# n = 21 observations
# We first set the true parameters of the model to be simulated.
# True (known) parameters of the model to be simulated:
# y = beta_0 + beta_1 * x + epsilon, epsilon ~ N(0, sigma^2).
num_obs = 21
beta_0 = 5
beta_1 = -2
sigma = 3
# Next, we obtain simulated values of
# ϵi
# after setting a seed for reproducibility.
# NOTE: the rnorm() call must come immediately after set.seed(1) —
# reordering the RNG calls would change the simulated draws.
set.seed(1)
epsilon = rnorm(n = num_obs, mean = 0, sd = sigma)
# Fixed, evenly spaced predictor values on [0, 10].
x_vals = seq(from = 0, to = 10, length.out = num_obs)
# Another common practice is to generate them from a uniform distribution, and then use them for the remainder of the analysis.
# set.seed(1)
# x_vals = runif(num_obs, 0, 10)
# Responses generated from the true model.
y_vals = beta_0 + beta_1 * x_vals + epsilon
# The data,
# (
# x
# i
# ,
# y
# i
# )
# represent a possible sample from the true distribution. Now to check how well the method of least squares works, we use lm() to fit the model to our simulated data, then take a look at the estimated coefficients.
sim_fit = lm(y_vals ~ x_vals)
coef(sim_fit)
## (Intercept) x_vals
## 4.832639 -1.831401
# And look at that, they aren’t too far from the true parameters we specified!
plot(y_vals ~ x_vals)
abline(sim_fit)
# We should say here, that we’re being sort of lazy, and not the good kind of lazy that could be considered efficient. Any time you simulate data, you should consider doing two things: writing a function, and storing the data in a data frame.
#
# The function below, sim_slr(), can be used for the same task as above, but is much more flexible. Notice that we provide x to the function, instead of generating x inside the function. In the SLR model, the
# x
# i
# are considered known values. That is, they are not random, so we do not assume a distribution for the
# x
# i
# . Because of this, we will repeatedly use the same x values across all simulations.
# Simulate responses from a simple linear regression model
#   y = beta_0 + beta_1 * x + epsilon,  epsilon ~ N(0, sigma^2),
# at the fixed predictor values supplied in `x` (the x_i are treated
# as known constants, so the caller provides them).
#
# Arguments:
#   x      - numeric vector of predictor values.
#   beta_0 - true intercept (default 10).
#   beta_1 - true slope (default 5).
#   sigma  - noise standard deviation; must be non-negative (default 1).
#
# Returns a data frame with columns `predictor` (= x) and `response`
# (one simulated y per element of x). Uses the current RNG state, so
# call set.seed() beforehand for reproducible draws.
sim_slr = function(x, beta_0 = 10, beta_1 = 5, sigma = 1) {
  # Fail fast on bad input: a negative sd would otherwise make rnorm()
  # return NaN with only a warning, silently poisoning the simulation.
  stopifnot(is.numeric(x), is.numeric(sigma), sigma >= 0)
  n = length(x)
  epsilon = rnorm(n, mean = 0, sd = sigma)
  y = beta_0 + beta_1 * x + epsilon
  data.frame(predictor = x, response = y)
}
# Here, we use the function to repeat the analysis above.
set.seed(1)
sim_data = sim_slr(x = x_vals, beta_0 = 5, beta_1 = -2, sigma = 3)
# This time, the simulated observations are stored in a data frame.
head(sim_data)
## predictor response
## 1 0.0 3.1206386
## 2 0.5 4.5509300
## 3 1.0 0.4931142
## 4 1.5 6.7858424
## 5 2.0 1.9885233
## 6 2.5 -2.4614052
# Now when we fit the model with lm() we can use a data argument, a very good practice.
sim_fit = lm(response ~ predictor, data = sim_data)
coef(sim_fit)
## (Intercept) predictor
## 4.832639 -1.831401
# And this time, we’ll make the plot look a lot nicer.
plot(response ~ predictor, data = sim_data,
xlab = "Simulated Predictor Variable",
ylab = "Simulated Response Variable",
main = "Simulated Regression Data",
pch = 20,
cex = 2,
col = "grey")
abline(sim_fit, lwd = 3, lty = 1, col = "darkorange")
abline(beta_0, beta_1, lwd = 3, lty = 2, col = "dodgerblue")
legend("topright", c("Estimate", "Truth"), lty = c(1, 2), lwd = 2,
col = c("darkorange", "dodgerblue"))
# 7.7 History
# For some brief background on the history of linear regression, see “Galton, Pearson, and the Peas: A Brief History of Linear Regression for Statistics Instructors” from the Journal of Statistics Education as well as the Wikipedia page on the history of regression analysis and lastly the article for regression to the mean which details the origins of the term “regression.”
|
/ch7_R_simple_linear_regression.R
|
no_license
|
nicholaskarlson/AppliedStats22Sep2021
|
R
| false
| false
| 25,353
|
r
|
# See ch7_R_simple_linear_regression.pdf
# ch7_R_simple_linear_regression.R
# Code from Chapter 7 of Applied Statistics
# Text and code used from:
# Applied Statistics with R
# 2021-07-23
# The license for Applied Statistics with R is given in the line below.
# This work is licensed under a Creative Commons Attribution- NonCommercial-ShareAlike 4.0 International License.
# The most current version of Applied Statistics with R should be available at:
# https://github.com/daviddalpiaz/appliedstats
# Chapter 7 Simple Linear Regression
#
# “All models are wrong, but some are useful.”
#
# — George E. P. Box
#
# After reading this chapter you will be able to:
#
# Understand the concept of a model.
# Describe two ways in which regression coefficients are derived.
# Estimate and visualize a regression model using R.
# Interpret regression coefficients and statistics in the context of real-world problems.
# Use a regression model to make predictions.
#
# 7.1 Modeling
#
# Let’s consider a simple example of how the speed of a car affects its stopping distance, that is, how far it travels before it comes to a stop. To examine this relationship, we will use the cars dataset, which is a default R dataset. Thus, we don’t need to load a package first; it is immediately available.
#
# To get a first look at the data you can use the View() function inside RStudio.
View(cars)
# We could also take a look at the variable names, the dimension of the data frame, and some sample observations with str().
str(cars)
## 'data.frame': 50 obs. of 2 variables:
## $ speed: num 4 4 7 7 8 9 10 10 10 11 ...
## $ dist : num 2 10 4 22 16 10 18 26 34 17 ...
# As we have seen before with data frames, there are a number of additional functions to access some of this information directly.
dim(cars)
## [1] 50 2
nrow(cars)
## [1] 50
ncol(cars)
## [1] 2
#Other than the two variable names and the number of observations, this data is still just a bunch of numbers, so we should probably obtain some context.
?cars
#Reading the documentation we learn that this is data gathered during the 1920s about the speed of cars and the resulting distance it takes for the car to come to a stop. The interesting task here is to determine how far a car travels before stopping, when traveling at a certain speed. So, we will first plot the stopping distance against the speed.
plot(dist ~ speed, data = cars,
xlab = "Speed (in Miles Per Hour)",
ylab = "Stopping Distance (in Feet)",
main = "Stopping Distance vs Speed",
pch = 20,
cex = 2,
col = "grey")
# 7.1.1 Simple Linear Regression Model
# 7.2 Least Squares Approach
# Predictor (speed) and response (stopping distance) from the built-in cars data.
x = cars$speed
y = cars$dist
# calculate three sums of squares
Sxy = sum((x - mean(x)) * (y - mean(y)))
Sxx = sum((x - mean(x)) ^ 2)
Syy = sum((y - mean(y)) ^ 2)
c(Sxy, Sxx, Syy)
## [1] 5387.40 1370.00 32538.98
# Closed-form least squares estimates: slope = Sxy / Sxx; the intercept
# forces the fitted line through the point of means (mean(x), mean(y)).
beta_1_hat = Sxy / Sxx
beta_0_hat = mean(y) - beta_1_hat * mean(x)
c(beta_0_hat, beta_1_hat)
## [1] -17.579095 3.932409
# We can now use this line to make predictions. First, let’s see the possible
# x
# values in the cars dataset. Since some
# x
# values may appear more than once, we use the unique() to return each unique value only once.
unique(cars$speed)
## [1] 4 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25
# Let’s make a prediction for the stopping distance of a car traveling at 8 miles per hour.
beta_0_hat + beta_1_hat * 8
## [1] 13.88018
# This tells us that the estimated mean stopping distance of a car traveling at 8 miles per hour is
# 13.88
# .
#
# Now let’s make a prediction for the stopping distance of a car traveling at 21 miles per hour. This is considered interpolation as 21 is not an observed value of
# x
# . (But is in the data range.) We can use the special %in% operator to quickly verify this in R.
8 %in% unique(cars$speed)
## [1] TRUE
21 %in% unique(cars$speed)
## [1] FALSE
min(cars$speed) < 21 & 21 < max(cars$speed)
## [1] TRUE
beta_0_hat + beta_1_hat * 21
## [1] 65.00149
# Lastly, we can make a prediction for the stopping distance of a car traveling at 50 miles per hour. This is considered extrapolation as 50 is not an observed value of
# x
# and is outside data range. We should be less confident in predictions of this type.
range(cars$speed)
## [1] 4 25
range(cars$speed)[1] < 50 & 50 < range(cars$speed)[2]
## [1] FALSE
beta_0_hat + beta_1_hat * 50
## [1] 179.0413
# Cars travel 50 miles per hour rather easily today, but not in the 1920s!
# This is also an issue we saw when interpreting
# ^
# β
# 0
# =
# −
# 17.58
# , which is equivalent to making a prediction at
# x
# =
# 0
# We should not be confident in the estimated linear relationship outside of the range of data we have observed.
# 7.2.2 Residuals
# If we think of our model as “Response = Prediction + Error,” we can then write it as
#
# y
# =
# ^
# y
# +
# e
#
#
#
# We then define a residual to be the observed value minus the predicted value.
#
# e
# i
# =
# y
# i
# −
# ^
# y
# i
# Let’s calculate the residual for the prediction we made for a car traveling 8 miles per hour. First, we need to obtain the observed value of
which(cars$speed == 8)
## [1] 5
cars[5, ]
## speed dist
## 5 8 16
cars[which(cars$speed == 8), ]
## speed dist
## 5 8 16
# We can then calculate the residual.
16 - (beta_0_hat + beta_1_hat * 8)
## [1] 2.119825
#The positive residual value indicates that the observed stopping distance is actually 2.12 feet more than what was predicted.
# 7.2.3 Variance Estimation
# We’ll now use the residuals for each of the points to create an estimate for the variance
# Fitted values for every observation, from the estimated regression line.
y_hat = beta_0_hat + beta_1_hat * x
# Residuals: observed response minus fitted value.
e = y - y_hat
n = length(e)
# Error variance estimate; n - 2 degrees of freedom because the
# intercept and slope were both estimated from the data.
s2_e = sum(e^2) / (n - 2)
s2_e
## [1] 236.5317
# Just as with the univariate measure of variance, this value of 236.53 doesn’t have a practical interpretation in terms of stopping distance. Taking the square root, however, computes the standard deviation of the residuals, also known as residual standard error.
# The residual standard error is on the same scale (feet) as the response.
s_e = sqrt(s2_e)
s_e
## [1] 15.37959
# This tells us that our estimates of mean stopping distance are “typically” off by 15.38 feet.
# 7.3 Decomposition of Variation
# The quantity “Sum of Squares Error,”
# SSE
# , represents the unexplained variation of the observed
# y
# values. You will often see
# SSE
# written as
# RSS
# , or “Residual Sum of Squares.”
SST = sum((y - mean(y)) ^ 2)
SSReg = sum((y_hat - mean(y)) ^ 2)
SSE = sum((y - y_hat) ^ 2)
c(SST = SST, SSReg = SSReg, SSE = SSE)
## SST SSReg SSE
## 32538.98 21185.46 11353.52
SSE / (n - 2)
## [1] 236.5317
s2_e == SSE / (n - 2)
## [1] TRUE
#These three measures also do not have an important practical interpretation individually. But together, they’re about to reveal a new statistic to help measure the strength of a SLR model.
# 7.3.1 Coefficient of Determination R2
#
# The coefficient of determination is interpreted as the proportion of observed variation in
# y that can be explained by the simple linear regression model.
R2 = SSReg / SST
R2
## [1] 0.6510794
# 7.4 The lm Function
#
# So far we have done regression by deriving the least squares estimates, then writing simple R commands to perform the necessary calculations. Since this is such a common task, this is functionality that is built directly into R via the lm() command.
#
# The lm() command is used to fit linear models which actually account for a broader class of models than simple linear regression, but we will use SLR as our first demonstration of lm(). The lm() function will be one of our most commonly used tools, so you may want to take a look at the documentation by using ?lm. You’ll notice there is a lot of information there, but we will start with just the very basics. This is documentation you will want to return to often.
#
# We’ll continue using the cars data, and essentially use the lm() function to check the work we had previously done.
stop_dist_model = lm(dist ~ speed, data = cars)
# This line of code fits our very first linear model. The syntax should look somewhat familiar. We use the dist ~ speed syntax to tell R we would like to model the response variable dist as a linear function of the predictor variable speed. In general, you should think of the syntax as response ~ predictor. The data = cars argument then tells R that that dist and speed variables are from the dataset cars. We then store this result in a variable stop_dist_model.
#
# The variable stop_dist_model now contains a wealth of information, and we will now see how to extract and use that information. The first thing we will do is simply output whatever is stored immediately in the variable stop_dist_model.
stop_dist_model
##
## Call:
## lm(formula = dist ~ speed, data = cars)
##
## Coefficients:
## (Intercept) speed
## -17.579 3.932
# We see that it first tells us the formula we input into R, that is lm(formula = dist ~ speed, data = cars). We also see the coefficients of the model. We can check that these are what we had calculated previously. (Minus some rounding that R is doing when displaying the results. They are stored with full precision.)
c(beta_0_hat, beta_1_hat)
## [1] -17.579095 3.932409
# Next, it would be nice to add the fitted line to the scatterplot. To do so we will use the abline() function.
plot(dist ~ speed, data = cars,
xlab = "Speed (in Miles Per Hour)",
ylab = "Stopping Distance (in Feet)",
main = "Stopping Distance vs Speed",
pch = 20,
cex = 2,
col = "grey")
abline(stop_dist_model, lwd = 3, col = "darkorange")
# The abline() function is used to add lines of the form
# a
# +
# b
# x
# to a plot. (Hence abline.) When we give it stop_dist_model as an argument, it automatically extracts the regression coefficient estimates (
#
# ^
# β
# 0
# and
# ^
# β
# 1
# ) and uses them as the slope and intercept of the line. Here we also use lwd to modify the width of the line, as well as col to modify the color of the line.
#
# The “thing” that is returned by the lm() function is actually an object of class lm which is a list. The exact details of this are unimportant unless you are seriously interested in the inner-workings of R, but know that we can determine the names of the elements of the list using the names() command.
names(stop_dist_model)
## [1] "coefficients" "residuals" "effects" "rank"
## [5] "fitted.values" "assign" "qr" "df.residual"
## [9] "xlevels" "call" "terms" "model"
# We can then use this information to, for example, access the residuals using the $ operator.
stop_dist_model$residuals
## 1 2 3 4 5 6 7
## 3.849460 11.849460 -5.947766 12.052234 2.119825 -7.812584 -3.744993
## 8 9 10 11 12 13 14
## 4.255007 12.255007 -8.677401 2.322599 -15.609810 -9.609810 -5.609810
## 15 16 17 18 19 20 21
## -1.609810 -7.542219 0.457781 0.457781 12.457781 -11.474628 -1.474628
## 22 23 24 25 26 27 28
## 22.525372 42.525372 -21.407036 -15.407036 12.592964 -13.339445 -5.339445
## 29 30 31 32 33 34 35
## -17.271854 -9.271854 0.728146 -11.204263 2.795737 22.795737 30.795737
## 36 37 38 39 40 41 42
## -21.136672 -11.136672 10.863328 -29.069080 -13.069080 -9.069080 -5.069080
## 43 44 45 46 47 48 49
## 2.930920 -2.933898 -18.866307 -6.798715 15.201285 16.201285 43.201285
## 50
## 4.268876
#Another way to access stored information in stop_dist_model are the coef(), resid(), and fitted() functions. These return the coefficients, residuals, and fitted values, respectively.
coef(stop_dist_model)
## (Intercept) speed
## -17.579095 3.932409
resid(stop_dist_model)
## 1 2 3 4 5 6 7
## 3.849460 11.849460 -5.947766 12.052234 2.119825 -7.812584 -3.744993
## 8 9 10 11 12 13 14
## 4.255007 12.255007 -8.677401 2.322599 -15.609810 -9.609810 -5.609810
## 15 16 17 18 19 20 21
## -1.609810 -7.542219 0.457781 0.457781 12.457781 -11.474628 -1.474628
## 22 23 24 25 26 27 28
## 22.525372 42.525372 -21.407036 -15.407036 12.592964 -13.339445 -5.339445
## 29 30 31 32 33 34 35
## -17.271854 -9.271854 0.728146 -11.204263 2.795737 22.795737 30.795737
## 36 37 38 39 40 41 42
## -21.136672 -11.136672 10.863328 -29.069080 -13.069080 -9.069080 -5.069080
## 43 44 45 46 47 48 49
## 2.930920 -2.933898 -18.866307 -6.798715 15.201285 16.201285 43.201285
## 50
## 4.268876
fitted(stop_dist_model)
## 1 2 3 4 5 6 7 8
## -1.849460 -1.849460 9.947766 9.947766 13.880175 17.812584 21.744993 21.744993
## 9 10 11 12 13 14 15 16
## 21.744993 25.677401 25.677401 29.609810 29.609810 29.609810 29.609810 33.542219
## 17 18 19 20 21 22 23 24
## 33.542219 33.542219 33.542219 37.474628 37.474628 37.474628 37.474628 41.407036
## 25 26 27 28 29 30 31 32
## 41.407036 41.407036 45.339445 45.339445 49.271854 49.271854 49.271854 53.204263
## 33 34 35 36 37 38 39 40
## 53.204263 53.204263 53.204263 57.136672 57.136672 57.136672 61.069080 61.069080
## 41 42 43 44 45 46 47 48
## 61.069080 61.069080 61.069080 68.933898 72.866307 76.798715 76.798715 76.798715
## 49 50
## 76.798715 80.731124
# An R function that is useful in many situations is summary(). We see that when it is called on our model, it returns a good deal of information. By the end of the course, you will know what every value here is used for. For now, you should immediately notice the coefficient estimates, and you may recognize the R^2 value we saw earlier.
summary(stop_dist_model)
##
## Call:
## lm(formula = dist ~ speed, data = cars)
##
## Residuals:
## Min 1Q Median 3Q Max
## -29.069 -9.525 -2.272 9.215 43.201
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -17.5791 6.7584 -2.601 0.0123 *
## speed 3.9324 0.4155 9.464 1.49e-12 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 15.38 on 48 degrees of freedom
## Multiple R-squared: 0.6511, Adjusted R-squared: 0.6438
## F-statistic: 89.57 on 1 and 48 DF, p-value: 1.49e-12
# The summary() command also returns a list, and we can again use names() to learn what about the elements of this list.
names(summary(stop_dist_model))
## [1] "call" "terms" "residuals" "coefficients"
## [5] "aliased" "sigma" "df" "r.squared"
## [9] "adj.r.squared" "fstatistic" "cov.unscaled"
# So, for example, if we wanted to directly access the value of R^2, instead of copying and pasting it out of the printed statement from summary(), we could do so.
summary(stop_dist_model)$r.squared
## [1] 0.6510794
# Another value we may want to access
summary(stop_dist_model)$sigma
## [1] 15.37959
# Note that this is the same result seen earlier as s_e. You may also notice that this value was displayed above as a result of the summary() command, which R labeled the “Residual Standard Error.”
# Often it is useful to talk about s_e (or RSE) instead of s_e^2 because of
# their units. The units of s_e in the cars example are feet, while the units
# of s_e^2 are feet-squared.
#
# Another useful function, which we will use almost as often as lm() is the predict() function.
predict(stop_dist_model, newdata = data.frame(speed = 8))
## 1
## 13.88018
# The above code reads “predict the stopping distance of a car traveling 8 miles per hour using the stop_dist_model.” Importantly, the second argument to predict() is a data frame that we make in place. We do this so that we can specify that 8 is a value of speed, so that predict knows how to use it with the model stored in stop_dist_model. We see that this result is what we had calculated “by hand” previously.
#
# We could also predict multiple values at once.
predict(stop_dist_model, newdata = data.frame(speed = c(8, 21, 50)))
## 1 2 3
## 13.88018 65.00149 179.04134
# Or we could calculate the fitted value for each of the original data points. We can simply supply the original data frame, cars, since it contains a variables called speed which has the values we would like to predict at.
predict(stop_dist_model, newdata = cars)
## 1 2 3 4 5 6 7 8
## -1.849460 -1.849460 9.947766 9.947766 13.880175 17.812584 21.744993 21.744993
## 9 10 11 12 13 14 15 16
## 21.744993 25.677401 25.677401 29.609810 29.609810 29.609810 29.609810 33.542219
## 17 18 19 20 21 22 23 24
## 33.542219 33.542219 33.542219 37.474628 37.474628 37.474628 37.474628 41.407036
## 25 26 27 28 29 30 31 32
## 41.407036 41.407036 45.339445 45.339445 49.271854 49.271854 49.271854 53.204263
## 33 34 35 36 37 38 39 40
## 53.204263 53.204263 53.204263 57.136672 57.136672 57.136672 61.069080 61.069080
## 41 42 43 44 45 46 47 48
## 61.069080 61.069080 61.069080 68.933898 72.866307 76.798715 76.798715 76.798715
## 49 50
## 76.798715 80.731124
# predict(stop_dist_model, newdata = data.frame(speed = cars$speed))
# This is actually equivalent to simply calling predict() on stop_dist_model without a second argument.
predict(stop_dist_model)
## 1 2 3 4 5 6 7 8
## -1.849460 -1.849460 9.947766 9.947766 13.880175 17.812584 21.744993 21.744993
## 9 10 11 12 13 14 15 16
## 21.744993 25.677401 25.677401 29.609810 29.609810 29.609810 29.609810 33.542219
## 17 18 19 20 21 22 23 24
## 33.542219 33.542219 33.542219 37.474628 37.474628 37.474628 37.474628 41.407036
## 25 26 27 28 29 30 31 32
## 41.407036 41.407036 45.339445 45.339445 49.271854 49.271854 49.271854 53.204263
## 33 34 35 36 37 38 39 40
## 53.204263 53.204263 53.204263 57.136672 57.136672 57.136672 61.069080 61.069080
## 41 42 43 44 45 46 47 48
## 61.069080 61.069080 61.069080 68.933898 72.866307 76.798715 76.798715 76.798715
## 49 50
## 76.798715 80.731124
# Note that then in this case, this is the same as using fitted().
fitted(stop_dist_model)
## 1 2 3 4 5 6 7 8
## -1.849460 -1.849460 9.947766 9.947766 13.880175 17.812584 21.744993 21.744993
## 9 10 11 12 13 14 15 16
## 21.744993 25.677401 25.677401 29.609810 29.609810 29.609810 29.609810 33.542219
## 17 18 19 20 21 22 23 24
## 33.542219 33.542219 33.542219 37.474628 37.474628 37.474628 37.474628 41.407036
## 25 26 27 28 29 30 31 32
## 41.407036 41.407036 45.339445 45.339445 49.271854 49.271854 49.271854 53.204263
## 33 34 35 36 37 38 39 40
## 53.204263 53.204263 53.204263 57.136672 57.136672 57.136672 61.069080 61.069080
## 41 42 43 44 45 46 47 48
## 61.069080 61.069080 61.069080 68.933898 72.866307 76.798715 76.798715 76.798715
## 49 50
## 76.798715 80.731124
# 7.5 Maximum Likelihood Estimation (MLE) Approach
# Our goal is to find values of beta_0, beta_1, and sigma^2 which maximize
# the likelihood function, which is a straightforward multivariate calculus
# problem. Note that we use log to mean the natural logarithm.
# 7.6 Simulating SLR
#
# We return again to more examples of simulation. This will be a common theme!
#
# In practice you will almost never have a true model, and you will use data to attempt to recover information about the unknown true model.
# With simulation, we decide the true model and simulate data from it. Then, we apply a method to the data, in this case least squares.
# Now, since we know the true model, we can assess how well it did.
# For this example, we will simulate n = 21 observations.
# First, fix the true parameters of the model to be simulated.
num_obs <- 21
beta_0 <- 5
beta_1 <- -2
sigma <- 3
# Next, we obtain simulated values of epsilon_i after setting a seed for
# reproducibility.
set.seed(1)
epsilon <- rnorm(n = num_obs, mean = 0, sd = sigma)
# The predictor values are fixed: evenly spaced over [0, 10].
x_vals <- seq(from = 0, to = 10, length.out = num_obs)
# Another common practice is to generate them from a uniform distribution,
# and then use them for the remainder of the analysis:
# set.seed(1)
# x_vals = runif(num_obs, 0, 10)
y_vals <- beta_0 + beta_1 * x_vals + epsilon
# The data, (x_i, y_i), represent a possible sample from the true
# distribution. Now, to check how well the method of least squares works, we
# use lm() to fit the model to our simulated data, then take a look at the
# estimated coefficients.
# Fit the SLR model to the simulated data by least squares.
sim_fit = lm(y_vals ~ x_vals)
coef(sim_fit)
## (Intercept)      x_vals
##    4.832639   -1.831401
# And look at that, they aren't too far from the true parameters we specified!
plot(y_vals ~ x_vals)
# Overlay the estimated regression line on the scatterplot.
abline(sim_fit)
# We should say here, that we’re being sort of lazy, and not the good kinda of lazy that could be considered efficient. Any time you simulate data, you should consider doing two things: writing a function, and storing the data in a data frame.
#
# The function below, sim_slr(), can be used for the same task as above, but
# is much more flexible. Notice that we provide x to the function, instead of
# generating x inside the function. In the SLR model, the x_i are considered
# known values. That is, they are not random, so we do not assume a
# distribution for the x_i. Because of this, we will repeatedly use the same x
# values across all simulations.
# Simulate one data set from the SLR model y = beta_0 + beta_1 * x + epsilon,
# with epsilon ~ N(0, sigma^2). The x values are supplied by the caller and
# treated as fixed (known) quantities.
#
# x      - numeric vector of predictor values
# beta_0 - true intercept (default 10)
# beta_1 - true slope (default 5)
# sigma  - standard deviation of the noise (default 1)
# Returns a data.frame with columns `predictor` (x) and `response` (y).
sim_slr = function(x, beta_0 = 10, beta_1 = 5, sigma = 1) {
  num_points = length(x)
  noise = rnorm(num_points, mean = 0, sd = sigma)
  simulated_response = beta_0 + beta_1 * x + noise
  data.frame(predictor = x, response = simulated_response)
}
# Here, we use the function to repeat the analysis above.
set.seed(1)
sim_data = sim_slr(x = x_vals, beta_0 = 5, beta_1 = -2, sigma = 3)
# This time, the simulated observations are stored in a data frame.
head(sim_data)
##   predictor  response
## 1       0.0 3.1206386
## 2       0.5 4.5509300
## 3       1.0 0.4931142
## 4       1.5 6.7858424
## 5       2.0 1.9885233
## 6       2.5 -2.4614052
# Now when we fit the model with lm() we can use a data argument, a very good practice.
sim_fit = lm(response ~ predictor, data = sim_data)
coef(sim_fit)
## (Intercept)   predictor
##    4.832639   -1.831401
# And this time, we'll make the plot look a lot nicer.
plot(response ~ predictor, data = sim_data,
xlab = "Simulated Predictor Variable",
ylab = "Simulated Response Variable",
main = "Simulated Regression Data",
pch = 20,
cex = 2,
col = "grey")
# Estimated line (solid orange) versus the true line (dashed blue).
abline(sim_fit, lwd = 3, lty = 1, col = "darkorange")
abline(beta_0, beta_1, lwd = 3, lty = 2, col = "dodgerblue")
legend("topright", c("Estimate", "Truth"), lty = c(1, 2), lwd = 2,
col = c("darkorange", "dodgerblue"))
# 7.7 History
# For some brief background on the history of linear regression, see “Galton, Pearson, and the Peas: A Brief History of Linear Regression for Statistics Instructors” from the Journal of Statistics Education as well as the Wikipedia page on the history of regression analysis and lastly the article for regression to the mean which details the origins of the term “regression.”
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Layouts.R
\name{layoutEmail}
\alias{layoutEmail}
\title{Logging layout for e-mail}
\usage{
layoutEmail(level, message)
}
\arguments{
\item{level}{The level of the message (e.g. "INFO")}
\item{message}{The message to layout.}
}
\description{
A layout function to be used with an e-mail appender. This layout creates a short summary e-mail
message on the event, including stack trace.
}
|
/man/layoutEmail.Rd
|
permissive
|
cran/ParallelLogger
|
R
| false
| true
| 481
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Layouts.R
\name{layoutEmail}
\alias{layoutEmail}
\title{Logging layout for e-mail}
\usage{
layoutEmail(level, message)
}
\arguments{
\item{level}{The level of the message (e.g. "INFO")}
\item{message}{The message to layout.}
}
\description{
A layout function to be used with an e-mail appender. This layout creates a short summary e-mail
message on the event, including stack trace.
}
|
# Author: Maurits Hilverda
# Date: June 28, 2020
# Set the desired working directory
setwd("/Users/mauritshilverda/Documents/Studie/MSc/THESIS/R")
# Activate the package required to read the Excel file
library("readxl")
# Import data
data <- read_excel("/Users/mauritshilverda/Documents/Studie/MSc/THESIS/DATA/CombinedData_June28.xlsx")
# Remove Property_Type, Listing_Type, Count_Reservation Days LTM,
# Count_Available days LTM and Count_Blocked days LTM variables
# (positional indices -- fragile if the sheet layout ever changes).
data <- data[,-c(3,4,18,19,20)]
# Rename rownames based on the Property IDs
# NOTE(review): read_excel() returns a tibble; setting row names on a tibble
# is deprecated and they may be dropped by later operations -- confirm this
# is intended.
row.names(data) <- data$Property_ID
# Remove Property_ID and Host_ID variables
data <- data[, -c(1,2)]
# Check data
str(data)
# Convert character variables (Canc_Policy, District and Image_Location) to factors
# At this point we do not yet convert the Premiumization_Level variable to a factor but we might do this later
data[,c(9,20,28)] <- lapply(data[,c(9,20,28)] , factor)
#data[,c(9,20,28,29)] <- lapply(data[,c(9,20,28,29)] , factor) (this one also takes the premlevel variable)
# Check data structure again to see if it worked properly
str(data)
summary(data)
# Data set with the normal Face_Count variable (drops Face_Count_Alternative)
data1 <- data[, -c(18)]
# Data set with the Face_Count_Alternative variable (drops Face_Count)
data2 <- data[, -c(17)]
# For interpretation purposes later in this research we could rescale the
# occupancy rate to a value between 0 and 100 instead of between 0 and 1:
#data1$Occupancy_Rate_LTM <- data1$Occupancy_Rate_LTM*100
#data2$Occupancy_Rate_LTM <- data2$Occupancy_Rate_LTM*100
# Remove the factor variables (positions 9, 19, 27) so cor() receives a purely
# numeric data frame for the correlation plot.
corrplotdata <- data1[, -c(9, 19, 27)]
# Pairwise correlations between all remaining (numeric) variables
correlations <- cor(corrplotdata)
# Visualize the upper triangle of the correlation matrix, with variables
# ordered by hierarchical clustering.
library(corrplot)
corrplot(correlations, type = "upper", order = "hclust",
         tl.col = "black", tl.srt = 45, tl.cex = 0.8)
# First check the difference in mean occupancy rates between listings with
# humanized and non-humanized images.
library(dplyr)
data1 %>%
  group_by(Humanized) %>%
  # BUG FIX: the original used `meanocc <- mean(...)` inside summarize(),
  # which makes dplyr name the output column by deparsing the whole
  # expression; `=` names the column `meanocc` as intended.
  summarize(meanocc = mean(Occupancy_Rate_LTM))
# Run a two-sample t-test to see if the difference is significant.
t.test(data1$Occupancy_Rate_LTM ~ data1$Humanized)
# Standardize (z-score) all numeric variables in both data sets, including
# Premiumization_Level treated as numeric. scale() returns an n x 1 matrix,
# which is stored as a matrix column -- identical to the original per-column
# calls, but without 42 copy-pasted lines. Column order is unchanged because
# every column already exists; only values are replaced.
shared_numeric_vars <- c(
  "Average_Daily_Rate", "No_Reviews", "No_Bedrooms", "No_Bathrooms",
  "Max_No_Guests", "Response_Time", "Sec_Deposit", "Cleaning_Fee",
  "Extra People Fee", "Minimum Stay", "Number of Photos", "Overall_Rating",
  "Host_No_Listings", "Colorfulness", "Luminance", "Contrasts", "Sharpness",
  "Object_Count", "Unique_Objects", "Premiumization_Level"
)
# data1 carries Face_Count; data2 carries Face_Count_Alternative.
for (v in c(shared_numeric_vars, "Face_Count")) {
  data1[[v]] <- scale(data1[[v]], scale = TRUE)
}
for (v in c(shared_numeric_vars, "Face_Count_Alternative")) {
  data2[[v]] <- scale(data2[[v]], scale = TRUE)
}
# Fit the baseline multiple linear regression: occupancy rate on all
# predictors in data1 (the data set with the normal Face_Count variable).
model1 <- lm(Occupancy_Rate_LTM ~., data1)
summary(model1)
# Check VIFs
library(car)
vif(model1)
# Taking the squares of the generalized VIF^(1/(2*Df)), does not indicate excessive multicollinearity
# Therefore removing certain variables from this model is not needed
# Fit the moderated regression model: each image attribute interacted with
# Premiumization_Level.
model2 <- lm(Occupancy_Rate_LTM ~. + Colorfulness*Premiumization_Level + Luminance*Premiumization_Level + Contrasts*Premiumization_Level + Sharpness*Premiumization_Level + Humanized*Premiumization_Level + Object_Count*Premiumization_Level, data1)
summary(model2)
# Check VIFs
vif(model2)
# Taking the squares of the generalized VIF^(1/(2*Df)), does not indicate excessive multicollinearity
# Therefore removing certain variables from this model is not needed
# Same moderated model, now fit on data2 (Face_Count_Alternative variable).
model3 <- lm(Occupancy_Rate_LTM ~. + Colorfulness*Premiumization_Level + Luminance*Premiumization_Level + Contrasts*Premiumization_Level + Sharpness*Premiumization_Level + Humanized*Premiumization_Level + Object_Count*Premiumization_Level, data2)
summary(model3)
# Check VIFs
vif(model3)
# Taking the squares of the generalized VIF^(1/(2*Df)), does not indicate excessive multicollinearity
# Therefore removing certain variables from this model is not needed
# Residuals vs Fitted plot to inspect for heteroscedasticity
plot(model3, which = 1, las = 1)
# Rerun the model with a log-transformed dependent variable to assess whether
# heteroscedasticity persists.
model4 <- lm(log(Occupancy_Rate_LTM) ~. + Colorfulness*Premiumization_Level + Luminance*Premiumization_Level + Contrasts*Premiumization_Level + Sharpness*Premiumization_Level + Humanized*Premiumization_Level + Object_Count*Premiumization_Level, data2)
summary(model4)
# Check VIFs
vif(model4)
# Taking the squares of the generalized VIF^(1/(2*Df)), does not indicate excessive multicollinearity
# Therefore removing certain variables from this model is not needed
# Check Residuals vs Fitted plot
plot(model4, which = 1, las = 1)
|
/Modelling/DataAnalysis.R
|
no_license
|
mauritshilv/MSc-Thesis
|
R
| false
| false
| 6,933
|
r
|
# Author: Maurits Hilverda
# Date: June 28, 2020
# Set the desired working directory
setwd("/Users/mauritshilverda/Documents/Studie/MSc/THESIS/R")
# Activate the package required to read the Excel file
library("readxl")
# Import data
data <- read_excel("/Users/mauritshilverda/Documents/Studie/MSc/THESIS/DATA/CombinedData_June28.xlsx")
# Remove Property_Type, Listing_Type, Count_Reservation Days LTM,
# Count_Available days LTM and Count_Blocked days LTM variables
# (positional indices -- fragile if the sheet layout ever changes).
data <- data[,-c(3,4,18,19,20)]
# Rename rownames based on the Property IDs
# NOTE(review): read_excel() returns a tibble; setting row names on a tibble
# is deprecated and they may be dropped by later operations -- confirm this
# is intended.
row.names(data) <- data$Property_ID
# Remove Property_ID and Host_ID variables
data <- data[, -c(1,2)]
# Check data
str(data)
# Convert character variables (Canc_Policy, District and Image_Location) to factors
# At this point we do not yet convert the Premiumization_Level variable to a factor but we might do this later
data[,c(9,20,28)] <- lapply(data[,c(9,20,28)] , factor)
#data[,c(9,20,28,29)] <- lapply(data[,c(9,20,28,29)] , factor) (this one also takes the premlevel variable)
# Check data structure again to see if it worked properly
str(data)
summary(data)
# Data set with the normal Face_Count variable (drops Face_Count_Alternative)
data1 <- data[, -c(18)]
# Data set with the Face_Count_Alternative variable (drops Face_Count)
data2 <- data[, -c(17)]
# For interpretation purposes later in this research we could rescale the
# occupancy rate to a value between 0 and 100 instead of between 0 and 1:
#data1$Occupancy_Rate_LTM <- data1$Occupancy_Rate_LTM*100
#data2$Occupancy_Rate_LTM <- data2$Occupancy_Rate_LTM*100
# Remove the factor variables (positions 9, 19, 27) so cor() receives a purely
# numeric data frame for the correlation plot.
corrplotdata <- data1[, -c(9, 19, 27)]
# Pairwise correlations between all remaining (numeric) variables
correlations <- cor(corrplotdata)
# Visualize the upper triangle of the correlation matrix, with variables
# ordered by hierarchical clustering.
library(corrplot)
corrplot(correlations, type = "upper", order = "hclust",
         tl.col = "black", tl.srt = 45, tl.cex = 0.8)
# First check the difference in mean occupancy rates between listings with
# humanized and non-humanized images.
library(dplyr)
data1 %>%
  group_by(Humanized) %>%
  # BUG FIX: the original used `meanocc <- mean(...)` inside summarize(),
  # which makes dplyr name the output column by deparsing the whole
  # expression; `=` names the column `meanocc` as intended.
  summarize(meanocc = mean(Occupancy_Rate_LTM))
# Run a two-sample t-test to see if the difference is significant.
t.test(data1$Occupancy_Rate_LTM ~ data1$Humanized)
# Apply the scaling for all numeric variables (for both dataset 1 and 2, and including premiumization level as a numeric variable)
data1$Average_Daily_Rate <- scale(data1$Average_Daily_Rate, scale = TRUE)
data1$No_Reviews <- scale(data1$No_Reviews, scale = TRUE)
data1$No_Bedrooms <- scale(data1$No_Bedrooms, scale = TRUE)
data1$No_Bathrooms <- scale(data1$No_Bathrooms, scale = TRUE)
data1$Max_No_Guests <- scale(data1$Max_No_Guests, scale = TRUE)
data1$Response_Time <- scale(data1$Response_Time, scale = TRUE)
data1$Sec_Deposit <- scale(data1$Sec_Deposit, scale = TRUE)
data1$Cleaning_Fee <- scale(data1$Cleaning_Fee, scale = TRUE)
data1$`Extra People Fee` <- scale(data1$`Extra People Fee`, scale = TRUE)
data1$`Minimum Stay` <- scale(data1$`Minimum Stay`, scale = TRUE)
data1$`Number of Photos` <- scale(data1$`Number of Photos`, scale = TRUE)
data1$Overall_Rating <- scale(data1$Overall_Rating, scale = TRUE)
data1$Face_Count <- scale(data1$Face_Count, scale = TRUE)
data1$Host_No_Listings <- scale(data1$Host_No_Listings, scale = TRUE)
data1$Colorfulness <- scale(data1$Colorfulness, scale = TRUE)
data1$Luminance <- scale(data1$Luminance, scale = TRUE)
data1$Contrasts <- scale(data1$Contrasts, scale = TRUE)
data1$Sharpness <- scale(data1$Sharpness, scale = TRUE)
data1$Object_Count <- scale(data1$Object_Count, scale = TRUE)
data1$Unique_Objects <- scale(data1$Unique_Objects, scale = TRUE)
data1$Premiumization_Level <- scale(data1$Premiumization_Level, scale = TRUE)
data2$Average_Daily_Rate <- scale(data2$Average_Daily_Rate, scale = TRUE)
data2$No_Reviews <- scale(data2$No_Reviews, scale = TRUE)
data2$No_Bedrooms <- scale(data2$No_Bedrooms, scale = TRUE)
data2$No_Bathrooms <- scale(data2$No_Bathrooms, scale = TRUE)
data2$Max_No_Guests <- scale(data2$Max_No_Guests, scale = TRUE)
data2$Response_Time <- scale(data2$Response_Time, scale = TRUE)
data2$Sec_Deposit <- scale(data2$Sec_Deposit, scale = TRUE)
data2$Cleaning_Fee <- scale(data2$Cleaning_Fee, scale = TRUE)
data2$`Extra People Fee` <- scale(data2$`Extra People Fee`, scale = TRUE)
data2$`Minimum Stay` <- scale(data2$`Minimum Stay`, scale = TRUE)
data2$`Number of Photos` <- scale(data2$`Number of Photos`, scale = TRUE)
data2$Overall_Rating <- scale(data2$Overall_Rating, scale = TRUE)
data2$Face_Count_Alternative <- scale(data2$Face_Count_Alternative, scale = TRUE)
data2$Host_No_Listings <- scale(data2$Host_No_Listings, scale = TRUE)
data2$Colorfulness <- scale(data2$Colorfulness, scale = TRUE)
data2$Luminance <- scale(data2$Luminance, scale = TRUE)
data2$Contrasts <- scale(data2$Contrasts, scale = TRUE)
data2$Sharpness <- scale(data2$Sharpness, scale = TRUE)
data2$Object_Count <- scale(data2$Object_Count, scale = TRUE)
data2$Unique_Objects <- scale(data2$Unique_Objects, scale = TRUE)
data2$Premiumization_Level <- scale(data2$Premiumization_Level, scale = TRUE)
# Fit the baseline multiple linear regression: occupancy rate on all
# predictors in data1 (the data set with the normal Face_Count variable).
model1 <- lm(Occupancy_Rate_LTM ~., data1)
summary(model1)
# Check VIFs
library(car)
vif(model1)
# Taking the squares of the generalized VIF^(1/(2*Df)), does not indicate excessive multicollinearity
# Therefore removing certain variables from this model is not needed
# Fit the moderated regression model: each image attribute interacted with
# Premiumization_Level.
model2 <- lm(Occupancy_Rate_LTM ~. + Colorfulness*Premiumization_Level + Luminance*Premiumization_Level + Contrasts*Premiumization_Level + Sharpness*Premiumization_Level + Humanized*Premiumization_Level + Object_Count*Premiumization_Level, data1)
summary(model2)
# Check VIFs
vif(model2)
# Taking the squares of the generalized VIF^(1/(2*Df)), does not indicate excessive multicollinearity
# Therefore removing certain variables from this model is not needed
# Same moderated model, now fit on data2 (Face_Count_Alternative variable).
model3 <- lm(Occupancy_Rate_LTM ~. + Colorfulness*Premiumization_Level + Luminance*Premiumization_Level + Contrasts*Premiumization_Level + Sharpness*Premiumization_Level + Humanized*Premiumization_Level + Object_Count*Premiumization_Level, data2)
summary(model3)
# Check VIFs
vif(model3)
# Taking the squares of the generalized VIF^(1/(2*Df)), does not indicate excessive multicollinearity
# Therefore removing certain variables from this model is not needed
# Residuals vs Fitted plot to inspect for heteroscedasticity
plot(model3, which = 1, las = 1)
# Rerun the model with a log-transformed dependent variable to assess whether
# heteroscedasticity persists.
model4 <- lm(log(Occupancy_Rate_LTM) ~. + Colorfulness*Premiumization_Level + Luminance*Premiumization_Level + Contrasts*Premiumization_Level + Sharpness*Premiumization_Level + Humanized*Premiumization_Level + Object_Count*Premiumization_Level, data2)
summary(model4)
# Check VIFs
vif(model4)
# Taking the squares of the generalized VIF^(1/(2*Df)), does not indicate excessive multicollinearity
# Therefore removing certain variables from this model is not needed
# Check Residuals vs Fitted plot
plot(model4, which = 1, las = 1)
|
#==================== Normalized crime in Uttar Pradesh =======================
# Diverging bar chart of standardized (z-scored) crimes-against-women totals
# per district in Uttar Pradesh.
library(ggplot2)
theme_set(theme_bw())
library(dplyr)
# FIX: renamed `table` (shadowed base::table) and removed the duplicate
# library(ggplot2) call.
crimes <- read.csv("E:/project/crimes.csv")
# Keep Uttar Pradesh only and total the crimes-against-women columns per
# district.
district_totals <- crimes %>%
  group_by(DISTRICT) %>%
  filter(STATE.UT == "UTTAR PRADESH") %>%
  summarise(all_crimes = sum(Rape, Kidnapping.and.Abduction, Dowry.Deaths,
                             Assault.on.women.with.intent.to.outrage.her.modesty,
                             Insult.to.modesty.of.Women,
                             Cruelty.by.Husband.or.his.Relatives,
                             Importation.of.Girls))
#View(district_totals)
data <- data.frame(district_totals)
print(district_totals)
# Standardize the totals (z-score, rounded to 2 decimals) and flag districts
# above / below the state average.
data$avg <- round((data$all_crimes - mean(data$all_crimes)) / sd(data$all_crimes), 2)
data$chart_type <- ifelse(data$avg < 0, "below", "above")
data <- data[order(data$avg), ]
# Diverging horizontal bars, green above average and red below.
# (Also fixes the label typos "normlized" / "DIverging".)
ggplot(data, aes(x = DISTRICT, y = avg)) +
  geom_bar(stat = 'identity', aes(fill = chart_type), width = 0.5) +
  scale_fill_manual(name = "crimes", labels = c("above avg", "below avg"),
                    values = c("above" = "#00ba38", "below" = "#f8766d")) +
  labs(subtitle = "normalized crimes in UTTAR PRADESH",
       title = "Diverging Bars") +
  coord_flip()
|
/Q4/34/district_34.r
|
no_license
|
PurvishaThakkar/datascience-project-in-R-studio
|
R
| false
| false
| 1,041
|
r
|
#==================== Normalized crime in Uttar Pradesh =======================
# Diverging bar chart of standardized (z-scored) crimes-against-women totals
# per district in Uttar Pradesh.
library(ggplot2)
theme_set(theme_bw())
library(dplyr)
# FIX: renamed `table` (shadowed base::table) and removed the duplicate
# library(ggplot2) call.
crimes <- read.csv("E:/project/crimes.csv")
# Keep Uttar Pradesh only and total the crimes-against-women columns per
# district.
district_totals <- crimes %>%
  group_by(DISTRICT) %>%
  filter(STATE.UT == "UTTAR PRADESH") %>%
  summarise(all_crimes = sum(Rape, Kidnapping.and.Abduction, Dowry.Deaths,
                             Assault.on.women.with.intent.to.outrage.her.modesty,
                             Insult.to.modesty.of.Women,
                             Cruelty.by.Husband.or.his.Relatives,
                             Importation.of.Girls))
#View(district_totals)
data <- data.frame(district_totals)
print(district_totals)
# Standardize the totals (z-score, rounded to 2 decimals) and flag districts
# above / below the state average.
data$avg <- round((data$all_crimes - mean(data$all_crimes)) / sd(data$all_crimes), 2)
data$chart_type <- ifelse(data$avg < 0, "below", "above")
data <- data[order(data$avg), ]
# Diverging horizontal bars, green above average and red below.
# (Also fixes the label typos "normlized" / "DIverging".)
ggplot(data, aes(x = DISTRICT, y = avg)) +
  geom_bar(stat = 'identity', aes(fill = chart_type), width = 0.5) +
  scale_fill_manual(name = "crimes", labels = c("above avg", "below avg"),
                    values = c("above" = "#00ba38", "below" = "#f8766d")) +
  labs(subtitle = "normalized crimes in UTTAR PRADESH",
       title = "Diverging Bars") +
  coord_flip()
|
## plot4.R generates a single png graphic file with a grid of four plots:
##   Global_active_power vs Time
##   Voltage vs Time
##   Sub_metering_1, Sub_metering_2, Sub_metering_3 vs Time
##   Global_reactive_power vs Time
## Data file supplied by Coursera Exploratory Data Analysis class
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
## referenced to University of California Irvine machine learning databases
## "Individual household electric power consumption Data Set"
## http://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption
## Null observations are marked by "?" in the Coursera data, instead of
## two adjacent semi-colons.
## Hard coded information, change as required
datafile <- "~/Programming/ExploreData/household_power_consumption.txt"
start <- as.Date("2007-02-01", format = "%Y-%m-%d")
end <- as.Date("2007-02-02", format = "%Y-%m-%d")
# NOTE(review): `=` used for top-level assignment here while `<-` is used
# everywhere else in the script; consider `<-` for consistency.
pngfile = "~/Programming/ExploreData/ExData_Plotting1/plot4.png"
##
# To save memory space just the needed subsection of data will be read in from file.
# First, though, we need to find which rows hold the required dates.
# Read just the date column ("NULL" colClasses drop the other 8 columns).
datecolclass <- c("character", rep.int("NULL", 8))
data <- read.csv(datafile, sep = ";", quote = "", na.strings = "?",
colClasses = datecolclass)
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# find data-row indices whose date matches either requested day
gooddate <- which(data$Date == start | data$Date == end)
# assumption is made that file data is in chronological order
rowstart <- min(gooddate)
rowstoread <- max(gooddate) - rowstart + 1
# read first line (header) of file to get column names, then convert to vector
datacolnames <- read.csv(datafile, sep = ";", header = FALSE, nrows = 1,
colClasses = "character")
datacolnames <- as.character(datacolnames)
# Read in the subsection of the file with the requested dates.
# skip = rowstart works because data row i sits on file line i + 1: skipping
# rowstart lines drops the header plus the rowstart - 1 rows before the first
# matching date.
datecolclass <- c("character", "character", rep.int("numeric", 7))
data <- read.csv(datafile, sep = ";", header = FALSE,
skip = rowstart, nrows = rowstoread,
quote = "", na.strings = "?",
colClasses = datecolclass, col.names = datacolnames)
# add data column with POSIXct formatted combined date and time information
data$posix <- as.POSIXct(paste(data$Date, data$Time), tz = "", "%d/%m/%Y %H:%M:%S")
## Plotting code section
# generate a png graphic file with a grid of four plots
png(filename = pngfile, width = 480, height = 480, units = "px")
# mfcol fills column-wise, so the plots below land top-left, bottom-left,
# top-right, bottom-right in drawing order.
par(mfcol = c(2, 2))
# first plot (top-left) - Global_active_power vs Time
plot(data$posix, data$Global_active_power, type = "l",
main = "", xlab = "", ylab = "Global Active Power")
# second plot (bottom-left) - overlaid line plots of Sub_metering_1,2,3
plot(data$posix, data$Sub_metering_1, type = "l",
main = "", xlab = "", ylab = "Energy sub metering")
lines(data$posix, data$Sub_metering_2, col = "red")
lines(data$posix, data$Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), bty = "n",
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# third plot (top-right) - Voltage vs Time
plot(data$posix, data$Voltage, type = "l",
main = "", xlab = "datetime", ylab = "Voltage")
# fourth plot (bottom-right) - Global_reactive_power vs Time
plot(data$posix, data$Global_reactive_power, type = "l",
main = "", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
dorght/ExData_Plotting1
|
R
| false
| false
| 3,428
|
r
|
## plot4.R generates a single png graphic file with a grid of four plots:
##   Global_active_power vs Time
##   Voltage vs Time
##   Sub_metering_1, Sub_metering_2, Sub_metering_3 vs Time
##   Global_reactive_power vs Time
## Data file supplied by Coursera Exploratory Data Analysis class
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
## referenced to University of California Irvine machine learning databases
## "Individual household electric power consumption Data Set"
## http://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption
## Null observations are marked by "?" in the Coursera data, instead of
## two adjacent semi-colons.
## Hard coded information, change as required
datafile <- "~/Programming/ExploreData/household_power_consumption.txt"
start <- as.Date("2007-02-01", format = "%Y-%m-%d")
end <- as.Date("2007-02-02", format = "%Y-%m-%d")
# NOTE(review): `=` used for top-level assignment here while `<-` is used
# everywhere else in the script; consider `<-` for consistency.
pngfile = "~/Programming/ExploreData/ExData_Plotting1/plot4.png"
##
# To save memory space just the needed subsection of data will be read in from file.
# First, though, we need to find which rows hold the required dates.
# Read just the date column ("NULL" colClasses drop the other 8 columns).
datecolclass <- c("character", rep.int("NULL", 8))
data <- read.csv(datafile, sep = ";", quote = "", na.strings = "?",
colClasses = datecolclass)
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# find data-row indices whose date matches either requested day
gooddate <- which(data$Date == start | data$Date == end)
# assumption is made that file data is in chronological order
rowstart <- min(gooddate)
rowstoread <- max(gooddate) - rowstart + 1
# read first line (header) of file to get column names, then convert to vector
datacolnames <- read.csv(datafile, sep = ";", header = FALSE, nrows = 1,
colClasses = "character")
datacolnames <- as.character(datacolnames)
# Read in the subsection of the file with the requested dates.
# skip = rowstart works because data row i sits on file line i + 1: skipping
# rowstart lines drops the header plus the rowstart - 1 rows before the first
# matching date.
datecolclass <- c("character", "character", rep.int("numeric", 7))
data <- read.csv(datafile, sep = ";", header = FALSE,
skip = rowstart, nrows = rowstoread,
quote = "", na.strings = "?",
colClasses = datecolclass, col.names = datacolnames)
# add data column with POSIXct formatted combined date and time information
data$posix <- as.POSIXct(paste(data$Date, data$Time), tz = "", "%d/%m/%Y %H:%M:%S")
## Plotting code section
# generate a png graphic file with a grid of four plots
png(filename = pngfile, width = 480, height = 480, units = "px")
# mfcol fills column-wise, so the plots below land top-left, bottom-left,
# top-right, bottom-right in drawing order.
par(mfcol = c(2, 2))
# first plot (top-left) - Global_active_power vs Time
plot(data$posix, data$Global_active_power, type = "l",
main = "", xlab = "", ylab = "Global Active Power")
# second plot (bottom-left) - overlaid line plots of Sub_metering_1,2,3
plot(data$posix, data$Sub_metering_1, type = "l",
main = "", xlab = "", ylab = "Energy sub metering")
lines(data$posix, data$Sub_metering_2, col = "red")
lines(data$posix, data$Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), bty = "n",
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# third plot (top-right) - Voltage vs Time
plot(data$posix, data$Voltage, type = "l",
main = "", xlab = "datetime", ylab = "Voltage")
# fourth plot (bottom-right) - Global_reactive_power vs Time
plot(data$posix, data$Global_reactive_power, type = "l",
main = "", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/random_selection.R
\name{random_selection}
\alias{random_selection}
\title{Randomly decide whether this node will be infected or removed}
\usage{
random_selection(freq, vec_prob)
}
\arguments{
\item{freq}{int, the number of incoming links(removed: freq == 1)}
\item{vec_prob}{vector, the probability vector of vec_rate and 1-vec_rate}
}
\value{
results, int
}
\description{
Infection: Randomly decide whether the target node will be infected,
using sample(c(1, 0), 1, prob = ) for every incoming link. If a link
transmits the infection it returns 1, otherwise 0. Finally, the return
values of all incoming links are added up to determine whether this
node becomes infected.
Removed: Randomly decide whether the target node will be removed
from the infected queue. This is a special condition with freq == 1.
}
\examples{
random_selection(4, c(0.12, 0.88))
}
|
/man/random_selection.Rd
|
permissive
|
AllToBeNice/SIR_In_Network
|
R
| false
| true
| 899
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/random_selection.R
\name{random_selection}
\alias{random_selection}
\title{Randomly decide whether this node will be infected or removed}
\usage{
random_selection(freq, vec_prob)
}
\arguments{
\item{freq}{int, the number of incoming links(removed: freq == 1)}
\item{vec_prob}{vector, the probability vector of vec_rate and 1-vec_rate}
}
\value{
results, int
}
\description{
Infection: Randomly decide whether the target node will be infected,
using sample(c(1, 0), 1, prob = ) for every incoming link. If a link
transmits the infection it returns 1, otherwise 0. Finally, the return
values of all incoming links are added up to determine whether this
node becomes infected.
Removed: Randomly decide whether the target node will be removed
from the infected queue. This is a special condition with freq == 1.
}
\examples{
random_selection(4, c(0.12, 0.88))
}
|
# Info about all the experimental design packages in R can be found here:
# http://cran.r-project.org/web/views/ExperimentalDesign.html
# NOTE(review): install.packages() runs on every execution of this script;
# consider commenting these out (or guarding with requireNamespace()) once
# the packages are installed.
install.packages("support.CEs")
install.packages("xtable")
install.packages("sm")
install.packages("lmtest")
install.packages("mlogit")
install.packages("ggplot2")
library(support.CEs)
library(survival)
library(stats)
library(xtable)
library(sm)
library(lmtest)
library(mlogit)
library(ggplot2)
# installs and loads support.CEs package. Dependencies are DoE.base, MASS, simex.
# Also, DoE.base depends on vcd. Make sure all these ducks are in a row.
# Build a rotation choice-experiment design: 3 attributes x 4 levels,
# 2 alternatives per choice set, 2 blocks. The fixed seed makes the
# generated design reproducible across runs.
des2v2 <- rotation.design(attribute.names = list(Environmental_Quality = c("2", "3", "4", "5"),
Farm_Conversion = c(".02", ".04", ".08",".16"),
Annual_10Yr_Tax = c("10", "20", "40","80")),
nalternatives = 2,
nblocks = 2,
row.renames = FALSE,
seed = 987)
# I recoded the Environmental_Quality variable to be numerical and continuous on a scale of 1 to 5.
# This allows the design matrix to be made with the common base. Using environmental quality as a categorical variable did not
# allow me to enter a common base option that was separate from the existing categories.
# ques2v2 <- questionnaire(des2v2, common=c(Environmental_Quality = "1", Farm_Conversion = "0", Annual_10Yr_Tax = "0"), quote=FALSE)
desmat <- make.design.matrix(choice.experiment.design=des2v2,
optout=FALSE,
continuous.attributes=c("Environmental_Quality", "Farm_Conversion","Annual_10Yr_Tax"),
common=c(Environmental_Quality="1", Farm_Conversion="0", Annual_10Yr_Tax="0"),
binary=FALSE)
# Survey responses; the UNC path is site-specific -- update if the data moves.
cevars <- read.csv('\\\\daryl\\users\\nenelow\\My Documents\\Puget Sound Institute\\Survey Data\\141111CESurveyNumericResponses.csv');
# Merge respondent data with the design matrix; Q1-Q8 hold the chosen
# alternative for each of the eight choice tasks.
cedset <- make.dataset(respondent.dataset=cevars, design.matrix=desmat,
choice.indicators=c("Q1","Q2","Q3","Q4","Q5","Q6","Q7","Q8"))
#Interaction Terms
# Taking the natural log of quality variable to incorporate diminishing returns to
# increased quality
cedset$lnqual <- log(cedset$Environmental_Quality)
# Conversion Level Dummies
# Pre-allocate all level-dummy columns at 0; they are populated per level
# further below (farm* from Farm_Conversion, enq* from Environmental_Quality).
rows <- nrow(cedset)
cedset$farmverylow <- 0
cedset$farmlow <- 0
cedset$farmlowmed <- 0
cedset$farmhimed <- 0
cedset$farmhi <- 0
cedset$enqverylow <-0
cedset$enqlow <- 0
cedset$enqlowmed <- 0
cedset$enqhimed <- 0
cedset$enqhi <- 0
# Populate the level-dummy columns, vectorized.
# The original row-by-row loop with an if/else-if chain set at most one
# dummy per row; because the tested levels are mutually exclusive, a
# vectorized equality test per level is equivalent -- and, unlike the
# scalar `if`, it does not error out on NA rows.
# NOTE(review): equality against 0.02/0.04/... on doubles is inherited
# from the original; it works only if the levels are stored exactly.
cedset$farmverylow <- as.numeric(cedset$Farm_Conversion == 0.00)
cedset$farmlow     <- as.numeric(cedset$Farm_Conversion == 0.02)
cedset$farmlowmed  <- as.numeric(cedset$Farm_Conversion == 0.04)
cedset$farmhimed   <- as.numeric(cedset$Farm_Conversion == 0.08)
cedset$farmhi      <- as.numeric(cedset$Farm_Conversion == 0.16)
# Environmental-quality dummies, one per level 1..5, same approach.
cedset$enqverylow <- as.numeric(cedset$Environmental_Quality == 1)
cedset$enqlow     <- as.numeric(cedset$Environmental_Quality == 2)
cedset$enqlowmed  <- as.numeric(cedset$Environmental_Quality == 3)
cedset$enqhimed   <- as.numeric(cedset$Environmental_Quality == 4)
cedset$enqhi      <- as.numeric(cedset$Environmental_Quality == 5)
# ASC (alternative-specific constant) dummy for the status-quo option:
# 1 where ASC == 0, else 0. Vectorized replacement of the original row
# loop; the unused counter `numsqs` from the original has been dropped.
cedset$ascsq <- as.numeric(cedset$ASC == 0)
# Status Quo Interactions with respondent characteristics.
# NOTE(review): this uses `inc` while the "Odds and Ends" section below
# uses `income` -- verify both columns really exist in the survey data.
cedset$sqinc <- cedset$ascsq * cedset$inc
cedset$sqedu <- cedset$ascsq * cedset$edu
cedset$sqact <- cedset$ascsq * cedset$activitydummy
cedset$sqtrips <- cedset$ascsq * cedset$Numrectrips
# Specification 1: Continuous variables, with same interactions as Specification 1,
# except in levels of quality instead of logs.
# Conditional logit via survival::clogit; strata(STR) groups the rows
# belonging to one choice task, WT are respondent weights.
rescont <- clogit(RES~Environmental_Quality+Farm_Conversion+Annual_10Yr_Tax+ascsq+
sqinc+sqedu+sqact+sqtrips+
strata(STR), data=cedset, weights=WT)
summary(rescont)
# Marginal willingness-to-pay; confidence intervals simulated with 1000 draws.
wtpcont <- mwtp(rescont, monetary.variables=c("Annual_10Yr_Tax"),
nonmonetary.variables=c("Environmental_Quality", "Farm_Conversion"), nreplications=1000)
wtpcont
gofcont <- gofm(rescont)
gofcont
# Specification 2: Natural log of quality, levels of conversion;
# Interaction between quality and conversion, quality and income, conversion and income,
# Quality and education, quality and activities chosen, quality and number of trips chosen
reslog <- clogit(RES~lnqual+Farm_Conversion+Annual_10Yr_Tax+ascsq+
sqinc+sqedu+sqact+sqtrips+strata(STR),
data=cedset, weights=WT)
summary(reslog)
wtplog <- mwtp(reslog, monetary.variables=c("Annual_10Yr_Tax"),
nonmonetary.variables=c("lnqual", "Farm_Conversion"), nreplications=1000)
wtplog
goflog <- gofm(reslog)
goflog
# Specification 3: Levels of each non-monetary variable separated into binary dummies,
# with the common base level as the omitted dummy. Interactions between high environmental quality
# and number of trips taken, income, and downstream activities.
# Also, some interactions included between specific levels of quality and conversion.
reslev <- clogit(RES~enqlow+enqlowmed+enqhimed+enqhi+
farmlowmed+farmhimed+farmhi+
sqinc+sqedu+sqact+sqtrips+
Annual_10Yr_Tax+
strata(STR), data=cedset, weights=WT)
summary(reslev)
# NOTE(review): "farmlow" is requested from mwtp() below but is not a
# regressor in reslev (its formula starts at farmlowmed) -- confirm intended.
wtplev <- mwtp(reslev, monetary.variables=c("Annual_10Yr_Tax"),
nonmonetary.variables=c("enqlowmed", "enqhimed", "enqhi",
"farmlow", "farmlowmed", "farmhimed", "farmhi"),
nreplications=1000)
wtplev
goflev <- gofm(reslev)
goflev
# Likelihood Ratio Test of Model Goodness of Fit
llr <- lrtest(rescont,reslog,reslev)
# Specification 4: use glm from stats package. Note: it has a very low goodness of fit, hence will not use.
wts <- as.numeric(cedset$WT)
# NOTE(review): qualrec, convrec, qualinc and convinc are only created in
# the "Odds and Ends" section at the bottom of this script; a clean
# top-to-bottom run will fail here unless that section is moved up first.
lince <- glm(RES~lnqual+Farm_Conversion+Annual_10Yr_Tax+qualrec+convrec+qualinc+convinc, data=cedset, weights=wts)
wtplin <- mwtp(lince,monetary.variables=c("Annual_10Yr_Tax"), nonmonetary.variables=c("Environmental_Quality", "Farm_Conversion"))
# Now the graphics.
# Histograms for pennies
# Respondents allocated "pennies" across ecosystem attributes; these
# histograms show the resulting relative-importance distributions.
wat <- hist(cevars$pennieswater, breaks=20, col="blue", main="Relative Importance of Water Quality")
saf <- hist(cevars$penniessafety, breaks=20, col="pink", main="Relative Importance of Safety for Swimming and Fishing")
fis <- hist(cevars$penniesfish, breaks=20, col="lightblue", main="Relative Importance of Fish Habitat")
veg <- hist(cevars$penniesveg, breaks=20, col="green", main="Relative Importance of Vegetation")
bir <- hist(cevars$penniesbird, breaks=20, col="orange", main="Relative Importance of Bird Habitat")
# Kernel density plots for marginal WTPs for all specifications
# (each mwtps column holds the simulated WTP draws for one attribute)
margslog <- wtplog$mwtps
hlq <- hist(margslog[,1], breaks=100, col="lightblue", main="WTP for Additional Water Quality (Natural Logs)")
dlq <- density(margslog[,1])
plot(dlq, main="WTP for Additional Water Quality (in Natural Logs)")
dcq <- density(margslog[,2])
plot(dcq, main="WTP for Additional Farmland Conversion")
margscont <- wtpcont$mwtps
dq <- density(margscont[,1])
plot(dq, main="Marginal WTP for Additional Environmental Quality")
dc <- density(margscont[,2])
plot(dc, main="Marginal WTP for Additional Farmland Conversion")
margslev <- wtplev$mwtps
dqpoor <- density(margslev[,1])
plot(dqpoor, main="WTP for Poor Environmental Quality")
dqfair <- density(margslev[,2])
plot(dqfair, main="WTP for Fair Environmental Quality")
dqgood <- density(margslev[,3])
plot(dqgood, main="WTP for Good Environmental Quality")
dqvgood <- density(margslev[,4])
plot(dqvgood, main="WTP for Very Good Environmental Quality")
dcfour <- density(margslev[,5])
plot(dcfour, main="WTP for 4% Farmland Conversion")
dceight <- density(margslev[,6])
plot(dceight, main="WTP for 8% Farmland Conversion")
dcsixteen <- density(margslev[,7])
plot(dcsixteen, main="WTP for 16% Farmland Conversion")
# Comparison
mgslevfr <- as.data.frame(margslev)
# NOTE(review): attach() is discouraged (it masks the search path) and the
# attached frame is never referenced by name afterwards -- consider removing.
attach(mgslevfr)
# Trying to get it into shape for an mlogit with random parameters
cedtoml <- mlogit.data(cedset, shape="long", choice="RES", alt.var="ALT", id.var="ID")
# Now for the confusing part.
ml <- mlogit(RES~lnqual+Farm_Conversion+Annual_10Yr_Tax, cedtoml, na.action=na.omit, alt.subset=c(1,2,3))
# Gives error message:
# Error in if (abs(x - oldx) < ftol) { :
# missing value where TRUE/FALSE needed
# Yet:
x <- is.na(cedtoml)
length(x[x==TRUE])
# Will show you that there are no missing values in the entire dataset!
# And, the following nonsensical formula works:
# ml <- mlogit(lnqual~Farm_Conversion+Annual_10Yr_Tax | 0, data=cedtoml)
# So, there is something wrong with the RES variable. But it is a logical variable with no missing values.
class(cedtoml$RES)
# [1] "logical"
str(cedtoml)
# Will give you the structure of the cedtoml object - what data type it is and all of its variables
# A funny work-around. Create a numeric out of the logical variable.
cedtoml$choice <- as.numeric(cedtoml$RES)
# Odds and Ends
# Interaction terms with specific levels - omitted from regressions.
# NOTE(review): Specification 4 earlier in this script references qualrec,
# convrec, qualinc and convinc, which are only created here -- on a clean
# run this section must execute before that regression.
cedset$hihi <- cedset$enqhi*cedset$farmhi
cedset$hihimed <- cedset$enqhi*cedset$farmhimed
cedset$lowlow <- cedset$enqlow*cedset$farmlow
cedset$lowlowmed <- cedset$enqlow*cedset$farmlowmed
cedset$hitrips <- (cedset$enqhi)*cedset$Numrectrips
cedset$hiinc <- (cedset$enqhi)*cedset$income
cedset$hiedu <- (cedset$enqhi)*cedset$edu
cedset$hiact <- (cedset$enqhi)*cedset$activitydummy
cedset$farmhitrips <- (cedset$farmhi)*cedset$Numrectrips
cedset$farmhiinc <- (cedset$farmhi)*cedset$income
cedset$farmhiedu <- (cedset$farmhi)*cedset$edu
cedset$farmhiact <- (cedset$farmhi)*cedset$activitydummy
cedset$himedtrips <- (cedset$enqhimed)*cedset$Numrectrips
cedset$himedinc <- (cedset$enqhimed)*cedset$income
cedset$himededu <- (cedset$enqhimed)*cedset$edu
cedset$himedact <- (cedset$enqhimed)*cedset$activitydummy
cedset$qualgen <- cedset$lnqual*cedset$gender
cedset$qualgenn <-cedset$Environmental_Quality*cedset$gender
cedset$convgen <-cedset$Farm_Conversion*cedset$gender
# Does a higher level of conversion increase WTP for quality?
cedset$qualconv <- cedset$lnqual*cedset$Farm_Conversion
cedset$qualconvn <- cedset$Environmental_Quality*cedset$Farm_Conversion
# Does higher income increase WTP for quality?
cedset$qualinc <-cedset$lnqual*cedset$income
cedset$qualincn <- cedset$Environmental_Quality*cedset$income
# Does higher income increase WTP/WTA for conversion?
cedset$convinc <-cedset$Farm_Conversion*cedset$income
# Does having been to Puget Sound increase WTP/WTA for quality and conversion?
cedset$qualrec <-cedset$lnqual*cedset$Recdummy
cedset$convrec <-cedset$Farm_Conversion*cedset$Recdummy
# Does having more education increase WTP for quality? Result: not statistically significant.
cedset$qualedu <-cedset$lnqual*cedset$edu
cedset$qualedun <- cedset$Environmental_Quality*cedset$edu
cedset$convedu <-cedset$Farm_Conversion*cedset$edu
# Does participating in downstream activities affect WTP for quality?
cedset$qualact <- cedset$lnqual*cedset$activitydummy
cedset$qualactn <- cedset$Environmental_Quality*cedset$activitydummy
# Does having taken more rec trips to Puget Sound increase WTP for quality?
cedset$qualtrips <- cedset$lnqual*cedset$Numrectrips
cedset$qualtripsn <- cedset$Environmental_Quality*cedset$Numrectrips
|
/CE/141110 CE Analysis Script Enelow.R
|
no_license
|
SarahUBC/wf_ce
|
R
| false
| false
| 11,982
|
r
|
# Info about all the experimental design packages in R can be found here:
# http://cran.r-project.org/web/views/ExperimentalDesign.html
# NOTE(review): install.packages() runs on every execution of this script;
# consider commenting these out (or guarding with requireNamespace()) once
# the packages are installed.
install.packages("support.CEs")
install.packages("xtable")
install.packages("sm")
install.packages("lmtest")
install.packages("mlogit")
install.packages("ggplot2")
library(support.CEs)
library(survival)
library(stats)
library(xtable)
library(sm)
library(lmtest)
library(mlogit)
library(ggplot2)
# installs and loads support.CEs package. Dependencies are DoE.base, MASS, simex.
# Also, DoE.base depends on vcd. Make sure all these ducks are in a row.
# Build a rotation choice-experiment design: 3 attributes x 4 levels,
# 2 alternatives per choice set, 2 blocks. The fixed seed makes the
# generated design reproducible across runs.
des2v2 <- rotation.design(attribute.names = list(Environmental_Quality = c("2", "3", "4", "5"),
Farm_Conversion = c(".02", ".04", ".08",".16"),
Annual_10Yr_Tax = c("10", "20", "40","80")),
nalternatives = 2,
nblocks = 2,
row.renames = FALSE,
seed = 987)
# I recoded the Environmental_Quality variable to be numerical and continuous on a scale of 1 to 5.
# This allows the design matrix to be made with the common base. Using environmental quality as a categorical variable did not
# allow me to enter a common base option that was separate from the existing categories.
# ques2v2 <- questionnaire(des2v2, common=c(Environmental_Quality = "1", Farm_Conversion = "0", Annual_10Yr_Tax = "0"), quote=FALSE)
desmat <- make.design.matrix(choice.experiment.design=des2v2,
optout=FALSE,
continuous.attributes=c("Environmental_Quality", "Farm_Conversion","Annual_10Yr_Tax"),
common=c(Environmental_Quality="1", Farm_Conversion="0", Annual_10Yr_Tax="0"),
binary=FALSE)
# Survey responses; the UNC path is site-specific -- update if the data moves.
cevars <- read.csv('\\\\daryl\\users\\nenelow\\My Documents\\Puget Sound Institute\\Survey Data\\141111CESurveyNumericResponses.csv');
# Merge respondent data with the design matrix; Q1-Q8 hold the chosen
# alternative for each of the eight choice tasks.
cedset <- make.dataset(respondent.dataset=cevars, design.matrix=desmat,
choice.indicators=c("Q1","Q2","Q3","Q4","Q5","Q6","Q7","Q8"))
#Interaction Terms
# Taking the natural log of quality variable to incorporate diminishing returns to
# increased quality
cedset$lnqual <- log(cedset$Environmental_Quality)
# Conversion Level Dummies
# Pre-allocate all level-dummy columns at 0; they are populated per level
# further below (farm* from Farm_Conversion, enq* from Environmental_Quality).
rows <- nrow(cedset)
cedset$farmverylow <- 0
cedset$farmlow <- 0
cedset$farmlowmed <- 0
cedset$farmhimed <- 0
cedset$farmhi <- 0
cedset$enqverylow <-0
cedset$enqlow <- 0
cedset$enqlowmed <- 0
cedset$enqhimed <- 0
cedset$enqhi <- 0
# Populate the level-dummy columns, vectorized.
# The original row-by-row loop with an if/else-if chain set at most one
# dummy per row; because the tested levels are mutually exclusive, a
# vectorized equality test per level is equivalent -- and, unlike the
# scalar `if`, it does not error out on NA rows.
# NOTE(review): equality against 0.02/0.04/... on doubles is inherited
# from the original; it works only if the levels are stored exactly.
cedset$farmverylow <- as.numeric(cedset$Farm_Conversion == 0.00)
cedset$farmlow     <- as.numeric(cedset$Farm_Conversion == 0.02)
cedset$farmlowmed  <- as.numeric(cedset$Farm_Conversion == 0.04)
cedset$farmhimed   <- as.numeric(cedset$Farm_Conversion == 0.08)
cedset$farmhi      <- as.numeric(cedset$Farm_Conversion == 0.16)
# Environmental-quality dummies, one per level 1..5, same approach.
cedset$enqverylow <- as.numeric(cedset$Environmental_Quality == 1)
cedset$enqlow     <- as.numeric(cedset$Environmental_Quality == 2)
cedset$enqlowmed  <- as.numeric(cedset$Environmental_Quality == 3)
cedset$enqhimed   <- as.numeric(cedset$Environmental_Quality == 4)
cedset$enqhi      <- as.numeric(cedset$Environmental_Quality == 5)
# ASC (alternative-specific constant) dummy for the status-quo option:
# 1 where ASC == 0, else 0. Vectorized replacement of the original row
# loop; the unused counter `numsqs` from the original has been dropped.
cedset$ascsq <- as.numeric(cedset$ASC == 0)
# Status Quo Interactions with respondent characteristics.
# NOTE(review): this uses `inc` while the "Odds and Ends" section below
# uses `income` -- verify both columns really exist in the survey data.
cedset$sqinc <- cedset$ascsq * cedset$inc
cedset$sqedu <- cedset$ascsq * cedset$edu
cedset$sqact <- cedset$ascsq * cedset$activitydummy
cedset$sqtrips <- cedset$ascsq * cedset$Numrectrips
# Specification 1: Continuous variables, with same interactions as Specification 1,
# except in levels of quality instead of logs.
# Conditional logit via survival::clogit; strata(STR) groups the rows
# belonging to one choice task, WT are respondent weights.
rescont <- clogit(RES~Environmental_Quality+Farm_Conversion+Annual_10Yr_Tax+ascsq+
sqinc+sqedu+sqact+sqtrips+
strata(STR), data=cedset, weights=WT)
summary(rescont)
# Marginal willingness-to-pay; confidence intervals simulated with 1000 draws.
wtpcont <- mwtp(rescont, monetary.variables=c("Annual_10Yr_Tax"),
nonmonetary.variables=c("Environmental_Quality", "Farm_Conversion"), nreplications=1000)
wtpcont
gofcont <- gofm(rescont)
gofcont
# Specification 2: Natural log of quality, levels of conversion;
# Interaction between quality and conversion, quality and income, conversion and income,
# Quality and education, quality and activities chosen, quality and number of trips chosen
reslog <- clogit(RES~lnqual+Farm_Conversion+Annual_10Yr_Tax+ascsq+
sqinc+sqedu+sqact+sqtrips+strata(STR),
data=cedset, weights=WT)
summary(reslog)
wtplog <- mwtp(reslog, monetary.variables=c("Annual_10Yr_Tax"),
nonmonetary.variables=c("lnqual", "Farm_Conversion"), nreplications=1000)
wtplog
goflog <- gofm(reslog)
goflog
# Specification 3: Levels of each non-monetary variable separated into binary dummies,
# with the common base level as the omitted dummy. Interactions between high environmental quality
# and number of trips taken, income, and downstream activities.
# Also, some interactions included between specific levels of quality and conversion.
reslev <- clogit(RES~enqlow+enqlowmed+enqhimed+enqhi+
farmlowmed+farmhimed+farmhi+
sqinc+sqedu+sqact+sqtrips+
Annual_10Yr_Tax+
strata(STR), data=cedset, weights=WT)
summary(reslev)
# NOTE(review): "farmlow" is requested from mwtp() below but is not a
# regressor in reslev (its formula starts at farmlowmed) -- confirm intended.
wtplev <- mwtp(reslev, monetary.variables=c("Annual_10Yr_Tax"),
nonmonetary.variables=c("enqlowmed", "enqhimed", "enqhi",
"farmlow", "farmlowmed", "farmhimed", "farmhi"),
nreplications=1000)
wtplev
goflev <- gofm(reslev)
goflev
# Likelihood Ratio Test of Model Goodness of Fit
llr <- lrtest(rescont,reslog,reslev)
# Specification 4: use glm from stats package. Note: it has a very low goodness of fit, hence will not use.
wts <- as.numeric(cedset$WT)
# NOTE(review): qualrec, convrec, qualinc and convinc are only created in
# the "Odds and Ends" section at the bottom of this script; a clean
# top-to-bottom run will fail here unless that section is moved up first.
lince <- glm(RES~lnqual+Farm_Conversion+Annual_10Yr_Tax+qualrec+convrec+qualinc+convinc, data=cedset, weights=wts)
wtplin <- mwtp(lince,monetary.variables=c("Annual_10Yr_Tax"), nonmonetary.variables=c("Environmental_Quality", "Farm_Conversion"))
# Now the graphics.
# Histograms for pennies
# Respondents allocated "pennies" across ecosystem attributes; these
# histograms show the resulting relative-importance distributions.
wat <- hist(cevars$pennieswater, breaks=20, col="blue", main="Relative Importance of Water Quality")
saf <- hist(cevars$penniessafety, breaks=20, col="pink", main="Relative Importance of Safety for Swimming and Fishing")
fis <- hist(cevars$penniesfish, breaks=20, col="lightblue", main="Relative Importance of Fish Habitat")
veg <- hist(cevars$penniesveg, breaks=20, col="green", main="Relative Importance of Vegetation")
bir <- hist(cevars$penniesbird, breaks=20, col="orange", main="Relative Importance of Bird Habitat")
# Kernel density plots for marginal WTPs for all specifications
# (each mwtps column holds the simulated WTP draws for one attribute)
margslog <- wtplog$mwtps
hlq <- hist(margslog[,1], breaks=100, col="lightblue", main="WTP for Additional Water Quality (Natural Logs)")
dlq <- density(margslog[,1])
plot(dlq, main="WTP for Additional Water Quality (in Natural Logs)")
dcq <- density(margslog[,2])
plot(dcq, main="WTP for Additional Farmland Conversion")
margscont <- wtpcont$mwtps
dq <- density(margscont[,1])
plot(dq, main="Marginal WTP for Additional Environmental Quality")
dc <- density(margscont[,2])
plot(dc, main="Marginal WTP for Additional Farmland Conversion")
margslev <- wtplev$mwtps
dqpoor <- density(margslev[,1])
plot(dqpoor, main="WTP for Poor Environmental Quality")
dqfair <- density(margslev[,2])
plot(dqfair, main="WTP for Fair Environmental Quality")
dqgood <- density(margslev[,3])
plot(dqgood, main="WTP for Good Environmental Quality")
dqvgood <- density(margslev[,4])
plot(dqvgood, main="WTP for Very Good Environmental Quality")
dcfour <- density(margslev[,5])
plot(dcfour, main="WTP for 4% Farmland Conversion")
dceight <- density(margslev[,6])
plot(dceight, main="WTP for 8% Farmland Conversion")
dcsixteen <- density(margslev[,7])
plot(dcsixteen, main="WTP for 16% Farmland Conversion")
# Comparison
mgslevfr <- as.data.frame(margslev)
# NOTE(review): attach() is discouraged (it masks the search path) and the
# attached frame is never referenced by name afterwards -- consider removing.
attach(mgslevfr)
# Trying to get it into shape for an mlogit with random parameters
cedtoml <- mlogit.data(cedset, shape="long", choice="RES", alt.var="ALT", id.var="ID")
# Now for the confusing part.
ml <- mlogit(RES~lnqual+Farm_Conversion+Annual_10Yr_Tax, cedtoml, na.action=na.omit, alt.subset=c(1,2,3))
# Gives error message:
# Error in if (abs(x - oldx) < ftol) { :
# missing value where TRUE/FALSE needed
# Yet:
x <- is.na(cedtoml)
length(x[x==TRUE])
# Will show you that there are no missing values in the entire dataset!
# And, the following nonsensical formula works:
# ml <- mlogit(lnqual~Farm_Conversion+Annual_10Yr_Tax | 0, data=cedtoml)
# So, there is something wrong with the RES variable. But it is a logical variable with no missing values.
class(cedtoml$RES)
# [1] "logical"
str(cedtoml)
# Will give you the structure of the cedtoml object - what data type it is and all of its variables
# A funny work-around. Create a numeric out of the logical variable.
cedtoml$choice <- as.numeric(cedtoml$RES)
# Odds and Ends
# Interaction terms with specific levels - omitted from regressions.
# NOTE(review): Specification 4 earlier in this script references qualrec,
# convrec, qualinc and convinc, which are only created here -- on a clean
# run this section must execute before that regression.
cedset$hihi <- cedset$enqhi*cedset$farmhi
cedset$hihimed <- cedset$enqhi*cedset$farmhimed
cedset$lowlow <- cedset$enqlow*cedset$farmlow
cedset$lowlowmed <- cedset$enqlow*cedset$farmlowmed
cedset$hitrips <- (cedset$enqhi)*cedset$Numrectrips
cedset$hiinc <- (cedset$enqhi)*cedset$income
cedset$hiedu <- (cedset$enqhi)*cedset$edu
cedset$hiact <- (cedset$enqhi)*cedset$activitydummy
cedset$farmhitrips <- (cedset$farmhi)*cedset$Numrectrips
cedset$farmhiinc <- (cedset$farmhi)*cedset$income
cedset$farmhiedu <- (cedset$farmhi)*cedset$edu
cedset$farmhiact <- (cedset$farmhi)*cedset$activitydummy
cedset$himedtrips <- (cedset$enqhimed)*cedset$Numrectrips
cedset$himedinc <- (cedset$enqhimed)*cedset$income
cedset$himededu <- (cedset$enqhimed)*cedset$edu
cedset$himedact <- (cedset$enqhimed)*cedset$activitydummy
cedset$qualgen <- cedset$lnqual*cedset$gender
cedset$qualgenn <-cedset$Environmental_Quality*cedset$gender
cedset$convgen <-cedset$Farm_Conversion*cedset$gender
# Does a higher level of conversion increase WTP for quality?
cedset$qualconv <- cedset$lnqual*cedset$Farm_Conversion
cedset$qualconvn <- cedset$Environmental_Quality*cedset$Farm_Conversion
# Does higher income increase WTP for quality?
cedset$qualinc <-cedset$lnqual*cedset$income
cedset$qualincn <- cedset$Environmental_Quality*cedset$income
# Does higher income increase WTP/WTA for conversion?
cedset$convinc <-cedset$Farm_Conversion*cedset$income
# Does having been to Puget Sound increase WTP/WTA for quality and conversion?
cedset$qualrec <-cedset$lnqual*cedset$Recdummy
cedset$convrec <-cedset$Farm_Conversion*cedset$Recdummy
# Does having more education increase WTP for quality? Result: not statistically significant.
cedset$qualedu <-cedset$lnqual*cedset$edu
cedset$qualedun <- cedset$Environmental_Quality*cedset$edu
cedset$convedu <-cedset$Farm_Conversion*cedset$edu
# Does participating in downstream activities affect WTP for quality?
cedset$qualact <- cedset$lnqual*cedset$activitydummy
cedset$qualactn <- cedset$Environmental_Quality*cedset$activitydummy
# Does having taken more rec trips to Puget Sound increase WTP for quality?
cedset$qualtrips <- cedset$lnqual*cedset$Numrectrips
cedset$qualtripsn <- cedset$Environmental_Quality*cedset$Numrectrips
|
bargraph(~Response|Gender, type = "count", data = OneTrueLove)
|
/inst/snippets/Figure7.04.R
|
no_license
|
klaassenj/Lock5withR
|
R
| false
| false
| 64
|
r
|
bargraph(~Response|Gender, type = "count", data = OneTrueLove)
|
## Read the complete data set
print("Reading 'household_power_consumption.txt'")
# The file has 9 columns: Date, Time (character) plus 7 numeric measurements.
# BUG FIX: the original colClasses had length 4 and read.table recycles
# colClasses across columns, which silently left Voltage, Global_intensity
# and Sub_metering_3 as character. Spell out all 9 classes instead.
Consumption <- read.table("household_power_consumption.txt",
sep=";",
header=TRUE,
na.strings = "?",
colClasses=c(rep("character",2),rep("numeric",7)))
## Transform dates and times
print("Transforming Dates and Times")
Consumption$Date <- as.Date(Consumption$Date, format="%d/%m/%Y")
# Combine the (already ISO-formatted) Date with Time, then parse to POSIXlt.
Consumption$Time <- paste(Consumption$Date,Consumption$Time)
Consumption$Time <- strptime(Consumption$Time, format="%Y-%m-%d %H:%M:%S")
## Create a subset with only 2007-02-01 and 2007-02-02 data
print("Subsetting dates 2007-02-01 and 2007-02-02")
ConsumptionSUB <- subset(Consumption, Date=="2007-02-01" | Date=="2007-02-02")
## Plot the graph
print("Plotting and saving Histogram")
# Close any open devices so the png device is the active one.
graphics.off()
png(filename = "plot1.png", width = 480, height = 480, units = "px")
hist(ConsumptionSUB$Global_active_power,
col="red",
main="Global Active Power",
xlab="Global Active Power (kilowatts)",
ylab="Frequency")
graphics.off()
|
/Scripts/plot1.R
|
no_license
|
DiegoNavarroNavas/ExData_Plotting1
|
R
| false
| false
| 1,112
|
r
|
## Read the complete data set
print("Reading 'household_power_consumption.txt'")
# The file has 9 columns: Date, Time (character) plus 7 numeric measurements.
# BUG FIX: the original colClasses had length 4 and read.table recycles
# colClasses across columns, which silently left Voltage, Global_intensity
# and Sub_metering_3 as character. Spell out all 9 classes instead.
Consumption <- read.table("household_power_consumption.txt",
sep=";",
header=TRUE,
na.strings = "?",
colClasses=c(rep("character",2),rep("numeric",7)))
## Transform dates and times
print("Transforming Dates and Times")
Consumption$Date <- as.Date(Consumption$Date, format="%d/%m/%Y")
# Combine the (already ISO-formatted) Date with Time, then parse to POSIXlt.
Consumption$Time <- paste(Consumption$Date,Consumption$Time)
Consumption$Time <- strptime(Consumption$Time, format="%Y-%m-%d %H:%M:%S")
## Create a subset with only 2007-02-01 and 2007-02-02 data
print("Subsetting dates 2007-02-01 and 2007-02-02")
ConsumptionSUB <- subset(Consumption, Date=="2007-02-01" | Date=="2007-02-02")
## Plot the graph
print("Plotting and saving Histogram")
# Close any open devices so the png device is the active one.
graphics.off()
png(filename = "plot1.png", width = 480, height = 480, units = "px")
hist(ConsumptionSUB$Global_active_power,
col="red",
main="Global Active Power",
xlab="Global Active Power (kilowatts)",
ylab="Frequency")
graphics.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DNA Codons to AA.R
\name{dna_codons_to_aa}
\alias{dna_codons_to_aa}
\title{DNA to Amino Acid}
\usage{
dna_codons_to_aa(codons)
}
\value{
None
}
\description{
Based on the given DNA codon, the corresponding amino acid is displayed
}
|
/man/dna_codons_to_aa.Rd
|
no_license
|
rforbiodatascience21/2021_group_3_rpackage
|
R
| false
| true
| 294
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DNA Codons to AA.R
\name{dna_codons_to_aa}
\alias{dna_codons_to_aa}
\title{DNA to Amino Acid}
\usage{
dna_codons_to_aa(codons)
}
\value{
None
}
\description{
Based on the given DNA codon, the corresponding amino acid is displayed
}
|
# Exploration and PCA of a census-style data set (column names match the
# UCI "Adult" data set). NOTE: file.choose() makes this script interactive.
#1.
adult_db <- read.table(file.choose(), header = FALSE, sep= ",",na.strings = "?" , colClasses = NA, strip.white = TRUE, stringsAsFactors = FALSE )
names(adult_db) = c("age",
"workclass",
"fnlwgt",
"education",
"education_num",
"marital_status",
"occupation",
"relationship",
"race",
"sex",
"capital_gain",
"capital_loss",
"hours_per_week",
"native_country",
"class")
View(adult_db)
# Recode the class label; note `class` stays character ("1"/"0") here and
# is converted to numeric later for plotting.
adult_db$class[adult_db$class==">50K"] <- 1
adult_db$class[adult_db$class=="<=50K"] <- 0
#2.
# Count NAs per column, then drop every row containing at least one NA.
apply(adult_db, 2, function(x){sum(is.na(x))})
row.na <- apply(adult_db, 1, function(x){any(is.na(x))})
sum(row.na) #number of row wants to remove
adult_db_nomiss <- adult_db[!row.na,]
#or adult_db_nomiss <- na.omit(adult_db)
#3.
# Fixed seed makes the 1000-row sample reproducible.
set.seed(1013)
idx = sample(1:nrow(adult_db_nomiss),1000)
adult_db_lim = adult_db_nomiss[idx,]
row.names(adult_db_lim) <- NULL
View(adult_db_lim)
#3a.
# Overlaid age histograms, split by income class (add=T draws on top).
No_adult_db<- (adult_db_lim$class== 0)
Yes_adult_db<- (adult_db_lim$class== 1)
hist(adult_db_lim$age[No_adult_db], breaks = 50, main = "Age of adults", xlab = "Age", ylab = "Frequency", col = "red")
hist(adult_db_lim$age[Yes_adult_db], breaks = 50, xlab = "age", ylab = "frequency", col = "blue", add=T)
legend(x = 50, y = 30, legend = c(">50K", "<=50K"),
col=c( "blue" , "red"), pch = 20, cex = 0.75)
#3b.
par(mar=c(5,5,2,2))
height_of_bar <- table(adult_db_lim$race)
barplot(height_of_bar, col=c("black", "red", "green", "blue" , "cyan"),
main = "Race of adults",
names.arg = c("Amer-Indian-Eskimo", "Asian-Pac-Islander", "Black", "other" , "White"),
cex.names = 0.8, xlab = "Race")
legend(x=0.5, y=580, legend = c("Amer-Indian-Eskimo", "Asian-Pac-Islander", "Black", "other" , "White"),col=c("black", "red", "green", "blue" , "cyan"), pch = 20, cex = 0.50)
#3c.
# Boxplot of age; boxplot.stats()$out lists the outlier values.
boxplot(adult_db_lim$age, pch=20, col="red", main = "Age of adults", ylab="Age")
boxplot.stats(adult_db_lim$age)$out
#4.
# Standardize the six numeric columns (z-scores) with scale().
adult_db_numeric <- adult_db_lim[,c("age", "fnlwgt", "education_num", "capital_gain", "capital_loss", "hours_per_week")]
class_val <- as.numeric(adult_db_lim[,c("class")])
adult_db_num_std <- scale(adult_db_numeric)
# or
#m <- apply(adult_db_numeric,2,mean)
#std <- apply(adult_db_numeric,2,sd)
#for (j in 1:6) { # each specified col for (j in cols)
#adult_db_num_std[,j] <- sapply(adult_db_numeric[,j] , function(x) (x- m[j]) / std[j])
#}
# Sanity check: means ~0 and sds ~1 after standardization.
apply(adult_db_num_std, 2, mean)
apply(adult_db_num_std, 2, sd)
#5.
#5.a
# PCA; center/scale are redundant here since the data are already
# standardized, but harmless.
pr.out <- prcomp(adult_db_num_std[,c("age", "fnlwgt", "education_num", "capital_gain", "capital_loss", "hours_per_week")], retx=TRUE, center=TRUE, scale=TRUE)
names(pr.out)
head(pr.out$x)
principal_components <- pr.out$x
# class_val + 2 maps class 0 -> palette colour 2 (red), 1 -> 3 (green).
plot(principal_components[,1:2], col = (class_val + 2), pch = 20, main = "First two Principal Components")
legend(x = -8, y = 4, legend = c( "<=50K", ">50K"),
col=c("red", "green"), pch = 20, cex = 0.75)
#5.b
# Proportion of variance explained per component, and its cumulative sum.
pr.var <- (pr.out$sdev)^2
pve <- pr.var/sum(pr.var)
par(mfrow=c(1,2), oma=c(0,0,2,0))
plot(pve, xlab = "Principal Components", ylab = "Variance", type = "b", ylim = c(0,1), col="red")
plot(cumsum(pve), xlab = "Principal Components", ylab = "Cumulative Variance", type = "b", ylim = c(0,1), col="red")
#5.c
#cumsum(pve)
#0.2289799 0.4037900 0.5654989 0.7243586 0.8731598 1.0000000
#Based on Q.5b for at least 50% of the total variance which is 0.5654989 we need three(3) principal components and for at least 90% of the total variance which is between 0.8731598 and 1.0000000 we need six(6) principal components.
|
/H1.R
|
no_license
|
sarbi127/R
|
R
| false
| false
| 3,781
|
r
|
# Exploration and PCA of a census-style data set (column names match the
# UCI "Adult" data set). NOTE: file.choose() makes this script interactive.
#1.
adult_db <- read.table(file.choose(), header = FALSE, sep= ",",na.strings = "?" , colClasses = NA, strip.white = TRUE, stringsAsFactors = FALSE )
names(adult_db) = c("age",
"workclass",
"fnlwgt",
"education",
"education_num",
"marital_status",
"occupation",
"relationship",
"race",
"sex",
"capital_gain",
"capital_loss",
"hours_per_week",
"native_country",
"class")
View(adult_db)
# Recode the class label; note `class` stays character ("1"/"0") here and
# is converted to numeric later for plotting.
adult_db$class[adult_db$class==">50K"] <- 1
adult_db$class[adult_db$class=="<=50K"] <- 0
#2.
# Count NAs per column, then drop every row containing at least one NA.
apply(adult_db, 2, function(x){sum(is.na(x))})
row.na <- apply(adult_db, 1, function(x){any(is.na(x))})
sum(row.na) #number of row wants to remove
adult_db_nomiss <- adult_db[!row.na,]
#or adult_db_nomiss <- na.omit(adult_db)
#3.
# Fixed seed makes the 1000-row sample reproducible.
set.seed(1013)
idx = sample(1:nrow(adult_db_nomiss),1000)
adult_db_lim = adult_db_nomiss[idx,]
row.names(adult_db_lim) <- NULL
View(adult_db_lim)
#3a.
# Overlaid age histograms, split by income class (add=T draws on top).
No_adult_db<- (adult_db_lim$class== 0)
Yes_adult_db<- (adult_db_lim$class== 1)
hist(adult_db_lim$age[No_adult_db], breaks = 50, main = "Age of adults", xlab = "Age", ylab = "Frequency", col = "red")
hist(adult_db_lim$age[Yes_adult_db], breaks = 50, xlab = "age", ylab = "frequency", col = "blue", add=T)
legend(x = 50, y = 30, legend = c(">50K", "<=50K"),
col=c( "blue" , "red"), pch = 20, cex = 0.75)
#3b.
par(mar=c(5,5,2,2))
height_of_bar <- table(adult_db_lim$race)
barplot(height_of_bar, col=c("black", "red", "green", "blue" , "cyan"),
main = "Race of adults",
names.arg = c("Amer-Indian-Eskimo", "Asian-Pac-Islander", "Black", "other" , "White"),
cex.names = 0.8, xlab = "Race")
legend(x=0.5, y=580, legend = c("Amer-Indian-Eskimo", "Asian-Pac-Islander", "Black", "other" , "White"),col=c("black", "red", "green", "blue" , "cyan"), pch = 20, cex = 0.50)
#3c.
# Boxplot of age; boxplot.stats()$out lists the outlier values.
boxplot(adult_db_lim$age, pch=20, col="red", main = "Age of adults", ylab="Age")
boxplot.stats(adult_db_lim$age)$out
#4.
# Standardize the six numeric columns (z-scores) with scale().
adult_db_numeric <- adult_db_lim[,c("age", "fnlwgt", "education_num", "capital_gain", "capital_loss", "hours_per_week")]
class_val <- as.numeric(adult_db_lim[,c("class")])
adult_db_num_std <- scale(adult_db_numeric)
# or
#m <- apply(adult_db_numeric,2,mean)
#std <- apply(adult_db_numeric,2,sd)
#for (j in 1:6) { # each specified col for (j in cols)
#adult_db_num_std[,j] <- sapply(adult_db_numeric[,j] , function(x) (x- m[j]) / std[j])
#}
# Sanity check: means ~0 and sds ~1 after standardization.
apply(adult_db_num_std, 2, mean)
apply(adult_db_num_std, 2, sd)
#5.
#5.a
# PCA; center/scale are redundant here since the data are already
# standardized, but harmless.
pr.out <- prcomp(adult_db_num_std[,c("age", "fnlwgt", "education_num", "capital_gain", "capital_loss", "hours_per_week")], retx=TRUE, center=TRUE, scale=TRUE)
names(pr.out)
head(pr.out$x)
principal_components <- pr.out$x
# class_val + 2 maps class 0 -> palette colour 2 (red), 1 -> 3 (green).
plot(principal_components[,1:2], col = (class_val + 2), pch = 20, main = "First two Principal Components")
legend(x = -8, y = 4, legend = c( "<=50K", ">50K"),
col=c("red", "green"), pch = 20, cex = 0.75)
#5.b
# Proportion of variance explained per component, and its cumulative sum.
pr.var <- (pr.out$sdev)^2
pve <- pr.var/sum(pr.var)
par(mfrow=c(1,2), oma=c(0,0,2,0))
plot(pve, xlab = "Principal Components", ylab = "Variance", type = "b", ylim = c(0,1), col="red")
plot(cumsum(pve), xlab = "Principal Components", ylab = "Cumulative Variance", type = "b", ylim = c(0,1), col="red")
#5.c
#cumsum(pve)
#0.2289799 0.4037900 0.5654989 0.7243586 0.8731598 1.0000000
#Based on Q.5b for at least 50% of the total variance which is 0.5654989 we need three(3) principal components and for at least 90% of the total variance which is between 0.8731598 and 1.0000000 we need six(6) principal components.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_minimalfeatures.R
\name{get_minimalfeatures}
\alias{get_minimalfeatures}
\title{Extract the coordinates of the apartments with characteristics asked from the client}
\usage{
get_minimalfeatures(min_price, max_price, min_surface, max_surface, N,
min_room, max_room, castorus_table)
}
\arguments{
\item{min_price}{minimum price of the apartment}
\item{max_price}{maximum price of the apartment}
\item{min_surface}{minimum surface of the apartment}
\item{max_surface}{maximum surface of the apartment}
\item{N}{arrondissement in Paris}
\item{min_room}{minimum number of rooms}
\item{max_room}{maximum number of rooms}
\item{castorus_table}{data frame with the apartment information}
}
\value{
dataframe with two columns longitude and latitude
}
\description{
Extract the coordinates of the apartments with characteristics asked from the client
}
|
/man/get_minimalfeatures.Rd
|
no_license
|
paris-appartemnt-project/apartment_project
|
R
| false
| true
| 924
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_minimalfeatures.R
\name{get_minimalfeatures}
\alias{get_minimalfeatures}
\title{Extract the coordinates of the apartments with characteristics asked from the client}
\usage{
get_minimalfeatures(min_price, max_price, min_surface, max_surface, N,
min_room, max_room, castorus_table)
}
\arguments{
\item{min_price}{minimum price of the apartment}
\item{max_price}{maximum price of the apartment}
\item{min_surface}{minimum surface of the apartment}
\item{max_surface}{maximum surface of the apartment}
\item{N}{arrondissement in Paris}
\item{min_room}{minimum number of rooms}
\item{max_room}{maximum number of rooms}
\item{castorus_table}{data frame with the apartment information}
}
\value{
dataframe with two columns longitude and latitude
}
\description{
Extract the coordinates of the apartments with characteristics asked from the client
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sf2_2010.R
\name{read_geo,SF2_2010,character-method}
\alias{read_geo,SF2_2010,character-method}
\title{read_geo}
\usage{
\S4method{read_geo}{SF2_2010,character}(sf, path)
}
\arguments{
\item{sf}{a summary file object.}
\item{path}{path to a geo file corresponding to \code{sf}.}
}
\description{
Read a geo file from the 2010 SF2 summary file into a \code{tibble}. See
documentation for generic.
}
|
/man/read_geo-SF2_2010-character-method.Rd
|
permissive
|
andrewraim/sfreader
|
R
| false
| true
| 476
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sf2_2010.R
\name{read_geo,SF2_2010,character-method}
\alias{read_geo,SF2_2010,character-method}
\title{read_geo}
\usage{
\S4method{read_geo}{SF2_2010,character}(sf, path)
}
\arguments{
\item{sf}{a summary file object.}
\item{path}{path to a geo file corresponding to \code{sf}.}
}
\description{
Read a geo file from the 2010 SF2 summary file into a \code{tibble}. See
documentation for generic.
}
|
#' Compute Spearman's rho
#'
#' Missing values are automatically removed.
#'
#' @param x,y Two vectors of the same length.
#' @param alternative Type of test, either "two.sided", "greater" for positive
#' correlations, or "less" for negative correlations.
#' @return \itemize{ \item \code{estimate} the correlation coefficient. \item
#' \code{p.value} the p-value of the test. \item \code{statistic} the t
#' statistic of the test. }
#' @export
spearman.test <- function(x, y, alternative="two.sided"){
  # Reject a misspelled `alternative` up front; previously an unknown value
  # left `sig` undefined and failed later with an obscure "object not found".
  alternative <- match.arg(alternative, c("two.sided", "greater", "less"))
  if(!is.vector(x) || !is.vector(y)){
    stop("spearman: x and y must be vectors.")
  }
  if(length(x)!=length(y)){
    stop("spearman: the vectors do not have equal lengths.")
  }
  # Keep pairwise-complete observations only.
  m <- cbind(x,y)
  m <- m[complete.cases(m), ]
  n <- nrow(m)
  # Spearman's rho is Pearson's correlation computed on the ranks.
  x <- rank(m[,1])
  y <- rank(m[,2])
  corv <- cor(x, y)
  # t statistic with n - 2 degrees of freedom.
  test <- corv * sqrt((n - 2)/(1 - corv^2))
  sig <- switch(alternative,
    two.sided = 2 * (1 - pt(abs(test), n - 2)),
    greater = 1 - pt(test, n - 2),
    less = pt(test, n - 2))
  list(estimate = corv, p.value = sig, statistic = test)
}
#' Compute Kendall's tau
#'
#' Missing values are automatically removed.
#' Also compute 1-alpha confidence interval using
#' the method recommended by Long and Cliff (1997).
#'
#' @param x,y Two vectors of the same length.
#' @param alternative Type of test, either "two.sided", "greater" for positive
#' correlations, or "less" for negative correlations.
#' @param alpha Used to compute 1-alpha confidence interval - default to 0.05.
#' @return \itemize{
#' \item \code{estimate} the correlation coefficient.
#' \item \code{ci} the confidence interval for tau.
#' \item \code{p.value} the p-value of the test.
#' \item \code{statistic} test statistic used to compute the p value.}
#' @export
kendall.test <- function(x, y, alternative="two.sided", alpha = 0.05){
  # Reject a misspelled `alternative` up front; previously an unknown value
  # left `sig` undefined and failed later with an obscure "object not found".
  alternative <- match.arg(alternative, c("two.sided", "greater", "less"))
  if(!is.vector(x) || !is.vector(y)){
    stop("kendall: x and y must be vectors.")
  }
  if(length(x)!=length(y)){
    stop("kendall: the vectors do not have equal lengths.")
  }
  # Keep pairwise-complete observations only.
  m <- cbind(x,y)
  m <- m[complete.cases(m), ]
  n <- nrow(m)
  x <- m[,1]
  y <- m[,2]
  # tv[i,j] is +1 for a concordant pair, -1 for a discordant pair,
  # 0 for ties and on the diagonal.
  xdif <- outer(x,x,FUN="-")
  ydif <- outer(y,y,FUN="-")
  tv <- sign(xdif)*sign(ydif)
  dbar <- apply(tv,1,mean)
  corv <- sum(tv)/(n*(n-1))
  # Consistent variance estimate C for tau (Long and Cliff, 1997).
  A <- sum((dbar-corv)^2)/(n-1)
  B <- (n*(n-1)*(-1)*corv^2+sum(tv^2))/(n^2-n-1)
  C <- (4*(n-2)*A+2*B)/(n*(n-1))
  # qnorm(alpha/2) is negative, so cilow < corv < cihi.
  crit <- qnorm(alpha/2)
  cilow <- corv+crit*sqrt(C)
  cihi <- corv-crit*sqrt(C)
  # Normal-approximation statistic under the null of independence.
  test <- corv/sqrt((2*(2*n+5))/(9*n*(n-1)))
  sig <- switch(alternative,
    two.sided = 2 * (1 - pnorm(abs(test))),
    greater = 1 - pnorm(test),
    less = pnorm(test))
  list(estimate = corv, ci = c(cilow,cihi), p.value = sig, statistic = test)
}
#' Compute Pearson's rho
#'
#' Missing values are automatically removed.
#'
#' @param x,y Two vectors of the same length.
#' @param alternative Type of test, either "two.sided", "greater" for positive
#' correlations, or "less" for negative correlations.
#' @return
#' \itemize{
#' \item \code{estimate} the correlation coefficient.
#' \item \code{p.value} the p-value of the test.
#' \item \code{statistic} the t statistic of the test.
#' }
#' @export
pearson.test <- function(x, y, alternative="two.sided"){
  # Reject a misspelled `alternative` up front; previously an unknown value
  # left `sig` undefined and failed later with an obscure "object not found".
  alternative <- match.arg(alternative, c("two.sided", "greater", "less"))
  if(!is.vector(x) || !is.vector(y)){
    stop("pearson: x and y must be vectors.")
  }
  if(length(x)!=length(y)){
    stop("pearson: the vectors do not have equal lengths.")
  }
  # Keep pairwise-complete observations only.
  m <- cbind(x,y)
  m <- m[complete.cases(m), ]
  n <- nrow(m)
  x <- m[,1]
  y <- m[,2]
  corv <- cor(x, y)
  # t statistic with n - 2 degrees of freedom.
  test <- corv * sqrt((n - 2)/(1 - corv^2))
  sig <- switch(alternative,
    two.sided = 2 * (1 - pt(abs(test), n - 2)),
    greater = 1 - pt(test, n - 2),
    less = pt(test, n - 2))
  list(estimate = corv, p.value = sig, statistic = test)
}
#' Compute the percentage bend correlation between x and y.
#'
#' @param x,y Two vectors of the same length.
#' @param beta The bending constant (default 0.2).
#' @param alternative Type of test, either "two.sided", "greater" for positive
#' correlations, or "less" for negative correlations.
#' @return
#' \itemize{
#' \item \code{estimate} the correlation coefficient.
#' \item \code{p.value} the p-value of the test.
#' \item \code{statistic} the t statistic of the test.
#' }
#' @export
pbcor.test <- function(x, y, beta=.2, alternative="two.sided"){
  # Reject a misspelled `alternative` up front; previously an unknown value
  # left `sig` undefined and failed later with an obscure "object not found".
  alternative <- match.arg(alternative, c("two.sided", "greater", "less"))
  if(!is.vector(x) || !is.vector(y)){
    stop("pbcor: x and y must be vectors.")
  }
  if(length(x)!=length(y)){
    stop("pbcor: the vectors do not have equal lengths.")
  }
  # Keep pairwise-complete observations only.
  m <- cbind(x,y)
  m <- m[complete.cases(m), ]
  n <- nrow(m)
  x <- m[,1]
  y <- m[,2]
  # Robust scale per variable: the floor((1-beta)*n)-th smallest absolute
  # deviation from the median.
  temp <- sort(abs(x-median(x)))
  omhatx <- temp[floor((1-beta)*n)]
  temp <- sort(abs(y-median(y)))
  omhaty <- temp[floor((1-beta)*n)]
  # Standardize around the percentage bend measure of location, then clamp
  # the scores to [-1, 1] (the "bending").
  a <- (x-pbos(x,beta))/omhatx
  b <- (y-pbos(y,beta))/omhaty
  a <- ifelse(a<=-1,-1,a)
  a <- ifelse(a>=1,1,a)
  b <- ifelse(b<=-1,-1,b)
  b <- ifelse(b>=1,1,b)
  corv <- sum(a*b)/sqrt(sum(a^2)*sum(b^2))
  # t statistic with n - 2 degrees of freedom.
  test <- corv*sqrt((n - 2)/(1 - corv^2))
  sig <- switch(alternative,
    two.sided = 2 * (1 - pt(abs(test), n - 2)),
    greater = 1 - pt(test, n - 2),
    less = pt(test, n - 2))
  list(estimate=corv, statistic=test, p.value=sig)
}
#' Compute the Winsorized correlation between x and y.
#'
#' @param x,y Two vectors of the same length.
#' @param tr The amount of Winsorization (default 0.2).
#' @param alternative Type of test, either "two.sided", "greater" for positive
#' correlations, or "less" for negative correlations.
#' @return
#' \itemize{
#' \item \code{estimate} the correlation coefficient.
#' \item \code{p.value} the p-value of the test.
#' \item \code{statistic} the t statistic of the test.
#' }
#' @export
wincor.test <- function(x, y, tr=0.2, alternative="two.sided"){
  # Reject a misspelled `alternative` up front; previously an unknown value
  # left `sig` undefined and failed later with an obscure "object not found".
  alternative <- match.arg(alternative, c("two.sided", "greater", "less"))
  if(!is.vector(x) || !is.vector(y)){
    stop("wincor: x and y must be vectors.")
  }
  if(length(x)!=length(y)){
    stop("wincor: the vectors do not have equal lengths.")
  }
  # Keep pairwise-complete observations only.
  m <- cbind(x,y)
  m <- m[complete.cases(m), ]
  n <- nrow(m)
  x <- m[,1]
  y <- m[,2]
  # g observations are Winsorized in each tail, reducing the degrees of
  # freedom to n - 2g - 2 below.
  g <- floor(tr*n)
  xvec <- winval(x,tr)
  yvec <- winval(y,tr)
  corv <- cor(xvec,yvec)
  test <- corv*sqrt((n-2)/(1.-corv^2))
  sig <- switch(alternative,
    two.sided = 2 * (1 - pt(abs(test), n-2*g-2)),
    greater = 1 - pt(test, n-2*g-2),
    less = pt(test, n-2*g-2))
  list(estimate=corv, statistic=test, p.value=sig)
}
#' Winsorize the data in the vector x.
#'
#' Called by \code{wincor}. Observations below the tr-quantile cutoff are
#' raised to it, and observations above the upper cutoff are lowered to it.
#'
#' @param x A vector.
#' @param tr The amount of Winsorization, between 0 and 1 (default 0.2).
#'
#' @export
winval <- function(x, tr=0.2){
  srt <- sort(x)
  n <- length(x)
  # Indices of the lower and upper Winsorizing cutoffs in the sorted data.
  lo_idx <- floor(tr*n) + 1
  hi_idx <- n - lo_idx + 1
  lo <- srt[lo_idx]
  hi <- srt[hi_idx]
  # Clamp from below first, then from above.
  pmin(pmax(x, lo), hi)
}
#' Compute the one-step percentage bend measure of location
#'
#' Robust location estimate used by \code{pbcor}: observations whose
#' standardized distance from the median exceeds 1 are set aside and a
#' correction term is added instead.
pbos <- function(x, beta=.2){
  n <- length(x)
  med <- median(x)
  # Scale estimate: the floor((1 - beta) * n)-th smallest absolute deviation.
  abs_dev <- sort(abs(x - med))
  omega <- abs_dev[floor((1 - beta)*n)]
  psi <- (x - med)/omega
  n_low <- sum(psi < (-1))
  n_high <- sum(psi > 1)
  # Zero out the bent observations, then correct by omega * (n_high - n_low).
  kept <- ifelse(psi < (-1) | psi > 1, 0, x)
  (sum(kept) + omega*(n_high - n_low)) / (n - n_low - n_high)
}
|
/R/correlation.R
|
no_license
|
FigTop/bootcorci
|
R
| false
| false
| 7,197
|
r
|
#' Compute Spearman's rho
#'
#' Missing values are automatically removed.
#'
#' @param x,y Two vectors of the same length.
#' @param alternative Type of test, either "two.sided", "greater" for positive
#' correlations, or "less" for negative correlations.
#' @return \itemize{ \item \code{estimate} the correlation coefficient. \item
#' \code{p.value} the p-value of the test. \item \code{statistic} the t
#' statistic of the test. }
#' @export
spearman.test <- function(x, y, alternative="two.sided"){
  # Reject a misspelled `alternative` up front; previously an unknown value
  # left `sig` undefined and failed later with an obscure "object not found".
  alternative <- match.arg(alternative, c("two.sided", "greater", "less"))
  if(!is.vector(x) || !is.vector(y)){
    stop("spearman: x and y must be vectors.")
  }
  if(length(x)!=length(y)){
    stop("spearman: the vectors do not have equal lengths.")
  }
  # Keep pairwise-complete observations only.
  m <- cbind(x,y)
  m <- m[complete.cases(m), ]
  n <- nrow(m)
  # Spearman's rho is Pearson's correlation computed on the ranks.
  x <- rank(m[,1])
  y <- rank(m[,2])
  corv <- cor(x, y)
  # t statistic with n - 2 degrees of freedom.
  test <- corv * sqrt((n - 2)/(1 - corv^2))
  sig <- switch(alternative,
    two.sided = 2 * (1 - pt(abs(test), n - 2)),
    greater = 1 - pt(test, n - 2),
    less = pt(test, n - 2))
  list(estimate = corv, p.value = sig, statistic = test)
}
#' Compute Kendall's tau
#'
#' Missing values are automatically removed.
#' Also compute 1-alpha confidence interval using
#' the method recommended by Long and Cliff (1997).
#'
#' @param x,y Two vectors of the same length.
#' @param alternative Type of test, either "two.sided", "greater" for positive
#' correlations, or "less" for negative correlations.
#' @param alpha Used to compute 1-alpha confidence interval - default to 0.05.
#' @return \itemize{
#' \item \code{estimate} the correlation coefficient.
#' \item \code{ci} the confidence interval for tau.
#' \item \code{p.value} the p-value of the test.
#' \item \code{statistic} test statistic used to compute the p value.}
#' @export
kendall.test <- function(x, y, alternative="two.sided", alpha = 0.05){
  # Reject a misspelled `alternative` up front; previously an unknown value
  # left `sig` undefined and failed later with an obscure "object not found".
  alternative <- match.arg(alternative, c("two.sided", "greater", "less"))
  if(!is.vector(x) || !is.vector(y)){
    stop("kendall: x and y must be vectors.")
  }
  if(length(x)!=length(y)){
    stop("kendall: the vectors do not have equal lengths.")
  }
  # Keep pairwise-complete observations only.
  m <- cbind(x,y)
  m <- m[complete.cases(m), ]
  n <- nrow(m)
  x <- m[,1]
  y <- m[,2]
  # tv[i,j] is +1 for a concordant pair, -1 for a discordant pair,
  # 0 for ties and on the diagonal.
  xdif <- outer(x,x,FUN="-")
  ydif <- outer(y,y,FUN="-")
  tv <- sign(xdif)*sign(ydif)
  dbar <- apply(tv,1,mean)
  corv <- sum(tv)/(n*(n-1))
  # Consistent variance estimate C for tau (Long and Cliff, 1997).
  A <- sum((dbar-corv)^2)/(n-1)
  B <- (n*(n-1)*(-1)*corv^2+sum(tv^2))/(n^2-n-1)
  C <- (4*(n-2)*A+2*B)/(n*(n-1))
  # qnorm(alpha/2) is negative, so cilow < corv < cihi.
  crit <- qnorm(alpha/2)
  cilow <- corv+crit*sqrt(C)
  cihi <- corv-crit*sqrt(C)
  # Normal-approximation statistic under the null of independence.
  test <- corv/sqrt((2*(2*n+5))/(9*n*(n-1)))
  sig <- switch(alternative,
    two.sided = 2 * (1 - pnorm(abs(test))),
    greater = 1 - pnorm(test),
    less = pnorm(test))
  list(estimate = corv, ci = c(cilow,cihi), p.value = sig, statistic = test)
}
#' Compute Pearson's rho
#'
#' Missing values are automatically removed.
#'
#' @param x,y Two vectors of the same length.
#' @param alternative Type of test, either "two.sided", "greater" for positive
#' correlations, or "less" for negative correlations.
#' @return
#' \itemize{
#' \item \code{estimate} the correlation coefficient.
#' \item \code{p.value} the p-value of the test.
#' \item \code{statistic} the t statistic of the test.
#' }
#' @export
pearson.test <- function(x, y, alternative="two.sided"){
  # Reject a misspelled `alternative` up front; previously an unknown value
  # left `sig` undefined and failed later with an obscure "object not found".
  alternative <- match.arg(alternative, c("two.sided", "greater", "less"))
  if(!is.vector(x) || !is.vector(y)){
    stop("pearson: x and y must be vectors.")
  }
  if(length(x)!=length(y)){
    stop("pearson: the vectors do not have equal lengths.")
  }
  # Keep pairwise-complete observations only.
  m <- cbind(x,y)
  m <- m[complete.cases(m), ]
  n <- nrow(m)
  x <- m[,1]
  y <- m[,2]
  corv <- cor(x, y)
  # t statistic with n - 2 degrees of freedom.
  test <- corv * sqrt((n - 2)/(1 - corv^2))
  sig <- switch(alternative,
    two.sided = 2 * (1 - pt(abs(test), n - 2)),
    greater = 1 - pt(test, n - 2),
    less = pt(test, n - 2))
  list(estimate = corv, p.value = sig, statistic = test)
}
#' Compute the percentage bend correlation between x and y.
#'
#' @param x,y Two vectors of the same length.
#' @param beta The bending constant (default 0.2).
#' @param alternative Type of test, either "two.sided", "greater" for positive
#' correlations, or "less" for negative correlations.
#' @return
#' \itemize{
#' \item \code{estimate} the correlation coefficient.
#' \item \code{p.value} the p-value of the test.
#' \item \code{statistic} the t statistic of the test.
#' }
#' @export
pbcor.test <- function(x, y, beta=.2, alternative="two.sided"){
  # Reject a misspelled `alternative` up front; previously an unknown value
  # left `sig` undefined and failed later with an obscure "object not found".
  alternative <- match.arg(alternative, c("two.sided", "greater", "less"))
  if(!is.vector(x) || !is.vector(y)){
    stop("pbcor: x and y must be vectors.")
  }
  if(length(x)!=length(y)){
    stop("pbcor: the vectors do not have equal lengths.")
  }
  # Keep pairwise-complete observations only.
  m <- cbind(x,y)
  m <- m[complete.cases(m), ]
  n <- nrow(m)
  x <- m[,1]
  y <- m[,2]
  # Robust scale per variable: the floor((1-beta)*n)-th smallest absolute
  # deviation from the median.
  temp <- sort(abs(x-median(x)))
  omhatx <- temp[floor((1-beta)*n)]
  temp <- sort(abs(y-median(y)))
  omhaty <- temp[floor((1-beta)*n)]
  # Standardize around the percentage bend measure of location, then clamp
  # the scores to [-1, 1] (the "bending").
  a <- (x-pbos(x,beta))/omhatx
  b <- (y-pbos(y,beta))/omhaty
  a <- ifelse(a<=-1,-1,a)
  a <- ifelse(a>=1,1,a)
  b <- ifelse(b<=-1,-1,b)
  b <- ifelse(b>=1,1,b)
  corv <- sum(a*b)/sqrt(sum(a^2)*sum(b^2))
  # t statistic with n - 2 degrees of freedom.
  test <- corv*sqrt((n - 2)/(1 - corv^2))
  sig <- switch(alternative,
    two.sided = 2 * (1 - pt(abs(test), n - 2)),
    greater = 1 - pt(test, n - 2),
    less = pt(test, n - 2))
  list(estimate=corv, statistic=test, p.value=sig)
}
#' Compute the Winsorized correlation between x and y.
#'
#' @param x,y Two vectors of the same length.
#' @param tr The amount of Winsorization (default 0.2).
#' @param alternative Type of test, either "two.sided", "greater" for positive
#' correlations, or "less" for negative correlations.
#' @return
#' \itemize{
#' \item \code{estimate} the correlation coefficient.
#' \item \code{p.value} the p-value of the test.
#' \item \code{statistic} the t statistic of the test.
#' }
#' @export
wincor.test <- function(x, y, tr=0.2, alternative="two.sided"){
  # Reject a misspelled `alternative` up front; previously an unknown value
  # left `sig` undefined and failed later with an obscure "object not found".
  alternative <- match.arg(alternative, c("two.sided", "greater", "less"))
  if(!is.vector(x) || !is.vector(y)){
    stop("wincor: x and y must be vectors.")
  }
  if(length(x)!=length(y)){
    stop("wincor: the vectors do not have equal lengths.")
  }
  # Keep pairwise-complete observations only.
  m <- cbind(x,y)
  m <- m[complete.cases(m), ]
  n <- nrow(m)
  x <- m[,1]
  y <- m[,2]
  # g observations are Winsorized in each tail, reducing the degrees of
  # freedom to n - 2g - 2 below.
  g <- floor(tr*n)
  xvec <- winval(x,tr)
  yvec <- winval(y,tr)
  corv <- cor(xvec,yvec)
  test <- corv*sqrt((n-2)/(1.-corv^2))
  sig <- switch(alternative,
    two.sided = 2 * (1 - pt(abs(test), n-2*g-2)),
    greater = 1 - pt(test, n-2*g-2),
    less = pt(test, n-2*g-2))
  list(estimate=corv, statistic=test, p.value=sig)
}
#' Winsorize the data in the vector x.
#'
#' Called by \code{wincor}. Observations below the tr-quantile cutoff are
#' raised to it, and observations above the upper cutoff are lowered to it.
#'
#' @param x A vector.
#' @param tr The amount of Winsorization, between 0 and 1 (default 0.2).
#'
#' @export
winval <- function(x, tr=0.2){
  srt <- sort(x)
  n <- length(x)
  # Indices of the lower and upper Winsorizing cutoffs in the sorted data.
  lo_idx <- floor(tr*n) + 1
  hi_idx <- n - lo_idx + 1
  lo <- srt[lo_idx]
  hi <- srt[hi_idx]
  # Clamp from below first, then from above.
  pmin(pmax(x, lo), hi)
}
#' Compute the one-step percentage bend measure of location
#'
#' Robust location estimate used by \code{pbcor}: observations whose
#' standardized distance from the median exceeds 1 are set aside and a
#' correction term is added instead.
pbos <- function(x, beta=.2){
  n <- length(x)
  med <- median(x)
  # Scale estimate: the floor((1 - beta) * n)-th smallest absolute deviation.
  abs_dev <- sort(abs(x - med))
  omega <- abs_dev[floor((1 - beta)*n)]
  psi <- (x - med)/omega
  n_low <- sum(psi < (-1))
  n_high <- sum(psi > 1)
  # Zero out the bent observations, then correct by omega * (n_high - n_low).
  kept <- ifelse(psi < (-1) | psi > 1, 0, x)
  (sum(kept) + omega*(n_high - n_low)) / (n - n_low - n_high)
}
|
# Author: Da Dui Wei (大队委)
#
# Predicts the outcome of 5-vs-5 matches from per-character ("shishen")
# statistics, via linear and logistic regression.
library('caret')
library('randomForest')
library('rpart')
#setwd("~/Documents/TrustScience")

# match: one row per match; V1-V5 and V6-V10 appear to be the two teams'
# characters (V1-V5 the recorded winners -- TODO confirm against the data).
# stat: per-character aggregate statistics.
match = read.csv('../data/match.csv',stringsAsFactors = F,encoding = 'UTF-8',header = F)
stat = read.csv('../data/stat.csv',stringsAsFactors = F,encoding = 'UTF-8',header = F)
colnames(stat) = c('shishen','win','lose','total','win_rate')
stat$match_rate = stat$total/nrow(match)*100
# win_rate arrives as a percentage string such as "57%".
stat$win_rate = as.integer(gsub("%","",stat$win_rate))
stat = stat[,c('shishen','win_rate','match_rate')]

# 70/30 train/test split.
train_index = sample(nrow(match),0.7*nrow(match))
match_train = match[train_index,]
match_test = match[-train_index,]

# Per-training-match features: difference between the two teams in average
# win rate and in average match rate.
result = data.frame()
for(i in 1:nrow(match_train)){
  # BUG FIX: the lookups previously read match$V1[i] etc. -- the first
  # nrow(match_train) rows of the FULL table -- while the loop iterates over
  # the randomly sampled match_train rows, so features and outcomes described
  # different matches. Features must come from match_train.
  win_win_rate = mean(sapply(match_train[i,1:5], function(s){stat$win_rate[stat$shishen == s]}))
  win_match_rate = mean(sapply(match_train[i,1:5], function(s){stat$match_rate[stat$shishen == s]}))
  lose_win_rate = mean(sapply(match_train[i,6:10], function(s){stat$win_rate[stat$shishen == s]}))
  lose_match_rate = mean(sapply(match_train[i,6:10], function(s){stat$match_rate[stat$shishen == s]}))
  result = rbind(result,c(win_win_rate-lose_win_rate,win_match_rate-lose_match_rate))
}
colnames(result) = c('win_rate.diff','match_rate.diff')

# Label half the rows +1 and negate the features of the other half, so the
# model sees both orderings of the two teams.
half = floor(nrow(match_train)/2)
outcome = c(rep(1,half),rep(-1,nrow(match_train)-half))
result[(half+1):nrow(match_train),] = -result[(half+1):nrow(match_train),]
dat = cbind(result,outcome)

## linear regression
model.lm = lm(outcome~win_rate.diff+match_rate.diff, data = dat)
summary(model.lm)
# Per-character linear score from the fitted slope coefficients.
stat$score_lm = coefficients(model.lm)[2] * stat$win_rate + coefficients(model.lm)[3] * stat$match_rate

## logistic regression
dat$outcome_glm = as.factor(dat$outcome)
model.glm = glm(outcome_glm~win_rate.diff+match_rate.diff, data = dat, family = 'binomial')
summary(model.glm)
stat$score_glm = coefficients(model.glm)[2] * stat$win_rate + coefficients(model.glm)[3] * stat$match_rate

# Score every match: difference of team-mean scores plus the intercept; the
# logistic score is mapped through the inverse logit to a win probability.
for(i in 1:nrow(match)){
  team1_score_lm = mean(sapply(match[i,1:5],function(x){stat$score_lm[stat$shishen == x]}))
  team2_score_lm = mean(sapply(match[i,6:10],function(x){stat$score_lm[stat$shishen == x]}))
  team1_score_glm = mean(sapply(match[i,1:5],function(x){stat$score_glm[stat$shishen == x]}))
  team2_score_glm = mean(sapply(match[i,6:10],function(x){stat$score_glm[stat$shishen == x]}))
  match$score_diff_lm[i] = team1_score_lm - team2_score_lm + coefficients(model.lm)[1]
  match$score_diff_glm[i] = exp(team1_score_glm - team2_score_glm+coefficients(model.glm)[1])/(1+exp(team1_score_glm - team2_score_glm+coefficients(model.glm)[1]))
}

# Linear-model accuracy at several decision thresholds: a match is predicted
# only when |score| >= threshold, and counted correct when score >= threshold.
for(threshold in seq(0,0.6,0.1)){
  train_match_num = sum(match$score_diff_lm[train_index] >= threshold | match$score_diff_lm[train_index] <= -threshold)
  train_accuracy = sum(match$score_diff_lm[train_index] >= threshold)/train_match_num*100
  test_match_num = sum(match$score_diff_lm[-train_index] >= threshold | match$score_diff_lm[-train_index] <= -threshold)
  test_accuracy = sum(match$score_diff_lm[-train_index] >= threshold)/test_match_num*100
  cat("threshold:",threshold," train_accuracy:",train_accuracy,' train_match_num:',train_match_num,'\n')
  cat("threshold:",threshold," test_accuracy:",test_accuracy,' test_match_num:',test_match_num,'\n')
}
# Logistic-model accuracy at probability thresholds.
for(threshold in seq(0.4,0.6,0.05)){
  train_accuracy = sum(match$score_diff_glm[train_index] >= threshold)/nrow(match_train)*100
  test_accuracy = sum(match$score_diff_glm[-train_index] >= threshold)/nrow(match_test)*100
  cat("threshold:",threshold," train_accuracy:",train_accuracy,' test_accuracy:',test_accuracy,'\n')
}

## Visualization - shishen match_rate & win_rate
plot(x = stat$win_rate, y = stat$match_rate,xlim = c(20,100),ylim = c(0,30),xlab = 'win_rate',ylab = 'match_rate',cex = 0.1)
text(stat$win_rate, stat$match_rate, labels = stat$shishen,family = "Heiti SC Light", cex = 0.6, pos = 3, col = 'blue')
grid(NULL,NULL)
## Visualization - logistic regression
|
/utils/linreg.R
|
no_license
|
lozy219/TrustScience
|
R
| false
| false
| 5,054
|
r
|
# Author: Da Dui Wei (大队委)
#
# Predicts the outcome of 5-vs-5 matches from per-character ("shishen")
# statistics, via linear and logistic regression.
library('caret')
library('randomForest')
library('rpart')
#setwd("~/Documents/TrustScience")

# match: one row per match; V1-V5 and V6-V10 appear to be the two teams'
# characters (V1-V5 the recorded winners -- TODO confirm against the data).
# stat: per-character aggregate statistics.
match = read.csv('../data/match.csv',stringsAsFactors = F,encoding = 'UTF-8',header = F)
stat = read.csv('../data/stat.csv',stringsAsFactors = F,encoding = 'UTF-8',header = F)
colnames(stat) = c('shishen','win','lose','total','win_rate')
stat$match_rate = stat$total/nrow(match)*100
# win_rate arrives as a percentage string such as "57%".
stat$win_rate = as.integer(gsub("%","",stat$win_rate))
stat = stat[,c('shishen','win_rate','match_rate')]

# 70/30 train/test split.
train_index = sample(nrow(match),0.7*nrow(match))
match_train = match[train_index,]
match_test = match[-train_index,]

# Per-training-match features: difference between the two teams in average
# win rate and in average match rate.
result = data.frame()
for(i in 1:nrow(match_train)){
  # BUG FIX: the lookups previously read match$V1[i] etc. -- the first
  # nrow(match_train) rows of the FULL table -- while the loop iterates over
  # the randomly sampled match_train rows, so features and outcomes described
  # different matches. Features must come from match_train.
  win_win_rate = mean(sapply(match_train[i,1:5], function(s){stat$win_rate[stat$shishen == s]}))
  win_match_rate = mean(sapply(match_train[i,1:5], function(s){stat$match_rate[stat$shishen == s]}))
  lose_win_rate = mean(sapply(match_train[i,6:10], function(s){stat$win_rate[stat$shishen == s]}))
  lose_match_rate = mean(sapply(match_train[i,6:10], function(s){stat$match_rate[stat$shishen == s]}))
  result = rbind(result,c(win_win_rate-lose_win_rate,win_match_rate-lose_match_rate))
}
colnames(result) = c('win_rate.diff','match_rate.diff')

# Label half the rows +1 and negate the features of the other half, so the
# model sees both orderings of the two teams.
half = floor(nrow(match_train)/2)
outcome = c(rep(1,half),rep(-1,nrow(match_train)-half))
result[(half+1):nrow(match_train),] = -result[(half+1):nrow(match_train),]
dat = cbind(result,outcome)

## linear regression
model.lm = lm(outcome~win_rate.diff+match_rate.diff, data = dat)
summary(model.lm)
# Per-character linear score from the fitted slope coefficients.
stat$score_lm = coefficients(model.lm)[2] * stat$win_rate + coefficients(model.lm)[3] * stat$match_rate

## logistic regression
dat$outcome_glm = as.factor(dat$outcome)
model.glm = glm(outcome_glm~win_rate.diff+match_rate.diff, data = dat, family = 'binomial')
summary(model.glm)
stat$score_glm = coefficients(model.glm)[2] * stat$win_rate + coefficients(model.glm)[3] * stat$match_rate

# Score every match: difference of team-mean scores plus the intercept; the
# logistic score is mapped through the inverse logit to a win probability.
for(i in 1:nrow(match)){
  team1_score_lm = mean(sapply(match[i,1:5],function(x){stat$score_lm[stat$shishen == x]}))
  team2_score_lm = mean(sapply(match[i,6:10],function(x){stat$score_lm[stat$shishen == x]}))
  team1_score_glm = mean(sapply(match[i,1:5],function(x){stat$score_glm[stat$shishen == x]}))
  team2_score_glm = mean(sapply(match[i,6:10],function(x){stat$score_glm[stat$shishen == x]}))
  match$score_diff_lm[i] = team1_score_lm - team2_score_lm + coefficients(model.lm)[1]
  match$score_diff_glm[i] = exp(team1_score_glm - team2_score_glm+coefficients(model.glm)[1])/(1+exp(team1_score_glm - team2_score_glm+coefficients(model.glm)[1]))
}

# Linear-model accuracy at several decision thresholds: a match is predicted
# only when |score| >= threshold, and counted correct when score >= threshold.
for(threshold in seq(0,0.6,0.1)){
  train_match_num = sum(match$score_diff_lm[train_index] >= threshold | match$score_diff_lm[train_index] <= -threshold)
  train_accuracy = sum(match$score_diff_lm[train_index] >= threshold)/train_match_num*100
  test_match_num = sum(match$score_diff_lm[-train_index] >= threshold | match$score_diff_lm[-train_index] <= -threshold)
  test_accuracy = sum(match$score_diff_lm[-train_index] >= threshold)/test_match_num*100
  cat("threshold:",threshold," train_accuracy:",train_accuracy,' train_match_num:',train_match_num,'\n')
  cat("threshold:",threshold," test_accuracy:",test_accuracy,' test_match_num:',test_match_num,'\n')
}
# Logistic-model accuracy at probability thresholds.
for(threshold in seq(0.4,0.6,0.05)){
  train_accuracy = sum(match$score_diff_glm[train_index] >= threshold)/nrow(match_train)*100
  test_accuracy = sum(match$score_diff_glm[-train_index] >= threshold)/nrow(match_test)*100
  cat("threshold:",threshold," train_accuracy:",train_accuracy,' test_accuracy:',test_accuracy,'\n')
}

## Visualization - shishen match_rate & win_rate
plot(x = stat$win_rate, y = stat$match_rate,xlim = c(20,100),ylim = c(0,30),xlab = 'win_rate',ylab = 'match_rate',cex = 0.1)
text(stat$win_rate, stat$match_rate, labels = stat$shishen,family = "Heiti SC Light", cex = 0.6, pos = 3, col = 'blue')
grid(NULL,NULL)
## Visualization - logistic regression
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rTensor_Misc.R
\name{ttl}
\alias{ttl}
\title{Tensor Times List}
\usage{
ttl(tnsr, list_mat, ms = NULL)
}
\arguments{
\item{tnsr}{Tensor object with K modes}
\item{list_mat}{a list of matrices}
\item{ms}{a vector of modes to contract on (order should match the order of \code{list_mat})}
}
\value{
Tensor object with K modes
}
\description{
Contracted (m-Mode) product between a Tensor of arbitrary number of modes and a list of matrices. The result is folded back into Tensor.
}
\details{
Performs \code{ttm} repeatedly for a single Tensor and a list of matrices on multiple modes. For instance, suppose we want to multiply a Tensor object \code{tnsr} with three matrices \code{mat1}, \code{mat2}, \code{mat3} on modes 1, 2, and 3. We could do \code{ttm(ttm(ttm(tnsr,mat1,1),mat2,2),mat3,3)}, or we could do \code{ttl(tnsr,list(mat1,mat2,mat3),c(1,2,3))}. The order of the matrices in the list should obviously match the order of the modes. This is a common operation for various Tensor decompositions such as CP and Tucker. For the math on the m-Mode Product, see Kolda and Bader (2009).
}
\note{
The returned Tensor does not drop any modes equal to 1.
}
\examples{
tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60))
lizt <- list('mat1' = matrix(runif(30),ncol=3),
'mat2' = matrix(runif(40),ncol=4),
'mat3' = matrix(runif(50),ncol=5))
ttl(tnsr,lizt,ms=c(1,2,3))
}
\references{
T. Kolda, B. Bader, "Tensor decomposition and applications". SIAM Applied Mathematics and Applications 2009.
}
\seealso{
\code{\link{ttm}}
}
|
/man/ttl.Rd
|
no_license
|
cran/rTensor
|
R
| false
| true
| 1,597
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rTensor_Misc.R
\name{ttl}
\alias{ttl}
\title{Tensor Times List}
\usage{
ttl(tnsr, list_mat, ms = NULL)
}
\arguments{
\item{tnsr}{Tensor object with K modes}
\item{list_mat}{a list of matrices}
\item{ms}{a vector of modes to contract on (order should match the order of \code{list_mat})}
}
\value{
Tensor object with K modes
}
\description{
Contracted (m-Mode) product between a Tensor of arbitrary number of modes and a list of matrices. The result is folded back into Tensor.
}
\details{
Performs \code{ttm} repeatedly for a single Tensor and a list of matrices on multiple modes. For instance, suppose we want to multiply a Tensor object \code{tnsr} with three matrices \code{mat1}, \code{mat2}, \code{mat3} on modes 1, 2, and 3. We could do \code{ttm(ttm(ttm(tnsr,mat1,1),mat2,2),mat3,3)}, or we could do \code{ttl(tnsr,list(mat1,mat2,mat3),c(1,2,3))}. The order of the matrices in the list should obviously match the order of the modes. This is a common operation for various Tensor decompositions such as CP and Tucker. For the math on the m-Mode Product, see Kolda and Bader (2009).
}
\note{
The returned Tensor does not drop any modes equal to 1.
}
\examples{
tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60))
lizt <- list('mat1' = matrix(runif(30),ncol=3),
'mat2' = matrix(runif(40),ncol=4),
'mat3' = matrix(runif(50),ncol=5))
ttl(tnsr,lizt,ms=c(1,2,3))
}
\references{
T. Kolda, B. Bader, "Tensor decomposition and applications". SIAM Applied Mathematics and Applications 2009.
}
\seealso{
\code{\link{ttm}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guardduty_operations.R
\name{guardduty_create_sample_findings}
\alias{guardduty_create_sample_findings}
\title{Generates example findings of types specified by the list of finding
types}
\usage{
guardduty_create_sample_findings(DetectorId, FindingTypes)
}
\arguments{
\item{DetectorId}{[required] The ID of the detector to create sample findings for.}
\item{FindingTypes}{The types of sample findings to generate.}
}
\description{
Generates example findings of types specified by the list of finding
types. If 'NULL' is specified for \code{findingTypes}, the API generates
example findings of all supported finding types.
}
\section{Request syntax}{
\preformatted{svc$create_sample_findings(
DetectorId = "string",
FindingTypes = list(
"string"
)
)
}
}
\keyword{internal}
|
/paws/man/guardduty_create_sample_findings.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 862
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guardduty_operations.R
\name{guardduty_create_sample_findings}
\alias{guardduty_create_sample_findings}
\title{Generates example findings of types specified by the list of finding
types}
\usage{
guardduty_create_sample_findings(DetectorId, FindingTypes)
}
\arguments{
\item{DetectorId}{[required] The ID of the detector to create sample findings for.}
\item{FindingTypes}{The types of sample findings to generate.}
}
\description{
Generates example findings of types specified by the list of finding
types. If 'NULL' is specified for \code{findingTypes}, the API generates
example findings of all supported finding types.
}
\section{Request syntax}{
\preformatted{svc$create_sample_findings(
DetectorId = "string",
FindingTypes = list(
"string"
)
)
}
}
\keyword{internal}
|
\alias{gtkRecentManagerSetScreen}
\name{gtkRecentManagerSetScreen}
\title{gtkRecentManagerSetScreen}
\description{Sets the screen for a recent manager; the screen is used to
track the user's currently configured recently used documents
storage.}
\usage{gtkRecentManagerSetScreen(object, screen)}
\arguments{
\item{\code{object}}{[\code{\link{GtkRecentManager}}] a \code{\link{GtkRecentManager}}}
\item{\code{screen}}{[\code{\link{GdkScreen}}] a \code{\link{GdkScreen}}}
}
\details{ Since 2.10}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/man/gtkRecentManagerSetScreen.Rd
|
no_license
|
cran/RGtk2.10
|
R
| false
| false
| 569
|
rd
|
\alias{gtkRecentManagerSetScreen}
\name{gtkRecentManagerSetScreen}
\title{gtkRecentManagerSetScreen}
\description{Sets the screen for a recent manager; the screen is used to
track the user's currently configured recently used documents
storage.}
\usage{gtkRecentManagerSetScreen(object, screen)}
\arguments{
\item{\code{object}}{[\code{\link{GtkRecentManager}}] a \code{\link{GtkRecentManager}}}
\item{\code{screen}}{[\code{\link{GdkScreen}}] a \code{\link{GdkScreen}}}
}
\details{ Since 2.10}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
\name{StatComp20015-package}
\alias{StatComp20015-package}
\alias{StatComp20015}
\docType{package}
\title{
Final project of Statistical Computation
}
\description{
The R package contains several functions related to the SI model, SIS model, and SIR model. It also contains all of the homework assignments.
}
\author{
XuZiling, email optional.
Maintainer: XuZiling <xuziling@mail.ustc.edu.cn>
}
\references{
This optional section can contain literature or other references for
background information.
}
\keyword{ package }
\seealso{
Optional links to other man pages
}
|
/man/StatComp20015-package.Rd
|
no_license
|
XuZiling98/StatComp20015
|
R
| false
| false
| 585
|
rd
|
\name{StatComp20015-package}
\alias{StatComp20015-package}
\alias{StatComp20015}
\docType{package}
\title{
Final project of Statistical Computation
}
\description{
The R package contains several functions related to the SI model, SIS model, and SIR model. It also contains all of the homework assignments.
}
\author{
XuZiling, email optional.
Maintainer: XuZiling <xuziling@mail.ustc.edu.cn>
}
\references{
This optional section can contain literature or other references for
background information.
}
\keyword{ package }
\seealso{
Optional links to other man pages
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/event_vector.R
\name{convolve.event_term}
\alias{convolve.event_term}
\title{Convolve an event-related design matrix with an HRF.}
\usage{
\method{convolve}{event_term}(
x,
hrf,
sampling_frame,
drop.empty = TRUE,
summate = TRUE,
precision = 0.3,
...
)
}
\arguments{
\item{x}{A data frame containing the input design matrix.}
\item{hrf}{A Hemodynamic Response Function to convolve the design matrix with.}
\item{sampling_frame}{A data frame specifying the sampling frame for the analysis.}
\item{drop.empty}{Logical. If TRUE, empty rows in the design matrix will be removed.}
\item{summate}{Logical. If TRUE, the convolved design matrix will be summed.}
\item{precision}{Numeric. The desired precision for the calculations.}
\item{...}{Additional arguments to be passed to the function.}
}
\value{
A convolved design matrix, in tibble format.
}
\description{
This function takes an event-related design matrix and convolves it with
a specified Hemodynamic Response Function (HRF) to create a new design matrix
suitable for fMRI analysis. It also supports additional arguments for
flexibility and customization.
}
|
/man/convolve.event_term.Rd
|
no_license
|
bbuchsbaum/fmrireg
|
R
| false
| true
| 1,210
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/event_vector.R
\name{convolve.event_term}
\alias{convolve.event_term}
\title{Convolve an event-related design matrix with an HRF.}
\usage{
\method{convolve}{event_term}(
x,
hrf,
sampling_frame,
drop.empty = TRUE,
summate = TRUE,
precision = 0.3,
...
)
}
\arguments{
\item{x}{A data frame containing the input design matrix.}
\item{hrf}{A Hemodynamic Response Function to convolve the design matrix with.}
\item{sampling_frame}{A data frame specifying the sampling frame for the analysis.}
\item{drop.empty}{Logical. If TRUE, empty rows in the design matrix will be removed.}
\item{summate}{Logical. If TRUE, the convolved design matrix will be summed.}
\item{precision}{Numeric. The desired precision for the calculations.}
\item{...}{Additional arguments to be passed to the function.}
}
\value{
A convolved design matrix, in tibble format.
}
\description{
This function takes an event-related design matrix and convolves it with
a specified Hemodynamic Response Function (HRF) to create a new design matrix
suitable for fMRI analysis. It also supports additional arguments for
flexibility and customization.
}
|
## This was supplemental file 1 from the Q-Gen publication:
## http://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-015-0707-9
## http://static-content.springer.com/esm/art%3A10.1186%2Fs12859-015-0707-9/MediaObjects/12859_2015_707_MOESM1_ESM.txt
library(qusage)
#Q-Gen function
#' Q-Gen: generalized qusage analysis from precomputed model estimates.
#'
#' Builds a QSarray from user-supplied per-gene effect estimates, standard
#' errors and degrees of freedom (e.g. from a linear mixed model), then
#' aggregates gene sets and computes VIFs on the residual matrix.
#'
#' @param resids Numeric residual matrix (genes x samples).
#' @param labels Sample labels; replaced by a single "Resids" pool when
#'   var.equal is TRUE.
#' @param estimates Per-gene effect estimates (length == nrow(resids)).
#' @param dof Per-gene degrees of freedom (length == nrow(resids)).
#' @param std.errors Per-gene standard errors (length == nrow(resids)).
#' @param geneSets Named list of gene sets (indices/rownames into resids).
#' @param var.equal If TRUE, treat all samples as one residual pool.
#' @return A QSarray object; all standard qusage functions apply to it.
qusage_gen <- function(resids, labels, estimates, dof, std.errors, geneSets, var.equal = TRUE) {
  if (var.equal) {
    labels <- rep("Resids", ncol(resids))
  }
  # Fail fast with a real error (the original returned an error *string*,
  # which downstream code such as plot(results) would then try to use).
  if (nrow(resids) != length(estimates))
    stop("Number of rows in residual matrix do not equal length of estimate vectors", call. = FALSE)
  if (nrow(resids) != length(dof))
    stop("Number of rows in residual matrix do not equal length of dof vectors", call. = FALSE)
  if (nrow(resids) != length(std.errors))
    stop("Number of rows in residual matrix do not equal length of std.errors vectors", call. = FALSE)
  names(estimates) <- rownames(resids)
  names(dof) <- rownames(resids)
  names(std.errors) <- rownames(resids)
  qlist <- list(mean = estimates, SD = std.errors, dof = dof, labels = labels)
  results <- newQSarray(qlist)
  cat("Aggregating gene data for gene sets.")
  results <- aggregateGeneSet(results, geneSets, n.points = 2^14)
  cat("Done. \nCalculating VIF's on residual matrix.")
  results <- calcVIF(resids, results, useCAMERA = FALSE)
  cat("\nQ-Gen analysis complete.")
  results
}
######################
# Modification of the original qusage example to illustrate Q-Gen.
# Simulates a 500-gene x 20-sample matrix, fits per-gene models, and feeds
# the estimates, standard errors and dof into qusage_gen() defined above.
######################
##create example data - a set of 500 genes normally distributed across 20 patients
eset = matrix(rnorm(500*20),500,20, dimnames=list(1:500,1:20))
labels = c(rep("A",10),rep("B",10))
##create a number of gene sets with varying levels of differential expression.
## Each block of 30 genes receives one shared random mean shift in group B,
## so the sets differ in their true amount of differential expression.
geneSets = list()
for(i in 0:10){
genes = ((30*i)+1):(30*(i+1))
eset[genes,labels=="B"] = eset[genes,labels=="B"] + rnorm(1)
geneSets[[paste("Set",i)]] = genes
}
#Fitting a simple linear model for B vs A to get residual matrix
#Note these can come from conducting the models manually in R or they can simply come
#from result files from analyses conducted in other statistical software or packages
resids<- t(apply(eset,1,function(x){lm(x~labels)$residuals }))
rownames(resids)<-1:500
colnames(resids)<-labels
# Calculating per-gene estimates, standard errors, and dof via equal-variance
# t-tests (samples 11:20 are group B, 1:10 group A, so the estimate is B - A).
difference<-apply(eset,1,function(x){result=t.test(x[11:20],x[1:10],var.equal=T)$estimate
return(result[1]-result[2])})
deg.of.freedom<-apply(eset,1,function(x){t.test(x[11:20],x[1:10],var.equal=T)$parameter})
# Standard error recovered from the t statistic: se = estimate / t.
standarderror<-difference/apply(eset,1,function(x){t.test(x[11:20],x[1:10],var.equal=T)$statistic})
##Generalized Qusage Analysis derived from any linear mixed model
##Results object is the standard qusage object and all qusage functions can be applied directly
results = qusage_gen(resids,labels,difference,deg.of.freedom,standarderror,geneSets,T)
plot(results)
|
/etc/Q-Gen/q-gen.R
|
permissive
|
lianos/multiGSEA
|
R
| false
| false
| 2,833
|
r
|
## This was supplemental file 1 from the Q-Gen publication:
## http://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-015-0707-9
## http://static-content.springer.com/esm/art%3A10.1186%2Fs12859-015-0707-9/MediaObjects/12859_2015_707_MOESM1_ESM.txt
library(qusage)
#Q-Gen function
#' Q-Gen: generalized qusage analysis from precomputed model estimates.
#'
#' Builds a QSarray from user-supplied per-gene effect estimates, standard
#' errors and degrees of freedom (e.g. from a linear mixed model), then
#' aggregates gene sets and computes VIFs on the residual matrix.
#'
#' @param resids Numeric residual matrix (genes x samples).
#' @param labels Sample labels; replaced by a single "Resids" pool when
#'   var.equal is TRUE.
#' @param estimates Per-gene effect estimates (length == nrow(resids)).
#' @param dof Per-gene degrees of freedom (length == nrow(resids)).
#' @param std.errors Per-gene standard errors (length == nrow(resids)).
#' @param geneSets Named list of gene sets (indices/rownames into resids).
#' @param var.equal If TRUE, treat all samples as one residual pool.
#' @return A QSarray object; all standard qusage functions apply to it.
qusage_gen <- function(resids, labels, estimates, dof, std.errors, geneSets, var.equal = TRUE) {
  if (var.equal) {
    labels <- rep("Resids", ncol(resids))
  }
  # Fail fast with a real error (the original returned an error *string*,
  # which downstream code such as plot(results) would then try to use).
  if (nrow(resids) != length(estimates))
    stop("Number of rows in residual matrix do not equal length of estimate vectors", call. = FALSE)
  if (nrow(resids) != length(dof))
    stop("Number of rows in residual matrix do not equal length of dof vectors", call. = FALSE)
  if (nrow(resids) != length(std.errors))
    stop("Number of rows in residual matrix do not equal length of std.errors vectors", call. = FALSE)
  names(estimates) <- rownames(resids)
  names(dof) <- rownames(resids)
  names(std.errors) <- rownames(resids)
  qlist <- list(mean = estimates, SD = std.errors, dof = dof, labels = labels)
  results <- newQSarray(qlist)
  cat("Aggregating gene data for gene sets.")
  results <- aggregateGeneSet(results, geneSets, n.points = 2^14)
  cat("Done. \nCalculating VIF's on residual matrix.")
  results <- calcVIF(resids, results, useCAMERA = FALSE)
  cat("\nQ-Gen analysis complete.")
  results
}
######################
# Modification of the original qusage example to illustrate Q-Gen.
# Simulates a 500-gene x 20-sample matrix, fits per-gene models, and feeds
# the estimates, standard errors and dof into qusage_gen() defined above.
######################
##create example data - a set of 500 genes normally distributed across 20 patients
eset = matrix(rnorm(500*20),500,20, dimnames=list(1:500,1:20))
labels = c(rep("A",10),rep("B",10))
##create a number of gene sets with varying levels of differential expression.
## Each block of 30 genes receives one shared random mean shift in group B,
## so the sets differ in their true amount of differential expression.
geneSets = list()
for(i in 0:10){
genes = ((30*i)+1):(30*(i+1))
eset[genes,labels=="B"] = eset[genes,labels=="B"] + rnorm(1)
geneSets[[paste("Set",i)]] = genes
}
#Fitting a simple linear model for B vs A to get residual matrix
#Note these can come from conducting the models manually in R or they can simply come
#from result files from analyses conducted in other statistical software or packages
resids<- t(apply(eset,1,function(x){lm(x~labels)$residuals }))
rownames(resids)<-1:500
colnames(resids)<-labels
# Calculating per-gene estimates, standard errors, and dof via equal-variance
# t-tests (samples 11:20 are group B, 1:10 group A, so the estimate is B - A).
difference<-apply(eset,1,function(x){result=t.test(x[11:20],x[1:10],var.equal=T)$estimate
return(result[1]-result[2])})
deg.of.freedom<-apply(eset,1,function(x){t.test(x[11:20],x[1:10],var.equal=T)$parameter})
# Standard error recovered from the t statistic: se = estimate / t.
standarderror<-difference/apply(eset,1,function(x){t.test(x[11:20],x[1:10],var.equal=T)$statistic})
##Generalized Qusage Analysis derived from any linear mixed model
##Results object is the standard qusage object and all qusage functions can be applied directly
results = qusage_gen(resids,labels,difference,deg.of.freedom,standarderror,geneSets,T)
plot(results)
|
#' Join a dataset with station-1 weather records by date.
#'
#' Reads weather.csv from `data_dir`, keeps only station-1 records,
#' left-joins them onto `dat` by Date, and drops identifier-like columns
#' that are not useful as model features.
#'
#' @param data_dir Directory containing weather.csv.
#' @param dat Data frame, optionally with a Date column.
#' @param filter_distance Currently unused; presumably the radius (km) for a
#'   Haversine-based distance filter -- TODO confirm before relying on it.
#' @param col_remove Currently unused; presumably columns to drop.
#' @return `dat` joined with station-1 weather columns, Date column removed.
final_data <- function(data_dir, dat, filter_distance = 1, col_remove = c("")) {
  weather = read.csv(file.path(data_dir, "weather.csv"))
  if (!is.null(dat$Date)) {
    dat$Date = as.Date(dat$Date)
  }
  # Keep only station-1 records. The original code repeated this exact
  # filter a second time further down; the redundant copy has been removed.
  weather = weather[weather$Station == 1, ]
  weather$Date = as.Date(weather$Date, format = "%m/%d/%Y")
  # NOTE(review): loading a package inside a function is unusual; kept to
  # preserve the script's self-contained behavior.
  library(sqldf)
  # Haversine formula (hf): great-circle distance in km between two lon/lat
  # points. NOTE(review): currently unused -- retained for the commented-out
  # distance filtering that `filter_distance` was presumably meant to drive.
  hf <- function(long1, lat1, long2, lat2) {
    long1 = long1 * pi / 180
    lat1 = lat1 * pi / 180
    long2 = long2 * pi / 180
    lat2 = lat2 * pi / 180
    R <- 6371  # Earth mean radius [km]
    delta.long <- (long2 - long1)
    delta.lat <- (lat2 - lat1)
    a <- sin(delta.lat / 2)^2 + cos(lat1) * cos(lat2) * sin(delta.long / 2)^2
    c <- 2 * asin(sqrt(a))
    d = R * c
    return(d)  # Distance in km
  }
  # Variable removals: drop columns not wanted as features.
  dat$AddressAccuracy = NULL
  #dat$Block=NULL
  #dat$Longitude=NULL
  #dat$Latitude=NULL
  dat$Trap = NULL
  weather$Station = NULL
  weather$Depth = NULL
  weather$Water1 = NULL
  weather$SnowFall = NULL
  #weather$CodeSum=NULL
  # LEFT OUTER JOIN keeps every row of dat, matching weather rows by date.
  dat_final = sqldf("SELECT dat.*,weather.* FROM dat LEFT OUTER JOIN weather ON weather.Date==dat.Date")
  dat_final$Date = NULL
  return(dat_final)
}
|
/nile/final_data_no_spray_long_and_lat.R
|
no_license
|
stylianos-kampakis/kaggle_competitions
|
R
| false
| false
| 1,264
|
r
|
#' Join a dataset with station-1 weather records by date.
#'
#' Reads weather.csv from `data_dir`, keeps only station-1 records,
#' left-joins them onto `dat` by Date, and drops identifier-like columns
#' that are not useful as model features.
#'
#' @param data_dir Directory containing weather.csv.
#' @param dat Data frame, optionally with a Date column.
#' @param filter_distance Currently unused; presumably the radius (km) for a
#'   Haversine-based distance filter -- TODO confirm before relying on it.
#' @param col_remove Currently unused; presumably columns to drop.
#' @return `dat` joined with station-1 weather columns, Date column removed.
final_data <- function(data_dir, dat, filter_distance = 1, col_remove = c("")) {
  weather = read.csv(file.path(data_dir, "weather.csv"))
  if (!is.null(dat$Date)) {
    dat$Date = as.Date(dat$Date)
  }
  # Keep only station-1 records. The original code repeated this exact
  # filter a second time further down; the redundant copy has been removed.
  weather = weather[weather$Station == 1, ]
  weather$Date = as.Date(weather$Date, format = "%m/%d/%Y")
  # NOTE(review): loading a package inside a function is unusual; kept to
  # preserve the script's self-contained behavior.
  library(sqldf)
  # Haversine formula (hf): great-circle distance in km between two lon/lat
  # points. NOTE(review): currently unused -- retained for the commented-out
  # distance filtering that `filter_distance` was presumably meant to drive.
  hf <- function(long1, lat1, long2, lat2) {
    long1 = long1 * pi / 180
    lat1 = lat1 * pi / 180
    long2 = long2 * pi / 180
    lat2 = lat2 * pi / 180
    R <- 6371  # Earth mean radius [km]
    delta.long <- (long2 - long1)
    delta.lat <- (lat2 - lat1)
    a <- sin(delta.lat / 2)^2 + cos(lat1) * cos(lat2) * sin(delta.long / 2)^2
    c <- 2 * asin(sqrt(a))
    d = R * c
    return(d)  # Distance in km
  }
  # Variable removals: drop columns not wanted as features.
  dat$AddressAccuracy = NULL
  #dat$Block=NULL
  #dat$Longitude=NULL
  #dat$Latitude=NULL
  dat$Trap = NULL
  weather$Station = NULL
  weather$Depth = NULL
  weather$Water1 = NULL
  weather$SnowFall = NULL
  #weather$CodeSum=NULL
  # LEFT OUTER JOIN keeps every row of dat, matching weather rows by date.
  dat_final = sqldf("SELECT dat.*,weather.* FROM dat LEFT OUTER JOIN weather ON weather.Date==dat.Date")
  dat_final$Date = NULL
  return(dat_final)
}
|
#' @title Binary Indicator for Multi-State RDT with Multiple Periods
#'
#' @description Checks whether the cumulative failure probabilities satisfy
#' the lower level reliability requirement for every cumulative period from
#' the beginning of the test (for Multi-state RDT, Multiple Periods).
#'
#' @param pivec Failure probability for each separate period.
#' @param Rvec Lower level reliability requirements for each cumulative period
#'   from the beginning of the test.
#' @return 0 -- requirement violated in some period; 1 -- satisfied in all.
#' @examples
#' MP_Indicator(pivec = c(0.1, 0.2), Rvec = c(0.8, 0.6))
#' MP_Indicator(pivec = c(0.1, 0.2, 0.1), Rvec = c(0.8, 0.6, 0.4))
#' MP_Indicator(pivec = c(0.1, 0.3), Rvec = c(0.8, 0.7))
#' @export
######define the indicator function
MP_Indicator <- function(pivec, Rvec)
{
  # The cumulative failure probability over the first length(Rvec) periods
  # must stay within the allowed failure budget (1 - Rvec) in every period.
  cum_fail <- cumsum(pivec[seq_along(Rvec)])
  if (all(cum_fail <= 1 - Rvec)) {
    return(1)
  }
  return(0)
}
|
/OptimalRDTinR/R/MP_Indicator.R
|
permissive
|
ericchen12377/OptimalRDT_R
|
R
| false
| false
| 904
|
r
|
#' @title Binary Indicator for Multi-State RDT with Multiple Periods
#'
#' @description Checks whether the cumulative failure probabilities satisfy
#' the lower level reliability requirement for every cumulative period from
#' the beginning of the test (for Multi-state RDT, Multiple Periods).
#'
#' @param pivec Failure probability for each separate period.
#' @param Rvec Lower level reliability requirements for each cumulative period
#'   from the beginning of the test.
#' @return 0 -- requirement violated in some period; 1 -- satisfied in all.
#' @examples
#' MP_Indicator(pivec = c(0.1, 0.2), Rvec = c(0.8, 0.6))
#' MP_Indicator(pivec = c(0.1, 0.2, 0.1), Rvec = c(0.8, 0.6, 0.4))
#' MP_Indicator(pivec = c(0.1, 0.3), Rvec = c(0.8, 0.7))
#' @export
######define the indicator function
MP_Indicator <- function(pivec, Rvec)
{
  # The cumulative failure probability over the first length(Rvec) periods
  # must stay within the allowed failure budget (1 - Rvec) in every period.
  cum_fail <- cumsum(pivec[seq_along(Rvec)])
  if (all(cum_fail <= 1 - Rvec)) {
    return(1)
  }
  return(0)
}
|
# Two-panel base-graphics figure of Rh versus temperature:
# left panel = rising temperature, right panel = cooling (x axis reversed),
# six treatments overlaid as coloured points plus lowess smooths.
# NOTE(review): assumes 'Rh.xlsx' exists in the hard-coded directory below.
setwd('c:/users/user/desktop/R')
library(openxlsx)
# Columns 1-4 hold the rising-phase data, columns 5-8 the cooling-phase data.
uptemp <- read.xlsx('Rh.xlsx',colNames=T,cols=c(1,2,3,4))
downtemp <- read.xlsx('Rh.xlsx',colNames=T,cols=c(5,6,7,8))
# One colour per treatment, in the order CK, HN, LN, W, WHN, WLN.
colors <- c('Blue','Gray','green','Yellow','Orange','Red')
# Subset the rising-phase data by treatment.
uck <- uptemp[uptemp$treatment1=='CK',]
uhn <- uptemp[uptemp$treatment1=='HN',]
uln <- uptemp[uptemp$treatment1=='LN',]
uw <- uptemp[uptemp$treatment1=='W',]
uwhn <- uptemp[uptemp$treatment1=='WHN',]
uwln <- uptemp[uptemp$treatment1=='WLN',]
# Subset the cooling-phase data by treatment.
dck <- downtemp[downtemp$treatment2=='CK',]
dhn <- downtemp[downtemp$treatment2=='HN',]
dln <- downtemp[downtemp$treatment2=='LN',]
dw <- downtemp[downtemp$treatment2=='W',]
dwhn <- downtemp[downtemp$treatment2=='WHN',]
dwln <- downtemp[downtemp$treatment2=='WLN',]
#png('test.png',width=674,height=359)
# Save graphics state; left panel occupies the left ~58% of the device.
opar <- par(no.readonly=T)
par(fig=c(0,0.585,0,0.9),mar=c(4,4,2.1,0))
# Left panel: rising phase. Plot CK first to set up axes, then overlay the
# other five treatments with points() + lowess smooths.
plot(uck$temperature1,uck$Rh1,type='p',pch=20,bty='l',col='Blue',
ylab='Rh',xlab=' ',ylim=c(0,48),xlim=c(4,26),mgp=c(2,0.5,0))
lines(lowess(uck$temperature1,uck$Rh1,f=0.5),col='Blue')
points(uhn$temperature1,uhn$Rh1,pch=20,col='Gray')
lines(lowess(uhn$temperature1,uhn$Rh1,f=0.5),col='Gray')
points(uln$temperature1,uln$Rh1,pch=20,col='green')
lines(lowess(uln$temperature1,uln$Rh1,f=0.5),col='green')
points(uw$temperature1,uw$Rh1,pch=20,col='Yellow')
lines(lowess(uw$temperature1,uw$Rh1,f=0.5),col='Yellow')
points(uwhn$temperature1,uwhn$Rh1,pch=20,col='Orange')
lines(lowess(uwhn$temperature1,uwhn$Rh1,f=0.5),col='Orange')
points(uwln$temperature1,uwln$Rh1,pch=20,col='Red')
lines(lowess(uwln$temperature1,uwln$Rh1,f=0.5),col='Red')
# Annotate the rising direction with a left-to-right arrow.
arrows(x0=5,y0=45,x1=24.5,y1=45,col='black',length=0.15,angle=12)
text(15,47.5,'Rising',col='black')
# Right panel: cooling phase; note xlim is reversed (26 down to 4) and the
# panels slightly overlap (fig starts at 0.51) so they visually join.
par(fig=c(0.51,1,0,0.9),mar=c(4,0,2.1,0.5),new=T)
plot(dck$temperature2,dck$Rh2,type='p',pch=20,bty='l',col='Blue',
axes=F,xlab=' ',ylim=c(0,48),xlim=c(26,4))
lines(lowess(dck$temperature2,dck$Rh2,f=0.5),col='Blue')
# Custom x axis: first tick label is blank to avoid colliding with the left panel.
axis(1,at=c(25,20,15,10,5),labels=c(' ',20,15,10,5),mgp=c(2,0.5,0))
points(dhn$temperature2,dhn$Rh2,pch=20,col='Gray')
lines(lowess(dhn$temperature2,dhn$Rh2,f=0.5),col='Gray')
points(dln$temperature2,dln$Rh2,pch=20,col='green')
lines(lowess(dln$temperature2,dln$Rh2,f=0.5),col='green')
points(dw$temperature2,dw$Rh2,pch=20,col='Yellow')
lines(lowess(dw$temperature2,dw$Rh2,f=0.5),col='Yellow')
points(dwhn$temperature2,dwhn$Rh2,pch=20,col='Orange')
lines(lowess(dwhn$temperature2,dwhn$Rh2,f=0.5),col='Orange')
points(dwln$temperature2,dwln$Rh2,pch=20,col='Red')
lines(lowess(dwln$temperature2,dwln$Rh2,f=0.5),col='Red')
# Annotate the cooling direction with a right-to-left arrow.
arrows(x0=24.5,y0=45,x1=5,y1=45,col='black',length=0.12,angle=12)
text(15,47.5,'Cooling',col='black')
legend(x=9,y=43,legend=c('CK','HN','LN','W','WHN','WLN'),col=colors,
cex=0.6,pt.cex=1,pch=20)
# Overall title and shared x-axis label spanning both panels.
par(fig=c(0,1,0,0.9),mar=c(4,4,2.1,0.5),new=T)
title(main='xiao wang',xlab='temperature',mgp=c(2,0.5,0))
|
/rplots/R_Basic_Combo_Draw.R
|
no_license
|
1156054203/scripts
|
R
| false
| false
| 2,888
|
r
|
# Two-panel base-graphics figure of Rh versus temperature:
# left panel = rising temperature, right panel = cooling (x axis reversed),
# six treatments overlaid as coloured points plus lowess smooths.
# NOTE(review): assumes 'Rh.xlsx' exists in the hard-coded directory below.
setwd('c:/users/user/desktop/R')
library(openxlsx)
# Columns 1-4 hold the rising-phase data, columns 5-8 the cooling-phase data.
uptemp <- read.xlsx('Rh.xlsx',colNames=T,cols=c(1,2,3,4))
downtemp <- read.xlsx('Rh.xlsx',colNames=T,cols=c(5,6,7,8))
# One colour per treatment, in the order CK, HN, LN, W, WHN, WLN.
colors <- c('Blue','Gray','green','Yellow','Orange','Red')
# Subset the rising-phase data by treatment.
uck <- uptemp[uptemp$treatment1=='CK',]
uhn <- uptemp[uptemp$treatment1=='HN',]
uln <- uptemp[uptemp$treatment1=='LN',]
uw <- uptemp[uptemp$treatment1=='W',]
uwhn <- uptemp[uptemp$treatment1=='WHN',]
uwln <- uptemp[uptemp$treatment1=='WLN',]
# Subset the cooling-phase data by treatment.
dck <- downtemp[downtemp$treatment2=='CK',]
dhn <- downtemp[downtemp$treatment2=='HN',]
dln <- downtemp[downtemp$treatment2=='LN',]
dw <- downtemp[downtemp$treatment2=='W',]
dwhn <- downtemp[downtemp$treatment2=='WHN',]
dwln <- downtemp[downtemp$treatment2=='WLN',]
#png('test.png',width=674,height=359)
# Save graphics state; left panel occupies the left ~58% of the device.
opar <- par(no.readonly=T)
par(fig=c(0,0.585,0,0.9),mar=c(4,4,2.1,0))
# Left panel: rising phase. Plot CK first to set up axes, then overlay the
# other five treatments with points() + lowess smooths.
plot(uck$temperature1,uck$Rh1,type='p',pch=20,bty='l',col='Blue',
ylab='Rh',xlab=' ',ylim=c(0,48),xlim=c(4,26),mgp=c(2,0.5,0))
lines(lowess(uck$temperature1,uck$Rh1,f=0.5),col='Blue')
points(uhn$temperature1,uhn$Rh1,pch=20,col='Gray')
lines(lowess(uhn$temperature1,uhn$Rh1,f=0.5),col='Gray')
points(uln$temperature1,uln$Rh1,pch=20,col='green')
lines(lowess(uln$temperature1,uln$Rh1,f=0.5),col='green')
points(uw$temperature1,uw$Rh1,pch=20,col='Yellow')
lines(lowess(uw$temperature1,uw$Rh1,f=0.5),col='Yellow')
points(uwhn$temperature1,uwhn$Rh1,pch=20,col='Orange')
lines(lowess(uwhn$temperature1,uwhn$Rh1,f=0.5),col='Orange')
points(uwln$temperature1,uwln$Rh1,pch=20,col='Red')
lines(lowess(uwln$temperature1,uwln$Rh1,f=0.5),col='Red')
# Annotate the rising direction with a left-to-right arrow.
arrows(x0=5,y0=45,x1=24.5,y1=45,col='black',length=0.15,angle=12)
text(15,47.5,'Rising',col='black')
# Right panel: cooling phase; note xlim is reversed (26 down to 4) and the
# panels slightly overlap (fig starts at 0.51) so they visually join.
par(fig=c(0.51,1,0,0.9),mar=c(4,0,2.1,0.5),new=T)
plot(dck$temperature2,dck$Rh2,type='p',pch=20,bty='l',col='Blue',
axes=F,xlab=' ',ylim=c(0,48),xlim=c(26,4))
lines(lowess(dck$temperature2,dck$Rh2,f=0.5),col='Blue')
# Custom x axis: first tick label is blank to avoid colliding with the left panel.
axis(1,at=c(25,20,15,10,5),labels=c(' ',20,15,10,5),mgp=c(2,0.5,0))
points(dhn$temperature2,dhn$Rh2,pch=20,col='Gray')
lines(lowess(dhn$temperature2,dhn$Rh2,f=0.5),col='Gray')
points(dln$temperature2,dln$Rh2,pch=20,col='green')
lines(lowess(dln$temperature2,dln$Rh2,f=0.5),col='green')
points(dw$temperature2,dw$Rh2,pch=20,col='Yellow')
lines(lowess(dw$temperature2,dw$Rh2,f=0.5),col='Yellow')
points(dwhn$temperature2,dwhn$Rh2,pch=20,col='Orange')
lines(lowess(dwhn$temperature2,dwhn$Rh2,f=0.5),col='Orange')
points(dwln$temperature2,dwln$Rh2,pch=20,col='Red')
lines(lowess(dwln$temperature2,dwln$Rh2,f=0.5),col='Red')
# Annotate the cooling direction with a right-to-left arrow.
arrows(x0=24.5,y0=45,x1=5,y1=45,col='black',length=0.12,angle=12)
text(15,47.5,'Cooling',col='black')
legend(x=9,y=43,legend=c('CK','HN','LN','W','WHN','WLN'),col=colors,
cex=0.6,pt.cex=1,pch=20)
# Overall title and shared x-axis label spanning both panels.
par(fig=c(0,1,0,0.9),mar=c(4,4,2.1,0.5),new=T)
title(main='xiao wang',xlab='temperature',mgp=c(2,0.5,0))
|
# Coursera exercise: building bar plots with ggplot2.
# Data: CEL (congressional effectiveness) and CCES (survey) CSVs downloaded
# from Dropbox; running this script requires network access.
library(tidyverse)
cel <-
  read_csv(
    url(
      "https://www.dropbox.com/s/4ebgnkdhhxo5rac/cel_volden_wiseman%20_coursera.csv?raw=1"
    )
  )
#### Bar plot for the dem variable in the 115th Congress. 0=Republican, 1=Democrat
cel %>%
  filter(congress == 115) %>%
  ggplot(aes(x = dem)) +
  geom_bar()
### Prove the bar plot is right by comparing with a frequency table:
table(filter(cel, congress == 115)$dem)
### Use st_name instead: counts of members of Congress from each state
cel %>% filter(congress == 115) %>% ggplot(aes(x = st_name)) + geom_bar()
### Flip the figure by setting the y aesthetic rather than the x
cel %>% filter(congress == 115) %>% ggplot(aes(y = st_name)) + geom_bar()
### Recode the 0/1 dem variable into a labelled categorical variable
party <- recode(cel$dem, `1` = "Democrat", `0` = "Republican")
cel <- add_column(cel, party)
cel %>% filter(congress == 115) %>% ggplot(aes(x = party)) +
  geom_bar()
#### Now add some visual touches
### Add axis labels
cel %>% filter(congress == 115) %>% ggplot(aes(x = party)) +
  geom_bar() +
  labs(x = "Party", y = "Number of Members")
### Add colors for the two different bars
cel %>% filter(congress == 115) %>% ggplot(aes(x = party, fill = party)) +
  geom_bar() +
  labs(x = "Party", y = "Number of Members")
### Manually change the colors of the bars
cel %>% filter(congress == 115) %>% ggplot(aes(x = party, fill = party)) +
  geom_bar() +
  labs(x = "Party", y = "Number of Members") +
  scale_fill_manual(values = c("blue", "red"))
### Drop the legend with the "guides" command.
### FIX: guides(fill = FALSE) is deprecated (ggplot2 >= 3.3.4); use "none".
cel %>% filter(congress == 115) %>% ggplot(aes(x = party, fill = party)) +
  geom_bar() +
  labs(x = "Party", y = "Number of Members") +
  scale_fill_manual(values = c("blue", "red")) +
  guides(fill = "none")
##### Making more barplots and manipulating more data in R
#### Making a barplot of proportions
##### A toy demonstration: a bowl of fruit
apple <- rep("apple", 6)
orange <- rep("orange", 3)
banana <- rep("banana", 1)
### Put the fruits together in a single-column data frame
fruit_bowl <- tibble("fruits" = c(apple, orange, banana))
######## Calculate proportions instead
##### Create a table that counts fruits in a second column
fruit_bowl_summary <- fruit_bowl %>%
  group_by(fruits) %>%
  summarize("count" = n())
fruit_bowl_summary
#### Calculate proportions
fruit_bowl_summary$proportion <-
  fruit_bowl_summary$count / sum(fruit_bowl_summary$count)
fruit_bowl_summary
#### geom_bar with stat = "identity" plots the exact proportion values
ggplot(fruit_bowl_summary, aes(x = fruits, y = proportion)) +
  geom_bar(stat = "identity")
ggplot(fruit_bowl_summary, aes(x = fruits, y = proportion, fill = fruits)) +
  geom_bar(stat = "identity") +
  scale_fill_manual(values = c("red", "yellow", "orange")) +
  guides(fill = "none") +
  labs(x = "Fruits", y = "Proportion of Fruits")
#### More practice with barplots!
cces <-
  read_csv(
    url(
      "https://www.dropbox.com/s/ahmt12y39unicd2/cces_sample_coursera.csv?raw=1"
    )
  )
#### Collapse the 7-point party ID into Democrat / Independent / Republican
dem_rep <-
  recode(
    cces$pid7,
    `1` = "Democrat",
    `2` = "Democrat",
    `3` = "Democrat",
    `4` = "Independent",
    `5` = "Republican",
    `6` = "Republican",
    `7` = "Republican"
  )
table(dem_rep)
cces <- add_column(cces, dem_rep)
### Stacked bars
ggplot(cces, aes(x = region, fill = dem_rep)) +
  geom_bar()
### Grouped bars
ggplot(cces, aes(x = region, fill = dem_rep)) +
  geom_bar(position = "dodge")
## Visual touches: relabel the axes
ggplot(cces, aes(x = region, fill = dem_rep)) +
  geom_bar(position = "dodge") +
  labs(x = "Region", y = "Count")
|
/week5/2w2_1.R
|
permissive
|
haoqiwang97/2021REUDataScience
|
R
| false
| false
| 3,744
|
r
|
# Coursera exercise: building bar plots with ggplot2.
# Data: CEL (congressional effectiveness) and CCES (survey) CSVs downloaded
# from Dropbox; running this script requires network access.
library(tidyverse)
cel <-
  read_csv(
    url(
      "https://www.dropbox.com/s/4ebgnkdhhxo5rac/cel_volden_wiseman%20_coursera.csv?raw=1"
    )
  )
#### Bar plot for the dem variable in the 115th Congress. 0=Republican, 1=Democrat
cel %>%
  filter(congress == 115) %>%
  ggplot(aes(x = dem)) +
  geom_bar()
### Prove the bar plot is right by comparing with a frequency table:
table(filter(cel, congress == 115)$dem)
### Use st_name instead: counts of members of Congress from each state
cel %>% filter(congress == 115) %>% ggplot(aes(x = st_name)) + geom_bar()
### Flip the figure by setting the y aesthetic rather than the x
cel %>% filter(congress == 115) %>% ggplot(aes(y = st_name)) + geom_bar()
### Recode the 0/1 dem variable into a labelled categorical variable
party <- recode(cel$dem, `1` = "Democrat", `0` = "Republican")
cel <- add_column(cel, party)
cel %>% filter(congress == 115) %>% ggplot(aes(x = party)) +
  geom_bar()
#### Now add some visual touches
### Add axis labels
cel %>% filter(congress == 115) %>% ggplot(aes(x = party)) +
  geom_bar() +
  labs(x = "Party", y = "Number of Members")
### Add colors for the two different bars
cel %>% filter(congress == 115) %>% ggplot(aes(x = party, fill = party)) +
  geom_bar() +
  labs(x = "Party", y = "Number of Members")
### Manually change the colors of the bars
cel %>% filter(congress == 115) %>% ggplot(aes(x = party, fill = party)) +
  geom_bar() +
  labs(x = "Party", y = "Number of Members") +
  scale_fill_manual(values = c("blue", "red"))
### Drop the legend with the "guides" command.
### FIX: guides(fill = FALSE) is deprecated (ggplot2 >= 3.3.4); use "none".
cel %>% filter(congress == 115) %>% ggplot(aes(x = party, fill = party)) +
  geom_bar() +
  labs(x = "Party", y = "Number of Members") +
  scale_fill_manual(values = c("blue", "red")) +
  guides(fill = "none")
##### Making more barplots and manipulating more data in R
#### Making a barplot of proportions
##### A toy demonstration: a bowl of fruit
apple <- rep("apple", 6)
orange <- rep("orange", 3)
banana <- rep("banana", 1)
### Put the fruits together in a single-column data frame
fruit_bowl <- tibble("fruits" = c(apple, orange, banana))
######## Calculate proportions instead
##### Create a table that counts fruits in a second column
fruit_bowl_summary <- fruit_bowl %>%
  group_by(fruits) %>%
  summarize("count" = n())
fruit_bowl_summary
#### Calculate proportions
fruit_bowl_summary$proportion <-
  fruit_bowl_summary$count / sum(fruit_bowl_summary$count)
fruit_bowl_summary
#### geom_bar with stat = "identity" plots the exact proportion values
ggplot(fruit_bowl_summary, aes(x = fruits, y = proportion)) +
  geom_bar(stat = "identity")
ggplot(fruit_bowl_summary, aes(x = fruits, y = proportion, fill = fruits)) +
  geom_bar(stat = "identity") +
  scale_fill_manual(values = c("red", "yellow", "orange")) +
  guides(fill = "none") +
  labs(x = "Fruits", y = "Proportion of Fruits")
#### More practice with barplots!
cces <-
  read_csv(
    url(
      "https://www.dropbox.com/s/ahmt12y39unicd2/cces_sample_coursera.csv?raw=1"
    )
  )
#### Collapse the 7-point party ID into Democrat / Independent / Republican
dem_rep <-
  recode(
    cces$pid7,
    `1` = "Democrat",
    `2` = "Democrat",
    `3` = "Democrat",
    `4` = "Independent",
    `5` = "Republican",
    `6` = "Republican",
    `7` = "Republican"
  )
table(dem_rep)
cces <- add_column(cces, dem_rep)
### Stacked bars
ggplot(cces, aes(x = region, fill = dem_rep)) +
  geom_bar()
### Grouped bars
ggplot(cces, aes(x = region, fill = dem_rep)) +
  geom_bar(position = "dodge")
## Visual touches: relabel the axes
ggplot(cces, aes(x = region, fill = dem_rep)) +
  geom_bar(position = "dodge") +
  labs(x = "Region", y = "Count")
|
# ---- Scalar validation predicates ------------------------------------------
# Small helpers used by the argument checkers below. All are intended for
# length-1 inputs (is.annoying() guards the length before is.badval() runs).

# TRUE when x is NA, NaN or infinite (a "bad" scalar value).
is.badval <- function(x) {
  is.na(x) || is.nan(x) || is.infinite(x)
}

# TRUE when x is (numerically) a whole number, within a small tolerance.
is.inty <- function(x) {
  abs(x - round(x)) < 1e-10
}

# TRUE when x is numerically zero, within a small tolerance.
is.zero <- function(x) {
  abs(x) < 1e-10
}

# TRUE when x is strictly negative.
is.negative <- function(x) {
  x < 0
}

# TRUE when x is NOT a single usable value (wrong length, or a bad value).
is.annoying <- function(x) {
  length(x) != 1 || is.badval(x)
}

# TRUE when x is a single, non-missing character value.
is.string <- function(x) {
  is.character(x) && !is.annoying(x)
}

# TRUE when x is a single TRUE or FALSE.
is.flag <- function(x) {
  is.logical(x) && !is.annoying(x)
}

# TRUE when x is a single, strictly positive whole number.
is.posint <- function(x) {
  is.numeric(x) && !is.annoying(x) && is.inty(x) && !is.negative(x) && !is.zero(x)
}
# Validate that `x` is a single whole number >= 1; otherwise stop with an
# error naming the caller's argument (via deparse/substitute).
#
# @param x Value to validate.
# @return Invisibly TRUE on success; signals an error otherwise.
check_is_posint = function(x)
{
  if (!is.numeric(x) || is.annoying(x) || !is.inty(x) || x < 1)
  {
    nm <- deparse(substitute(x))
    # The condition enforces positivity (x >= 1), so say so in the message;
    # the original message claimed only "an integer".
    stop(paste0("argument '", nm, "' must be a positive integer"), call.=FALSE)
  }
  invisible(TRUE)
}
# ---- Collective (MPI-aware) argument checks --------------------------------
# These mirror the scalar predicates defined alongside them, but signal
# failures via pbdMPI::comm.stop() so every rank in the communicator aborts
# together. comm.stop() is now namespace-qualified in all five functions,
# for consistency with comm_check_common_matrix_dims().

# Check that `x` is a single TRUE or FALSE; abort all ranks otherwise.
comm_check_is_flag = function(x, comm)
{
  if (!is.flag(x))
  {
    nm <- deparse(substitute(x))
    pbdMPI::comm.stop(paste0("argument '", nm, "' must be TRUE or FALSE"), call.=FALSE, comm=comm)
  }
  invisible(TRUE)
}

# Check that `x` is a single whole number; abort all ranks otherwise.
comm_check_is_int = function(x, comm)
{
  if (!is.numeric(x) || is.annoying(x) || !is.inty(x))
  {
    nm <- deparse(substitute(x))
    pbdMPI::comm.stop(paste0("argument '", nm, "' must be an integer"), call.=FALSE, comm=comm)
  }
  invisible(TRUE)
}

# Check that `x` is a function; abort all ranks otherwise.
comm_check_is_function = function(x, comm)
{
  if (!is.function(x))
  {
    nm <- deparse(substitute(x))
    pbdMPI::comm.stop(paste0("argument '", nm, "' must be a function"), call.=FALSE, comm=comm)
  }
  invisible(TRUE)
}

# Check that `x` is a numeric matrix; abort all ranks otherwise.
comm_check_is_matrix = function(x, comm)
{
  if (!is.matrix(x) || !is.numeric(x))
  {
    nm <- deparse(substitute(x))
    pbdMPI::comm.stop(paste0("argument '", nm, "' must be a numeric matrix"), call.=FALSE, comm=comm)
  }
  invisible(TRUE)
}

# Verify that `x` has identical dimensions on every rank of `comm`.
# Dimensions are gathered as doubles and compared pairwise via diff().
comm_check_common_matrix_dims = function(x, comm)
{
  dims = as.double(dim(x))
  alldims = pbdMPI::allgather(dims, comm=comm)
  nrows = sapply(alldims, `[`, 1L)
  ncols = sapply(alldims, `[`, 2L)
  if (any(diff(nrows) != 0) || any(diff(ncols) != 0))
    pbdMPI::comm.stop("input matrices must have the same dimension across all processes", comm=comm)
  invisible(TRUE)
}
|
/R/01-checks.r
|
permissive
|
RBigData/cop
|
R
| false
| false
| 2,062
|
r
|
# ---- Scalar validation predicates ------------------------------------------
# Small helpers used by the argument checkers below. All are intended for
# length-1 inputs (is.annoying() guards the length before is.badval() runs).

# TRUE when x is NA, NaN or infinite (a "bad" scalar value).
is.badval <- function(x) {
  is.na(x) || is.nan(x) || is.infinite(x)
}

# TRUE when x is (numerically) a whole number, within a small tolerance.
is.inty <- function(x) {
  abs(x - round(x)) < 1e-10
}

# TRUE when x is numerically zero, within a small tolerance.
is.zero <- function(x) {
  abs(x) < 1e-10
}

# TRUE when x is strictly negative.
is.negative <- function(x) {
  x < 0
}

# TRUE when x is NOT a single usable value (wrong length, or a bad value).
is.annoying <- function(x) {
  length(x) != 1 || is.badval(x)
}

# TRUE when x is a single, non-missing character value.
is.string <- function(x) {
  is.character(x) && !is.annoying(x)
}

# TRUE when x is a single TRUE or FALSE.
is.flag <- function(x) {
  is.logical(x) && !is.annoying(x)
}

# TRUE when x is a single, strictly positive whole number.
is.posint <- function(x) {
  is.numeric(x) && !is.annoying(x) && is.inty(x) && !is.negative(x) && !is.zero(x)
}
# Validate that `x` is a single whole number >= 1; otherwise stop with an
# error naming the caller's argument (via deparse/substitute).
#
# @param x Value to validate.
# @return Invisibly TRUE on success; signals an error otherwise.
check_is_posint = function(x)
{
  if (!is.numeric(x) || is.annoying(x) || !is.inty(x) || x < 1)
  {
    nm <- deparse(substitute(x))
    # The condition enforces positivity (x >= 1), so say so in the message;
    # the original message claimed only "an integer".
    stop(paste0("argument '", nm, "' must be a positive integer"), call.=FALSE)
  }
  invisible(TRUE)
}
# ---- Collective (MPI-aware) argument checks --------------------------------
# These mirror the scalar predicates defined alongside them, but signal
# failures via pbdMPI::comm.stop() so every rank in the communicator aborts
# together. comm.stop() is now namespace-qualified in all five functions,
# for consistency with comm_check_common_matrix_dims().

# Check that `x` is a single TRUE or FALSE; abort all ranks otherwise.
comm_check_is_flag = function(x, comm)
{
  if (!is.flag(x))
  {
    nm <- deparse(substitute(x))
    pbdMPI::comm.stop(paste0("argument '", nm, "' must be TRUE or FALSE"), call.=FALSE, comm=comm)
  }
  invisible(TRUE)
}

# Check that `x` is a single whole number; abort all ranks otherwise.
comm_check_is_int = function(x, comm)
{
  if (!is.numeric(x) || is.annoying(x) || !is.inty(x))
  {
    nm <- deparse(substitute(x))
    pbdMPI::comm.stop(paste0("argument '", nm, "' must be an integer"), call.=FALSE, comm=comm)
  }
  invisible(TRUE)
}

# Check that `x` is a function; abort all ranks otherwise.
comm_check_is_function = function(x, comm)
{
  if (!is.function(x))
  {
    nm <- deparse(substitute(x))
    pbdMPI::comm.stop(paste0("argument '", nm, "' must be a function"), call.=FALSE, comm=comm)
  }
  invisible(TRUE)
}

# Check that `x` is a numeric matrix; abort all ranks otherwise.
comm_check_is_matrix = function(x, comm)
{
  if (!is.matrix(x) || !is.numeric(x))
  {
    nm <- deparse(substitute(x))
    pbdMPI::comm.stop(paste0("argument '", nm, "' must be a numeric matrix"), call.=FALSE, comm=comm)
  }
  invisible(TRUE)
}

# Verify that `x` has identical dimensions on every rank of `comm`.
# Dimensions are gathered as doubles and compared pairwise via diff().
comm_check_common_matrix_dims = function(x, comm)
{
  dims = as.double(dim(x))
  alldims = pbdMPI::allgather(dims, comm=comm)
  nrows = sapply(alldims, `[`, 1L)
  ncols = sapply(alldims, `[`, 2L)
  if (any(diff(nrows) != 0) || any(diff(ncols) != 0))
    pbdMPI::comm.stop("input matrices must have the same dimension across all processes", comm=comm)
  invisible(TRUE)
}
|
## The integers 1 through 10000 (as doubles, matching seq()'s default type).
x <- seq(1, 10000, by = 1)

## wort: square root of its argument (vectorized via sqrt()).
wort <- function(x) {
  sqrt(x)
}
|
/scripts/assignment2/assignment2.R
|
no_license
|
Vishan07/hello-world
|
R
| false
| false
| 51
|
r
|
## The integers 1 through 10000 (as doubles, matching seq()'s default type).
x <- seq(1, 10000, by = 1)

## wort: square root of its argument (vectorized via sqrt()).
wort <- function(x) {
  sqrt(x)
}
|
### =========================================================================
### makeTxDbFromBiomart()
### -------------------------------------------------------------------------
###
### For people who want to tap BioMart.
### Typical use:
### txdb <- makeTxDbFromBiomart("hsapiens_gene_ensembl")
### Speed:
### - for biomart="ENSEMBL_MART_ENSEMBL" and dataset="hsapiens_gene_ensembl":
### (1) download takes about 8 min.
### (2) db creation takes about 60-65 sec.
###
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Some helper functions to facilitate working with the biomaRt package.
###
### A thin wrapper to useMart() that checks the user-supplied arguments.
.useMart2 <- function(biomart="ENSEMBL_MART_ENSEMBL",
                      dataset="hsapiens_gene_ensembl",
                      host="www.ensembl.org",
                      port=80)
{
    ## Validate the user-supplied connection arguments and hand them to
    ## biomaRt::useMart().  'biomart' may arrive as a factor (a listMarts()
    ## column) and 'dataset' as an "AsIs" character (a listDatasets()
    ## column); both are coerced to plain character before validation.
    if (is.factor(biomart))
        biomart <- as.character(biomart)
    biomart_ok <- isSingleString(biomart) && nzchar(biomart)
    if (!biomart_ok)
        stop("'biomart' must be a single non-empty string")
    if (is(dataset, "AsIs"))
        dataset <- as.character(dataset)
    dataset_ok <- isSingleString(dataset) && nzchar(dataset)
    if (!dataset_ok)
        stop("'dataset' must be a single non-empty string")
    host_ok <- isSingleString(host) && nzchar(host)
    if (!host_ok)
        stop("'host' must be a single non-empty string")
    port_ok <- isSingleNumber(port) && port > 0
    if (!port_ok)
        stop("'port' must be a single positive number")
    useMart(biomart=biomart, dataset=dataset, host=host, port=port)
}
### TODO: Share this with normalization of 'filter' arg in the transcripts(),
### exons(), cds(), and genes() extractors.
.normarg_filter <- function(filter)
{
    ## Normalize the user-supplied 'filter' argument.
    ## NULL, "" and zero-length lists all normalize to a named empty list;
    ## anything else must be a uniquely-named list of atomic, NA-free
    ## vectors.  Errors otherwise; returns the (possibly empty) named list.
    if (is.null(filter) || identical(filter, ""))
        return(setNames(list(), character(0)))
    if (!is.list(filter))
        stop("'filter' must be a named list")
    if (length(filter) == 0L)
        return(setNames(list(), character(0)))
    filter_names <- names(filter)
    if (is.null(filter_names))
        stop("'filter' must be a named list")
    if (any(filter_names %in% c(NA, "")))
        stop("names on 'filter' cannot be NA or the empty string")
    if (anyDuplicated(filter_names))
        stop("names on 'filter' must be unique")
    ## vapply() (not sapply()) so an unexpected input cannot silently
    ## change the type of the check's result.
    if (!all(vapply(filter, is.atomic, logical(1))))
        stop("'filter' list elements must be atomic")
    if (any(vapply(filter, anyNA, logical(1))))
        stop("'filter' list elements cannot contain NAs")
    filter
}
### A thin wrapper around getBM() that takes the filters in the form of a named
### list.
.getBM2 <- function(attributes, filter=NULL, ...)
{
    ## Thin wrapper around biomaRt::getBM() that takes 'filter' as a named
    ## list (names = filter names, elements = filter values) instead of the
    ## parallel 'filters'/'values' vectors getBM() expects.  '...' is
    ## forwarded to getBM().
    filter <- .normarg_filter(filter)
    if (length(filter) == 0L) {
        ## No filtering: getBM() expects "" for both arguments.
        bm_filters <- bm_values <- ""
    } else {
        bm_filters <- names(filter)
        bm_values <- unname(filter)
        ## getBM() misinterprets zero-length list elements, so replace them
        ## with a sentinel value that cannot match anything (i.e. the
        ## corresponding filter selects no rows, as an empty value should).
        bm_values[elementNROWS(bm_values) == 0L] <- paste0(
            "____this_is_a_very_unlikely_valid_value_but_you_never_know_",
            "this_is_just_a_dirty_hack_to_work_around_getBM_",
            "misinterpretation_of_empty_list_elements_in_values____")
    }
    getBM(attributes, filters=bm_filters, values=bm_values, ...)
}
.normarg_id_prefix <- function(id_prefix)
{
    ## Check that 'id_prefix' is a single string and pass it through.
    if (isSingleString(id_prefix))
        return(id_prefix)
    stop("'id_prefix' must be a single string")
}
### Add filter created from user-supplied transcript_ids to user-specified
### filter.
.add_tx_id_filter <- function(filter, transcript_ids=NULL, id_prefix="ensembl_")
{
    ## Fold the user-supplied 'transcript_ids' into the normalized 'filter'
    ## list as a "<id_prefix>transcript_id" filter.  Specifying transcript
    ## ids through both arguments at once is an error; specifying them via
    ## 'filter' alone only draws a warning.
    filter <- .normarg_filter(filter)
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    if (is.null(transcript_ids)) {
        if (tx_name_colname %in% names(filter))
            warning(wmsg("transcript ids should be specified via the ",
                         "'transcript_ids' rather than the 'filter' argument"))
        return(filter)
    }
    ## (Typo fix: error message previously read "must ba a".)
    if (!is.character(transcript_ids))
        stop("'transcript_ids' must be a character vector")
    if (anyNA(transcript_ids))
        stop("'transcript_ids' cannot contain NAs")
    if (tx_name_colname %in% names(filter))
        stop(wmsg("transcript ids cannot be specified via the ",
                  "'transcript_ids' and 'filter' arguments ",
                  "at the same time"))
    filter[[tx_name_colname]] <- transcript_ids
    filter
}
.getBiomartDbVersion <- function(mart, host, port, biomart)
{
    ## Look up the version string of the given BioMart database by matching
    ## 'biomart' against the databases listed on the host.
    marts <- listMarts(mart=mart, host=host, port=port)
    hits <- which(as.character(marts$biomart) == biomart)
    ## This should never happen.
    if (length(hits) != 1L)
        stop("found 0 or more than 1 \"", biomart, "\" BioMart database")
    as.character(marts$version)[hits]
}
.extractEnsemblReleaseFromDbVersion <- function(db_version)
{
    ## Pull the numeric release out of a BioMart db version string such as
    ## "Ensembl Genes 99" or "Ensembl Plants Genes 47" (case-insensitive).
    pattern <- "^ensembl( plants)?( genes)? ([0-9]+).*$"
    sub(pattern, "\\3", tolower(db_version))
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'transcripts' data frame.
###
.makeBiomartTranscripts <- function(filter, mart, transcript_ids,
                                    recognized_attribs,
                                    id_prefix="ensembl_")
{
    ## Download the transcript-level attributes (group 'T') via .getBM2()
    ## and shape them into the 'transcripts' data frame expected by
    ## makeTxDb(): tx_id, tx_name, tx_type, tx_chrom, tx_strand, tx_start,
    ## tx_end.
    message("Download and preprocess the 'transcripts' data frame ... ",
            appendLF=FALSE)
    bm_result <- .getBM2(recognized_attribs[['T']], filter,
                         mart=mart, bmHeader=FALSE)
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    tx_name <- bm_result[[tx_name_colname]]
    ## Fail early if any user-requested transcript id wasn't returned.
    if (!is.null(transcript_ids)) {
        idx <- !(transcript_ids %in% tx_name)
        if (any(idx)) {
            bad_ids <- transcript_ids[idx]
            stop(wmsg("invalid transcript ids: ",
                      paste0(bad_ids, collapse=", ")))
        }
    }
    ## Those are the strictly required fields.
    transcripts0 <- data.frame(
        tx_id=integer(0),
        tx_chrom=character(0),
        tx_strand=character(0),
        tx_start=integer(0),
        tx_end=integer(0)
    )
    if (nrow(bm_result) == 0L) {
        message("OK")
        return(transcripts0)
    }
    ## Internal tx ids are just the row numbers.
    tx_id <- seq_len(nrow(bm_result))
    ##if (any(duplicated(tx_name)))
    ##    stop(wmsg("the '",
    ##              tx_name_colname,
    ##              "'transcript_id' attribute contains duplicated values"))
    if (any(duplicated(bm_result)))
        stop(wmsg("the 'transcripts' data frame obtained from biomart ",
                  "contains duplicated rows"))
    tx_type <- as.character(bm_result$transcript_biotype)
    tx_chrom <- as.character(bm_result$chromosome_name)
    ## BioMart encodes strand as 1/-1; translate to "+"/"-".
    tx_strand <- ifelse(bm_result$strand == 1, "+", "-")
    tx_start <- bm_result$transcript_start
    tx_end <- bm_result$transcript_end
    transcripts <- data.frame(
        tx_id=tx_id,
        tx_name=tx_name,
        tx_type=tx_type,
        tx_chrom=tx_chrom,
        tx_strand=tx_strand,
        tx_start=tx_start,
        tx_end=tx_end
    )
    message("OK")
    transcripts
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'chrominfo' data frame.
###
### Returns NULL if it fails to fetch the chromosome lengths from the
### remote resource.
.makeBiomartChrominfo <- function(mart, extra_seqnames=NULL,
                                  circ_seqs=DEFAULT_CIRC_SEQS, host, port)
{
    ## Fetch chromosome lengths for the mart's dataset and return a
    ## 3-column data frame (chrom, length, is_circular).  Only Ensembl and
    ## Ensembl Plants marts are supported; returns NULL for other marts or
    ## when fetching the lengths fails.
    biomart <- biomaRt:::martBM(mart)
    dataset <- biomaRt:::martDataset(mart)
    is_ensembl_mart <- tolower(substr(biomart, 1, 7)) == "ensembl"
    is_plants_mart <- tolower(substr(biomart, 1, 11)) == "plants_mart"
    if (is_ensembl_mart || is_plants_mart) {
        message("Download and preprocess the 'chrominfo' data frame ... ",
                appendLF=FALSE)
        if (is_ensembl_mart) {
            if (tolower(host) == "grch37.ensembl.org") {
                ## Ensembl GRCh37 mart
                chromlengths <- try(fetchChromLengthsFromEnsembl(dataset,
                                        use.grch37=TRUE,
                                        extra_seqnames=extra_seqnames),
                                    silent=TRUE)
            } else {
                ## Ensembl mart: the release number is needed to locate the
                ## matching FTP/MySQL resources.
                db_version <- .getBiomartDbVersion(mart, host, port, biomart)
                ensembl_release <-
                    .extractEnsemblReleaseFromDbVersion(db_version)
                chromlengths <- try(fetchChromLengthsFromEnsembl(dataset,
                                        release=ensembl_release,
                                        extra_seqnames=extra_seqnames),
                                    silent=TRUE)
            }
        } else {
            ## Plants mart
            chromlengths <- try(fetchChromLengthsFromEnsemblPlants(dataset,
                                        extra_seqnames=extra_seqnames),
                                    silent=TRUE)
        }
        ## Chrom lengths are nice-to-have; a fetch failure is not fatal.
        if (is(chromlengths, "try-error")) {
            message("FAILED! (=> skipped)")
            return(NULL)
        }
        chrominfo <- data.frame(
            chrom=chromlengths$name,
            length=chromlengths$length,
            is_circular=make_circ_flags_from_circ_seqs(chromlengths$name,
                                                       circ_seqs)
        )
        message("OK")
        return(chrominfo)
    }
    NULL
}
## User-friendly wrapper to .makeBiomartChrominfo().
getChromInfoFromBiomart <- function(biomart="ENSEMBL_MART_ENSEMBL",
                                    dataset="hsapiens_gene_ensembl",
                                    id_prefix="ensembl_",
                                    host="www.ensembl.org",
                                    port=80)
{
    ## User-facing wrapper: download the full transcripts table (to learn
    ## which chromosomes are in use), fetch their lengths, and return a
    ## 2-column data frame (chrom, length).
    mart <- .useMart2(biomart=biomart, dataset=dataset, host=host, port=port)
    id_prefix <- .normarg_id_prefix(id_prefix)
    recognized_attribs <- recognizedBiomartAttribs(id_prefix)
    transcripts <- .makeBiomartTranscripts(NULL, mart,
                                           transcript_ids=NULL,
                                           recognized_attribs,
                                           id_prefix)
    chrominfo <- .makeBiomartChrominfo(mart,
                                       extra_seqnames=transcripts$tx_chrom,
                                       host=host, port=port)
    ## Keep only the chrom and length columns (drops 'is_circular').
    ## NOTE(review): .makeBiomartChrominfo() can return NULL on fetch
    ## failure, in which case this subsetting errors -- confirm intended.
    chrominfo[ , 1:2, drop=FALSE]
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'splicings' data frame.
###
.extract_numeric_attrib <- function(bm_result, attrib)
{
    ## Extract column 'attrib' from 'bm_result', requiring numeric content.
    ## An all-NA logical column (what a fully-missing numeric attribute
    ## comes back as) is coerced to integer NAs; anything else non-numeric
    ## is a fatal data anomaly.
    col <- bm_result[[attrib]]
    if (is.numeric(col))
        return(col)
    all_missing_logical <- is.logical(col) && all(is.na(col))
    if (all_missing_logical)
        return(as.integer(col))
    stop(wmsg("BioMart fatal data anomaly: ",
              "\"", attrib, "\" attribute is not numeric"))
}
.generate_BioMart_data_anomaly_report <- function(error_type, bm_result, idx,
                                                  id_prefix, msg)
{
    ## Build a multi-part, human-readable report for the anomalous rows
    ## 'idx' of 'bm_result': part 1 is a headline ('error_type'), part 2 the
    ## caller-supplied 'msg', part 3 a printout of (at most 3 of) the
    ## affected transcripts' rows.  Returns the assembled report string.
    ## Part 3.
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    tx_name <- bm_result[[tx_name_colname]]
    first_tx_names <- unique(tx_name[idx])
    total_nb_tx <- length(first_tx_names)
    first_three_only <- total_nb_tx > 3L
    if (first_three_only)
        first_tx_names <- first_tx_names[1:3]
    bm_result <- S4Vectors:::extract_data_frame_rows(bm_result,
                                      tx_name %in% first_tx_names)
    bm_result0 <- bm_result[-match(tx_name_colname, names(bm_result))]
    f <- factor(bm_result[[tx_name_colname]], levels=first_tx_names)
    first_tx_tables <- split(bm_result0, f)
    .DETAILS_INDENT <- "    "
    ## Shrink the print width so indented tables still fit the console;
    ## restored (by symmetric addition) right after the lapply() below.
    options(width=getOption("width")-nchar(.DETAILS_INDENT))
    part3 <- lapply(seq_len(length(first_tx_tables)),
        function(i) {
            tx_table <- first_tx_tables[[i]]
            ## Present exons in rank order when a 'rank' column exists.
            if ("rank" %in% colnames(tx_table)) {
                oo <- order(tx_table[["rank"]])
                tx_table <-
                    S4Vectors:::extract_data_frame_rows(tx_table, oo)
            } else {
                rownames(tx_table) <- NULL
            }
            subtitle <- paste0("  ", i, ". Transcript ",
                               names(first_tx_tables)[i],
                               ":")
            details <- capture.output(print(tx_table))
            c(subtitle, paste0(.DETAILS_INDENT, details))
        })
    options(width=getOption("width")+nchar(.DETAILS_INDENT))
    part3 <- unlist(part3, use.names=FALSE)
    if (first_three_only)
        part3 <- c(paste("  (Showing only the first 3 out of",
                         total_nb_tx,
                         "transcripts.)"),
                   part3)
    ## Part 1.
    part1 <- paste0(error_type, ": in the following transcripts, ")
    ## Part 2.
    msg[length(msg)] <- paste0(msg[length(msg)], ".")
    part2 <- paste0("  ", msg)
    ## Assemble the parts.
    paste(c(part1, part2, part3), collapse="\n")
}
.stop_on_BioMart_data_anomaly <- function(bm_result, idx, id_prefix, msg)
{
    ## Build a detailed anomaly report and raise it as a fatal error,
    ## temporarily raising the 'warning.length' option so the (potentially
    ## very long) report is not truncated when displayed.
    msg <- .generate_BioMart_data_anomaly_report("BioMart fatal data anomaly",
                                      bm_result, idx, id_prefix, msg)
    new_length <- nchar(msg) + 5L
    ## 8170L seems to be the maximum possible value for the 'warning.length'
    ## option on my machine (R-2.15 r58124, 64-bit Ubuntu).
    if (new_length > 8170L)
        new_length <- 8170L
    if (new_length >= getOption("warning.length")) {
        old_length <- getOption("warning.length")
        ## on.exit() restores the option once stop() unwinds this frame.
        on.exit(options(warning.length=old_length))
        options(warning.length=new_length)
    }
    stop(msg)
}
.warning_on_BioMart_data_anomaly <- function(bm_result, idx, id_prefix, msg)
{
    ## Non-fatal twin of .stop_on_BioMart_data_anomaly(): build the same
    ## detailed report but emit it as a warning, temporarily raising the
    ## 'warning.length' option so the report is not truncated.
    msg <- .generate_BioMart_data_anomaly_report("BioMart data anomaly",
                                      bm_result, idx, id_prefix, msg)
    new_length <- nchar(msg) + 5L
    ## 8170L seems to be the maximum possible value for the 'warning.length'
    ## option on my machine (R-2.15 r58124, 64-bit Ubuntu).
    if (new_length > 8170L)
        new_length <- 8170L
    if (new_length >= getOption("warning.length")) {
        old_length <- getOption("warning.length")
        ## on.exit() restores the option when this frame returns.
        on.exit(options(warning.length=old_length))
        options(warning.length=new_length)
    }
    warning(msg)
}
.has_utr <- function(utr_start, utr_end, exon_start, exon_end,
                     what_utr, bm_result, id_prefix="ensembl_")
{
    ## For each exon row, tell whether it has a UTR of the given kind
    ## ('what_utr' is "5" or "3").  A UTR is absent when its start/end are
    ## NA or when start == end + 1 (the zero-width convention).  Fatal
    ## errors on inconsistent NAs or on UTRs outside the exon limits.
    is_na <- is.na(utr_start)
    if (!identical(is_na, is.na(utr_end)))
        stop(wmsg("BioMart fatal data anomaly: ",
                  "NAs in \"", what_utr, "_utr_start\" attribute don't match ",
                  "NAs in \"", what_utr, "_utr_end\" attribute"))
    idx <- which(utr_start > utr_end + 1L)
    if (length(idx) != 0L) {
        msg <- paste0("the ", what_utr, "' UTRs have a start > end + 1")
        .stop_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
    }
    idx <- which(utr_start < exon_start | exon_end < utr_end)
    if (length(idx) != 0L) {
        msg <- paste0("the ", what_utr, "' UTRs ",
                      "are not within the exon limits")
        .stop_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
    }
    ## TRUE for rows with a real (non-missing, non-zero-width) UTR.
    !(is_na | utr_start == utr_end + 1L)
}
.extract_cds_ranges_from_C1 <- function(bm_result, id_prefix="ensembl_")
{
    ## Build per-exon genomic CDS ranges straight from the C1 attribute
    ## group ("genomic_coding_start"/"genomic_coding_end").  Returns an
    ## IRanges parallel to the rows of 'bm_result'.
    cds_start <- .extract_numeric_attrib(bm_result, "genomic_coding_start")
    cds_end <- .extract_numeric_attrib(bm_result, "genomic_coding_end")
    is_na <- is.na(cds_start)
    if (!identical(is_na, is.na(cds_end)))
        stop(wmsg("BioMart fatal data anomaly: ",
                  "NAs in \"genomic_coding_start\" attribute don't match ",
                  "NAs in \"genomic_coding_end\" attribute"))
    ## Exons with no CDS get a CDS of width 0.
    no_cds_idx <- which(is_na)
    exon_start <- bm_result[["exon_chrom_start"]]
    cds_start[no_cds_idx] <- exon_start[no_cds_idx]
    cds_end[no_cds_idx] <- cds_start[no_cds_idx] - 1L
    IRanges(start=cds_start, end=cds_end)
}
### These errors in UTR representation are non fatal but trigger rejection of
### the corresponding transcripts with a warning.
## One message per 'utr_anomaly' code (1-4) assigned in
## .extract_cds_ranges_from_C2(); looked up by code in
## .warning_on_BioMart_utr_anomaly().
.BIOMART_UTR_ERROR <- c(
    "located on the + strand, \"5_utr_start\" must match \"exon_chrom_start\"",
    "located on the + strand, \"3_utr_end\" must match \"exon_chrom_end\"",
    "located on the - strand, \"3_utr_start\" must match \"exon_chrom_start\"",
    "located on the - strand, \"5_utr_end\" must match \"exon_chrom_end\""
)
.warning_on_BioMart_utr_anomaly <- function(bm_result, idx, id_prefix,
                                            utr_anomaly)
{
    ## Emit the canned warning for UTR anomaly code 'utr_anomaly' (1-4),
    ## noting that the offending transcripts were dropped.
    anomaly_text <- .BIOMART_UTR_ERROR[[utr_anomaly]]
    msg <- c(anomaly_text, " (these transcripts were dropped)")
    .warning_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
}
.extract_cds_ranges_from_C2 <- function(bm_result, id_prefix="ensembl_")
{
    ## Derive per-exon genomic CDS ranges from the C2 (UTR) attribute
    ## group: start from the exon limits and shrink them past any 5'/3'
    ## UTR.  The returned IRanges carries a 'utr_anomaly' metadata column
    ## (0 = OK, 1-4 = index into .BIOMART_UTR_ERROR); anomalous rows
    ## trigger a warning here and are dropped by the caller.
    strand <- bm_result[["strand"]]
    if (!all(strand %in% c(1, -1)))
        stop(wmsg("BioMart fatal data anomaly: ",
                  "\"strand\" attribute should be 1 or -1"))
    cds_start <- exon_start <- bm_result[["exon_chrom_start"]]
    cds_end <- exon_end <- bm_result[["exon_chrom_end"]]
    utr_anomaly <- integer(nrow(bm_result))
    utr5_start <- .extract_numeric_attrib(bm_result, "5_utr_start")
    utr5_end <- .extract_numeric_attrib(bm_result, "5_utr_end")
    utr3_start <- .extract_numeric_attrib(bm_result, "3_utr_start")
    utr3_end <- .extract_numeric_attrib(bm_result, "3_utr_end")
    has_utr5 <- .has_utr(utr5_start, utr5_end, exon_start, exon_end,
                         "5", bm_result, id_prefix)
    has_utr3 <- .has_utr(utr3_start, utr3_end, exon_start, exon_end,
                         "3", bm_result, id_prefix)
    ## In each block below, the 'utr_anomaly[bad_idx] <- code' expression
    ## doubles as both the recording of the anomaly code and the 4th
    ## argument passed to .warning_on_BioMart_utr_anomaly().
    ## + strand: a 5' UTR must start at the exon start.
    idx <- which(strand == 1 & has_utr5)
    bad_idx <- idx[utr5_start[idx] != exon_start[idx]]
    if (length(bad_idx) != 0L)
        .warning_on_BioMart_utr_anomaly(bm_result, bad_idx, id_prefix,
                                        utr_anomaly[bad_idx] <- 1L)
    cds_start[idx] <- utr5_end[idx] + 1L
    ## + strand: a 3' UTR must end at the exon end.
    idx <- which(strand == 1 & has_utr3)
    bad_idx <- idx[utr3_end[idx] != exon_end[idx]]
    if (length(bad_idx) != 0L)
        .warning_on_BioMart_utr_anomaly(bm_result, bad_idx, id_prefix,
                                        utr_anomaly[bad_idx] <- 2L)
    cds_end[idx] <- utr3_start[idx] - 1L
    ## - strand: a 3' UTR must start at the exon start.
    idx <- which(strand == -1 & has_utr3)
    bad_idx <- idx[utr3_start[idx] != exon_start[idx]]
    if (length(bad_idx) != 0L)
        .warning_on_BioMart_utr_anomaly(bm_result, bad_idx, id_prefix,
                                        utr_anomaly[bad_idx] <- 3L)
    cds_start[idx] <- utr3_end[idx] + 1L
    ## - strand: a 5' UTR must end at the exon end.
    idx <- which(strand == -1 & has_utr5)
    bad_idx <- idx[utr5_end[idx] != exon_end[idx]]
    if (length(bad_idx) != 0L)
        .warning_on_BioMart_utr_anomaly(bm_result, bad_idx, id_prefix,
                                        utr_anomaly[bad_idx] <- 4L)
    cds_end[idx] <- utr5_start[idx] - 1L
    ## Exons with no CDS get a CDS of width 0.
    cds_relative_start <- bm_result[["cds_start"]]
    no_cds_idx <- which(is.na(cds_relative_start))
    cds_end[no_cds_idx] <- cds_start[no_cds_idx] - 1L
    ans <- IRanges(start=cds_start, end=cds_end)
    mcols(ans) <- DataFrame(utr_anomaly=utr_anomaly)
    ans
}
.check_cds <- function(cds_ranges, cds_width, bm_result, id_prefix="ensembl_")
{
    ## Sanity-check the inferred genomic CDS ranges against the
    ## mRNA-relative CDS attributes: per-exon widths must match
    ## "cds_start"/"cds_end" (via 'cds_width'), and per-transcript totals
    ## must match the "cds_length" attribute when present.  Emits warnings
    ## only; called for its side effects.
    idx <- which(width(cds_ranges) != cds_width)
    if (length(idx) != 0L) {
        msg <- c("the CDS/UTR genomic coordinates are inconsistent with the ",
                 "\"cds_start\" and \"cds_end\" attributes")
        .warning_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
    }
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    tx_name <- bm_result[ , tx_name_colname]
    ## Total CDS length per transcript, expanded back to one value per row.
    cds_length2 <- sapply(split(width(cds_ranges), tx_name), sum)
    cds_length2 <- cds_length2[as.character(tx_name)]
    cds_length <- bm_result$cds_length
    if (!is.null(cds_length)) {
        idx <- which(cds_length2 != cds_length)
        if (length(idx) != 0L) {
            msg <- c("the CDS length inferred from the CDS/UTR genomic ",
                     "coordinates doesn't match the \"cds_length\" attribute")
            .warning_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
        }
    }
    ## Too many transcripts in the ensembl/hsapiens_gene_ensembl dataset don't
    ## pass the sanity check below (20256 transcripts in Ensembl release 75).
    ## This makes makeTxDbFromBiomart() a little bit too noisy so we
    ## comment this out for now.
    #idx <- which(cds_length2 %% 3L != 0L)
    #if (length(idx) != 0L) {
    #    msg <- c("the CDS length inferred from the CDS/UTR genomic ",
    #             "coordinates is not a multiple of 3")
    #    .warning_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
    #}
}
.extract_cds_ranges_from_bm_result <- function(bm_result, id_prefix="ensembl_")
{
    ## Compute one genomic CDS range per exon row of 'bm_result', using the
    ## C1 ("genomic_coding_*") and/or C2 (UTR-based) attribute groups, and
    ## cross-validate the two when both are available.  Exons with no CDS
    ## get a width-0 range.
    if (nrow(bm_result) == 0L)
        return(IRanges())
    exon_start <- bm_result[["exon_chrom_start"]]
    exon_end <- bm_result[["exon_chrom_end"]]
    if (!is.numeric(exon_start) || !is.numeric(exon_end))
        stop("BioMart data anomaly: \"exon_chrom_start\" and/or ",
             "\"exon_chrom_end\" attributes are not numeric")
    ## BE AWARE that the "cds_start" and "cds_end" attributes that we get
    ## from BioMart are the CDS coordinates relative to the coding mRNA!
    ## See IMPORTANT NOTE ABOUT GROUP D1 in findCompatibleMarts.R for more
    ## information.
    cds_relative_start <- .extract_numeric_attrib(bm_result, "cds_start")
    cds_relative_end <- .extract_numeric_attrib(bm_result, "cds_end")
    is_na <- is.na(cds_relative_start)
    if (!identical(is_na, is.na(cds_relative_end)))
        stop("BioMart data anomaly: ",
             "NAs in \"cds_start\" attribute don't match ",
             "NAs in \"cds_end\" attribute")
    no_cds_idx <- which(is_na)
    cds_width <- cds_relative_end - cds_relative_start + 1L
    cds_width[no_cds_idx] <- 0L
    C1_attribs <- recognizedBiomartAttribs(id_prefix)[["C1"]]
    has_C1_attribs <- all(C1_attribs %in% colnames(bm_result))
    C2_attribs <- recognizedBiomartAttribs(id_prefix)[["C2"]]
    has_C2_attribs <- all(C2_attribs %in% colnames(bm_result))
    if (has_C1_attribs)
        ans1 <- .extract_cds_ranges_from_C1(bm_result, id_prefix)
    if (has_C2_attribs) {
        ans2 <- .extract_cds_ranges_from_C2(bm_result, id_prefix)
        utr_anomaly <- mcols(ans2)$utr_anomaly
        tx_name_colname <- paste0(id_prefix, "transcript_id")
        tx_name <- bm_result[ , tx_name_colname]
        invalid_tx <- unique(tx_name[utr_anomaly != 0L])
        valid_tx_idx <- !(tx_name %in% invalid_tx)
        if (has_C1_attribs) {
            ## Check that 'ans1' agrees with 'ans2'.
            if (!identical(width(ans1)[valid_tx_idx],
                           width(ans2)[valid_tx_idx]))
                stop(wmsg("BioMart fatal data anomaly: ",
                          "CDS genomic coordinates are inconsistent with ",
                          "UTR genomic coordinates"))
            cds_idx <- which(valid_tx_idx & width(ans1) != 0L)
            if (!identical(start(ans1)[cds_idx], start(ans2)[cds_idx]))
                stop(wmsg("BioMart fatal data anomaly: ",
                          "CDS genomic coordinates are inconsistent with ",
                          "UTR genomic coordinates"))
        }
        ans1 <- ans2
    } else {
        ## All rows are valid when no C2 (UTR) attributes are available.
        ## BUG FIX: this was 'seq_along(nrow(bm_result))', which evaluates
        ## to 1L (nrow() is a length-1 value) and silently restricted the
        ## checks below to the first row only.
        valid_tx_idx <- seq_len(nrow(bm_result))
    }
    ## More checking of the CDS of the "valid" transcripts ("valid" here means
    ## with no UTR anomalies).
    ## (Also forward the caller's 'id_prefix' instead of the previously
    ## hard-coded "ensembl_".)
    .check_cds(ans1[valid_tx_idx], cds_width[valid_tx_idx],
               S4Vectors:::extract_data_frame_rows(bm_result, valid_tx_idx),
               id_prefix=id_prefix)
    ans1
}
.make_cds_df_from_ranges <- function(cds_ranges)
{
    ## Turn the CDS ranges into a 2-column data frame (cds_start, cds_end),
    ## mapping the width-0 "no CDS" placeholder ranges back to NAs.  A
    ## 'utr_anomaly' metadata column, when present, is carried along.
    starts <- start(cds_ranges)
    ends <- end(cds_ranges)
    empty <- which(width(cds_ranges) == 0L)
    starts[empty] <- NA_integer_
    ends[empty] <- NA_integer_
    out <- data.frame(cds_start=starts, cds_end=ends)
    anomaly <- mcols(cds_ranges)$utr_anomaly
    if (!is.null(anomaly))
        out$utr_anomaly <- anomaly
    out
}
.makeBiomartSplicings <- function(filter, mart, transcripts_tx_id,
                                  recognized_attribs, id_prefix="ensembl_")
{
    ## Download exon-level attributes and shape them into the 'splicings'
    ## data frame (tx_id, exon_rank, exon_name, exon_start, exon_end, plus
    ## CDS columns when the mart exposes the needed attribute groups).
    ## Those are the strictly required fields.
    splicings0 <- data.frame(
        tx_id=integer(0),
        exon_rank=integer(0),
        exon_start=integer(0),
        exon_end=integer(0)
    )
    if (length(transcripts_tx_id) == 0L)
        return(splicings0)
    message("Download and preprocess the 'splicings' data frame ... ",
            appendLF=FALSE)
    ## Only request the optional attribute groups this mart provides.
    available_attribs <- listAttributes(mart)$name
    has_group <- sapply(recognized_attribs[c("E2", "C1", "C2", "D1", "D2")],
                        function(attribs) all(attribs %in% available_attribs))
    get_groups <- c("E1", names(has_group)[has_group])
    attributes <- unlist(recognized_attribs[get_groups], use.names=FALSE)
    bm_result <- .getBM2(attributes, filter, mart=mart, bmHeader=FALSE)
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    tx_name <- bm_result[[tx_name_colname]]
    ## Map transcript names back to the internal ids assigned earlier.
    splicings_tx_id <- transcripts_tx_id[tx_name]
    exon_name_colname <- paste0(id_prefix, "exon_id")
    splicings <- data.frame(
        tx_id=splicings_tx_id,
        exon_rank=bm_result$rank,
        exon_name=bm_result[[exon_name_colname]],
        exon_start=bm_result$exon_chrom_start,
        exon_end=bm_result$exon_chrom_end
    )
    ## CDS columns need genomic-coding (C1) or UTR (C2) info plus D1.
    if ((has_group[['C1']] || has_group[['C2']]) && has_group[['D1']]) {
        cds_ranges <- .extract_cds_ranges_from_bm_result(bm_result, id_prefix)
        splicings <- cbind(splicings, .make_cds_df_from_ranges(cds_ranges))
    }
    message("OK")
    splicings
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'genes' data frame.
###
.makeBiomartGenes <- function(filter, mart,
                              transcripts_tx_id, recognized_attribs,
                              id_prefix="ensembl_")
{
    ## Download the gene<->transcript mapping and return it as a data frame
    ## (tx_id, gene_id), restricted to the transcripts kept upstream.
    message("Download and preprocess the 'genes' data frame ... ",
            appendLF=FALSE)
    attributes <- c(recognized_attribs[['G']],
                    paste0(id_prefix, "transcript_id"))
    bm_result <- .getBM2(attributes, filter, mart=mart, bmHeader=FALSE)
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    gene_id_colname <- paste0(id_prefix, "gene_id")
    tx_name <- bm_result[[tx_name_colname]]
    gene_id <- bm_result[[gene_id_colname]]
    ## Drop rows for transcripts that were filtered out upstream.
    keep_idx <- which(tx_name %in% names(transcripts_tx_id))
    tx_id <- transcripts_tx_id[tx_name[keep_idx]]
    gene_id <- gene_id[keep_idx]
    message("OK")
    data.frame(
        tx_id=tx_id,
        gene_id=gene_id
    )
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Prepare the 'metadata' data frame.
###
.prepareBiomartMetadata <- function(mart, is_full_dataset, host, port,
                                    taxonomyId, miRBaseBuild)
{
    ## Assemble the 2-column (name, value) 'metadata' data frame stored in
    ## the TxDb: data source, organism, taxonomy id, mart/dataset versions,
    ## whether the full dataset was used, and the miRBase build.
    message("Prepare the 'metadata' data frame ... ",
            appendLF=FALSE)
    biomart <- biomaRt:::martBM(mart)
    dataset <- biomaRt:::martDataset(mart)
    mart_url <- biomaRt:::martHost(mart)
    ## Strip the protocol prefix and any path so only the host name is kept.
    mart_url <- sub("^[^/]+//", "", mart_url)
    mart_url <- unlist(strsplit(mart_url, "/"))[1]
    db_version <- .getBiomartDbVersion(mart, host, port, biomart)
    is_plants_mart <- tolower(substr(biomart, 1, 11)) == "plants_mart"
    datasets <- listDatasets(mart)
    dataset_rowidx <- which(as.character(datasets$dataset) == dataset)
    ## This should never happen (the earlier call to useMart() would have
    ## failed in the first place).
    if (length(dataset_rowidx) != 1L)
        stop(wmsg("the BioMart database \"", biomaRt:::martBM(mart),
                  "\" has no (or more than one) \"", dataset, "\" datasets"))
    ## NOTE(review): this reads the 'dataset' column, not the 'description'
    ## column of listDatasets(), so "description" below is just the dataset
    ## name again.  Looks like it should be datasets$description -- confirm
    ## (changing it would also change what organism lookup receives).
    description <- as.character(datasets$dataset)[dataset_rowidx]
    dataset_version <- as.character(datasets$version)[dataset_rowidx]
    ensembl_release <- .extractEnsemblReleaseFromDbVersion(db_version)
    if(is_plants_mart){
        organism <- get_organism_from_Ensembl_Mart_dataset(description,
            release=ensembl_release, url=.ENSEMBL_PLANTS.CURRENT_MYSQL_URL)
    }else{
        organism <- get_organism_from_Ensembl_Mart_dataset(description,
            release=ensembl_release)
    }
    if(is.na(taxonomyId)){
        taxonomyId <- GenomeInfoDb:::.taxonomyId(organism)
    }else{
        GenomeInfoDb:::.checkForAValidTaxonomyId(taxonomyId)
    }
    ## (Typo fix: message previously read "must be a a single string".)
    if (!isSingleStringOrNA(miRBaseBuild))
        stop(wmsg("'miRBaseBuild' must be a single string or NA"))
    message("OK")
    data.frame(
        name=c("Data source",
               "Organism",
               "Taxonomy ID",
               "Resource URL",
               "BioMart database",
               "BioMart database version",
               "BioMart dataset",
               "BioMart dataset description",
               "BioMart dataset version",
               "Full dataset",
               "miRBase build ID"),
        value=c("BioMart",
                organism,
                taxonomyId,
                mart_url,
                biomart,
                db_version,
                dataset,
                description,
                dataset_version,
                ifelse(is_full_dataset, "yes", "no"),
                miRBaseBuild)
    )
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### makeTxDbFromBiomart()
###
makeTxDbFromBiomart <- function(biomart="ENSEMBL_MART_ENSEMBL",
                                dataset="hsapiens_gene_ensembl",
                                transcript_ids=NULL,
                                circ_seqs=DEFAULT_CIRC_SEQS,
                                filter=NULL,
                                id_prefix="ensembl_",
                                host="www.ensembl.org",
                                port=80,
                                taxonomyId=NA,
                                miRBaseBuild=NA)
{
    ## Top-level constructor: download transcripts, chrominfo, splicings
    ## and genes from the given BioMart dataset and assemble a TxDb.
    mart <- .useMart2(biomart=biomart, dataset=dataset, host=host, port=port)
    id_prefix <- .normarg_id_prefix(id_prefix)
    filter <- .add_tx_id_filter(filter, transcript_ids, id_prefix)
    ## Reject filter names this mart doesn't know about.
    valid_filter_names <- listFilters(mart, what="name")
    invalid_filter_names <- setdiff(names(filter), valid_filter_names)
    if (length(invalid_filter_names) != 0L) {
        in1string <- paste0(invalid_filter_names, collapse=", ")
        stop(wmsg("Invalid filter name(s): ", in1string,
                  "\n\nPlease use the listFilters() function from the ",
                  "biomaRt package to get valid filter names."))
    }
    is_full_dataset <- length(filter) == 0L
    recognized_attribs <- recognizedBiomartAttribs(id_prefix)
    transcripts <- .makeBiomartTranscripts(filter, mart,
                                           transcript_ids,
                                           recognized_attribs,
                                           id_prefix)
    ## Name the internal tx ids by transcript name for later joins.
    transcripts_tx_id <- transcripts$tx_id
    names(transcripts_tx_id) <- transcripts$tx_name
    chrominfo <- .makeBiomartChrominfo(mart,
                                       extra_seqnames=transcripts$tx_chrom,
                                       circ_seqs=circ_seqs,
                                       host, port)
    ## On a filtered run, keep only the chromosomes actually used.
    if (!is_full_dataset) {
        keep_idx <- which(chrominfo[ , "chrom"] %in% transcripts$tx_chrom)
        chrominfo <- S4Vectors:::extract_data_frame_rows(chrominfo, keep_idx)
    }
    splicings <- .makeBiomartSplicings(filter, mart,
                                       transcripts_tx_id,
                                       recognized_attribs,
                                       id_prefix=id_prefix)
    ## Drop transcripts with UTR anomalies.
    utr_anomaly <- splicings$utr_anomaly
    if (!is.null(utr_anomaly)) {
        invalid_tx <- unique(splicings[utr_anomaly != 0L, "tx_id"])
        if (length(invalid_tx) != 0L) {
            message("Drop transcripts with UTR anomalies (",
                    length(invalid_tx), " transcripts) ... ",
                    appendLF=FALSE)
            keep_idx1 <- !(transcripts$tx_id %in% invalid_tx)
            transcripts <- S4Vectors:::extract_data_frame_rows(transcripts,
                                                               keep_idx1)
            transcripts_tx_id <- transcripts_tx_id[keep_idx1]
            keep_idx2 <- !(splicings$tx_id %in% invalid_tx)
            splicings <- S4Vectors:::extract_data_frame_rows(splicings,
                                                             keep_idx2)
            message("OK")
        }
        ## The helper column is internal; not part of the TxDb schema.
        splicings$utr_anomaly <- NULL
    }
    genes <- .makeBiomartGenes(filter, mart, transcripts_tx_id,
                               recognized_attribs, id_prefix)
    metadata <- .prepareBiomartMetadata(mart, is_full_dataset,
                                        host, port, taxonomyId, miRBaseBuild)
    message("Make the TxDb object ... ", appendLF=FALSE)
    txdb <- makeTxDb(transcripts, splicings,
                     genes=genes, chrominfo=chrominfo,
                     metadata=metadata, reassign.ids=TRUE)
    message("OK")
    txdb
}
|
/R/makeTxDbFromBiomart.R
|
no_license
|
ersgupta/GenomicFeatures
|
R
| false
| false
| 33,784
|
r
|
### =========================================================================
### makeTxDbFromBiomart()
### -------------------------------------------------------------------------
###
### For people who want to tap BioMart.
### Typical use:
### txdb <- makeTxDbFromBiomart("hsapiens_gene_ensembl")
### Speed:
### - for biomart="ENSEMBL_MART_ENSEMBL" and dataset="hsapiens_gene_ensembl":
### (1) download takes about 8 min.
### (2) db creation takes about 60-65 sec.
###
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Some helper functions to facilitate working with the biomaRt package.
###
### A thin wrapper to useMart() that checks the user-supplied arguments.
.useMart2 <- function(biomart="ENSEMBL_MART_ENSEMBL",
                      dataset="hsapiens_gene_ensembl",
                      host="www.ensembl.org",
                      port=80)
{
    ## Validate the user-supplied connection arguments and hand them to
    ## biomaRt::useMart().  'biomart' may arrive as a factor (a listMarts()
    ## column) and 'dataset' as an "AsIs" character (a listDatasets()
    ## column); both are coerced to plain character before validation.
    if (is.factor(biomart))
        biomart <- as.character(biomart)
    biomart_ok <- isSingleString(biomart) && nzchar(biomart)
    if (!biomart_ok)
        stop("'biomart' must be a single non-empty string")
    if (is(dataset, "AsIs"))
        dataset <- as.character(dataset)
    dataset_ok <- isSingleString(dataset) && nzchar(dataset)
    if (!dataset_ok)
        stop("'dataset' must be a single non-empty string")
    host_ok <- isSingleString(host) && nzchar(host)
    if (!host_ok)
        stop("'host' must be a single non-empty string")
    port_ok <- isSingleNumber(port) && port > 0
    if (!port_ok)
        stop("'port' must be a single positive number")
    useMart(biomart=biomart, dataset=dataset, host=host, port=port)
}
### TODO: Share this with normalization of 'filter' arg in the transcripts(),
### exons(), cds(), and genes() extractors.
.normarg_filter <- function(filter)
{
    ## Normalize the user-supplied 'filter' argument.
    ## NULL, "" and zero-length lists all normalize to a named empty list;
    ## anything else must be a uniquely-named list of atomic, NA-free
    ## vectors.  Errors otherwise; returns the (possibly empty) named list.
    if (is.null(filter) || identical(filter, ""))
        return(setNames(list(), character(0)))
    if (!is.list(filter))
        stop("'filter' must be a named list")
    if (length(filter) == 0L)
        return(setNames(list(), character(0)))
    filter_names <- names(filter)
    if (is.null(filter_names))
        stop("'filter' must be a named list")
    if (any(filter_names %in% c(NA, "")))
        stop("names on 'filter' cannot be NA or the empty string")
    if (anyDuplicated(filter_names))
        stop("names on 'filter' must be unique")
    ## vapply() (not sapply()) so an unexpected input cannot silently
    ## change the type of the check's result.
    if (!all(vapply(filter, is.atomic, logical(1))))
        stop("'filter' list elements must be atomic")
    if (any(vapply(filter, anyNA, logical(1))))
        stop("'filter' list elements cannot contain NAs")
    filter
}
### A thin wrapper around getBM() that takes the filters in the form of a named
### list.
.getBM2 <- function(attributes, filter=NULL, ...)
{
    ## Thin wrapper around biomaRt::getBM() that takes 'filter' as a named
    ## list (names = filter names, elements = filter values) instead of the
    ## parallel 'filters'/'values' vectors getBM() expects.  '...' is
    ## forwarded to getBM().
    filter <- .normarg_filter(filter)
    if (length(filter) == 0L) {
        ## No filtering: getBM() expects "" for both arguments.
        bm_filters <- bm_values <- ""
    } else {
        bm_filters <- names(filter)
        bm_values <- unname(filter)
        ## getBM() misinterprets zero-length list elements, so replace them
        ## with a sentinel value that cannot match anything (i.e. the
        ## corresponding filter selects no rows, as an empty value should).
        bm_values[elementNROWS(bm_values) == 0L] <- paste0(
            "____this_is_a_very_unlikely_valid_value_but_you_never_know_",
            "this_is_just_a_dirty_hack_to_work_around_getBM_",
            "misinterpretation_of_empty_list_elements_in_values____")
    }
    getBM(attributes, filters=bm_filters, values=bm_values, ...)
}
.normarg_id_prefix <- function(id_prefix)
{
    ## Check that 'id_prefix' is a single string and pass it through.
    if (isSingleString(id_prefix))
        return(id_prefix)
    stop("'id_prefix' must be a single string")
}
### Add filter created from user-supplied transcript_ids to user-specified
### filter.
.add_tx_id_filter <- function(filter, transcript_ids=NULL, id_prefix="ensembl_")
{
    ## Fold the user-supplied 'transcript_ids' into the normalized 'filter'
    ## list as a "<id_prefix>transcript_id" filter.  Specifying transcript
    ## ids through both arguments at once is an error; specifying them via
    ## 'filter' alone only draws a warning.
    filter <- .normarg_filter(filter)
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    if (is.null(transcript_ids)) {
        if (tx_name_colname %in% names(filter))
            warning(wmsg("transcript ids should be specified via the ",
                         "'transcript_ids' rather than the 'filter' argument"))
        return(filter)
    }
    ## (Typo fix: error message previously read "must ba a".)
    if (!is.character(transcript_ids))
        stop("'transcript_ids' must be a character vector")
    if (anyNA(transcript_ids))
        stop("'transcript_ids' cannot contain NAs")
    if (tx_name_colname %in% names(filter))
        stop(wmsg("transcript ids cannot be specified via the ",
                  "'transcript_ids' and 'filter' arguments ",
                  "at the same time"))
    filter[[tx_name_colname]] <- transcript_ids
    filter
}
.getBiomartDbVersion <- function(mart, host, port, biomart)
{
    ## Look up the version string of the given BioMart database by matching
    ## 'biomart' against the databases listed on the host.
    marts <- listMarts(mart=mart, host=host, port=port)
    hits <- which(as.character(marts$biomart) == biomart)
    ## This should never happen.
    if (length(hits) != 1L)
        stop("found 0 or more than 1 \"", biomart, "\" BioMart database")
    as.character(marts$version)[hits]
}
.extractEnsemblReleaseFromDbVersion <- function(db_version)
{
    ## Pull the numeric release out of a BioMart db version string such as
    ## "Ensembl Genes 99" or "Ensembl Plants Genes 47" (case-insensitive).
    pattern <- "^ensembl( plants)?( genes)? ([0-9]+).*$"
    sub(pattern, "\\3", tolower(db_version))
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'transcripts' data frame.
###
### Build the 'transcripts' data frame expected by makeTxDb().
### 'filter' is a named list of BioMart filters; 'transcript_ids', when not
### NULL, is validated against the ids actually returned by BioMart.
### Returns a 0-row data frame with only the strictly required columns when
### the BioMart query yields no transcripts.
.makeBiomartTranscripts <- function(filter, mart, transcript_ids,
                                    recognized_attribs,
                                    id_prefix="ensembl_")
{
    message("Download and preprocess the 'transcripts' data frame ... ",
            appendLF=FALSE)
    ## Group 'T' holds the transcript-level attributes.
    bm_result <- .getBM2(recognized_attribs[['T']], filter,
                         mart=mart, bmHeader=FALSE)
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    tx_name <- bm_result[[tx_name_colname]]
    ## Fail early if the user asked for transcript ids that BioMart did not
    ## return.
    if (!is.null(transcript_ids)) {
        idx <- !(transcript_ids %in% tx_name)
        if (any(idx)) {
            bad_ids <- transcript_ids[idx]
            stop(wmsg("invalid transcript ids: ",
                      paste0(bad_ids, collapse=", ")))
        }
    }
    ## Those are the strictly required fields.
    transcripts0 <- data.frame(
        tx_id=integer(0),
        tx_chrom=character(0),
        tx_strand=character(0),
        tx_start=integer(0),
        tx_end=integer(0)
    )
    if (nrow(bm_result) == 0L) {
        message("OK")
        return(transcripts0)
    }
    ## Internal transcript ids are simply the row numbers of the result.
    tx_id <- seq_len(nrow(bm_result))
    ##if (any(duplicated(tx_name)))
    ##    stop(wmsg("the '",
    ##              tx_name_colname,
    ##              "'transcript_id' attribute contains duplicated values"))
    if (any(duplicated(bm_result)))
        stop(wmsg("the 'transcripts' data frame obtained from biomart ",
                  "contains duplicated rows"))
    tx_type <- as.character(bm_result$transcript_biotype)
    tx_chrom <- as.character(bm_result$chromosome_name)
    ## BioMart encodes the strand as 1 / -1.
    tx_strand <- ifelse(bm_result$strand == 1, "+", "-")
    tx_start <- bm_result$transcript_start
    tx_end <- bm_result$transcript_end
    transcripts <- data.frame(
        tx_id=tx_id,
        tx_name=tx_name,
        tx_type=tx_type,
        tx_chrom=tx_chrom,
        tx_strand=tx_strand,
        tx_start=tx_start,
        tx_end=tx_end
    )
    message("OK")
    transcripts
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'chrominfo' data frame.
###
### Returns NULL if it fails to fetch the chromosome lengths from the
### remote resource.
### Build the 'chrominfo' data frame (chrom name, length, circularity flag)
### for Ensembl and Ensembl Plants marts. Returns NULL for any other mart,
### or when fetching the chromosome lengths from the remote resource fails.
.makeBiomartChrominfo <- function(mart, extra_seqnames=NULL,
                                  circ_seqs=DEFAULT_CIRC_SEQS, host, port)
{
    biomart <- biomaRt:::martBM(mart)
    dataset <- biomaRt:::martDataset(mart)
    is_ensembl_mart <- tolower(substr(biomart, 1, 7)) == "ensembl"
    is_plants_mart <- tolower(substr(biomart, 1, 11)) == "plants_mart"
    if (is_ensembl_mart || is_plants_mart) {
        message("Download and preprocess the 'chrominfo' data frame ... ",
                appendLF=FALSE)
        if (is_ensembl_mart) {
            if (tolower(host) == "grch37.ensembl.org") {
                ## Ensembl GRCh37 mart
                chromlengths <- try(fetchChromLengthsFromEnsembl(dataset,
                                        use.grch37=TRUE,
                                        extra_seqnames=extra_seqnames),
                                    silent=TRUE)
            } else {
                ## Ensembl mart
                db_version <- .getBiomartDbVersion(mart, host, port, biomart)
                ensembl_release <-
                    .extractEnsemblReleaseFromDbVersion(db_version)
                chromlengths <- try(fetchChromLengthsFromEnsembl(dataset,
                                        release=ensembl_release,
                                        extra_seqnames=extra_seqnames),
                                    silent=TRUE)
            }
        } else {
            ## Plants mart
            chromlengths <- try(fetchChromLengthsFromEnsemblPlants(dataset,
                                    extra_seqnames=extra_seqnames),
                                silent=TRUE)
        }
        ## Failing to reach the remote resource is not fatal: report it and
        ## let the caller proceed without chromosome lengths.
        if (is(chromlengths, "try-error")) {
            message("FAILED! (=> skipped)")
            return(NULL)
        }
        chrominfo <- data.frame(
            chrom=chromlengths$name,
            length=chromlengths$length,
            is_circular=make_circ_flags_from_circ_seqs(chromlengths$name,
                                                       circ_seqs)
        )
        message("OK")
        return(chrominfo)
    }
    ## Not an Ensembl or Ensembl Plants mart: no chrominfo available.
    NULL
}
## User-friendly wrapper to .makeBiomartChrominfo().
## Returns the 2-col (chrom, length) data frame for the given dataset.
getChromInfoFromBiomart <- function(biomart="ENSEMBL_MART_ENSEMBL",
                                    dataset="hsapiens_gene_ensembl",
                                    id_prefix="ensembl_",
                                    host="www.ensembl.org",
                                    port=80)
{
    mart <- .useMart2(biomart=biomart, dataset=dataset, host=host, port=port)
    id_prefix <- .normarg_id_prefix(id_prefix)
    recognized_attribs <- recognizedBiomartAttribs(id_prefix)
    transcripts <- .makeBiomartTranscripts(NULL, mart,
                                           transcript_ids=NULL,
                                           recognized_attribs,
                                           id_prefix)
    chrominfo <- .makeBiomartChrominfo(mart,
                                       extra_seqnames=transcripts$tx_chrom,
                                       host=host, port=port)
    ## .makeBiomartChrominfo() returns NULL when it fails to fetch the
    ## chromosome lengths; subsetting NULL would raise a cryptic "incorrect
    ## number of dimensions" error, so fail with a clear message instead.
    if (is.null(chrominfo))
        stop(wmsg("failed to fetch the chromosome info for dataset \"",
                  dataset, "\" from BioMart"))
    chrominfo[ , 1:2, drop=FALSE]
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'splicings' data frame.
###
### Extract attribute 'attrib' from 'bm_result' as a numeric vector. An
### all-NA logical column (what getBM() returns for an all-missing numeric
### attribute) is coerced to integer; anything else non-numeric is a fatal
### data anomaly.
.extract_numeric_attrib <- function(bm_result, attrib)
{
    col <- bm_result[[attrib]]
    if (is.numeric(col))
        return(col)
    if (is.logical(col) && all(is.na(col)))
        return(as.integer(col))
    stop(wmsg("BioMart fatal data anomaly: ",
              "\"", attrib, "\" attribute is not numeric"))
}
### Assemble a multi-line human-readable report about a data anomaly in
### 'bm_result'. 'idx' selects the offending rows; at most the first 3
### affected transcripts are shown in detail. The report has 3 parts:
### a headline (part 1), the caller-supplied message (part 2), and the
### per-transcript tables (part 3).
.generate_BioMart_data_anomaly_report <- function(error_type, bm_result, idx,
                                                  id_prefix, msg)
{
    ## Part 3.
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    tx_name <- bm_result[[tx_name_colname]]
    first_tx_names <- unique(tx_name[idx])
    total_nb_tx <- length(first_tx_names)
    first_three_only <- total_nb_tx > 3L
    if (first_three_only)
        first_tx_names <- first_tx_names[1:3]
    bm_result <- S4Vectors:::extract_data_frame_rows(bm_result,
                                     tx_name %in% first_tx_names)
    bm_result0 <- bm_result[-match(tx_name_colname, names(bm_result))]
    f <- factor(bm_result[[tx_name_colname]], levels=first_tx_names)
    first_tx_tables <- split(bm_result0, f)
    .DETAILS_INDENT <- "    "
    ## Temporarily shrink the terminal width so the captured print() output
    ## still fits once .DETAILS_INDENT is prepended; the width is restored
    ## by the symmetric options() call below. NOTE(review): not protected by
    ## on.exit(), so an error inside the lapply() leaves 'width' shrunk.
    options(width=getOption("width")-nchar(.DETAILS_INDENT))
    part3 <- lapply(seq_len(length(first_tx_tables)),
        function(i) {
            tx_table <- first_tx_tables[[i]]
            ## Display exon rows in rank order when a "rank" column exists.
            if ("rank" %in% colnames(tx_table)) {
                oo <- order(tx_table[["rank"]])
                tx_table <-
                    S4Vectors:::extract_data_frame_rows(tx_table, oo)
            } else {
                rownames(tx_table) <- NULL
            }
            subtitle <- paste0("  ", i, ". Transcript ",
                               names(first_tx_tables)[i],
                               ":")
            details <- capture.output(print(tx_table))
            c(subtitle, paste0(.DETAILS_INDENT, details))
        })
    options(width=getOption("width")+nchar(.DETAILS_INDENT))
    part3 <- unlist(part3, use.names=FALSE)
    if (first_three_only)
        part3 <- c(paste("  (Showing only the first 3 out of",
                         total_nb_tx,
                         "transcripts.)"),
                   part3)
    ## Part 1.
    part1 <- paste0(error_type, ": in the following transcripts, ")
    ## Part 2: terminate the caller-supplied message with a period.
    msg[length(msg)] <- paste0(msg[length(msg)], ".")
    part2 <- paste0("  ", msg)
    ## Assemble the parts.
    paste(c(part1, part2, part3), collapse="\n")
}
### Raise an error carrying a full data anomaly report. 'warning.length' is
### temporarily bumped (and restored via on.exit()) so the report does not
### get truncated by R.
.stop_on_BioMart_data_anomaly <- function(bm_result, idx, id_prefix, msg)
{
    report <- .generate_BioMart_data_anomaly_report(
                  "BioMart fatal data anomaly",
                  bm_result, idx, id_prefix, msg)
    ## 8170L seems to be the maximum possible value for the 'warning.length'
    ## option on my machine (R-2.15 r58124, 64-bit Ubuntu).
    wanted_length <- min(nchar(report) + 5L, 8170L)
    if (wanted_length >= getOption("warning.length")) {
        old_length <- getOption("warning.length")
        on.exit(options(warning.length=old_length))
        options(warning.length=wanted_length)
    }
    stop(report)
}
### Emit a warning carrying a full data anomaly report. 'warning.length' is
### temporarily bumped (and restored via on.exit()) so the report does not
### get truncated by R.
.warning_on_BioMart_data_anomaly <- function(bm_result, idx, id_prefix, msg)
{
    report <- .generate_BioMart_data_anomaly_report(
                  "BioMart data anomaly",
                  bm_result, idx, id_prefix, msg)
    ## 8170L seems to be the maximum possible value for the 'warning.length'
    ## option on my machine (R-2.15 r58124, 64-bit Ubuntu).
    wanted_length <- min(nchar(report) + 5L, 8170L)
    if (wanted_length >= getOption("warning.length")) {
        old_length <- getOption("warning.length")
        on.exit(options(warning.length=old_length))
        options(warning.length=wanted_length)
    }
    warning(report)
}
### Determine, for each exon row, whether it has a UTR of the kind selected
### by 'what_utr' ("5" or "3"). 'utr_start'/'utr_end' and
### 'exon_start'/'exon_end' are parallel vectors. NA UTR coordinates and
### zero-width UTRs (start == end + 1) count as "no UTR". Inconsistent or
### out-of-bound UTR coordinates are fatal data anomalies.
.has_utr <- function(utr_start, utr_end, exon_start, exon_end,
                     what_utr, bm_result, id_prefix="ensembl_")
{
    is_na <- is.na(utr_start)
    if (!identical(is_na, is.na(utr_end)))
        stop(wmsg("BioMart fatal data anomaly: ",
             "NAs in \"", what_utr, "_utr_start\" attribute don't match ",
             "NAs in \"", what_utr, "_utr_end\" attribute"))
    idx <- which(utr_start > utr_end + 1L)
    if (length(idx) != 0L) {
        msg <- paste0("the ", what_utr, "' UTRs have a start > end + 1")
        .stop_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
    }
    ## A UTR must lie entirely within its exon.
    idx <- which(utr_start < exon_start | exon_end < utr_end)
    if (length(idx) != 0L) {
        msg <- paste0("the ", what_utr, "' UTRs ",
                      "are not within the exon limits")
        .stop_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
    }
    !(is_na | utr_start == utr_end + 1L)
}
### Extract genomic CDS ranges from the 'C1' group of attributes
### ("genomic_coding_start" / "genomic_coding_end"). Exons with no CDS get
### a zero-width range anchored at the exon start.
.extract_cds_ranges_from_C1 <- function(bm_result, id_prefix="ensembl_")
{
    cds_start <- .extract_numeric_attrib(bm_result, "genomic_coding_start")
    cds_end <- .extract_numeric_attrib(bm_result, "genomic_coding_end")
    is_na <- is.na(cds_start)
    if (!identical(is_na, is.na(cds_end)))
        stop(wmsg("BioMart fatal data anomaly: ",
                  "NAs in \"genomic_coding_start\" attribute don't match ",
                  "NAs in \"genomic_coding_end\" attribute"))
    ## Exons with no CDS get a CDS of width 0.
    no_cds_idx <- which(is_na)
    exon_start <- bm_result[["exon_chrom_start"]]
    cds_start[no_cds_idx] <- exon_start[no_cds_idx]
    cds_end[no_cds_idx] <- cds_start[no_cds_idx] - 1L
    IRanges(start=cds_start, end=cds_end)
}
### These errors in UTR representation are non fatal but trigger rejection of
### the corresponding transcripts with a warning.
### The vector is indexed by the 'utr_anomaly' code (1-4) assigned in
### .extract_cds_ranges_from_C2() below.
.BIOMART_UTR_ERROR <- c(
    "located on the + strand, \"5_utr_start\" must match \"exon_chrom_start\"",
    "located on the + strand, \"3_utr_end\" must match \"exon_chrom_end\"",
    "located on the - strand, \"3_utr_start\" must match \"exon_chrom_start\"",
    "located on the - strand, \"5_utr_end\" must match \"exon_chrom_end\""
)
### Emit a warning for one of the 4 known UTR anomalies ('utr_anomaly' is
### the 1-4 index into .BIOMART_UTR_ERROR above); the offending transcripts
### are dropped by the caller.
.warning_on_BioMart_utr_anomaly <- function(bm_result, idx, id_prefix,
                                            utr_anomaly)
{
    msg <- c(.BIOMART_UTR_ERROR[[utr_anomaly]],
             " (these transcripts were dropped)")
    .warning_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
}
### Infer genomic CDS ranges from the 'C2' group of attributes (the UTR
### genomic coordinates): the CDS of an exon is the exon minus its UTRs.
### Returns an IRanges with a 'utr_anomaly' metadata column (0 = clean,
### 1-4 = index into .BIOMART_UTR_ERROR above).
.extract_cds_ranges_from_C2 <- function(bm_result, id_prefix="ensembl_")
{
    strand <- bm_result[["strand"]]
    if (!all(strand %in% c(1, -1)))
        stop(wmsg("BioMart fatal data anomaly: ",
                  "\"strand\" attribute should be 1 or -1"))
    ## Start from the exon limits and shrink them below as UTRs are found.
    cds_start <- exon_start <- bm_result[["exon_chrom_start"]]
    cds_end <- exon_end <- bm_result[["exon_chrom_end"]]
    utr_anomaly <- integer(nrow(bm_result))
    utr5_start <- .extract_numeric_attrib(bm_result, "5_utr_start")
    utr5_end <- .extract_numeric_attrib(bm_result, "5_utr_end")
    utr3_start <- .extract_numeric_attrib(bm_result, "3_utr_start")
    utr3_end <- .extract_numeric_attrib(bm_result, "3_utr_end")
    has_utr5 <- .has_utr(utr5_start, utr5_end, exon_start, exon_end,
                         "5", bm_result, id_prefix)
    has_utr3 <- .has_utr(utr3_start, utr3_end, exon_start, exon_end,
                         "3", bm_result, id_prefix)
    ## In each of the 4 strand/UTR cases below, the expression
    ## 'utr_anomaly[bad_idx] <- <code>' is deliberately passed as an
    ## argument: it records the anomaly code in this frame AND evaluates
    ## to <code> for the callee.
    ## + strand: the 5' UTR must start at the exon start.
    idx <- which(strand == 1 & has_utr5)
    bad_idx <- idx[utr5_start[idx] != exon_start[idx]]
    if (length(bad_idx) != 0L)
        .warning_on_BioMart_utr_anomaly(bm_result, bad_idx, id_prefix,
                                        utr_anomaly[bad_idx] <- 1L)
    cds_start[idx] <- utr5_end[idx] + 1L
    ## + strand: the 3' UTR must end at the exon end.
    idx <- which(strand == 1 & has_utr3)
    bad_idx <- idx[utr3_end[idx] != exon_end[idx]]
    if (length(bad_idx) != 0L)
        .warning_on_BioMart_utr_anomaly(bm_result, bad_idx, id_prefix,
                                        utr_anomaly[bad_idx] <- 2L)
    cds_end[idx] <- utr3_start[idx] - 1L
    ## - strand: the 3' UTR must start at the exon start.
    idx <- which(strand == -1 & has_utr3)
    bad_idx <- idx[utr3_start[idx] != exon_start[idx]]
    if (length(bad_idx) != 0L)
        .warning_on_BioMart_utr_anomaly(bm_result, bad_idx, id_prefix,
                                        utr_anomaly[bad_idx] <- 3L)
    cds_start[idx] <- utr3_end[idx] + 1L
    ## - strand: the 5' UTR must end at the exon end.
    idx <- which(strand == -1 & has_utr5)
    bad_idx <- idx[utr5_end[idx] != exon_end[idx]]
    if (length(bad_idx) != 0L)
        .warning_on_BioMart_utr_anomaly(bm_result, bad_idx, id_prefix,
                                        utr_anomaly[bad_idx] <- 4L)
    cds_end[idx] <- utr5_start[idx] - 1L
    ## Exons with no CDS get a CDS of width 0.
    cds_relative_start <- bm_result[["cds_start"]]
    no_cds_idx <- which(is.na(cds_relative_start))
    cds_end[no_cds_idx] <- cds_start[no_cds_idx] - 1L
    ans <- IRanges(start=cds_start, end=cds_end)
    mcols(ans) <- DataFrame(utr_anomaly=utr_anomaly)
    ans
}
### Sanity-check the genomic CDS ranges against the per-row 'cds_width'
### (derived from the relative "cds_start"/"cds_end" attributes) and,
### when present, against the "cds_length" attribute. Anomalies only
### trigger warnings.
.check_cds <- function(cds_ranges, cds_width, bm_result, id_prefix="ensembl_")
{
    idx <- which(width(cds_ranges) != cds_width)
    if (length(idx) != 0L) {
        msg <- c("the CDS/UTR genomic coordinates are inconsistent with the ",
                 "\"cds_start\" and \"cds_end\" attributes")
        .warning_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
    }
    ## Per-transcript CDS length inferred by summing the per-exon widths,
    ## then expanded back to one value per row.
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    tx_name <- bm_result[ , tx_name_colname]
    cds_length2 <- sapply(split(width(cds_ranges), tx_name), sum)
    cds_length2 <- cds_length2[as.character(tx_name)]
    cds_length <- bm_result$cds_length
    if (!is.null(cds_length)) {
        idx <- which(cds_length2 != cds_length)
        if (length(idx) != 0L) {
            msg <- c("the CDS length inferred from the CDS/UTR genomic ",
                     "coordinates doesn't match the \"cds_length\" attribute")
            .warning_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
        }
    }
    ## Too many transcripts in the ensembl/hsapiens_gene_ensembl dataset don't
    ## pass the sanity check below (20256 transcripts in Ensembl release 75).
    ## This makes makeTxDbFromBiomart() a little bit too noisy so we
    ## comment this out for now.
    #idx <- which(cds_length2 %% 3L != 0L)
    #if (length(idx) != 0L) {
    #    msg <- c("the CDS length inferred from the CDS/UTR genomic ",
    #             "coordinates is not a multiple of 3")
    #    .warning_on_BioMart_data_anomaly(bm_result, idx, id_prefix, msg)
    #}
}
### Extract the genomic CDS ranges from 'bm_result', using the C1 attributes
### ("genomic_coding_start"/"genomic_coding_end") and/or the C2 attributes
### (UTR genomic coordinates). When both groups are present, C1 is used to
### cross-validate C2 and the C2-derived ranges (which carry the
### 'utr_anomaly' metadata column) are returned.
### NOTE(review): at least one of the 2 groups must be present (guaranteed
### by .makeBiomartSplicings()); otherwise 'ans1' would be undefined.
.extract_cds_ranges_from_bm_result <- function(bm_result, id_prefix="ensembl_")
{
    if (nrow(bm_result) == 0L)
        return(IRanges())
    exon_start <- bm_result[["exon_chrom_start"]]
    exon_end <- bm_result[["exon_chrom_end"]]
    if (!is.numeric(exon_start) || !is.numeric(exon_end))
        stop("BioMart data anomaly: \"exon_chrom_start\" and/or ",
             "\"exon_chrom_end\" attributes are not numeric")
    ## BE AWARE that the "cds_start" and "cds_end" attributes that we get
    ## from BioMart are the CDS coordinates relative to the coding mRNA!
    ## See IMPORTANT NOTE ABOUT GROUP D1 in findCompatibleMarts.R for more
    ## information.
    cds_relative_start <- .extract_numeric_attrib(bm_result, "cds_start")
    cds_relative_end <- .extract_numeric_attrib(bm_result, "cds_end")
    is_na <- is.na(cds_relative_start)
    if (!identical(is_na, is.na(cds_relative_end)))
        stop("BioMart data anomaly: ",
             "NAs in \"cds_start\" attribute don't match ",
             "NAs in \"cds_end\" attribute")
    no_cds_idx <- which(is_na)
    cds_width <- cds_relative_end - cds_relative_start + 1L
    cds_width[no_cds_idx] <- 0L
    C1_attribs <- recognizedBiomartAttribs(id_prefix)[["C1"]]
    has_C1_attribs <- all(C1_attribs %in% colnames(bm_result))
    C2_attribs <- recognizedBiomartAttribs(id_prefix)[["C2"]]
    has_C2_attribs <- all(C2_attribs %in% colnames(bm_result))
    if (has_C1_attribs)
        ans1 <- .extract_cds_ranges_from_C1(bm_result, id_prefix)
    if (has_C2_attribs) {
        ans2 <- .extract_cds_ranges_from_C2(bm_result, id_prefix)
        utr_anomaly <- mcols(ans2)$utr_anomaly
        tx_name_colname <- paste0(id_prefix, "transcript_id")
        tx_name <- bm_result[ , tx_name_colname]
        ## Transcripts with at least one UTR anomaly are excluded from the
        ## cross-validation and from the final checks.
        invalid_tx <- unique(tx_name[utr_anomaly != 0L])
        valid_tx_idx <- !(tx_name %in% invalid_tx)
        if (has_C1_attribs) {
            ## Check that 'ans1' agrees with 'ans2'.
            if (!identical(width(ans1)[valid_tx_idx],
                           width(ans2)[valid_tx_idx]))
                stop(wmsg("BioMart fatal data anomaly: ",
                          "CDS genomic coordinates are inconsistent with ",
                          "UTR genomic coordinates"))
            cds_idx <- which(valid_tx_idx & width(ans1) != 0L)
            if (!identical(start(ans1)[cds_idx], start(ans2)[cds_idx]))
                stop(wmsg("BioMart fatal data anomaly: ",
                          "CDS genomic coordinates are inconsistent with ",
                          "UTR genomic coordinates"))
        }
        ans1 <- ans2
    } else {
        ## All rows are valid when no UTR information is available.
        ## (Fix: this was 'seq_along(nrow(bm_result))', which evaluates to 1
        ## because 'nrow(bm_result)' has length 1, so only the first row was
        ## being checked.)
        valid_tx_idx <- seq_len(nrow(bm_result))
    }
    ## More checking of the CDS of the "valid" transcripts ("valid" here means
    ## with no UTR anomalies).
    ## (Fix: 'id_prefix' was hard-coded to "ensembl_" here, ignoring the
    ## caller-supplied prefix.)
    .check_cds(ans1[valid_tx_idx], cds_width[valid_tx_idx],
               S4Vectors:::extract_data_frame_rows(bm_result, valid_tx_idx),
               id_prefix=id_prefix)
    ans1
}
### Turn the genomic CDS ranges into a data frame with 'cds_start'/'cds_end'
### columns (plus 'utr_anomaly' when present as a metadata column), suitable
### for cbind()'ing to the 'splicings' data frame. Zero-width ranges (exons
### with no CDS) become NA start/end.
.make_cds_df_from_ranges <- function(cds_ranges)
{
    no_cds_idx <- which(width(cds_ranges) == 0L)
    cds_start <- start(cds_ranges)
    cds_start[no_cds_idx] <- NA_integer_
    cds_end <- end(cds_ranges)
    cds_end[no_cds_idx] <- NA_integer_
    ans <- data.frame(cds_start=cds_start, cds_end=cds_end)
    utr_anomaly <- mcols(cds_ranges)$utr_anomaly
    if (!is.null(utr_anomaly))
        ans$utr_anomaly <- utr_anomaly
    ans
}
### Build the 'splicings' data frame (one row per exon per transcript).
### Only the attribute groups actually available on this mart are requested;
### CDS columns are appended only when the required groups (C1 or C2, plus
### D1) are available.
.makeBiomartSplicings <- function(filter, mart, transcripts_tx_id,
                                  recognized_attribs, id_prefix="ensembl_")
{
    ## Those are the strictly required fields.
    splicings0 <- data.frame(
        tx_id=integer(0),
        exon_rank=integer(0),
        exon_start=integer(0),
        exon_end=integer(0)
    )
    if (length(transcripts_tx_id) == 0L)
        return(splicings0)
    message("Download and preprocess the 'splicings' data frame ... ",
            appendLF=FALSE)
    ## Determine which optional attribute groups this mart supports; group
    ## E1 is always requested.
    available_attribs <- listAttributes(mart)$name
    has_group <- sapply(recognized_attribs[c("E2", "C1", "C2", "D1", "D2")],
                        function(attribs) all(attribs %in% available_attribs))
    get_groups <- c("E1", names(has_group)[has_group])
    attributes <- unlist(recognized_attribs[get_groups], use.names=FALSE)
    bm_result <- .getBM2(attributes, filter, mart=mart, bmHeader=FALSE)
    ## Map each row back to the internal transcript id via the transcript
    ## name.
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    tx_name <- bm_result[[tx_name_colname]]
    splicings_tx_id <- transcripts_tx_id[tx_name]
    exon_name_colname <- paste0(id_prefix, "exon_id")
    splicings <- data.frame(
        tx_id=splicings_tx_id,
        exon_rank=bm_result$rank,
        exon_name=bm_result[[exon_name_colname]],
        exon_start=bm_result$exon_chrom_start,
        exon_end=bm_result$exon_chrom_end
    )
    if ((has_group[['C1']] || has_group[['C2']]) && has_group[['D1']]) {
        cds_ranges <- .extract_cds_ranges_from_bm_result(bm_result, id_prefix)
        splicings <- cbind(splicings, .make_cds_df_from_ranges(cds_ranges))
    }
    message("OK")
    splicings
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'genes' data frame.
###
### Build the 'genes' data frame mapping internal transcript ids to gene
### ids. 'transcripts_tx_id' is the named (by transcript name) vector of
### internal transcript ids built in makeTxDbFromBiomart().
.makeBiomartGenes <- function(filter, mart,
                              transcripts_tx_id, recognized_attribs,
                              id_prefix="ensembl_")
{
    message("Download and preprocess the 'genes' data frame ... ",
            appendLF=FALSE)
    ## Gene-level attributes plus the transcript id, needed to join genes
    ## to transcripts.
    attributes <- c(recognized_attribs[['G']],
                    paste0(id_prefix, "transcript_id"))
    bm_result <- .getBM2(attributes, filter, mart=mart, bmHeader=FALSE)
    tx_name_colname <- paste0(id_prefix, "transcript_id")
    gene_id_colname <- paste0(id_prefix, "gene_id")
    tx_name <- bm_result[[tx_name_colname]]
    gene_id <- bm_result[[gene_id_colname]]
    ## Keep only rows for transcripts retained in the 'transcripts' data
    ## frame (some may have been dropped because of UTR anomalies).
    keep_idx <- which(tx_name %in% names(transcripts_tx_id))
    tx_id <- transcripts_tx_id[tx_name[keep_idx]]
    gene_id <- gene_id[keep_idx]
    message("OK")
    data.frame(
        tx_id=tx_id,
        gene_id=gene_id
    )
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Prepare the 'metadata' data frame.
###
### Assemble the 2-col (name/value) 'metadata' data frame recorded in the
### TxDb object.
### Fix: the 'miRBaseBuild' error message contained a duplicated article
### ("must be a a single string").
.prepareBiomartMetadata <- function(mart, is_full_dataset, host, port,
                                    taxonomyId, miRBaseBuild)
{
    message("Prepare the 'metadata' data frame ... ",
            appendLF=FALSE)
    biomart <- biomaRt:::martBM(mart)
    dataset <- biomaRt:::martDataset(mart)
    ## Strip the protocol prefix and any path from the mart host.
    mart_url <- biomaRt:::martHost(mart)
    mart_url <- sub("^[^/]+//", "", mart_url)
    mart_url <- unlist(strsplit(mart_url, "/"))[1]
    db_version <- .getBiomartDbVersion(mart, host, port, biomart)
    is_plants_mart <- tolower(substr(biomart, 1, 11)) == "plants_mart"
    datasets <- listDatasets(mart)
    dataset_rowidx <- which(as.character(datasets$dataset) == dataset)
    ## This should never happen (the earlier call to useMart() would have
    ## failed in the first place).
    if (length(dataset_rowidx) != 1L)
        stop(wmsg("the BioMart database \"", biomaRt:::martBM(mart),
                  "\" has no (or more than one) \"", dataset, "\" datasets"))
    ## NOTE(review): 'description' is taken from the 'dataset' column, not
    ## from the 'description' column of listDatasets() -- confirm this is
    ## intentional.
    description <- as.character(datasets$dataset)[dataset_rowidx]
    dataset_version <- as.character(datasets$version)[dataset_rowidx]
    ensembl_release <- .extractEnsemblReleaseFromDbVersion(db_version)
    if (is_plants_mart) {
        organism <- get_organism_from_Ensembl_Mart_dataset(description,
                        release=ensembl_release,
                        url=.ENSEMBL_PLANTS.CURRENT_MYSQL_URL)
    } else {
        organism <- get_organism_from_Ensembl_Mart_dataset(description,
                        release=ensembl_release)
    }
    ## Infer the taxonomy id from the organism unless one was supplied.
    if (is.na(taxonomyId)) {
        taxonomyId <- GenomeInfoDb:::.taxonomyId(organism)
    } else {
        GenomeInfoDb:::.checkForAValidTaxonomyId(taxonomyId)
    }
    if (!isSingleStringOrNA(miRBaseBuild))
        stop(wmsg("'miRBaseBuild' must be a single string or NA"))
    message("OK")
    data.frame(
        name=c("Data source",
               "Organism",
               "Taxonomy ID",
               "Resource URL",
               "BioMart database",
               "BioMart database version",
               "BioMart dataset",
               "BioMart dataset description",
               "BioMart dataset version",
               "Full dataset",
               "miRBase build ID"),
        value=c("BioMart",
                organism,
                taxonomyId,
                mart_url,
                biomart,
                db_version,
                dataset,
                description,
                dataset_version,
                ifelse(is_full_dataset, "yes", "no"),
                miRBaseBuild)
    )
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### makeTxDbFromBiomart()
###
### Make a TxDb object from the annotations available on a BioMart
### database. Downloads and preprocesses the 'transcripts', 'chrominfo',
### 'splicings', 'genes', and 'metadata' data frames, then hands them to
### makeTxDb().
makeTxDbFromBiomart <- function(biomart="ENSEMBL_MART_ENSEMBL",
                                dataset="hsapiens_gene_ensembl",
                                transcript_ids=NULL,
                                circ_seqs=DEFAULT_CIRC_SEQS,
                                filter=NULL,
                                id_prefix="ensembl_",
                                host="www.ensembl.org",
                                port=80,
                                taxonomyId=NA,
                                miRBaseBuild=NA)
{
    mart <- .useMart2(biomart=biomart, dataset=dataset, host=host, port=port)
    id_prefix <- .normarg_id_prefix(id_prefix)
    ## Fold 'transcript_ids' into 'filter' and validate the filter names.
    filter <- .add_tx_id_filter(filter, transcript_ids, id_prefix)
    valid_filter_names <- listFilters(mart, what="name")
    invalid_filter_names <- setdiff(names(filter), valid_filter_names)
    if (length(invalid_filter_names) != 0L) {
        in1string <- paste0(invalid_filter_names, collapse=", ")
        stop(wmsg("Invalid filter name(s): ", in1string,
                  "\n\nPlease use the listFilters() function from the ",
                  "biomaRt package to get valid filter names."))
    }
    is_full_dataset <- length(filter) == 0L
    recognized_attribs <- recognizedBiomartAttribs(id_prefix)
    transcripts <- .makeBiomartTranscripts(filter, mart,
                                           transcript_ids,
                                           recognized_attribs,
                                           id_prefix)
    ## Named map from transcript name to internal transcript id, used by
    ## the 'splicings' and 'genes' builders below.
    transcripts_tx_id <- transcripts$tx_id
    names(transcripts_tx_id) <- transcripts$tx_name
    chrominfo <- .makeBiomartChrominfo(mart,
                                       extra_seqnames=transcripts$tx_chrom,
                                       circ_seqs=circ_seqs,
                                       host, port)
    if (!is_full_dataset) {
        ## Restrict the chrominfo to the chromosomes actually in use.
        keep_idx <- which(chrominfo[ , "chrom"] %in% transcripts$tx_chrom)
        chrominfo <- S4Vectors:::extract_data_frame_rows(chrominfo, keep_idx)
    }
    splicings <- .makeBiomartSplicings(filter, mart,
                                       transcripts_tx_id,
                                       recognized_attribs,
                                       id_prefix=id_prefix)
    ## Drop transcripts with UTR anomalies.
    utr_anomaly <- splicings$utr_anomaly
    if (!is.null(utr_anomaly)) {
        invalid_tx <- unique(splicings[utr_anomaly != 0L, "tx_id"])
        if (length(invalid_tx) != 0L) {
            message("Drop transcripts with UTR anomalies (",
                    length(invalid_tx), " transcripts) ... ",
                    appendLF=FALSE)
            keep_idx1 <- !(transcripts$tx_id %in% invalid_tx)
            transcripts <- S4Vectors:::extract_data_frame_rows(transcripts,
                                                               keep_idx1)
            transcripts_tx_id <- transcripts_tx_id[keep_idx1]
            keep_idx2 <- !(splicings$tx_id %in% invalid_tx)
            splicings <- S4Vectors:::extract_data_frame_rows(splicings,
                                                             keep_idx2)
            message("OK")
        }
        ## The anomaly column is internal bookkeeping; don't leak it into
        ## the TxDb.
        splicings$utr_anomaly <- NULL
    }
    genes <- .makeBiomartGenes(filter, mart, transcripts_tx_id,
                               recognized_attribs, id_prefix)
    metadata <- .prepareBiomartMetadata(mart, is_full_dataset,
                                        host, port, taxonomyId, miRBaseBuild)
    message("Make the TxDb object ... ", appendLF=FALSE)
    txdb <- makeTxDb(transcripts, splicings,
                     genes=genes, chrominfo=chrominfo,
                     metadata=metadata, reassign.ids=TRUE)
    message("OK")
    txdb
}
|
# Return the name(s) of the column(s) of 'x' whose value distribution
# deviates most from uniform, as measured by a chi-squared goodness-of-fit
# test (smallest p-value). Ties return all tied column names, matching the
# original behavior.
#
# Fixes: '1:length(colnames(x))' replaced by seq_len(ncol(x)) (safe for
# 0-column input), and the p-value vector is preallocated via vapply()
# instead of being grown with c() inside the loop.
most_significant <- function(x){
  p_value <- vapply(seq_len(ncol(x)), function(i) {
    chisq.test(table(factor(x[, i])))$p.value
  }, numeric(1))
  min_position <- which(p_value == min(p_value))
  colnames(x)[min_position]
}
|
/most_significant.R
|
no_license
|
anastasiiakhil/Statistics
|
R
| false
| false
| 247
|
r
|
# Return the name(s) of the column(s) of 'x' whose value distribution
# deviates most from uniform, as measured by a chi-squared goodness-of-fit
# test (smallest p-value). Ties return all tied column names, matching the
# original behavior.
#
# Fixes: '1:length(colnames(x))' replaced by seq_len(ncol(x)) (safe for
# 0-column input), and the p-value vector is preallocated via vapply()
# instead of being grown with c() inside the loop.
most_significant <- function(x){
  p_value <- vapply(seq_len(ncol(x)), function(i) {
    chisq.test(table(factor(x[, i])))$p.value
  }, numeric(1))
  min_position <- which(p_value == min(p_value))
  colnames(x)[min_position]
}
|
#' Internal API for query objects.
#'
#' @param x `repository` object or an object to be turned into a `query`.
#' @rdname query-internal
new_query <- function (x) {
  stopifnot(is_object_store(x))
  ## A query is a plain list tagged with the 'query' S3 class; filters and
  ## ordering start out empty and the type starts out as 'raw'.
  query <- list(
    store   = x,
    filter  = list(),
    arrange = list(),
    top_n   = NULL,
    type    = 'raw'
  )
  structure(query, class = 'query')
}
#' @param type make `x` be of type `type`.
#' @rdname query-internal
set_type <- function (x, type) {
  ## Overwrite the query's type tag and hand the modified object back.
  x[['type']] <- type
  x
}
# TODO a general query man page mentioning all categories of functions + specific pages (query, read, etc.)
#' Query the repository of artifacts.
#'
#' @param x Object to be tested (`is_query()`, `is_artifacts()`, `is_commits()`),
#' printed, cast as `query` (`as_query()`, `as_artifacts()`,
#'   `as_commits()`) or queried (verbs).
#'
#' @rdname query
#' @name query
NULL
#' @description `as_query` creates a general `query`.
#' @export
#' @rdname query
as_query <- function (x) {
  ## Already a query: nothing to do.
  if (is_query(x)) return(x)
  ## Wrap a bare object store, or the store held by a repository.
  if (is_object_store(x)) return(new_query(x))
  if (is_repository(x)) return(new_query(x$store))
  abort(glue("cannot coerce class '{first(class(x))}' to query"))
}
#' @description `reset_query` drops all filters and sets type to `"raw"`.
#' @export
#' @rdname query
reset_query <- function (x) {
  stopifnot(is_query(x))
  ## Coercing the underlying store back to a query yields a fresh query
  ## with no filters and the default "raw" type.
  as_query(x[['store']])
}
#' @return `TRUE` if `x` inherits from `"query"`.
#'
#' @export
#' @rdname query
is_query <- function (x) inherits(x, 'query')
# TRUE for a query whose type tag is still the default 'raw' (i.e. not yet
# narrowed by set_type()).
is_raw <- function (x) is_query(x) && identical(x$type, 'raw')
#' @param indent string prepended to each line.
#' @inheritDotParams base::format
#'
#' @importFrom rlang expr_deparse get_expr
#' @export
#' @rdname query
format.query <- function (x, indent = '  ', ...) {
  # Render a list of quosures as comma-separated deparsed expressions.
  quos_text <- function (x) {
    join(map_chr(x, function (f) expr_deparse(get_expr(f))), ', ')
  }
  # describe the source repo
  lines <- new_vector()
  lines$push_back(paste0('<repository:', toString(x$store), '>'))
  # print the full query
  for (part in c('select', 'filter', 'arrange', 'top_n', 'summarise')) {
    if (length(x[[part]])) {
      lines$push_back(paste0(part, '(', quos_text(x[[part]]), ')'))
    }
  }
  # One dplyr-like verb per line, chained with %>%.
  lines <- stri_paste(indent, lines$data())
  join(lines, ' %>%\n  ')
}
#' @export
#' @rdname query
print.query <- function (x, ...) {
  # Print the formatted query and return the object invisibly, as print
  # methods conventionally do.
  cat0(format(x), '\n')
  invisible(x)
}
#' @param .data `query` object.
#'
#' @name query
#' @rdname query
NULL
#' @importFrom rlang quos
#' @export
#' @rdname query
filter.query <- function (.data, ...) {
  ## Capture the filtering expressions lazily (as quosures) and append them
  ## to the ones already stored on the query.
  .data$filter <- c(.data$filter, quos(...))
  .data
}
#' @importFrom rlang quos quo
#' @export
#' @rdname query
arrange.query <- function (.data, ...) {
  # Capture ordering expressions as quosures; evaluation is deferred until
  # the query is executed.
  dots <- quos(...)
  .data$arrange <- c(.data$arrange, dots)
  .data
}
#' @inheritParams top_n
#'
#' @importFrom rlang quos quo abort
#' @export
#' @rdname query
top_n.query <- function (.data, n, wt) {
  if (!missing(wt)) {
    abort("wt not yet supported in top_n")
  }
  ## isTRUE() (rather than the original isFALSE(n > 0)) also rejects NA and
  ## vectors of length > 1, which previously slipped through because
  ## `NA > 0` is NA and isFALSE(NA) is FALSE. The message also now matches
  ## the condition: n must be strictly positive, not merely non-negative.
  if (missing(n) || !is.numeric(n) || !isTRUE(n > 0)) {
    abort("n has to be a positive number")
  }
  .data$top_n <- n
  .data
}
#' @export
#' @rdname query
#' @importFrom tibble tibble
summarise.query <- function (.data, ...) {
  # Capture the summary expressions; only the n() summary is supported for
  # now (see only_n_summary() below).
  expr <- quos(...)
  if (!length(expr)) {
    abort("empty summary not supported")
  }
  if (!is_all_named(expr)) {
    abort("all summary expressions need to be named")
  }
  if (!only_n_summary(expr)) {
    abort("only the n() summary is supported")
  }
  # Count the matching artifacts and return a 1-row tibble named after the
  # user-supplied expression name.
  n <- length(match_ids(.data))
  with_names(tibble(n), names(expr))
}
# A stop-gap function: check if the only summary is n() and if so, returns TRUE.
# If there is no summary at all, returns FALSE.
# If there's an unsupported summary, throws an exception.
# NOTE(review): despite the comment above, this implementation does not
# throw; it returns FALSE for unsupported summaries and the caller aborts.
#' @importFrom rlang abort quo_squash
only_n_summary <- function (expr) {
  if (!length(expr)) return(FALSE)
  # TRUE per expression iff it is exactly the call n().
  i <- map_lgl(expr, function (s) {
    e <- quo_squash(s)
    is.call(e) && identical(e, quote(n()))
  })
  all(i)
}
# --- old code ---------------------------------------------------------
# Update tags on every artifact matched by the query. Named expressions set
# a tag to the evaluated value; unnamed expressions must be append()/remove()
# calls handled by update_tag_values() below.
#' @importFrom rlang abort caller_env expr_text eval_tidy quos quo_get_expr
#' @rdname query
update <- function (.data, ...) {
  stopifnot(is_query(.data))
  # Tag updates are incompatible with select/summarise/arrange/top_n.
  stopif(length(.data$select), length(.data$summarise), length(.data$arrange), length(.data$top_n))
  quos <- quos(...)
  e <- caller_env()
  ids <- match_ids(.data)
  lapply(ids, function (id) {
    tags <- storage::os_read_tags(.data$store, as_id(id))
    newt <- unlist(lapply(seq_along(quos), function (i) {
      n <- nth(names(quos), i)
      q <- nth(quos, i)
      if (nchar(n)) {
        # Named: evaluate against the current tags in the caller's scope.
        return(with_names(list(eval_tidy(q, tags, e)), n))
      }
      # Unnamed: append()/remove() on an existing tag.
      update_tag_values(quo_get_expr(q), tags)
    }), recursive = FALSE)
    # combine() merges the new values with the existing tags before the
    # write-back.
    storage::os_update_tags(.data$store, as_id(id), combine(newt, tags))
  })
}
# --- internal ---------------------------------------------------------
# Evaluate an unnamed `append(tag, value)` / `remove(tag, value)` expression
# against the current tags and return the tag's new value as a named
# one-element list.
#' @importFrom rlang quo
update_tag_values <- function (expr, tags) {
  what <- nth(expr, 1)
  stopifnot(identical(what, quote(append)) || identical(what, quote(remove)))
  where <- as.character(nth(expr, 2))
  # Start from an empty tag when it does not exist yet.
  if (!has_name(tags, where)) tags[[where]] <- character()
  # Evaluate the call in an environment where append/remove are defined as
  # set operations (union/setdiff).
  e <- new.env(parent = emptyenv())
  e$append <- function (where, what) union(where, what)
  e$remove <- function (where, what) setdiff(where, what)
  with_names(list(eval_tidy(expr, tags, e)), where)
}
|
/R/query.R
|
permissive
|
lbartnik/repository
|
R
| false
| false
| 5,446
|
r
|
#' Internal API for query objects.
#'
#' @param x `repository` object or an object to be turned into a `query`.
#' @rdname query-internal
new_query <- function (x) {
  stopifnot(is_object_store(x))
  ## A query is a plain list tagged with the 'query' S3 class; filters and
  ## ordering start out empty and the type starts out as 'raw'.
  query <- list(
    store   = x,
    filter  = list(),
    arrange = list(),
    top_n   = NULL,
    type    = 'raw'
  )
  structure(query, class = 'query')
}
#' @param type make `x` be of type `type`.
#' @rdname query-internal
set_type <- function (x, type) {
  # Overwrite the query's type tag and hand the modified object back.
  x[['type']] <- type
  x
}
# TODO a general query man page mentioning all categories of functions + specific pages (query, read, etc.)
#' Query the repository of artifacts.
#'
#' @param x Object to be tested (`is_query()`, `is_artifacts()`, `is_commits()`),
#' printed, cast as `query` (`as_query()`, `as_artifacts()`,
#' `as_commits()`) or querried (verbs).
#'
#' @rdname query
#' @name query
NULL
#' @description `as_query` creates a general `query`.
#'
#' Queries pass through unchanged; an object store or a repository is
#' wrapped in a fresh query; anything else is an error.
#' @export
#' @rdname query
as_query <- function (x) {
  # Already a query: no coercion needed.
  if (is_query(x)) return(x)
  # Both a bare object store and a repository carry enough to start fresh.
  if (is_object_store(x)) return(new_query(x))
  if (is_repository(x)) return(new_query(x$store))
  abort(glue("cannot coerce class '{first(class(x))}' to query"))
}
#' @description `reset_query` drops all filters and sets type to `"raw"`.
#'
#' Implemented by rebuilding the query from its underlying store, which
#' discards every filter/arrange/top_n clause in one step.
#' @export
#' @rdname query
reset_query <- function (x) {
  stopifnot(is_query(x))
  as_query(x$store)
}
#' @return `TRUE` if `x` inherits from `"query"`.
#'
#' @export
#' @rdname query
is_query <- function (x) {
  inherits(x, 'query')
}

# A query is "raw" until set_type() retargets it; the is_query() guard
# runs first so non-query inputs short-circuit to FALSE.
is_raw <- function (x) {
  is_query(x) && identical(x$type, 'raw')
}
#' @param indent string prepended to each line.
#' @inheritDotParams base::format
#'
#' @importFrom rlang expr_deparse get_expr
#' @export
#' @rdname query
format.query <- function (x, indent = '  ', ...) {
  # Render a list of quosures as "expr1, expr2, ...".
  quos_text <- function (x) {
    join(map_chr(x, function (f) expr_deparse(get_expr(f))), ', ')
  }
  # describe the source repo
  lines <- new_vector()
  lines$push_back(paste0('<repository:', toString(x$store), '>'))
  # print the full query
  # Each non-empty clause becomes one pipeline step, in fixed order.
  # (select/summarise are listed even though new_query() does not
  # initialise them; length(NULL) == 0 so absent clauses are skipped.)
  for (part in c('select', 'filter', 'arrange', 'top_n', 'summarise')) {
    if (length(x[[part]])) {
      lines$push_back(paste0(part, '(', quos_text(x[[part]]), ')'))
    }
  }
  # Indent every line and join into a single %>%-style pipeline string.
  lines <- stri_paste(indent, lines$data())
  join(lines, ' %>%\n  ')
}
#' @export
#' @rdname query
print.query <- function (x, ...) {
  # Write the formatted pipeline to the console, then return the object
  # invisibly, as print methods conventionally do.
  rendered <- format(x)
  cat0(rendered, '\n')
  invisible(x)
}
#' @param .data `query` object.
#'
#' @name query
#' @rdname query
NULL
#' @importFrom rlang quos
#' @export
#' @rdname query
filter.query <- function (.data, ...) {
  # Capture the filter expressions lazily (as quosures) and stack them
  # onto any filters already attached to the query.
  .data$filter <- c(.data$filter, quos(...))
  .data
}
#' @importFrom rlang quos quo
#' @export
#' @rdname query
arrange.query <- function (.data, ...) {
  # Capture the ordering expressions lazily (as quosures) and append
  # them to the query's existing arrange clauses.
  .data$arrange <- c(.data$arrange, quos(...))
  .data
}
#' @inheritParams top_n
#'
#' @importFrom rlang quos quo abort
#' @export
#' @rdname query
top_n.query <- function (.data, n, wt) {
  if (!missing(wt)) {
    abort("wt not yet supported in top_n")
  }
  # n must be a single, non-NA, strictly positive number. The previous
  # isFALSE(n > 0) check silently accepted NA and length > 1 vectors,
  # and the error message wrongly said "non-negative" although 0 was
  # rejected.
  if (missing(n) || !is.numeric(n) || length(n) != 1L || is.na(n) || n <= 0) {
    abort("n has to be a single positive number")
  }
  .data$top_n <- n
  .data
}
#' @export
#' @rdname query
#' @importFrom tibble tibble
summarise.query <- function (.data, ...) {
  # Compute summaries over the artifacts matched by the query. Only the
  # n() (count) summary is currently implemented; every summary must be
  # named, and the result is a one-row tibble carrying those names.
  expr <- quos(...)
  if (!length(expr)) {
    abort("empty summary not supported")
  }
  if (!is_all_named(expr)) {
    abort("all summary expressions need to be named")
  }
  if (!only_n_summary(expr)) {
    abort("only the n() summary is supported")
  }
  # n() == number of artifact ids matched by the query.
  n <- length(match_ids(.data))
  # NOTE(review): with more than one named n() summary, tibble(n) has a
  # single column while names(expr) has several -- confirm with_names()
  # handles that case.
  with_names(tibble(n), names(expr))
}
# Stop-gap helper for summarise.query(): returns TRUE when every summary
# expression is exactly the call n(), and FALSE when the list is empty or
# contains any other (unsupported) summary. It does not raise errors
# itself; the caller decides how to report unsupported summaries.
#' @importFrom rlang abort quo_squash
only_n_summary <- function (expr) {
  if (!length(expr)) {
    return(FALSE)
  }
  is_n_call <- vapply(expr, function (s) {
    squashed <- quo_squash(s)
    is.call(squashed) && identical(squashed, quote(n()))
  }, logical(1))
  all(is_n_call)
}
# --- old code ---------------------------------------------------------
# Updates tags on every artifact matched by the query `.data`.
#
# Each `...` argument is either (a) a named expression, evaluated against
# the artifact's current tags with the caller's environment as fallback
# (the result is stored under that name), or (b) an unnamed
# `append(tag, values)` / `remove(tag, values)` call, delegated to
# update_tag_values(). Only a plain query is accepted: select, summarise,
# arrange and top_n must all be unset.
#' @importFrom rlang abort caller_env expr_text eval_tidy quos quo_get_expr
#' @rdname query
update <- function (.data, ...) {
  stopifnot(is_query(.data))
  # Refuse to update through a query that narrows, aggregates or reorders.
  stopif(length(.data$select), length(.data$summarise), length(.data$arrange), length(.data$top_n))
  quos <- quos(...)  # NOTE(review): local `quos` shadows rlang::quos from here on
  e <- caller_env()  # named expressions are evaluated in the caller's frame
  ids <- match_ids(.data)
  lapply(ids, function (id) {
    # Current tags act as the data mask for eval_tidy() below.
    tags <- storage::os_read_tags(.data$store, as_id(id))
    newt <- unlist(lapply(seq_along(quos), function (i) {
      n <- nth(names(quos), i)
      q <- nth(quos, i)
      if (nchar(n)) {
        # Named argument: its value becomes the new tag under name `n`.
        return(with_names(list(eval_tidy(q, tags, e)), n))
      }
      # Unnamed argument: must be an append()/remove() tag edit.
      update_tag_values(quo_get_expr(q), tags)
    }), recursive = FALSE)
    # New values are merged with the existing tags; presumably combine()
    # gives precedence to `newt` -- confirm against its definition.
    storage::os_update_tags(.data$store, as_id(id), combine(newt, tags))
  })
}
# --- internal ---------------------------------------------------------
#' @importFrom rlang quo
# Applies a single unnamed tag edit of the form `append(tag, values)` or
# `remove(tag, values)` against the current `tags` of one artifact.
#
# Returns a one-element named list: the name is the tag being edited and
# the value is its new contents. `append` unions the new values into the
# tag, `remove` set-differences them out. Any other call form is an error.
update_tag_values <- function (expr, tags) {
  what <- nth(expr, 1)  # the function symbol of the call: append or remove
  stopifnot(identical(what, quote(append)) || identical(what, quote(remove)))
  where <- as.character(nth(expr, 2))  # the tag name being edited
  # Make sure the tag exists so union()/setdiff() see a character vector.
  if (!has_name(tags, where)) tags[[where]] <- character()
  # Evaluate the call in a minimal environment where append/remove have
  # set semantics; `tags` supplies the tag's current values as a data mask.
  e <- new.env(parent = emptyenv())
  e$append <- function (where, what) union(where, what)
  e$remove <- function (where, what) setdiff(where, what)
  with_names(list(eval_tidy(expr, tags, e)), where)
}
|
###########################################################################/**
# @set class=CBS
# @RdocMethod getSmoothLocusData
#
# @title "Gets smoothed locus-level data"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{fit}{An @see "CBS" object.}
# \item{by}{A @numeric scalar specifying the bin size.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @data.frame where the
# first three columns are 'chromosome', 'x' (position),
#   and 'count' (number of loci averaged over for the given bin),
# and the remaining ones are the smoothed locus-level data.
# }
#
# @author "HB"
#
# \seealso{
# @seeclass
# }
#
# @keyword internal
#*/###########################################################################
setMethodS3("getSmoothLocusData", "CBS", function(fit, by, ...) {
  # Smooths the locus-level data of 'fit' in non-overlapping bins of
  # width 'by' (same units as position 'x'), separately per chromosome.
  # Returns a data.frame with columns 'chromosome', 'x' (bin centers),
  # 'count' (number of loci averaged per bin) and 'y' (bin means; NA for
  # empty bins).

  # Argument 'by':
  by <- Arguments$getNumeric(by, range=c(0,Inf));

  chromosomes <- getChromosomes(fit);
  data <- getLocusData(fit);

  chromosome <- NULL; rm(list="chromosome"); # To please R CMD check

  # Collect per-chromosome results in a preallocated list and bind once
  # at the end, instead of growing the result with rbind() inside the
  # loop (which copies the accumulated data.frame on every iteration).
  dataSList <- vector("list", length=length(chromosomes));
  for (kk in seq_along(chromosomes)) {
    chr <- chromosomes[kk];
    dataT <- subset(data, chromosome == chr);
    x <- dataT$x;
    y <- dataT$y;
    rx <- range(x, na.rm=TRUE);
    # Bin boundaries span the observed positions; each bin center is the
    # midpoint of its interval.
    bx <- seq(from=rx[1], to=rx[2], by=by);
    xS <- bx[-1] - by/2;
    yS <- binMeans(y=y, x=x, bx=bx);
    count <- attr(yS, "count");
    yS[count == 0L] <- NA_real_;  # empty bins have no meaningful mean
    attr(yS, "count") <- NULL;
    dataSList[[kk]] <- data.frame(chromosome=chr, x=xS, count=count, y=yS);
  } # for (kk ...)
  dataS <- do.call(rbind, dataSList);

  dataS;
}, protected=TRUE) # getSmoothLocusData()
############################################################################
# HISTORY:
# 2013-10-09
# o Now getSmoothLocusData() for CBS also returns column 'count'.
# 2013-04-18
# o Added getSmoothLocusData() for CBS.
# o Created.
############################################################################
|
/PSCBS/R/CBS.SMOOTH.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,914
|
r
|
###########################################################################/**
# @set class=CBS
# @RdocMethod getSmoothLocusData
#
# @title "Gets smoothed locus-level data"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{fit}{An @see "CBS" object.}
# \item{by}{A @numeric scalar specifying the bin size.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @data.frame where the
# first three columns are 'chromosome', 'x' (position),
#   and 'count' (number of loci averaged over for the given bin),
# and the remaining ones are the smoothed locus-level data.
# }
#
# @author "HB"
#
# \seealso{
# @seeclass
# }
#
# @keyword internal
#*/###########################################################################
setMethodS3("getSmoothLocusData", "CBS", function(fit, by, ...) {
  # Smooths the locus-level data of 'fit' in non-overlapping bins of
  # width 'by' (same units as position 'x'), separately per chromosome.
  # Returns a data.frame with columns 'chromosome', 'x' (bin centers),
  # 'count' (number of loci averaged per bin) and 'y' (bin means; NA for
  # empty bins).

  # Argument 'by':
  by <- Arguments$getNumeric(by, range=c(0,Inf));

  chromosomes <- getChromosomes(fit);
  data <- getLocusData(fit);

  chromosome <- NULL; rm(list="chromosome"); # To please R CMD check

  # Collect per-chromosome results in a preallocated list and bind once
  # at the end, instead of growing the result with rbind() inside the
  # loop (which copies the accumulated data.frame on every iteration).
  dataSList <- vector("list", length=length(chromosomes));
  for (kk in seq_along(chromosomes)) {
    chr <- chromosomes[kk];
    dataT <- subset(data, chromosome == chr);
    x <- dataT$x;
    y <- dataT$y;
    rx <- range(x, na.rm=TRUE);
    # Bin boundaries span the observed positions; each bin center is the
    # midpoint of its interval.
    bx <- seq(from=rx[1], to=rx[2], by=by);
    xS <- bx[-1] - by/2;
    yS <- binMeans(y=y, x=x, bx=bx);
    count <- attr(yS, "count");
    yS[count == 0L] <- NA_real_;  # empty bins have no meaningful mean
    attr(yS, "count") <- NULL;
    dataSList[[kk]] <- data.frame(chromosome=chr, x=xS, count=count, y=yS);
  } # for (kk ...)
  dataS <- do.call(rbind, dataSList);

  dataS;
}, protected=TRUE) # getSmoothLocusData()
############################################################################
# HISTORY:
# 2013-10-09
# o Now getSmoothLocusData() for CBS also returns column 'count'.
# 2013-04-18
# o Added getSmoothLocusData() for CBS.
# o Created.
############################################################################
|
# Collapsible "View Options" panel for the dynamic trace plot.
# Exposes four controls (chain selector, line stacking, grid visibility,
# warmup shading) plus a short usage note. `object@nChains` is read from
# the enclosing server scope -- presumably the fitted shinystan object;
# confirm against the server setup.
output$ui_dynamic_trace_customize <- renderUI({
  bsCollapse(
    bsCollapsePanel(title = "View Options", id = "dynamic_trace_collapse",
      fluidRow(
        # Chain 0 means "plot all chains"; the upper bound comes from the fit.
        column(3, numericInput("dynamic_trace_chain", label = h5(style = "color: white;", "Chain (0 = all)"), min = 0, max = object@nChains, step = 1, value = 0)),
        column(3, radioButtons("dynamic_trace_stack", label = h5("Lines"), choices = list(Normal = "normal", Stacked = "stacked"), selected = "normal", inline = FALSE)),
        column(3, radioButtons("dynamic_trace_grid", label = h5("Grid"), choices = list(Show = "show", Hide = "hide"), selected = "hide", inline = FALSE)),
        column(3, radioButtons("dynamic_trace_warmup_shade", label = h5("Warmup shading"), choices = list(Show = "show", Hide = "hide"), selected = "show", inline = FALSE))
      ),
      hr(),
      # Static help text explaining zoom/pan and the roll-period box.
      h5(style = "color: white;", "Controlling the dynamic trace plot"),
      helpText(style = "color: white; font-size: 12px;", "Use your mouse to highlight areas in the plot to zoom into. To zoom back out just double-click.",
               "You can also use the range selector below the graph for panning and zooming.",
               "The number in the small black box in the bottom left corner controls the", em("roll period."),
               "If you specify a roll period of N the resulting graph will be a moving average,",
               "with each plotted point representing the average of N points in the data.")
    )
  )
})
|
/assets/projects/BayesianIRT/shinyStan_for_shinyapps/server_files/dynamic_ui/ui_dynamic_trace_customize.R
|
permissive
|
rfarouni/rfarouni.github.io
|
R
| false
| false
| 1,658
|
r
|
# Collapsible "View Options" panel for the dynamic trace plot.
# Exposes four controls (chain selector, line stacking, grid visibility,
# warmup shading) plus a short usage note. `object@nChains` is read from
# the enclosing server scope -- presumably the fitted shinystan object;
# confirm against the server setup.
output$ui_dynamic_trace_customize <- renderUI({
  bsCollapse(
    bsCollapsePanel(title = "View Options", id = "dynamic_trace_collapse",
      fluidRow(
        # Chain 0 means "plot all chains"; the upper bound comes from the fit.
        column(3, numericInput("dynamic_trace_chain", label = h5(style = "color: white;", "Chain (0 = all)"), min = 0, max = object@nChains, step = 1, value = 0)),
        column(3, radioButtons("dynamic_trace_stack", label = h5("Lines"), choices = list(Normal = "normal", Stacked = "stacked"), selected = "normal", inline = FALSE)),
        column(3, radioButtons("dynamic_trace_grid", label = h5("Grid"), choices = list(Show = "show", Hide = "hide"), selected = "hide", inline = FALSE)),
        column(3, radioButtons("dynamic_trace_warmup_shade", label = h5("Warmup shading"), choices = list(Show = "show", Hide = "hide"), selected = "show", inline = FALSE))
      ),
      hr(),
      # Static help text explaining zoom/pan and the roll-period box.
      h5(style = "color: white;", "Controlling the dynamic trace plot"),
      helpText(style = "color: white; font-size: 12px;", "Use your mouse to highlight areas in the plot to zoom into. To zoom back out just double-click.",
               "You can also use the range selector below the graph for panning and zooming.",
               "The number in the small black box in the bottom left corner controls the", em("roll period."),
               "If you specify a roll period of N the resulting graph will be a moving average,",
               "with each plotted point representing the average of N points in the data.")
    )
  )
})
|
#
# BAN400 - R PROGRAMMING FOR DATA SCIENCE
# LECTURE: MAKING MAPS
#
# In this lecture we will look at various techniques for making maps in R. We
# will start straight away by looking at some of the complicating factors that
# may arise when working with this kind of data, and then go into some simpler
# examples after that, to show that it does not always have to be all that
# complicated.
# EXAMPLE 1: The 1854 cholera outbreak ---------
#
# This example is presented in "Modern Data Science with R" by Baumer, Kaplan
# and Horton. The sp and rgdal-packages have been replaced by the sf-package.
library(mdsr) # Companion R package to the book, containing data
library(sf) # For spatial data, co-authoured by NHH Prof. Roger Bivand
library(ggmap) # For drawing static maps as ggplots
library(tidyverse) # Data wrangling etc
plot(CholeraDeaths) # Simple plot of the data
# When working with spatial data we typically need to deal with shape-files.
# These files are more complicated than what we are used to, and we need
# specialized functions to read them. Administrative units (countries, regions
# etc) typically publish shapefiles for their borders. This is not a simple
# format though.
# Download the file "rtwilson.com/downloads/SnowGIS_SHP.zip" and extract to your
# working directory. This is the cholera data as shapefiles:
folder <- "SnowGIS_SHP/SnowGIS_SHP" # The folder containing the shapefiles.
list.files(folder) # List the files in the folders
st_layers(folder) # List the "layers" in this folder (sets
# of shapefiles).
# We will begin by loading in the "Cholera_Deaths"-layer
ColeraDeaths <- read_sf(folder, layer = "Cholera_Deaths")
summary(ColeraDeaths)
# We have 250 spatial points, and for each point we have an ID-column (that
# seems to only contain zeros) as well as a Count column that contains the
# number of victims at that address. The Geometry column contains the definitions
# of the points, but this could also be the circumference of a country for
# example.
# Extract the coordinates of the points and plot in a coordinate system:
cholera_coords <- as_tibble(st_coordinates(ColeraDeaths))
ggplot(cholera_coords) +
geom_point(aes(x = X, y = Y)) +
coord_quickmap()
# This is still just a very basic plot of the locations of the points in our
# data set on a coordinate system with some sort of longitude and latitude on
# the axes. We want to overlay this on a map. We can download this map as a
# ggplot using the ggmap-package. In order to do that, we need to specify the
# square that we want to download, but note that the coordinates in the data is
# not in the lat/lon-format:
cholera_coords
# The unit here is meters, and not the usual degree/minute/second format. We
# must transform the coordinates, but from what? We don't always know, because
# there are many possible projections (read
# https://en.wikipedia.org/wiki/Map_projection if you want, but this is a very
# deep rabbit hole).
# The book that presents this example lists three commonly used projections:
# - EPSG:4326 (Also known as WGS84), standard for GPS systems and Google earth.
# - EPSG:3857 Used by OpenStreetMap, Google Maps and other services.
# - EPSG:27700 Often used in Britain.
# The map-tiles that we will download later however come in the EPSG:4326-format,
# so let us try to convert the cholera data to that format:
cholera_latlon <-
CholeraDeaths %>%
st_transform(st_crs("+init=epsg:4326")) %>%
st_coordinates %>%
as_tibble %>%
mutate(Count = ColeraDeaths$Count) # Add the counts back in to the data
# Define the box and download map:
london <- make_bbox(cholera_latlon$X,
cholera_latlon$Y,
f = .05)
m <- get_map(london, zoom = 17, source = "osm")
ggmap(m)
# Add the cholera data to the map:
ggmap(m) +
geom_point(aes(x = X, y = Y, size = Count), data = as_tibble(cholera_latlon))
# The points are a little bit off, unfortunately, and this is because we have
# not figured out what kind of coordinate system the original data has. In fact,
# in this case the data set does not come with information about which
# coordinate system was used to encode this data. We have information that this
# in fact is in the EPSG:27700-format, but this is something that we have to
# figure out from the source every time.
# Check the system; no info unfortunately
st_crs(CholeraDeaths)$input
# Set coordinate system first, then transform and plot.
cholera_latlon <-
CholeraDeaths %>%
st_set_crs("+init=epsg:27700") %>%
st_transform(st_crs("+init=epsg:4326")) %>%
st_coordinates %>%
as_tibble %>%
mutate(Count = ColeraDeaths$Count)
london <- make_bbox(cholera_latlon$X,
cholera_latlon$Y,
f = .05)
m <- get_map(london, zoom = 17, source = "osm")
ggmap(m) +
geom_point(aes(x = X, y = Y, size = Count), data = cholera_latlon)
# Voila!
# EXAMPLE 2: Water sources in Africa --------------
# These packages contains shapefiles for the countries of the world (as well as a lot of other information about the countries)
library(rnaturalearth)
library(rnaturalearthdata)
library(rgeos)
# This is a data set that is published on the "Tidy Tuesday"-project, a Github
# repository that publishes a new data set every week for the online data
# science community to analyze. This data set contains information on water sources in the world, but mainly in Africa.
water <- read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-05-04/water.csv")
# Here, we make a data frame for all the countries of the world. This requires all of the three packages above to be loaded.
world <- ne_countries(scale = "medium", returnclass = "sf")
# Let us concentrate on just one country, we filter out all other
tanzania <- world[world$name == "Tanzania",]
# We do the same for the water sources data. When making the plots later we will see that there are a couple of strange data points with latitude > 60. These are obviously not in Tanzania, so just filter them out.
tan_data <-
water %>%
filter(country_name == "Tanzania") %>%
filter(lat_deg < 60) # Some weird data points
# We make a simple plot of the borders of the country using the plotting method geom_sf() that is especially built for these kind of shape-objects.
ggplot(st_geometry(tanzania)) +
geom_sf()
# Add the water sources in the normal way. The coordinate systems appears to be
# compatible in this case, and the figure is in any case not so sensitive to
# such discrepancies when we are working on such a large area, and not on the
# street level as we did above.
ggplot(st_geometry(tanzania)) +
geom_sf() +
geom_point(aes(x = lon_deg, y = lat_deg),
data = tan_data)
# Make a nice map. I have used the fira-theme, because it is very nice. You need
# to install this yourself by following the link below, and that also requires
# the installation of a font. If you want to use another theme, replace (or take
# out) theme_fira() and scale_color_fira() below.
library(firatheme) # My favourite ggplot theme: https://github.com/vankesteren/firatheme
ggplot(st_geometry(tanzania)) +
geom_sf(fill = "#00000050") +
geom_point(aes(x = lon_deg, y = lat_deg, colour = water_source),
data = tan_data) +
xlab("") +
ylab("") +
labs(colour = "") +
ggtitle("Water sources in Tanzania") +
theme_fira() +
scale_colour_fira(na.value = "darkred") +
theme(axis.line = element_blank(),
panel.grid.major = element_line(colour = "#00000020",
inherit.blank = FALSE),
axis.text = element_text(colour = "#00000050"))
# Downloaded shapefiles for other features of Tanzania here:
# https://mapcruzin.com/free-tanzania-arcgis-maps-shapefiles.htm
# (Just made a Google search for Tanzania shapefiles). Read them in in the same way as for the London-example. The places-file includes a column on type, so we filter it down to just the major cities. We make the coordinates as separate columns and plot these "manually" so that we can plot the names of the cities as well as indicating their positions:
tan_roads <- read_sf("tanzania_roads")
tan_cities <-
read_sf("tanzania_places") %>%
filter(type == "city") %>%
select(name, geometry) %>%
drop_na() %>%
mutate(X = st_coordinates(.$geometry)[,1]) %>%
mutate(Y = st_coordinates(.$geometry)[,2])
tan_waterways <- read_sf("tanzania_waterways")
# We add the roads, waterways and cities to the map. Takes quite a lot of time
# to create, but the result is really nice!
ggplot(st_geometry(tanzania)) +
geom_sf(fill = "#00000050") +
geom_point(aes(x = lon_deg, y = lat_deg, colour = water_source),
data = tan_data,
alpha = .5) +
xlab("") +
ylab("") +
labs(colour = "") +
ggtitle("Water sources in Tanzania") +
theme_fira() +
scale_colour_fira(na.value = "darkred") +
theme(axis.line = element_blank(),
panel.grid.major = element_line(colour = "#00000020",
inherit.blank = FALSE),
axis.text = element_text(colour = "#00000050")) +
geom_sf(data = st_geometry(tan_roads), colour = "#00000050") +
geom_point(aes(x = X, y = Y), data = tan_cities, size = 2) +
geom_text(aes(x = X, y = Y, label = name),
data = tan_cities,
nudge_y = 0.3) +
geom_sf(data = st_geometry(tan_waterways), colour = "darkblue", alpha = .3)
# EXAMPLE 3: Water sources in Africa, continued ------------
# Another important type of plots is to color countries according to some
# variable. We have all seen this kind of plots in the pandemic.
# We start by filtering the "world" data frame above to include the countries of
# Africa, and the information about this in contained in the "continent"
# variable. We need to join the data on the geometry of the countries with the
# water data later, so we need the country names to match exactly. A bit further
# down we have an anti_join() for finding mismatches, and we fix them here with
# a recode:
africa <-
world %>%
filter(continent == "Africa") %>%
select(admin, geometry) %>%
mutate(admin = recode(.$admin,
"Democratic Republic of the Congo" = "Congo - Kinshasa",
"Republic of Congo" = "Congo - Brazzaville",
"United Republic of Tanzania" = "Tanzania")) %>%
rename(country_name = admin)
# We make a basic plot of the countries and see that it looks reasonable:
ggplot(africa, aes(geometry = geometry)) +
geom_sf()
# There is a binary variable in the water source data that indicates whether
# there is water available at the time of the visit. Let us calculate the share
# of active water sources for each country:
water_share <-
water %>%
group_by(country_name) %>%
summarise(share_status = mean(status_id == "y"))
# We use anti_join() to check which countries in the water data do not
# have matches in the africa data. It identified that the two Congos and
# Tanzania were spelled differently; that was fixed above when filtering
# down the africa data.
anti_join(water_share, africa)
# We join, first a full join to get both the water data and the geometry data in the same data frame, and then a semi-join to filter out all countries that are not in the Africa data (there are a few non-African countries in the water data).
water_africa <-
full_join(water_share, africa) %>%
semi_join(africa)
# Basic plot, where we map the fill-aesthetic to the share-variable:
ggplot(water_africa) +
geom_sf(aes(geometry = geometry, fill = share_status))
# Make a prettier plot. Again, drop the firatheme if you have not bothered to
# install it.
ggplot(water_africa) +
geom_sf(aes(geometry = geometry, fill = share_status)) +
scale_fill_gradient(
low = "white",
high = "red") +
ggtitle("Share of active water sources") +
labs(fill = "") +
theme_fira() +
scale_colour_fira(na.value = "darkred") +
theme(axis.line = element_blank(),
panel.grid.major = element_line(colour = "#00000020",
inherit.blank = FALSE),
axis.text = element_text(colour = "#00000050"))
|
/maps/maps.R
|
no_license
|
ceciliamowinckel/ban400-lectures
|
R
| false
| false
| 12,365
|
r
|
#
# BAN400 - R PROGRAMMING FOR DATA SCIENCE
# LECTURE: MAKING MAPS
#
# In this lecture we will look at various techniques for making maps in R. We
# will start straight away by looking at some of the complicating factors that
# may arise when working with this kind of data, and then go into some simpler
# examples after that, to show that it does not always have to be all that
# complicated.
# EXAMPLE 1: The 1854 cholera outbreak ---------
#
# This example is presented in "Modern Data Science with R" by Baumer, Kaplan
# and Horton. The sp and rgdal-packages have been replaced by the sf-package.
library(mdsr) # Companion R package to the book, containing data
library(sf) # For spatial data, co-authoured by NHH Prof. Roger Bivand
library(ggmap) # For drawing static maps as ggplots
library(tidyverse) # Data wrangling etc
plot(CholeraDeaths) # Simple plot of the data
# When working with spatial data we typically need to deal with shape-files.
# These files are more complicated than what we are used to, and we need
# specialized functions to read them. Administrative units (countries, regions
# etc) typically publish shapefiles for their borders. This is not a simple
# format though.
# Download the file "rtwilson.com/downloads/SnowGIS_SHP.zip" and extract to your
# working directory. This is the cholera data as shapefiles:
folder <- "SnowGIS_SHP/SnowGIS_SHP" # The folder containing the shapefiles.
list.files(folder) # List the files in the folders
st_layers(folder) # List the "layers" in this folder (sets
# of shapefiles).
# We will begin by loading in the "Cholera_Deaths"-layer
ColeraDeaths <- read_sf(folder, layer = "Cholera_Deaths")
summary(ColeraDeaths)
# We have 250 spatial points, and for each point we have an ID-column (that
# seems to only contain zeros) as well as a Count column that contains the
# number of victims at that address. The Geometry column contains the definitions
# of the points, but this could also be the circumference of a country for
# example.
# Extract the coordinates of the points and plot in a coordinate system:
cholera_coords <- as_tibble(st_coordinates(ColeraDeaths))
ggplot(cholera_coords) +
geom_point(aes(x = X, y = Y)) +
coord_quickmap()
# This is still just a very basic plot of the locations of the points in our
# data set on a coordinate system with some sort of longitude and latitude on
# the axes. We want to overlay this on a map. We can download this map as a
# ggplot using the ggmap-package. In order to do that, we need to specify the
# square that we want to download, but note that the coordinates in the data is
# not in the lat/lon-format:
cholera_coords
# The unit here is meters, and not the usual degree/minute/second format. We
# must transform the coordinates, but from what? We don't always know, because
# there are many possible projections (read
# https://en.wikipedia.org/wiki/Map_projection if you want, but this is a very
# deep rabbit hole).
# The book that presents this example lists three commonly used projections:
# - EPSG:4326 (Also known as WGS84), standard for GPS systems and Google earth.
# - EPSG:3857 Used by OpenStreetMap, Google Maps and other services.
# - EPSG:27700 Often used in Britain.
# The map-tiles that we will download later however come in the EPSG:4326-format,
# so let us try to convert the cholera data to that format:
cholera_latlon <-
CholeraDeaths %>%
st_transform(st_crs("+init=epsg:4326")) %>%
st_coordinates %>%
as_tibble %>%
mutate(Count = ColeraDeaths$Count) # Add the counts back in to the data
# Define the box and download map:
london <- make_bbox(cholera_latlon$X,
cholera_latlon$Y,
f = .05)
m <- get_map(london, zoom = 17, source = "osm")
ggmap(m)
# Add the cholera data to the map:
ggmap(m) +
geom_point(aes(x = X, y = Y, size = Count), data = as_tibble(cholera_latlon))
# The points are a little bit off, unfortunately, and this is because we have
# not figured out what kind of coordinate system the original data has. In fact,
# in this case the data set does not come with information about which
# coordinate system was used to encode this data. We have information that this
# in fact is in the EPSG:27700-format, but this is something that we have to
# figure out from the source every time.
# Check the system; no info unfortunately
st_crs(CholeraDeaths)$input
# Set coordinate system first, then transform and plot.
cholera_latlon <-
CholeraDeaths %>%
st_set_crs("+init=epsg:27700") %>%
st_transform(st_crs("+init=epsg:4326")) %>%
st_coordinates %>%
as_tibble %>%
mutate(Count = ColeraDeaths$Count)
london <- make_bbox(cholera_latlon$X,
cholera_latlon$Y,
f = .05)
m <- get_map(london, zoom = 17, source = "osm")
ggmap(m) +
geom_point(aes(x = X, y = Y, size = Count), data = cholera_latlon)
# Voila!
# EXAMPLE 2: Water sources in Africa --------------
# These packages contains shapefiles for the countries of the world (as well as a lot of other information about the countries)
library(rnaturalearth)
library(rnaturalearthdata)
library(rgeos)
# This is a data set that is published on the "Tidy Tuesday"-project, a Github
# repository that publishes a new data set every week for the online data
# science community to analyze. This data set contains information on water sources in the world, but mainly in Africa.
water <- read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-05-04/water.csv")
# Here, we make a data frame for all the countries of the world. This requires all of the three packages above to be loaded.
world <- ne_countries(scale = "medium", returnclass = "sf")
# Let us concentrate on just one country, we filter out all other
tanzania <- world[world$name == "Tanzania",]
# We do the same for the water sources data. When making the plots later we will see that there are a couple of strange data points with latitude > 60. These are obviously not in Tanzania, so just filter them out.
tan_data <-
water %>%
filter(country_name == "Tanzania") %>%
filter(lat_deg < 60) # Some weird data points
# We make a simple plot of the borders of the country using the plotting method geom_sf() that is especially built for these kind of shape-objects.
ggplot(st_geometry(tanzania)) +
geom_sf()
# Add the water sources in the normal way. The coordinate systems appears to be
# compatible in this case, and the figure is in any case not so sensitive to
# such discrepancies when we are working on such a large area, and not on the
# street level as we did above.
ggplot(st_geometry(tanzania)) +
geom_sf() +
geom_point(aes(x = lon_deg, y = lat_deg),
data = tan_data)
# Make a nice map. I have used the fira-theme, because it is very nice. You need
# to install this yourself by following the link below, and that also requires
# the installation of a font. If you want to use another theme, replace (or take
# out) theme_fira() and scale_color_fira() below.
library(firatheme) # My favourite ggplot theme: https://github.com/vankesteren/firatheme
ggplot(st_geometry(tanzania)) +
geom_sf(fill = "#00000050") +
geom_point(aes(x = lon_deg, y = lat_deg, colour = water_source),
data = tan_data) +
xlab("") +
ylab("") +
labs(colour = "") +
ggtitle("Water sources in Tanzania") +
theme_fira() +
scale_colour_fira(na.value = "darkred") +
theme(axis.line = element_blank(),
panel.grid.major = element_line(colour = "#00000020",
inherit.blank = FALSE),
axis.text = element_text(colour = "#00000050"))
# Downloaded shapefiles for other features of Tanzania here:
# https://mapcruzin.com/free-tanzania-arcgis-maps-shapefiles.htm
# (Just made a Google search for Tanzania shapefiles). Read them in in the same way as for the London-example. The places-file includes a column on type, so we filter it down to just the major cities. We make the coordinates as separate columns and plot these "manually" so that we can plot the names of the cities as well as indicating their positions:
tan_roads <- read_sf("tanzania_roads")
tan_cities <-
read_sf("tanzania_places") %>%
filter(type == "city") %>%
select(name, geometry) %>%
drop_na() %>%
mutate(X = st_coordinates(.$geometry)[,1]) %>%
mutate(Y = st_coordinates(.$geometry)[,2])
tan_waterways <- read_sf("tanzania_waterways")
# We add the roads, waterways and cities to the map. Takes quite a lot of time
# to create, but the result is really nice!
ggplot(st_geometry(tanzania)) +
geom_sf(fill = "#00000050") +
geom_point(aes(x = lon_deg, y = lat_deg, colour = water_source),
data = tan_data,
alpha = .5) +
xlab("") +
ylab("") +
labs(colour = "") +
ggtitle("Water sources in Tanzania") +
theme_fira() +
scale_colour_fira(na.value = "darkred") +
theme(axis.line = element_blank(),
panel.grid.major = element_line(colour = "#00000020",
inherit.blank = FALSE),
axis.text = element_text(colour = "#00000050")) +
geom_sf(data = st_geometry(tan_roads), colour = "#00000050") +
geom_point(aes(x = X, y = Y), data = tan_cities, size = 2) +
geom_text(aes(x = X, y = Y, label = name),
data = tan_cities,
nudge_y = 0.3) +
geom_sf(data = st_geometry(tan_waterways), colour = "darkblue", alpha = .3)
# EXAMPLE 3: Water sources in Africa, continued ------------
# Another important type of plots is to color countries according to some
# variable. We have all seen this kind of plots in the pandemic.
# We start by filtering the "world" data frame above to include the countries of
# Africa, and the information about this in contained in the "continent"
# variable. We need to join the data on the geometry of the countries with the
# water data later, so we need the country names to match exactly. A bit further
# down we have an anti_join() for finding mismatches, and we fix them here with
# a recode:
# Countries of Africa with their geometries. Country names are recoded so they
# match the spelling used in the water-point data (the mismatches were found
# with the anti_join() further below).
africa <-
  world %>%
  filter(continent == "Africa") %>%
  select(admin, geometry) %>%
  # Use data masking (the bare column name) instead of `.$admin`: reaching back
  # into the piped data frame with `.` is fragile if the chain is reordered and
  # is non-idiomatic inside mutate(). The values produced are identical.
  mutate(admin = recode(admin,
                        "Democratic Republic of the Congo" = "Congo - Kinshasa",
                        "Republic of Congo" = "Congo - Brazzaville",
                        "United Republic of Tanzania" = "Tanzania")) %>%
  rename(country_name = admin)
# We make a basic plot of the countries and see that it looks reasonable:
# Quick sanity check: draw just the country polygons.
ggplot(africa, aes(geometry = geometry)) +
  geom_sf()
# There is a binary variable in the water source data that indicates whether
# there is water available at the time of the visit. Let us calculate the share
# of active water sources for each country:
# Per-country share of water sources that were working ("y") at visit time.
water_share <-
  water %>%
  group_by(country_name) %>%
  summarise(share_status = mean(status_id == "y"))
# We use anti_join() to check which countries in the water data do not have
# matches in the africa data. It identified that the two Congos and Tanzania
# were spelled differently; we fixed that above when filtering down the africa
# data.
anti_join(water_share, africa)
# We join: first a full join to get both the water data and the geometry data in the same table, and then a semi-join to filter out all countries that are not in the Africa data (there are a few non-African countries in the water data).
# Combine shares with geometries. The join key is spelled out explicitly so
# dplyr does not have to guess (and message about) the common column; the
# result is identical to the implicit natural join.
water_africa <-
  full_join(water_share, africa, by = "country_name") %>%  # union of both tables
  semi_join(africa, by = "country_name")                   # then keep African countries only
# Basic plot, where we map the fill-aesthetic to the share-variable:
ggplot(water_africa) +
geom_sf(aes(geometry = geometry, fill = share_status))
# Make a prettier plot. Again, drop the firatheme if you have not bothered to
# install it.
# Final choropleth with a white-to-red fill gradient. Drop theme_fira() /
# scale_colour_fira() if the firatheme package is not installed.
ggplot(water_africa) +
  geom_sf(aes(geometry = geometry, fill = share_status)) +
  scale_fill_gradient(
    low = "white",
    high = "red") +
  ggtitle("Share of active water sources") +
  labs(fill = "") +
  theme_fira() +
  scale_colour_fira(na.value = "darkred") +
  theme(axis.line = element_blank(),
        panel.grid.major = element_line(colour = "#00000020",
                                        inherit.blank = FALSE),
        axis.text = element_text(colour = "#00000050"))
|
#' **--- Day 16: Ticket Translation ---**
#'
#' As you're walking to yet another connecting flight, you realize that one of
#' the legs of your re-routed trip coming up is on a high-speed train. However,
#' the train ticket you were given is in a language you don't understand. You
#' should probably figure out what it says before you get to the train station
#' after the next flight.
#'
#' Unfortunately, you can't actually read the words on the ticket. You can,
#' however, read the numbers, and so you figure out the fields these tickets
#' must have and the valid ranges for values in those fields.
#'
#' You collect the rules for ticket fields, the numbers on your ticket, and the
#' numbers on other nearby tickets for the same train service (via the airport
#' security cameras) together into a single document you can reference (your
#' puzzle input).
#'
#' The rules for ticket fields specify a list of fields that exist somewhere
#' on the ticket and the valid ranges of values for each field. For example, a
#' rule like class: 1-3 or 5-7 means that one of the fields in every ticket is
#' named class and can be any value in the ranges 1-3 or 5-7 (inclusive, such
#' that 3 and 5 are both valid in this field, but 4 is not).
#'
#' Each ticket is represented by a single line of comma-separated values. The
#' values are the numbers on the ticket in the order they appear; every ticket
#' has the same format. For example, consider this ticket:
#'
#' .--------------------------------------------------------.
#' | ????: 101 ?????: 102 ??????????: 103 ???: 104 |
#' | |
#' | ??: 301 ??: 302 ???????: 303 ??????? |
#' | ??: 401 ??: 402 ???? ????: 403 ????????? |
#' --------------------------------------------------------'
#'
#' Here, ? represents text in a language you don't understand. This ticket
#' might be represented as 101,102,103,104,301,302,303,401,402,403; of course,
#' the actual train tickets you're looking at are much more complicated. In any
#' case, you've extracted just the numbers in such a way that the first number
#' is always the same specific field, the second number is always a different
#' specific field, and so on - you just don't know what each position actually
#' means!
#'
#' Start by determining which tickets are completely invalid; these are tickets
#' that contain values which aren't valid for any field. Ignore your ticket for
#' now.
#'
#' For example, suppose you have the following notes:
#'
#' class: 1-3 or 5-7
#' row: 6-11 or 33-44
#' seat: 13-40 or 45-50
#'
#' your ticket:
#' 7,1,14
#'
#' nearby tickets:
#' 7,3,47
#' 40,4,50
#' 55,2,20
#' 38,6,12
#'
#' It doesn't matter which position corresponds to which field; you can
#' identify invalid nearby tickets by considering only whether tickets contain
#' values that are not valid for any field. In this example, the values on the
#' first nearby ticket are all valid for at least one field. This is not true
#' of the other three nearby tickets: the values 4, 55, and 12 are not
#' valid for any field. Adding together all of the invalid values produces your
#' ticket scanning error rate: 4 + 55 + 12 = 71.
#'
#' Consider the validity of the nearby tickets you scanned. What is your ticket
#' scanning error rate?
library(stringr)
library(tidyr)
library(dplyr)
# The worked example from the puzzle text: three field rules, my ticket, and
# four nearby tickets, separated by blank lines and section headers.
test_input <- c(
  "class: 1-3 or 5-7",
  "row: 6-11 or 33-44",
  "seat: 13-40 or 45-50",
  "",
  "your ticket:",
  "7,1,14",
  "",
  "nearby tickets:",
  "7,3,47",
  "40,4,50",
  "55,2,20",
  "38,6,12"
)
# The real puzzle input, one element per line of the file.
real_input <- readLines('input.txt')
# Build a validator for one rule line such as "class: 1-3 or 5-7".
# Returns a vectorised predicate: TRUE for each value of n that falls inside
# either inclusive range.
field_test_function <- function(line) {
  # Pull the four integer bounds out of the line with base-R regex matching.
  nums <- as.numeric(regmatches(line, gregexpr("[0-9]+", line))[[1]])
  lo_one <- nums[1]; hi_one <- nums[2]
  lo_two <- nums[3]; hi_two <- nums[4]
  # Compare against the bounds directly instead of materialising the full
  # lo:hi integer vectors and testing with %in%, which costs O(range width)
  # time and memory per call for no benefit.
  function(n) {
    (n >= lo_one & n <= hi_one) | (n >= lo_two & n <= hi_two)
  }
}
# Evaluate the nearby-ticket rule checks inside `env` itself. Because with()
# on an environment evaluates its expression IN that environment, the
# `all_results` data frame (and the loop variable `ticket_no`) are created as
# members of `env`, which is then returned. Expects `env` to already contain
# `field_tests` (named list of predicates) and `nearby_tickets` (list of
# numeric vectors), as set up by parse_input().
operating_env <- function(env) {
  with(env, {
    # Long-format accumulator: one row per (ticket, position, rule).
    all_results <- data.frame(
      ticket_no = numeric(0) , field_no = numeric(0),
      field = character(0), match = logical(0)
    )
    for (ticket_no in 1:length(nearby_tickets)) {
      field_vals <- nearby_tickets[[ticket_no]]
      # Apply every field test to the whole ticket: sapply yields one column
      # per rule (rows = ticket positions), which is then reshaped to long
      # format with ticket/position/value bookkeeping columns attached.
      matches <- sapply(field_tests, function(f) { f(field_vals) }) %>%
        as.data.frame() %>%
        mutate(
          ticket_no = ticket_no,
          field_no = row_number(),
          field_val = field_vals
        ) %>%
        pivot_longer(
          -c('ticket_no', 'field_no', 'field_val'),
          names_to = 'field',
          values_to = 'match'
        )
      # Grow-by-append is O(n^2) but acceptable for puzzle-sized input.
      all_results <- bind_rows(all_results, matches)
    }
  })
  env
}
# Parse the puzzle input into a fresh environment holding the field
# validators, my own ticket, and the list of nearby tickets, then hand it to
# operating_env() to populate the rule-match table.
parse_input <- function(input) {
  op_env <- new.env()
  op_env$field_tests <- list()
  op_env$my_ticket <- numeric(0)
  op_env$nearby_tickets <- list()
  # The input file has three sections; header lines switch the parsing mode.
  section <- 'tests'
  for (line in input) {
    if (line == "") next
    if (line == 'your ticket:') {
      section <- 'my ticket'
      next
    }
    if (line == 'nearby tickets:') {
      section <- 'other tickets'
      next
    }
    if (section == 'tests') {
      # The rule name is everything before the colon.
      rule_name <- str_extract(line, '^[\\w\\s]+(?=:)')
      op_env$field_tests[[rule_name]] <- field_test_function(line)
    } else if (section == 'my ticket') {
      op_env$my_ticket <- as.numeric(strsplit(line, ',')[[1]])
    } else {
      ticket_vals <- as.numeric(strsplit(line, ',')[[1]])
      op_env$nearby_tickets <- c(op_env$nearby_tickets, list(ticket_vals))
    }
  }
  operating_env(op_env)
}
op_env <- parse_input(real_input)
answer1 <- op_env$all_results %>%
group_by(ticket_no, field_no, field_val) %>%
summarise_at('match', sum) %>%
filter(match == 0) %>%
pull(field_val) %>%
sum()
# Answer: 32842
|
/20201216_day_16/exercise_1.R
|
no_license
|
ericwburden/advent_of_code_2020
|
R
| false
| false
| 5,705
|
r
|
#' **--- Day 16: Ticket Translation ---**
#'
#' As you're walking to yet another connecting flight, you realize that one of
#' the legs of your re-routed trip coming up is on a high-speed train. However,
#' the train ticket you were given is in a language you don't understand. You
#' should probably figure out what it says before you get to the train station
#' after the next flight.
#'
#' Unfortunately, you can't actually read the words on the ticket. You can,
#' however, read the numbers, and so you figure out the fields these tickets
#' must have and the valid ranges for values in those fields.
#'
#' You collect the rules for ticket fields, the numbers on your ticket, and the
#' numbers on other nearby tickets for the same train service (via the airport
#' security cameras) together into a single document you can reference (your
#' puzzle input).
#'
#' The rules for ticket fields specify a list of fields that exist somewhere
#' on the ticket and the valid ranges of values for each field. For example, a
#' rule like class: 1-3 or 5-7 means that one of the fields in every ticket is
#' named class and can be any value in the ranges 1-3 or 5-7 (inclusive, such
#' that 3 and 5 are both valid in this field, but 4 is not).
#'
#' Each ticket is represented by a single line of comma-separated values. The
#' values are the numbers on the ticket in the order they appear; every ticket
#' has the same format. For example, consider this ticket:
#'
#' .--------------------------------------------------------.
#' | ????: 101 ?????: 102 ??????????: 103 ???: 104 |
#' | |
#' | ??: 301 ??: 302 ???????: 303 ??????? |
#' | ??: 401 ??: 402 ???? ????: 403 ????????? |
#' --------------------------------------------------------'
#'
#' Here, ? represents text in a language you don't understand. This ticket
#' might be represented as 101,102,103,104,301,302,303,401,402,403; of course,
#' the actual train tickets you're looking at are much more complicated. In any
#' case, you've extracted just the numbers in such a way that the first number
#' is always the same specific field, the second number is always a different
#' specific field, and so on - you just don't know what each position actually
#' means!
#'
#' Start by determining which tickets are completely invalid; these are tickets
#' that contain values which aren't valid for any field. Ignore your ticket for
#' now.
#'
#' For example, suppose you have the following notes:
#'
#' class: 1-3 or 5-7
#' row: 6-11 or 33-44
#' seat: 13-40 or 45-50
#'
#' your ticket:
#' 7,1,14
#'
#' nearby tickets:
#' 7,3,47
#' 40,4,50
#' 55,2,20
#' 38,6,12
#'
#' It doesn't matter which position corresponds to which field; you can
#' identify invalid nearby tickets by considering only whether tickets contain
#' values that are not valid for any field. In this example, the values on the
#' first nearby ticket are all valid for at least one field. This is not true
#' of the other three nearby tickets: the values 4, 55, and 12 are not
#' valid for any field. Adding together all of the invalid values produces your
#' ticket scanning error rate: 4 + 55 + 12 = 71.
#'
#' Consider the validity of the nearby tickets you scanned. What is your ticket
#' scanning error rate?
library(stringr)
library(tidyr)
library(dplyr)
# The worked example from the puzzle text: three field rules, my ticket, and
# four nearby tickets, separated by blank lines and section headers.
test_input <- c(
  "class: 1-3 or 5-7",
  "row: 6-11 or 33-44",
  "seat: 13-40 or 45-50",
  "",
  "your ticket:",
  "7,1,14",
  "",
  "nearby tickets:",
  "7,3,47",
  "40,4,50",
  "55,2,20",
  "38,6,12"
)
# The real puzzle input, one element per line of the file.
real_input <- readLines('input.txt')
# Build a validator for one rule line such as "class: 1-3 or 5-7".
# Returns a vectorised predicate: TRUE for each value of n that falls inside
# either inclusive range.
field_test_function <- function(line) {
  # Pull the four integer bounds out of the line with base-R regex matching.
  nums <- as.numeric(regmatches(line, gregexpr("[0-9]+", line))[[1]])
  lo_one <- nums[1]; hi_one <- nums[2]
  lo_two <- nums[3]; hi_two <- nums[4]
  # Compare against the bounds directly instead of materialising the full
  # lo:hi integer vectors and testing with %in%, which costs O(range width)
  # time and memory per call for no benefit.
  function(n) {
    (n >= lo_one & n <= hi_one) | (n >= lo_two & n <= hi_two)
  }
}
# Evaluate the nearby-ticket rule checks inside `env` itself. Because with()
# on an environment evaluates its expression IN that environment, the
# `all_results` data frame (and the loop variable `ticket_no`) are created as
# members of `env`, which is then returned. Expects `env` to already contain
# `field_tests` (named list of predicates) and `nearby_tickets` (list of
# numeric vectors), as set up by parse_input().
operating_env <- function(env) {
  with(env, {
    # Long-format accumulator: one row per (ticket, position, rule).
    all_results <- data.frame(
      ticket_no = numeric(0) , field_no = numeric(0),
      field = character(0), match = logical(0)
    )
    for (ticket_no in 1:length(nearby_tickets)) {
      field_vals <- nearby_tickets[[ticket_no]]
      # Apply every field test to the whole ticket: sapply yields one column
      # per rule (rows = ticket positions), which is then reshaped to long
      # format with ticket/position/value bookkeeping columns attached.
      matches <- sapply(field_tests, function(f) { f(field_vals) }) %>%
        as.data.frame() %>%
        mutate(
          ticket_no = ticket_no,
          field_no = row_number(),
          field_val = field_vals
        ) %>%
        pivot_longer(
          -c('ticket_no', 'field_no', 'field_val'),
          names_to = 'field',
          values_to = 'match'
        )
      # Grow-by-append is O(n^2) but acceptable for puzzle-sized input.
      all_results <- bind_rows(all_results, matches)
    }
  })
  env
}
# Parse the puzzle input into a fresh environment holding the field
# validators, my own ticket, and the list of nearby tickets, then hand it to
# operating_env() to populate the rule-match table.
parse_input <- function(input) {
  op_env <- new.env()
  op_env$field_tests <- list()
  op_env$my_ticket <- numeric(0)
  op_env$nearby_tickets <- list()
  # The input file has three sections; header lines switch the parsing mode.
  section <- 'tests'
  for (line in input) {
    if (line == "") next
    if (line == 'your ticket:') {
      section <- 'my ticket'
      next
    }
    if (line == 'nearby tickets:') {
      section <- 'other tickets'
      next
    }
    if (section == 'tests') {
      # The rule name is everything before the colon.
      rule_name <- str_extract(line, '^[\\w\\s]+(?=:)')
      op_env$field_tests[[rule_name]] <- field_test_function(line)
    } else if (section == 'my ticket') {
      op_env$my_ticket <- as.numeric(strsplit(line, ',')[[1]])
    } else {
      ticket_vals <- as.numeric(strsplit(line, ',')[[1]])
      op_env$nearby_tickets <- c(op_env$nearby_tickets, list(ticket_vals))
    }
  }
  operating_env(op_env)
}
op_env <- parse_input(real_input)
answer1 <- op_env$all_results %>%
group_by(ticket_no, field_no, field_val) %>%
summarise_at('match', sum) %>%
filter(match == 0) %>%
pull(field_val) %>%
sum()
# Answer: 32842
|
#### SETUP ####
# accersi: fetch/summon
# divido: divide!
# expello: banish
# mundus: clean
# percursant: scour
# revelare: reveal
### updated with routines written for first phylogenetics course at UNL
# Largest finite double — presumably used as an "infinity" sentinel elsewhere
# in the file (not referenced in the visible portion; confirm before removing).
MAXNO <- 1.797693e+308;
# Characters that, per the name, are not permitted in Newick taxon labels.
newick_verbotten <- c(".","?","\"","\'");
# Single-letter character-state symbols, skipping easily-confused "I" and "O".
letter_states <- LETTERS[!LETTERS %in% c("I","O")];
more_letter_states <- tolower(letter_states);
# Pause length (seconds) used before interactive file prompts (see Sys.sleep
# in accersi_data_from_chosen_nexus_file).
zzzz <- 0.25;
#### HOUSE-CLEANING ####
# Replace NA cells in column j of `data` with the same-row value from column k.
#
# Vectorised with a logical row mask instead of the original row-by-row loop;
# works identically for matrices and data frames. Returns the patched copy
# (copy-on-modify semantics leave the caller's object untouched).
clear_matrix_na_with_another_cell_value <- function(data, j, k) {
  na_rows <- is.na(data[, j])
  data[na_rows, j] <- data[na_rows, k]
  return(data)
}
# Number of individual characters in a single string, obtained by exploding
# the string into a character vector and measuring its length.
count_characters_in_string <- function(string_to_count) {
  return(length(strsplit(string_to_count, split = "", fixed = TRUE)[[1]]));
}
# "Clean" one line of nexus-file text: strip or transliterate mangled byte
# sequences and accented characters so downstream parsing sees mostly plain
# ASCII (plus a few deliberate symbols such as the dagger and degree sign).
# NOTE(review): the raw escapes below look like Mac OS Roman and mis-decoded
# multi-byte sequences — confirm against actual source files before altering
# any individual pattern; order matters (e.g. multi-byte "\xfc..." patterns
# interleave with the single-byte "\xfc" -> "ue" rule).
mundify_nexus_text <- function(nexus_line) {
  # Raw single- and multi-byte escapes: smart quotes, ligatures, accents.
  nexus_line <- gsub("\xd4","",nexus_line);
  nexus_line <- gsub("\xd5","",nexus_line);
  nexus_line <- gsub("\x87","a",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x8d\xa3\xa4\xbc","n",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x98\x93\xa0\xbc","ae",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x99\x83\xa0\xbc","c",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x98\x96\x8c\xbc","",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x8c\x93\xa4\xbc","\'",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x98\xb3\xa0\xbc","",nexus_line);
  # Collapse runs of quote characters into conventional quoting.
  nexus_line <- gsub("\'\'\'","\'\"",nexus_line);
  nexus_line <- gsub("\'\' ","\" ",nexus_line);
  nexus_line <- gsub("\xac","",nexus_line);
  nexus_line <- gsub("\xa0","†",nexus_line);  # dagger marks extinct taxa by convention
  nexus_line <- gsub("\x80","",nexus_line);
  nexus_line <- gsub("\xd1","",nexus_line);
  nexus_line <- gsub("\xc9","?",nexus_line);
  nexus_line <- gsub("\xe1","a",nexus_line);
  nexus_line <- gsub("\xe9","e",nexus_line);
  nexus_line <- gsub("\x8e","e",nexus_line);
  nexus_line <- gsub("\x8f","e",nexus_line);
  nexus_line <- gsub("\x92","i",nexus_line);
  nexus_line <- gsub("\xbf","o",nexus_line);
  nexus_line <- gsub("\x9a","o",nexus_line);
  nexus_line <- gsub("\x97","o",nexus_line);
  nexus_line <- gsub("\xf6","ö",nexus_line);
  nexus_line <- gsub("\xfc","ue",nexus_line);
  nexus_line <- gsub("\xb0","˚",nexus_line);
  nexus_line <- gsub("\xba","˚",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x8e\x93\xa4\xbc","o",nexus_line);
  nexus_line <- gsub("\x9f","ue",nexus_line);
  nexus_line <- gsub("\xd0","-",nexus_line);
  nexus_line <- gsub("\xd2","\"",nexus_line);
  nexus_line <- gsub("\xd3","\"",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x8d\x86\x90\xbc","\'",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x8d\x86\x8c\xbc","ƒ",nexus_line);
  nexus_line <- gsub("\xdf","ß",nexus_line);
  nexus_line <- gsub("\xa7","ß",nexus_line);
  # Mathematical symbols occasionally used in character descriptions.
  nexus_line <- gsub("\xfc\xbe\x8c\xa6\x88\xbc","≤",nexus_line);
  nexus_line <- gsub("\xb3","≥",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x8d\x96\x8c\xbc","≈",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x98\xa6\x98\xbc","˚",nexus_line);
  nexus_line <- gsub("\xb6","∂",nexus_line);
  nexus_line <- gsub("\xc6","∆",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x8d\xb6\x88\xbc","∑",nexus_line);
  nexus_line <- gsub("\xfc\xbe\x99\x86\x88\xbc","Ω",nexus_line);
  nexus_line <- gsub("\xa5"," ",nexus_line);
  # Literal accented characters transliterated to plain ASCII.
  nexus_line <- gsub("Á","A",nexus_line);
  nexus_line <- gsub("Ä","A",nexus_line);
  nexus_line <- gsub("ä","a",nexus_line);
  nexus_line <- gsub("á","a",nexus_line);
  nexus_line <- gsub("å","a",nexus_line);
  nexus_line <- gsub("Ç","C",nexus_line);
  nexus_line <- gsub("ç","c",nexus_line);
  nexus_line <- gsub("č","c",nexus_line);
  nexus_line <- gsub("é","e",nexus_line);
  nexus_line <- gsub("è","e",nexus_line);
  nexus_line <- gsub("ê","e",nexus_line);
  nexus_line <- gsub("ė","e",nexus_line);
  nexus_line <- gsub("î","i",nexus_line);
  nexus_line <- gsub("Î","I",nexus_line);
  nexus_line <- gsub("ñ","n",nexus_line);
  nexus_line <- gsub("Ö","O",nexus_line);
  nexus_line <- gsub("Ø","O",nexus_line);
  nexus_line <- gsub("ø","o",nexus_line);
  nexus_line <- gsub("ó","o",nexus_line);
  nexus_line <- gsub("ö","o",nexus_line);
  nexus_line <- gsub("õ","o",nexus_line);
  nexus_line <- gsub("Š","S",nexus_line);
  nexus_line <- gsub("š","s",nexus_line);
  nexus_line <- gsub("ů","u",nexus_line);
  nexus_line <- gsub("ü","u",nexus_line);
  nexus_line <- gsub("’","’",nexus_line);
  nexus_line <- gsub("\x88","a",nexus_line);
  # Double-mis-encoded (UTF-8 read as Latin-1) versions of the same letters.
  nexus_line <- gsub("Ä","A",nexus_line);
  nexus_line <- gsub("Á","A",nexus_line);
  nexus_line <- gsub("á","a",nexus_line);
  nexus_line <- gsub("ä","a",nexus_line);
  nexus_line <- gsub("Ã¥","a",nexus_line);
  nexus_line <- gsub("ç","c",nexus_line);
  nexus_line <- gsub("é","e",nexus_line);
  nexus_line <- gsub("è","e",nexus_line);
  nexus_line <- gsub("ñ","n",nexus_line);
  nexus_line <- gsub("Ö","O",nexus_line);
  nexus_line <- gsub("ø","o",nexus_line);
  nexus_line <- gsub("ö","o",nexus_line);
  nexus_line <- gsub("õ","o",nexus_line);
  nexus_line <- gsub("ô","o",nexus_line);
  nexus_line <- gsub("ü","u",nexus_line);
  nexus_line <- gsub("î","i",nexus_line);
  nexus_line <- gsub("Š","S",nexus_line);
  nexus_line <- gsub("š","s",nexus_line);
  nexus_line <- gsub("å","a",nexus_line);
  nexus_line <- gsub("ů","u",nexus_line);
  nexus_line <- gsub("α","α",nexus_line);
  return(nexus_line);
}
# Turn Lophospira sp. or Lophospira sp. A to Lophospira
# Turn "Lophospira sp." or "Lophospira sp. A" into "Lophospira".
#
# The original built two-word comparison strings such as "sp. A" and "sp. 1",
# but the taxon name is split on spaces FIRST, so those multi-word
# designations could never match and qualifiers like the trailing "A" were
# left behind ("Lophospira sp. A" -> "Lophospira A"). Here each marker token
# ("sp.", "spp.", "nov.", "indet.") is dropped along with any bare
# single-letter or numeric qualifier that immediately follows it.
reduce_genus_sp_to_genus <- function(taxon_name) {
  tokens <- strsplit(taxon_name," ")[[1]];
  markers <- c("sp.","spp.","nov.","indet.");
  drop <- tokens %in% markers;
  # Positions directly after a marker token...
  qualifier_pos <- which(drop) + 1;
  qualifier_pos <- qualifier_pos[qualifier_pos <= length(tokens)];
  # ...are also dropped when they are a lone letter or number ("A", "3").
  is_qualifier <- grepl("^([A-Za-z]|[0-9]+)$", tokens[qualifier_pos]);
  drop[qualifier_pos[is_qualifier]] <- TRUE;
  taxon_name <- paste(tokens[!drop], collapse=" ");
  return(taxon_name);
}
# Derive a readable study label such as "Phacopidae (Snow 2000)" from a nexus
# file name shaped like "Group_Author_Year.nex" (possibly with a directory
# prefix and "&" / "et al" style author lists).
accersi_study_name <- function(nexus_file_name) {
  # Keep only the file name itself, then strip the ".nex" extension.
  path_bits <- simplify2array(strsplit(nexus_file_name,"/"));
  base_name <- gsub("\\.nex","",path_bits[length(path_bits)]);
  filename_parts <- simplify2array(strsplit(base_name,"_"));
  # Locate where the author name(s) begin within the underscore-split parts.
  if (sum(filename_parts %in% "&")==1) {
    author_start <- match("&",filename_parts)-1;
  } else if (sum(filename_parts %in% c("et","al"))==2) {
    author_start <- match("et",filename_parts)-1;
  } else {
    # Default layout "..._Author_Year": the last two parts form the citation.
    author_start <- length(filename_parts)-2;
  }
  study_group <- filename_parts[1:(author_start-1)];
  citation_words <- filename_parts[!filename_parts %in% study_group];
  citation <- paste0("(", paste(citation_words, collapse=" "), ")");
  return(paste(study_group, citation));
}
#### ROUTINES TO READ CHARACTER MATRIX INFORMATION IN NEXUS FILE ####
# routine to read a nexus file in Mesquite or MacClade format & return important information
# Read a Mesquite/MacClade-style nexus file from disk and hand its lines to
# accersi_data_from_nexus_vector(), which does all of the actual parsing.
accersi_data_from_nexus_file <- function(nexus_file_name, polymorphs=T, UNKNOWN=-11, INAP=-22, rate_partitions="", trend_partitions="") {
  # nexus_file_name: name of nexus file (e.g., "Phacopidae_Snow_2000.nex")
  # polymorphs: if TRUE, recode polymorphic observations like "1,2" as "-21";
  #   otherwise treat them as unknown
  # UNKNOWN / INAP: numeric stand-ins for "?" and the gap symbol "-"
  # rate_partitions / trend_partitions: names of CHARPARTITIONs used to split
  #   characters into rate / trend classes
  nexus_lines <- scan(file=nexus_file_name, what=character(), sep="\n");
  accersi_data_from_nexus_vector(nexus=nexus_lines, polymorphs=polymorphs,
                                 UNKNOWN=UNKNOWN, INAP=INAP,
                                 rate_partitions=rate_partitions,
                                 trend_partitions=trend_partitions);
}
# routine to prompt the user for a nexus file in Mesquite or MacClade format & return important information
# Prompt the user to pick a nexus file interactively, then read and parse it
# exactly as accersi_data_from_nexus_file() would.
accersi_data_from_chosen_nexus_file <- function(polymorphs=T, UNKNOWN=-11, INAP=-22, rate_partitions="", trend_partitions="") {
  # polymorphs: if TRUE, recode polymorphic observations like "1,2" as "-21";
  #   otherwise treat them as unknown
  # UNKNOWN / INAP: numeric stand-ins for "?" and the gap symbol "-"
  # rate_partitions / trend_partitions: names of CHARPARTITIONs used to split
  #   characters into rate / trend classes
  print("Choose the nexus file that you wish to analyze: ");
  flush.console();
  Sys.sleep(zzzz);  # brief pause so the prompt is visible before the dialog opens
  chosen_file <- file.choose();
  nexus_lines <- scan(file=chosen_file, what=character(), sep="\n");
  accersi_data_from_nexus_vector(nexus=nexus_lines, polymorphs=polymorphs,
                                 UNKNOWN=UNKNOWN, INAP=INAP,
                                 rate_partitions=rate_partitions,
                                 trend_partitions=trend_partitions);
}
# routine to read nexus information in Mesquite or MacClade format & return important information
accersi_data_from_nexus_vector <- function(nexus, polymorphs=T, UNKNOWN=-11, INAP=-22, rate_partitions="", trend_partitions="") {
# nexus_file_name: name of nexus file (e.g., "Phacopidae_Snow_2000.nex")
# polymorphs: boolean, if TRUE, then recode "1,2" as "-21"; otherwise, treat as unknown
# UNKNOWN: value substituting for "?"
# INAP: value substituting for gap ("-")
# rate_partitions: nameof CHARPARTITION that you want to use for dividing characters into general rate classes.
ml <- 0;
#i <- 1
for (i in 1:length(nexus)) {
nexus[i] <- mundify_nexus_text(nexus_line = nexus[i]);
# j <- simplify2array(strsplit(nexus[i],split="",fixed=TRUE))[,1];
j <- strsplit(nexus[i],split="",fixed=TRUE)[[1]];
# strsplit(string=nexus[i],pattern="")
if (length(j)>ml) ml <- length(j);
}
ml <- ml+1; # LENGTH OF LONGEST LINE
# file is now a vector of characters. Turn it into a matrix with one char per cell
nexusfile <- matrix("\n",length(nexus),ml)
for (i in 1:length(nexus)) {
j <- strsplit(nexus[i],split="",fixed=TRUE)[[1]];
for (k in 1:length(j)) nexusfile[i,k] <- j[k]
if ((length(j)+2)<ml)
for (k in (length(j)+2):ml) nexusfile[i,k] <- ""
}
top <- match("matrix",tolower(nexus));
if (is.na(top)) top <- match("\tmatrix",tolower(nexus));
if (is.na(top)) {
top <- 0;
ln <- 1; # this is the row with the word "Matrix": character data starts next.
while (top==0) {
em_nexus <- gsub("\t","",nexus[ln]);
nexus_words <- simplify2array(strsplit(em_nexus," ")[[1]]);
if (!is.na(match("matrix",tolower(nexus_words)))) {
top <- ln;
} else ln <- ln+1;
}
}
top <- top+1; # this will give the first row of data
# skip the comment text denoting character numbers (if present)
while(nexusfile[top,1]=="[" || nexusfile[top,1]==" ") top <- top+1;
# get character & state informations
all_states <- c();
missing <- "?";
gap <- "-";
notu <- nchars <- strat <- range <- geog <- 0;
for (i in 2:top) {
while ((nexusfile[i,1]=="[" || nexusfile[i,1]=="\n") && i<top) i <- i+1;
em_nexus <- gsub("\t","",nexus[i]);
em_nexus <- gsub("="," = ",em_nexus);
em_nexus <- gsub(";"," ; ",em_nexus);
nexus_words <- simplify2array(strsplit(em_nexus," ")[[1]]);
nexus_words <- nexus_words[nexus_words!=""];
if (!is.na(match("ntax",tolower(nexus_words))) || !is.na(match("ntaxa",tolower(nexus_words)))) {
j <- 1+match("ntax",tolower(nexus_words));
if (is.na(j)) j <- 1+match("ntaxa",tolower(nexus_words));
while(nexus_words[j]=="=") j <- j+1;
notu <- as.numeric(nexus_words[j]);
}
if (!is.na(match("nchar",tolower(nexus_words))) || !is.na(match("nchars",tolower(nexus_words)))) {
j <- 1+match("nchar",tolower(nexus_words));
if (is.na(j)) j <- 1+match("nchars",tolower(nexus_words));
while(nexus_words[j]=="=") j <- j+1;
nchars <- as.numeric(nexus_words[j]);
}
if (!is.na(match("gap",tolower(nexus_words)))) {
if (nexus_words[match("gap",tolower(nexus_words))+1]=="=") {
j <- 1+match("gap",tolower(nexus_words));
while(nexus_words[j]=="=") j <- j+1;
gap <- nexus_words[j];
}
}
if (!is.na(match("missing",tolower(nexus_words)))) {
if (nexus_words[match("missing",tolower(nexus_words))+1]=="=") {
j <- 1+match("missing",tolower(nexus_words));
while(nexus_words[j]=="=") j <- j+1;
missing <- nexus_words[j];
}
}
if (!is.na(match("symbols",tolower(nexus_words)))) {
j <- match("symbols",tolower(nexus_words))+1;
while (nexus_words[j] %in% c("=","\"")) j <- j+1;
jj <- min(((j+1):length(nexus_words))[tolower(nexus_words)[((j+1):length(nexus_words))] %in% c("missing","gap",";")]-1);
# jj <- j+match(";",nexus_words[(j+1):length(nexus_words)])-1;
all_states <- gsub("\"","",nexus_words[j:jj]);
# if (tolower(all_states) %in% "")
}
if (!is.na(match("fa",tolower(nexus_words))) || !is.na(match("fka",tolower(nexus_words)))) {
nexus_words[tolower(nexus_words)=="fka"] <- "fa";
strat <- as.numeric(nexus_words[match("fa",tolower(nexus_words))-1]);
}
if (!is.na(match("la",tolower(nexus_words))) || !is.na(match("lka",tolower(nexus_words)))) {
nexus_words[tolower(nexus_words)=="lka"] <- "la";
range <- as.numeric(nexus_words[match("la",tolower(nexus_words))-1]);
}
if (!is.na(match("geog",tolower(nexus_words)))) {
geog <- as.numeric(nexus_words[match("geog",tolower(nexus_words))-1]);
} else if (!is.na(match("geography",tolower(nexus_words)))) {
geog <- as.numeric(nexus_words[match("geography",tolower(nexus_words))-1]);
}
}
if (is.null(all_states)) all_states <- 0:9;
extra <- 0;
if (strat>0) {
if (range>0) {
nchars <- nchars-2
extra <- 2
} else {
nchars <- nchars-1
extra <- 1
}
strat_ranges <- matrix(0,notu,2)
}
if (geog>0) {
nchars <- nchars-1
geography <- vector(length=notu)
extra <- extra+1
}
taxa <- vector(length=notu);
nstates <- array(0,dim=nchars);
chmatrix <- matrix(0,notu,nchars);
tx <- 1;
# look for outgroup designation
exclude <- outgroup <- -1;
if (!is.na(match("BEGIN SETS;",nexus))) {
tx_pt <- match("BEGIN SETS;",nexus); # look at taxon partitions
look_for_outgroup <- TRUE;
while (look_for_outgroup) {
tx_pt <- 1+tx_pt;
yyy <- paste(nexusfile[tx_pt,], collapse = "");
yyy <- gsub("-"," - ",yyy);
yyy <- gsub("- "," - ",yyy);
yyy <- gsub(" - "," - ",yyy);
yyy <- gsub(";","",yyy);
yyy <- gsub(","," ,",yyy);
yyy <- gsub("\n","",yyy);
yyy <- gsub("\r","",yyy);
yyy <- gsub("\t","",yyy);
xxx <- tolower(strsplit(yyy," ")[[1]]);
xxx <- xxx[xxx!=""];
if (!is.na(match("outgroup",tolower(xxx)))) {
ttl_ln <- length(xxx);
jj <- 1+match("outgroup",tolower(xxx));
while (xxx[jj]==":" || xxx[jj]=="=") jj <- jj+1;
outgroup <- c();
while (xxx[jj]!="," && jj<=ttl_ln) {
if (xxx[jj]=="-") {
jj <- jj+1;
outgroup <- c(outgroup,((as.numeric(outgroup[length(outgroup)])+1):as.numeric(xxx[jj])));
} else {
outgroup <- c(outgroup,xxx[jj]);
}
jj <- jj+1;
}
look_for_outgroup <- FALSE;
} else {
if (tolower(nexus[tx_pt])=="end;" || tolower(nexus[tx_pt])=="\tend;")
look_for_outgroup <- FALSE;
}
}
# look for characters to exclude
tx_pt <- match("BEGIN SETS;",nexus);
xxx <- strsplit(paste(nexusfile[tx_pt-1,],collapse = "")," ");
while(tolower(xxx[1])!="end") {
tx_pt <- tx_pt+1;
yyy <- paste(nexusfile[tx_pt,], collapse = "");
yyy <- gsub("- "," - ",yyy);
yyy <- gsub(";","",yyy);
yyy <- gsub(","," ,",yyy);
yyy <- gsub("\n","",yyy);
yyy <- gsub("\r","",yyy);
yyy <- gsub("\t","",yyy);
xxx <- tolower(strsplit(yyy," ")[[1]]);
xxx <- xxx[xxx!=""];
if (length(xxx)==0 || is.na(xxx[1])) xxx <- "";
# if (!is.na(xxx) && !is.null(xxx) && xxx!="") {
if (xxx[1]=="charpartition") {
if (xxx[1]=="charpartition" && !is.na(match("exclude",tolower(xxx)))) {
ttl_ln <- length(xxx);
jj <- 1+match("exclude",tolower(xxx));
while (xxx[jj]==":") jj <- jj+1;
exclude <- c();
while (xxx[jj]!="," && jj<ttl_ln) {
if (xxx[jj]=="-") {
jj <- jj+1;
exclude <- c(exclude,((as.numeric(exclude[length(exclude)])+1):as.numeric(xxx[jj])));
} else {
exclude <- c(exclude,as.numeric(xxx[jj]));
}
jj <- jj+1;
}
}
}
# xxx[1];
# tx_pt;
}
}
if (rate_partitions!="") {
ln <- match("BEGIN SETS;",nexus);
got_splits <- F;
while (!got_splits) {
ln <- ln+1;
breakup_this_line <- strsplit(nexus[ln],split=" ")[[1]];
if (!is.na(match(rate_partitions,breakup_this_line))) {
nexus[ln] <- gsub("-"," - ",nexus[ln]); # Mesquite often puts dashes immediately after character or taxon numbers.....
nexus[ln] <- gsub(" -"," -",nexus[ln]);
nexus[ln] <- gsub("- ","- ",nexus[ln]);
breakup_this_line <- strsplit(nexus[ln],split=" ")[[1]];
breakup_this_line <- gsub(",","",breakup_this_line);
breakup_this_line <- gsub(";","",breakup_this_line);
breakup_this_line <- breakup_this_line[breakup_this_line!=""];
breakup_this_line <- breakup_this_line[match(rate_partitions,breakup_this_line):length(breakup_this_line)];
kk <- (1:length(breakup_this_line))[breakup_this_line %in% ":"];
partition_names <- breakup_this_line[kk-1];
kk <- c(kk,length(breakup_this_line)+1); # add last numberso that we can end the partion search easily below
character_rate_partitions <- rep("",nchars);
for (pn in 1:length(partition_names)) {
ll <- kk[pn]+1;
this_part <- as.numeric(breakup_this_line[ll]);
ll <- ll+1;
# while (ll<(kk[pn+1]-1)) {
if (pn < length(partition_names)) {
break_cell <- kk[pn+1]-1;
} else {
break_cell <- kk[pn+1];
}
while (ll<break_cell) {
if (breakup_this_line[ll]=="-") {
ll <- ll+1;
this_part <- c(this_part,as.numeric(breakup_this_line[ll-2]:as.numeric(breakup_this_line[ll])));
} else {
this_part <- c(this_part,as.numeric(breakup_this_line[ll]));
}
ll <- ll+1;
}
character_rate_partitions[this_part] <- partition_names[pn];
}
got_splits<- T;
}
}
} else character_rate_partitions <- rep("imagine",nchars);
if (trend_partitions!="") {
ln <- match("BEGIN SETS;",nexus);
got_splits <- F;
while (!got_splits) {
ln <- ln+1;
breakup_this_line <- strsplit(nexus[ln],split=" ")[[1]];
if (!is.na(match(trend_partitions,breakup_this_line))) {
nexus[ln] <- gsub("-"," - ",nexus[ln]); # Mesquite often puts dashes immediately after character or taxon numbers.....
nexus[ln] <- gsub(" -"," -",nexus[ln]);
nexus[ln] <- gsub("- ","- ",nexus[ln]);
breakup_this_line <- strsplit(nexus[ln],split=" ")[[1]];
breakup_this_line <- gsub(",","",breakup_this_line);
breakup_this_line <- gsub(";","",breakup_this_line);
breakup_this_line <- breakup_this_line[breakup_this_line!=""];
breakup_this_line <- breakup_this_line[match(trend_partitions,breakup_this_line):length(breakup_this_line)];
kk <- (1:length(breakup_this_line))[breakup_this_line %in% ":"];
partition_names <- breakup_this_line[kk-1];
kk <- c(kk,length(breakup_this_line)+1); # add last numberso that we can end the partion search easily below
character_trend_partitions <- rep("",nchars);
for (pn in 1:length(partition_names)) {
ll <- kk[pn]+1;
this_part <- as.numeric(breakup_this_line[ll]);
ll <- ll+1;
# while (ll<(kk[pn+1]-1)) {
if (pn < length(partition_names)) {
break_cell <- kk[pn+1]-1;
} else {
break_cell <- kk[pn+1];
}
while (ll<break_cell) {
if (breakup_this_line[ll]=="-") {
ll <- ll+1;
this_part <- c(this_part,as.numeric(breakup_this_line[ll-2]:as.numeric(breakup_this_line[ll])));
} else {
this_part <- c(this_part,as.numeric(breakup_this_line[ll]));
}
ll <- ll+1;
}
character_trend_partitions[this_part] <- partition_names[pn];
}
got_splits<- T;
}
}
} else character_trend_partitions <- rep("square",nchars);
state_orders <- rep("unordered",nchars);
if (!is.na(match("BEGIN ASSUMPTIONS;",nexus))) {
tx_pt <- 1+match("BEGIN ASSUMPTIONS;",nexus); # look at taxon partitions
while (tolower(nexus[tx_pt])!="end;") {
# yyy <- paste(nexusfile[tx_pt,], collapse = "");
yyy <- gsub("- "," - ",nexus[tx_pt]);
yyy <- gsub(";","",yyy);
yyy <- gsub(","," ,",yyy);
yyy <- gsub("\n","",yyy);
yyy <- gsub("\r","",yyy);
yyy <- gsub("\t","",yyy);
xxx <- tolower(strsplit(yyy," ")[[1]]);
xxx <- xxx[xxx!=""];
if (!is.na(match("ord:",tolower(xxx)))) {
ttl_ln <- length(xxx);
jj <- 1+match("ord:",xxx);
while (xxx[jj]==":") jj <- jj+1;
ordered <- c();
while (xxx[jj]!="," && jj<=ttl_ln) {
if (xxx[jj]=="-") {
jj <- jj+1;
ordered <- c(ordered,((as.numeric(ordered[length(ordered)])+1):as.numeric(xxx[jj])));
} else {
ordered <- c(ordered,as.numeric(xxx[jj]));
}
jj <- jj+1;
}
state_orders[ordered] <- "ordered";
}
tx_pt <- 1+tx_pt;
}
}
mxln <- length(nexusfile[top,]);
s <- top;
# te all of the taxon names
for (tx in 1:notu) {
# first, read taxon name
#### look for quotations###
s <- top+tx-1;
endline <- match("\n",nexusfile[s,]);
if (is.na(endline)) endline <- length(nexusfile[s,]);
if (nexusfile[s,1]=="'" || nexusfile[s,2]=="'") {
jj <- ((1:length(nexusfile[s,]))[nexusfile[s,] %in% "'"]);
i <- max((1:length(nexusfile[s,]))[nexusfile[s,] %in% "'"])
taxa[tx] <- pracma::strcat(nexusfile[s,(jj[1]+1):(jj[2]-1)])
i <- i+1
while (nexusfile[s,i]==" " && i<ncol(nexusfile)) i <- i+1
} else {
i <- 1;
if (nexusfile[s,1]!="\"") {
while (nexusfile[s,i]=="\t") i <- i+1;
taxa[tx] <- nexusfile[s,i]
i <- i+1
while (nexusfile[s,i]!=" " && nexusfile[s,i]!='\t' && i<ncol(nexusfile)) {
if (nexusfile[s,i]!="_") {
taxa[tx] <- paste0(taxa[tx],as.character(nexusfile[s,i]))
} else {
taxa[tx] <- paste0(taxa[tx]," ")
}
i <- i+1
}
} else {
taxa[tx] <- nexusfile[s,2];
i <- 3;
# while (nexusfile[s,i]!=" " && nexusfile[s,i+1]!=" " && i<ncol(nexusfile)) {
while (nexusfile[s,i]!=" " && i<ncol(nexusfile)) {
if (as.character(nexusfile[s,i])!="\"")
taxa[tx] <- paste0(taxa[tx],as.character(nexusfile[s,i]))
i <- i+1;
#print(taxa[tx]);
}
}
# now, get to characters
i <- (i:endline)[!nexusfile[s,i:endline] %in% c(" ","\t")][1];
# while ((nexusfile[s,i]==" " || nexusfile[s,i]=="\t") && i<ncol(nexusfile))
# i <- i+1
}
k <- i;
if ((endline-k)==(nchars+extra)) {
# true if there are no polymorphic characters for the taxon
dummy <- nexusfile[s,k:(endline-1)];
dummy[dummy==missing] <- UNKNOWN;
dummy[dummy==gap] <- INAP;
letterstate <- dummy[!dummy %in% c(UNKNOWN,INAP)];
dummy[!dummy %in% c(UNKNOWN,INAP)] <- sapply(letterstate,switch_letter_state_to_numeric,all_states);
chmatrix[tx,] <- as.numeric(dummy[1:nchars]);
if (strat>0) {
strat_ranges[tx,1] <- strat_ranges[tx,2] <- as.numeric(dummy[strat])
if (range>0) strat_ranges[tx,2] <- as.numeric(dummy[range])
}
if (geog>0) geography[tx]=as.numeric(nexusfile[geog,i])
for (c in 1:nchars) {
if ((chmatrix[tx,c]+1)>nstates[c]) nstates[c] <- chmatrix[tx,c]+1
}
} else {
# for (c in 1:(nchars+extra)) {
c <- 0;
while (c < (nchars+extra)) {
c <- c+1;
#print(c);
if (c<=nchars) {
if (nexusfile[s,i]=="(" || nexusfile[s,i]=="{") {
if (polymorphs==TRUE || polymorphs==1) {
# added 2020-11-28: sometimes polymorphics come in out-of-order
riteparens <- (i:endline)[nexusfile[s,i:endline] %in% c(")","}")];
ddd <- (i+1):(riteparens[1]-1);
polysites <- ddd[!nexusfile[s,ddd] %in% c(",","&"," ")]
polystates <- nexusfile[s,polysites];
for (ps in 1:length(polystates))
if (!polystates[ps] %in% 0:9)
polystates[ps] <- switch_letter_state_to_numeric(polystates[ps],all_states=all_states);
nexusfile[s,polysites] <- sort(as.numeric(polystates));
# nexusfile[s,(i+1):(riteparens[1]-1)] <- sort(nexusfile[s,(i+1):(riteparens[1]-1)]);
i <- i+1;
w <- as.numeric(nexusfile[s,i])
chmatrix[tx,c] <- -1*as.numeric(nexusfile[s,i])
if ((1+w)>nstates[c]) nstates[c] <- 1+w;
i <- i+1
j <- 1
while (nexusfile[s,i]!=")" && nexusfile[s,i]!="}" && i<ncol(nexusfile)) {
if (nexusfile[s,i]!="," && nexusfile[s,i]!=" ") {
w <- as.numeric(nexusfile[s,i])
if ((w+1)>nstates[c]) nstates[c] <- w+1
chmatrix[tx,c] <- chmatrix[tx,c]-((10^j)*w)
i <- i+1
j <- j+1
} else {
i <- i+1
}
}
} else {
chmatrix[tx,c] <- UNKNOWN;
while (nexusfile[s,i]!=')' && nexusfile[s,i]!="}") i <- i+1;
}
} else if (nexusfile[s,i]==missing) {
chmatrix[tx,c] <- UNKNOWN;
} else if (nexusfile[s,i]==gap) {
chmatrix[tx,c] <- INAP;
} else if (nexusfile[s,i]>="A" && nexusfile[s,i]<="Z") {
chmatrix[tx,c] <- switch_letter_state_to_numeric(nexusfile[s,i],all_states=all_states);
} else if (nexusfile[s,i]>="0" && nexusfile[s,i]<="9") {
chmatrix[tx,c] <- as.numeric(nexusfile[s,i]);
}
if ((chmatrix[tx,c]+1)>nstates[c]) nstates[c] <- chmatrix[tx,c]+1;
if (i < (endline-1)) i <- i+1;
} else {
if (c==strat) {
if (nexusfile[s,i]>="0" && nexusfile[s,i]<='9') {
strat_ranges[tx,1]=as.numeric(nexusfile[s,i])
} else if (nexusfile[s,i]>="A" && nexusfile[s,i]<="Z") {
strat_ranges[tx,1]=switch_letter_state_to_numeric(nexusfile[s,i],all_states = all_states);
}
if (range==0) strat_ranges[tx,2] <- strat_ranges[tx,1]
i <- i+1
} else if (c==range) {
if (nexusfile[s,i]>="0" && nexusfile[s,i]<='9') {
strat_ranges[tx,2]=as.numeric(nexusfile[s,i])
} else if (nexusfile[s,i]>="A" && nexusfile[s,i]<="Z") {
strat_ranges[tx,2]=switch_letter_state_to_numeric(nexusfile[s,i],all_states = all_states);
}
i <- i+1
} else if (c==geog) {
if (nexusfile[s,i]>="0" && nexusfile[s,i]<='9') {
geography[tx]=as.numeric(nexusfile[s,i])
} else if (nexusfile[s,i]>="A" && nexusfile[s,i]<="Z") {
geography[tx]=switch_letter_state_to_numeric(nexusfile[s,i],all_states = all_states);
}
}
} # end non-morphological data
# print(nexusfile[s,k:83]);
# print(chmatrix[tx,])
if (nexusfile[s,i+1]=="\n" || i==(mxln-1)) c <- nchars+extra;
}
}
# chmatrix[tx,];
# tx <- tx+1;
# s <- s+1
}
#x <- list(taxa,chmatrix,strat_ranges,geography)
#return (list(taxa,chmatrix,strat_ranges,geography))
chmatrix <- mundify_character_matrix(chmatrix,minst=0,UNKNOWN,INAP); # clean up coding
nstates <- count_states(chmatrix,UNKNOWN,INAP);
tree_found <- 0;
while (s<length(nexus) && tree_found==0) {
while (nexus[s]!= "BEGIN TREES; " && s<length(nexus)) s <- s+1;
if (s<length(nexus)) {
while (tree_found==0 && s<length(nexus)) {
s <- s+1
jj <- strsplit(nexus[s],split=c("\t"," "),fixed=TRUE)[[1]];
jj <- paste(jj,collapse="")
jj <- strsplit(jj,split=" ",fixed=TRUE)[[1]];
if (sum(jj=="TREE")>0 || sum(jj=="tree")>0) tree_found <- 1;
}
# s <- s+notu;
# while (jj[i]=="") jj[i] <- NULL;
# while (j[1]=="\t") j <- j[2:length(j)];
# if (j[1]=="T" && j[2]=="R" && j[3]=="E") {
# while (j!="(") j <- j[2:length(j)];
# }
newick_string <- jj[length(jj)];
newick_string <- fix_newick_ancestors(jj[length(jj)])
tree <- read_newick_string(newick_string);
tree_found <- 1
s <- length(nexus);
}
}
row.names(chmatrix) <- taxa;
unscored_taxa <- c();
for (n in 1:notu) {
if (sum(chmatrix[n,]==UNKNOWN)==nchars)
unscored_taxa <- c(unscored_taxa,n);
}
if (nchars<10) {
colnames(chmatrix) <- 1:nchars;
} else if (nchars<100) {
colnames(chmatrix) <- c(paste(0,(1:9),sep=""),10:nchars);
} else if (nchars<1000) {
colnames(chmatrix) <- c(paste(00,(1:9),sep=""),paste(0,(10:99),sep=""),100:nchars);
}
if (exclude[1]!=-1) {
keepers <- (1:nchars)[!(1:nchars) %in% exclude];
chmatrix <- chmatrix[,keepers];
nstates <- nstates[keepers];
state_orders <- state_orders[keepers];
character_rate_partitions <- character_rate_partitions[keepers];
}
if (strat!=0 && geog!=0 && tree_found==1) {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,strat_ranges,geography,tree,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Stratigraphic_Ranges","Geography","Tree","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
} else if (strat!=0) {
if (geog!=0) {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,strat_ranges,geography,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Stratigraphic_Ranges","Geography","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
} else if (tree_found!=0) {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,strat_ranges,tree,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Stratigraphic_Ranges","Tree","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
} else {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,strat_ranges,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Stratigraphic_Ranges","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
}
} else if (geog!=0) {
if (tree_found!=0) {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,geography,tree,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Geography","Tree","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
} else {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,geography,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Geography","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
}
} else if (tree_found!=0) {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,tree,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Tree","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
} else {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
}
return(output)
}
#nexus_name <- "Anopliidae_Hanger_&_Strong_2000.nex";
accersi_matrix_location_from_RData <- function(matrix_name,character_database) {
	# Locate matrix_name within the database's Glossary: each glossary entry is
	# a "$"-separated path whose final component is the nexus name.
	# Returns the glossary index of the match (NA if absent).
	nexus_names <- pbapply::pbsapply(character_database$Glossary,accersi_nexus_name_in_RData);
	return(match(matrix_name,nexus_names))
}
accersi_matrix_data_from_RData <- function(matrix_name,character_database) {
	# Walk the "$"-separated glossary path for matrix_name down through the
	# nested character_database list and return the stored nexus vector.
	#   matrix_name: nexus file name to fetch.
	#   character_database: nested list with a $Glossary of pathway strings.
	nn <- accersi_matrix_location_from_RData(matrix_name,character_database);
	pathway <- character_database$Glossary[nn];
	pathways <- strsplit(pathway,"\\$")[[1]];
	pp <- match(pathways[1],names(character_database));
	cl <- match(pathways[2],names(character_database[[pp]]));
	if (pathways[2]==matrix_name) {
		# matrix stored at the second level
		nexus <- unlist(character_database[[pp]][cl]);
		return(nexus);
	} else {
		od <- match(pathways[3],names(character_database[[pp]][[cl]]));
		if (pathways[3]==matrix_name) {
			# matrix stored at the third level
			nexus <- unlist(character_database[[pp]][[cl]][od]);
			return(nexus);
		} else {
			# fixed: the fourth-level lookup previously re-matched pathways[3]
			# (the third path component) one level down, indexing the wrong
			# element; the fourth component is the correct key here.
			sf <- match(pathways[4],names(character_database[[pp]][[cl]][[od]]));
			nexus <- unlist(character_database[[pp]][[cl]][[od]][sf]);
			return(nexus);
		}
	}
}
accersi_nexus_name_in_RData <- function(glossary) {
	# The nexus name is the final "$"-separated component of a glossary path.
	path_parts <- strsplit(glossary,"\\$")[[1]];
	return(path_parts[length(path_parts)])
}
accersi_data_from_RData <- function(matrix_name, character_database, polymorphs=T, UNKNOWN=-11, INAP=-22, rate_partitions="", trend_partitions="") {
	# Fetch a nexus matrix by name from an RData character database and parse it.
	#   matrix_name: name of nexus file (e.g., "Phacopidae_Snow_2000.nex")
	#   polymorphs: if TRUE, recode "1,2" as "-21"; otherwise treat as unknown
	#   UNKNOWN: value substituting for "?"
	#   INAP: value substituting for gap ("-")
	#   rate_partitions / trend_partitions: names of the CHARPARTITIONs used to
	#     divide characters into rate / trend classes.
	nexus <- accersi_matrix_data_from_RData(matrix_name=matrix_name,character_database=character_database);
	parsed <- accersi_data_from_nexus_vector(nexus=as.character(nexus), polymorphs=polymorphs, UNKNOWN=UNKNOWN, INAP=INAP, rate_partitions=rate_partitions, trend_partitions=trend_partitions);
	return(parsed)
}
#### DEAL WITH TRICKY CHARACTERS ####
switch_letter_state_to_numeric <- function(state,all_states=c(0:9,LETTERS[!LETTERS %in% c("I","O")])) {
	# Map a state symbol to its numeric value against the ordered alphabet
	# 0-9, A, B, ... (I and O skipped). Numeric inputs pass through unchanged;
	# the -1 makes "0" map to 0.
	state_position <- match(state,all_states);
	return(state_position - 1)
}
switch_letter_state_to_numeric_old <- function(state) {
	# Older variant: states <= 9 pass through unchanged; letter states map to
	# 10, 11, ... ("A" -> 10, with I and O skipped from the alphabet).
	if (state <= 9) return(state);
	poss_letter_states <- toupper(letters[!letters %in% c("i","o")]);
	return(9 + match(toupper(state),poss_letter_states))
}
switch_numeric_state_to_letter <- function(state) {
	# Inverse of the letter-state mapping: 0-9 unchanged; 10 -> "A", 11 -> "B",
	# etc., with I and O omitted from the letter alphabet.
	if (state <= 9) return(state);
	poss_letter_states <- toupper(letters[!letters %in% c("i","o")]);
	return(poss_letter_states[state-9])
}
unravel_polymorph_badass <- function(poly,minst=0) {
	# Decode a polymorphic scoring (a negative integer whose digits encode the
	# states, e.g. -210 -> states 2,1,0) into a vector of states. When the
	# digit string is not strictly decreasing, some states need two digits
	# (e.g. -1210 -> states 12,10), so consecutive digits are paired up.
	#   poly: negative-coded polymorphic scoring.
	#   minst: minimum valid state; paired-out placeholder digits (-1) are
	#     dropped against this threshold.
	combo <- -1*poly;
	state_test <- as.numeric(strsplit(x=as.character(combo),split="")[[1]]);
	# fixed: the original `if (state_test==sort(...))` compared whole vectors
	# inside `if`/`&&`, which is an error in R >= 4.2; all() gives the intended
	# "every digit in decreasing order" test.
	if (all(state_test==sort(state_test,decreasing=TRUE)) && length(unique(state_test))==length(state_test)) {
		# strictly decreasing unique digits: each digit is one single-digit state
		sts <- 1+floor(log10(abs(combo)));
		polymorphics <- vector(length=sts);
		base <- 10^(sts-1);
		for (s in 1:sts) {
			polymorphics[s] <- floor(abs(combo)/base);
			combo <- combo%%base;
			base <- base/10;
		}
	} else {
		breakpt <- match(max(state_test),state_test);
		if (breakpt > 2) {
			# pair up digits only ahead of the largest digit
			i <- 1;
			while (i < breakpt) {
				j <- i+1;
				state_test[i] <- (10*state_test[i])+state_test[j];
				state_test[j] <- -1;
				i <- j+1;
			}
			polymorphics <- state_test[state_test>=minst];
		} else {
			# pair every pair of consecutive digits into a two-digit state.
			# (The original had three byte-identical branches here — for
			# sub-minst digits, even length, and the default — now collapsed.)
			i <- 1;
			while (i < length(state_test)) {
				j <- i+1;
				state_test[i] <- (10*state_test[i])+state_test[j];
				state_test[j] <- -1;
				i <- j+1;
			}
			polymorphics <- state_test[state_test>=minst];
		}
	}
	return (polymorphics);
}
unravel_polymorph <- function(poly) {
	# Decode a polymorphic scoring: the digits of the (negated) code are the
	# individual states, e.g. -210 -> c(2, 1, 0).
	remaining <- abs(poly);
	n_states <- 1 + floor(log10(remaining));
	polymorphics <- vector(length=n_states);
	# peel digits off from the low end, filling the vector right-to-left
	for (k in n_states:1) {
		polymorphics[k] <- remaining %% 10;
		remaining <- remaining %/% 10;
	}
	return (polymorphics)
}
ravel_polymorph <- function(polystates) {
	# Encode a set of states as a single negative polymorphic code:
	# digits in decreasing order, negated (c(0,1,2) -> -210).
	polystates <- sort(polystates,decreasing = TRUE);
	polym <- polystates[1];
	# fixed: for a single state, `2:length(polystates)` expanded to c(2,1),
	# reading polystates[2] (NA) and returning NA; guard the loop instead.
	if (length(polystates) > 1) {
		for (st in 2:length(polystates)) polym <- (10*polym)+polystates[st];
	}
	return(-1*polym)
}
ravel_polymorph_for_file <- function(polystates) {
	# Format a polymorphic scoring for file output, e.g. c(2,0,1) -> "(012)".
	ordered_states <- sort(polystates,decreasing = FALSE);
	return(paste("(",paste(ordered_states,collapse=""),")",sep=""))
}
#### SUMMARIZE CHARACTER DATA ####
# count taxa scored with something other than missing or inapplicable
count_scored_characters_per_otu <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
	# For each OTU (row), count how many characters are scored with real data,
	# i.e. something other than UNKNOWN ("?") or INAP (gap).
	#   chmatrix: taxon-by-character matrix.
	# Returns a vector of counts, one per OTU.
	nch <- ncol(chmatrix);
	notu <- nrow(chmatrix);
	scored <- c();
	for (s in 1:notu)
		# fixed: previously subtracted from notu (the number of taxa) rather
		# than nch (the number of characters), giving wrong counts whenever
		# the matrix is not square.
		scored <- c(scored,nch - (sum(chmatrix[s,]==UNKNOWN)+sum(chmatrix[s,]==INAP)));
	return(scored);
}
# count missing and/or inapplicable per otu
count_scored_otus_per_character <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
	# For each character (column), count OTUs scored with real data, i.e.
	# neither UNKNOWN nor INAP. A bare vector is treated as one character.
	if (!is.matrix(chmatrix)) chmatrix <- matrix(chmatrix,ncol=1);
	notu <- nrow(chmatrix);
	scored <- c();
	for (ch in seq_len(ncol(chmatrix)))
		scored <- c(scored,notu - (sum(chmatrix[,ch]==UNKNOWN)+sum(chmatrix[,ch]==INAP)));
	return(scored);
}
# count missing and/or inapplicable per otu
count_scored_otus_per_character_state <- function(chmatrix,chstates,UNKNOWN=-11,INAP=-22) {
	# Tally OTUs per character per state: scored[ch, s] is the number of OTUs
	# coded with state s-1 for character ch. Cells beyond a character's state
	# count stay 0.
	#   chstates: number of states for each character.
	nch <- ncol(chmatrix);
	scored <- array(0,dim=c(nch,max(chstates)));
	for (ch in seq_len(nch)) {
		for (st in seq_len(chstates[ch]))
			scored[ch,st] <- sum(chmatrix[,ch]==(st-1));
	}
	return(scored);
}
# count characters with autapomorphic taxa
count_autapomorphic_characters <- function(chmatrix,chstates,UNKNOWN=-11,INAP=-22) {
	# Count characters with at least one autapomorphic state (a state scored
	# by exactly one OTU).
	notus_per_chstate <- count_scored_otus_per_character_state(chmatrix,chstates);
	autaps <- 0;
	for (ch in seq_len(ncol(chmatrix))) {
		state_counts <- notus_per_chstate[ch,1:chstates[ch]];
		if (any(state_counts==1)) autaps <- autaps+1;
	}
	return(autaps);
}
# count states coding only one taxon
count_autapomorphic_states <- function(chmatrix,chstates,UNKNOWN=-11,INAP=-22) {
	# Count states (summed over all characters) scored by exactly one OTU.
	notus_per_chstate <- count_scored_otus_per_character_state(chmatrix,chstates);
	autaps <- 0;
	for (ch in seq_len(ncol(chmatrix)))
		autaps <- autaps + sum(notus_per_chstate[ch,1:chstates[ch]]==1);
	return(autaps);
}
# routine to list all characters with at least one autapomorphic state
list_autapomorphic_characters <- function(chmatrix,chstates,UNKNOWN=-11,INAP=-22) {
	# List the character numbers that have at least one autapomorphic state
	# (a state scored by exactly one OTU).
	notus_per_chstate <- count_scored_otus_per_character_state(chmatrix,chstates);
	autaps <- c();
	for (ch in seq_len(ncol(chmatrix))) {
		if (any(notus_per_chstate[ch,1:chstates[ch]]==1)) autaps <- c(autaps,ch);
	}
	return(autaps);
}
# routine to list all character states that are autapomorphic
list_autapomorphic_states <- function(chmatrix,chstates,UNKNOWN=-11,INAP=-22) {
	# For each character with an autapomorphic state (a state scored by exactly
	# one OTU), append a row of (character number, chstates[character]).
	# NOTE(review): the second column records the character's state COUNT, not
	# the identity of the autapomorphic state — confirm this is intended.
	notus_per_chstate <- count_scored_otus_per_character_state(chmatrix,chstates);
	autaps <- c();
	for (ch in seq_len(ncol(chmatrix))) {
		if (any(notus_per_chstate[ch,1:chstates[ch]]==1))
			autaps <- rbind(autaps,c(ch,chstates[ch]));
	}
	return(autaps);
}
# count the number of scorings that are polymorphic. There can be 1 per character per taxon
count_polymorphic_scorings <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
	# For each character, count polymorphic scorings: codes that are negative
	# after excluding UNKNOWN and INAP. (At most one per character per taxon.)
	if (!is.matrix(chmatrix)) {
		chmatrix <- data.frame(ch=chmatrix);
	}
	nchars <- ncol(chmatrix);
	polymorphs <- vector(length=nchars);
	for (ch in seq_len(nchars)) {
		scores <- chmatrix[,ch];
		scores <- scores[scores!=UNKNOWN];
		scores <- scores[scores!=INAP];
		polys <- scores[scores<0];
		polymorphs[ch] <- length(polys);
	}
	return(polymorphs);
}
# count the number of characters with a polymorphic scoring
count_polymorphic_characters <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
	# Number of characters that have at least one polymorphic scoring.
	if (!is.matrix(chmatrix)) {
		chmatrix <- data.frame(ch=chmatrix);
	}
	poly_counts <- count_polymorphic_scorings(chmatrix,UNKNOWN,INAP);
	return(sum(poly_counts>0));
}
# count the number of states per character that show polymorphism
count_polymorphic_states <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
	# For each character, count the distinct states that appear in at least one
	# polymorphic (negative-coded) scoring.
	#   chmatrix: taxon-by-character matrix (a bare vector = one character).
	# Returns a vector of per-character counts.
	if (!is.matrix(chmatrix))
		chmatrix <- data.frame(ch=chmatrix);
	nchars <- ncol(chmatrix);
	polymorphic_states <- vector(length=nchars);
	for (ch in 1:nchars) {
		char_states <- sort(unique(chmatrix[,ch]));
		char_states <- char_states[char_states!=UNKNOWN];
		char_states <- char_states[char_states!=INAP];
		polys <- char_states[char_states<0];
		poly_states <- c();
		pp <- 0;
		while (pp < length(polys)) {
			pp <- pp+1;
			poly_states <- unique(c(poly_states,unravel_polymorph_badass(polys[pp])));
		}
		# fixed: previously returned length(polys) — the number of polymorphic
		# scorings — leaving poly_states dead; the function name promises the
		# number of states involved in polymorphisms.
		polymorphic_states[ch] <- length(poly_states);
	}
	return(polymorphic_states);
}
# get number of states for each character
count_states <- function(chmatrix,UNKNOWN=-11,INAP=-22,include_polymorphs=T) {
	# Number of distinct states observed for each character. Polymorphic
	# (negative-coded) scorings are unravelled into their component states when
	# include_polymorphs is TRUE. A bare vector is treated as one character.
	if (!is.matrix(chmatrix)) {
		chmatrix <- data.frame(ch=chmatrix);
	}
	nstates <- c();
	for (ch in seq_len(ncol(chmatrix))) {
		observed <- sort(unique(chmatrix[,ch]));
		observed <- observed[observed!=UNKNOWN];
		observed <- observed[observed!=INAP];
		unique_states <- observed[observed>=0];
		polys <- observed[observed<0];
		if (length(polys)>0 && include_polymorphs) {
			for (pp in polys)
				unique_states <- c(unique_states,unravel_polymorph(poly=pp));
			unique_states <- sort(unique(unique_states));
		}
		nstates <- c(nstates,length(unique_states));
	}
	return(nstates);
}
# get number of states for each character
list_states <- function(chmatrix,UNKNOWN=-11,INAP=-22,include_polymorphs=T) {
	# Like count_states(), but return the sorted state set itself for each
	# character (a list with one element per character).
	if (!is.matrix(chmatrix)) {
		chmatrix <- data.frame(ch=chmatrix);
	}
	state_list <- list();
	for (ch in seq_len(ncol(chmatrix))) {
		observed <- sort(unique(chmatrix[,ch]));
		observed <- observed[observed!=UNKNOWN];
		observed <- observed[observed!=INAP];
		unique_states <- observed[observed>=0];
		polys <- observed[observed<0];
		if (length(polys)>0 && include_polymorphs) {
			for (pp in polys)
				unique_states <- c(unique_states,unravel_polymorph(poly=pp));
			unique_states <- sort(unique(unique_states));
		}
		# base-R append; equivalent to rlist::list.append without the dependency
		state_list[[length(state_list)+1]] <- sort(unique_states);
	}
	return(state_list);
}
count_states_old <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
	# 2020-09-01: fixed breakdown of polymorphics
	# Older variant of count_states(): repeatedly expands the lowest (most
	# negative) remaining code into its component states until none remain,
	# then counts the distinct states per character.
	nstates <- c();
	for (ch in seq_len(ncol(chmatrix))) {
		observed <- sort(unique(chmatrix[,ch]));
		observed <- observed[observed!=UNKNOWN];
		observed <- observed[observed!=INAP];
		if (sum(observed<0)>0) {
			while (observed[1]<0) {
				observed <- sort(unique(c(observed[2:length(observed)],unravel_polymorph(observed[1]))));
			}
		}
		nstates <- c(nstates,length(observed));
	}
	return(nstates);
}
maximum_state <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
	# Highest state observed for each character (0 when nothing is scored);
	# polymorphic (negative-coded) scorings are unravelled first.
	maxstates <- c();
	for (ch in seq_len(ncol(chmatrix))) {
		observed <- sort(unique(chmatrix[,ch]));
		observed <- observed[observed!=UNKNOWN];
		observed <- observed[observed!=INAP];
		if (length(observed)==0) {
			observed <- 0;
		} else if (sum(observed<0)>0) {
			while (observed[1]<0) {
				observed <- sort(unique(c(observed[2:length(observed)],unravel_polymorph(observed[1]))));
			}
		}
		maxstates <- c(maxstates,max(observed));
	}
	return(maxstates);
}
minimum_state <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
	# Lowest state observed for each character (0 when nothing is scored);
	# polymorphic (negative-coded) scorings are unravelled first.
	minstates <- c();
	for (ch in seq_len(ncol(chmatrix))) {
		observed <- sort(unique(chmatrix[,ch]));
		observed <- observed[observed!=UNKNOWN];
		observed <- observed[observed!=INAP];
		if (length(observed)==0) {
			observed <- 0;
		} else if (sum(observed<0)>0) {
			while (observed[1]<0) {
				observed <- sort(unique(c(observed[2:length(observed)],unravel_polymorph(observed[1]))));
			}
		}
		minstates <- c(minstates,min(observed));
	}
	return(minstates);
}
#### FIND & RECODE DEPENDENT - INDEPENDENT SETS ####
# modified 2022-02-05
find_independent_character <- function(dchar,independents,chmatrix,UNKNOWN,INAP) {
	# Given a dependent character dchar, scan candidate independent characters
	# (those numbered below dchar, nearest first) for the "parent" whose state
	# controls whether dchar is applicable.
	#   dchar: column number of the dependent character.
	#   independents: candidate independent character numbers.
	#   chmatrix: taxon-by-character matrix; negative codes are polymorphs.
	#   UNKNOWN / INAP: codes for "?" and gap scorings.
	# Returns the independent character number, or 0 if none qualifies.
	dstates <- sum(unique(chmatrix[,dchar])>=0)
	pre_indies <- independents[independents < dchar];
	# NOTE(review): if no candidate precedes dchar, p_inds==0 and the loop
	# below iterates over 0:1 — confirm callers guarantee a non-empty set.
	pi <- p_inds <- length(pre_indies);
	ind_char <- 0;
	for (pi in p_inds:1) {
		ic <- pre_indies[pi];
		# unique (candidate-state, dependent-state) pairings across taxa
		unique_combos <- unique(chmatrix[,c(ic,dchar)]);
		unique_combos_2 <- unique_combos[!unique_combos[,1] %in% c(INAP,UNKNOWN),];
		# exclude -- or ?1 pairings
		u_c <- nrow(unique_combos);
		u_c2 <- nrow(unique_combos_2);
		# nrow() is NULL when subsetting collapsed the matrix to a vector
		if (is.null(u_c2)) if (length(unique_combos_2)>0) {u_c2 <- 1;} else {u_c2 <- 0;}
		# pairings where the candidate is "?" but the dependent is scored
		missing_match <- (1:u_c)[unique_combos[,1] %in% UNKNOWN][!(1:u_c)[unique_combos[,1] %in% UNKNOWN] %in% (1:u_c)[unique_combos[,2] %in% UNKNOWN]];
		# pairings where the candidate is inapplicable but the dependent is scored
		inap_match <- (1:u_c)[unique_combos[,1] %in% INAP][!(1:u_c)[unique_combos[,1] %in% INAP] %in% (1:u_c)[unique_combos[,2] %in% c(UNKNOWN,INAP)]]
		#	sum((unique_combos[,1] %in% INAP) * (unique_combos[,2] %in% INAP));
		#	double_inap <- double_inap + sum((unique_combos[,1] %in% UNKNOWN) * (!unique_combos[,2] %in% c(UNKNOWN,INAP)));
		if (u_c2>=dstates && (length(missing_match)>0 || length(inap_match)>0)) {
			# candidate states seen only where the dependent is unscored
			scored_matches <- unique(unique_combos_2[,1])[!unique(unique_combos_2[,1]) %in% unique(unique_combos_2[!unique_combos_2[,2] %in% c(INAP,UNKNOWN),1])];
			if (length(scored_matches)==1 && sum(scored_matches >= 0)) {
				ind_char <- ic;
				pi <- 1;
				return(ind_char);
			}
		} else if (u_c2>1) {
			unique_combos <- unique_combos_2;
			# re-inflate to a 1x2 matrix if subsetting collapsed it to a vector
			if (!is.matrix(unique_combos)) unique_combos <- array(unique_combos,dim=c(1,2));
			unique_combos <- unique_combos[!unique_combos[,2] %in% UNKNOWN,];
			if (!is.matrix(unique_combos)) unique_combos <- array(unique_combos,dim=c(1,2));
			poly_combos <- subset(unique_combos,unique_combos[,1]<0);
			unique_combos <- unique_combos[unique_combos[,1] >= 0,];
			# split pairings by whether the dependent is applicable or not
			unique_combos_ap <- unique_combos[!unique_combos[,2] %in% INAP,];
			unique_combos_inap <- unique_combos[unique_combos[,2] %in% INAP,];
			if (!is.matrix(unique_combos_ap)) {
				if (length(unique_combos_ap)==2) {
					unique_combos_ap <- array(unique_combos_ap,dim=c(1,2));
				} else {
					# empty set: substitute a sentinel block that can never
					# share states with the inapplicable set below
					unique_combos_ap <- cbind(0:5,0:5);
				}
			}
			if (!is.matrix(unique_combos_inap)) {
				if (length(unique_combos_inap)==2) {
					unique_combos_inap <- array(unique_combos_inap,dim=c(1,2));
				} else {
					unique_combos_inap <- cbind(0:5,0:5);
				}
			}
			if (nrow(unique_combos_ap)>0 && nrow(unique_combos_inap)>0) {
				# candidate is a parent if no candidate state appears with the
				# dependent both applicable and inapplicable
				if (sum(unique_combos_ap[,1] %in% unique_combos_inap[,1])==0) {
					ind_char <- ic;
					pi <- 1;
					return(ind_char);
				}
			}	# end case where ic might be the "parent" character
		} else if (unique_combos[1]>=0 && unique_combos[2]==INAP) {
			# single scored pairing: candidate scored, dependent inapplicable
			ind_char <- ic;
			pi <- 1;
			return(ind_char);
		}
	}
	return(ind_char);
}
#transmogrify_additive_dependents_to_multistate <- function(ind_char,dep_chars,chmatrix,INAP,UNKNOWN,multichanges=F,theoretical=F,unknown_inap_sep=F) {
#transmogrify_additive_dependents_to_multistate (ind_char,dep_chars,chmatrix,char_dependencies,INAP,UNKNOWN,theoretical=T)
transmogrify_additive_dependents_to_multistate <- function(ind_char,dep_chars,chmatrix,char_dependencies,INAP=-22,UNKNOWN=-11) {
	# Collapse an independent character plus its dependent characters into a
	# single multistate character, and build the matching Q (rate) matrix.
	#   ind_char: column number of the independent ("parent") character.
	#   dep_chars: column numbers of the dependent characters.
	#   chmatrix: taxon-by-character matrix (negative codes = polymorphs).
	#   char_dependencies: for each involved character, the character it
	#     depends upon; names are set to the character numbers if absent.
	# Returns a list: unique_combinations (allowed state combos), Q, and
	# new_character (the recoded multistate vector).
	# NOTE(review): rowMaxs/rowMins are not base R (presumably matrixStats);
	# they must be attached by the surrounding script — confirm.
	notu <- nrow(chmatrix);
	all_chars <- c(ind_char,dep_chars);
	if (is.null(names(char_dependencies)) || is.na(names(char_dependencies[1])))
		names(char_dependencies) <- all_chars;
	#indies <- all_chars[match(unique(char_dependencies),all_chars)];
	combos <- chmatrix[,all_chars];
	#combos[1,2] <- -10;
	# drop taxa unknown for every involved character
	combos <- combos[!(rowMaxs(combos)==UNKNOWN & rowMins(combos)==UNKNOWN),];
	hchars <- ncol(combos);
	hstates <- count_states(combos);
	names(hstates) <- all_chars;
	#combos <- combos[combos[,2]!=INAP,];
	ucombos <- unique(combos);
	# rows containing polymorphic codes are set aside before enumeration
	polyms <- unique(which(ucombos < 0 & !ucombos %in% INAP,arr.ind = T)[,1]);
	ucombos <- ucombos[!(1:nrow(ucombos)) %in% polyms,];
	all_poss_combos <- accersi_all_theoretical_character_state_combinations(obs_combinations=ucombos,char_dependencies = char_dependencies,INAP=INAP);
	a_p_c <- nrow(all_poss_combos);
	#combo_dependency <- accersi_key_character_for_hierarchical_combination(all_poss_combos,char_dependencies);
	#keystates <- accersi_key_states_for_independent_character_in_hierarchical(all_poss_combos = all_poss_combos,char_dependencies = char_dependencies)
	# gateway states of the parent under which each dependent becomes applicable
	key_combos <- accersi_gateway_states_among_hierarchical_characters(all_poss_combos,char_dependencies,hstates=hstates)
	ind_states <- sort(unique(ucombos[,1]));
	if (ncol(key_combos)>1) {
		# multiple gateway states: divergent hierarchy
		Q <- construct_Q_matrix_for_divergent_hierarchical_multistate(all_poss_combos,char_dependencies,UNKNOWN=UNKNOWN,INAP=INAP);
		# start here!!!! (why (-21)?)
		new_multistate <- accersi_hierarchical_multistate(all_poss_combos,obs_combos=chmatrix[,all_chars]);
	} else if (length(all_poss_combos)>0 & ncol(key_combos)==1) {
		# get rid of missing examples
		Q <- construct_Q_matrix_for_hierarchical_multistate(all_poss_combos,char_dependencies,UNKNOWN=UNKNOWN,INAP=INAP);
		# Q <- construct_Q_matrix_for_divergent_hierarchical_multistate(all_poss_combos)
		new_multistate <- accersi_hierarchical_multistate(all_poss_combos,obs_combos=chmatrix[,all_chars]);
		#cbind(new_multistate,chmatrix[,c(ind_char,dep_chars)])
	} else {
		# only one dependent state exists, making this just a normal character
		combos <- chmatrix[,c(ind_char,dep_chars)];
		combos <- combos[!(rowMaxs(combos)==UNKNOWN & rowMins(combos)==UNKNOWN),];
		combos <- unique(combos);
		all_poss_combos <- combos;
		new_multistate <- chmatrix[,ind_char];
		k <- max(2,nrow(combos));
		Q <- construct_Q_matrix_unordered(k);
	}
	output <- list(all_poss_combos,Q,new_multistate);
	names(output) <- c("unique_combinations","Q","new_character");
	return(output);
}
#obs_combinations <- ucombos; char_dependencies <- secondary_dependencies;
accersi_all_theoretical_character_state_combinations <- function(obs_combinations,char_dependencies=c(),INAP=-22,UNKNOWN=-11) {
	# Enumerate every theoretically possible combination of observed states
	# across the characters in obs_combinations (one column per character),
	# then — when char_dependencies is supplied — cull combinations that
	# violate the dependency structure: a dependent character may be scored
	# only under the "gateway" states of its controlling character.
	#   obs_combinations: matrix of observed state combinations; column names
	#     are the character numbers.
	#   char_dependencies: for each character, the character it depends upon.
	# Returns the matrix of allowed combinations; rownames concatenate the
	# states (INAP rendered as "-").
	ucombos <- unique(obs_combinations);
	ochars <- ncol(ucombos);	# no. original characters involved
	all_chars <- as.numeric(colnames(ucombos));
	# build the full Cartesian product of observed states, column by column
	all_poss_combos <- c();
	for (nch in ncol(ucombos):1) {
		if (is.null(all_poss_combos)) {
			all_poss_combos <- array(sort(unique(ucombos[,nch])),dim=c(length(unique(ucombos[,nch])),1));
		} else {
			new_states <- sort(unique(ucombos[,nch]));
			prior_combos <- all_poss_combos;
			all_poss_combos <- cbind(rep(new_states[1],nrow(prior_combos)),prior_combos);
			for (ns in 2:length(new_states))
				all_poss_combos <- rbind(all_poss_combos,cbind(rep(new_states[ns],nrow(prior_combos)),prior_combos));
		}
	}
	colnames(all_poss_combos) <- colnames(ucombos);
	# label each combination, e.g. "10-" for states 1, 0, inapplicable
	state_combos <- all_poss_combos[,1];
	for (dc in 2:ncol(all_poss_combos)) {
		dummyc <- as.character(all_poss_combos[,dc]);
		dummyc <- gsub("-22","-",dummyc);
		for (sc in 1:length(state_combos)) state_combos[sc] <- paste(state_combos[sc],dummyc[sc],sep="");
	}
	rownames(all_poss_combos) <- state_combos;
	dependent_chars <- c();
	if (length(char_dependencies)>0) {
		# fixed: removed "ind_char_2 <- match(ind_char,all_chars)", which read
		# the undefined free variable ind_char (an error unless a global of
		# that name happened to exist); ind_char_2 was never used.
		for (nch in 1:ochars) if (sum(ucombos[,nch] %in% INAP)>0) dependent_chars <- c(dependent_chars,nch);
		# fixed: seq_along() avoids the 1:0 loop when no character is inapplicable
		for (dc in seq_along(dependent_chars)) {
			a_p_c <- nrow(all_poss_combos);
			dch <- dependent_chars[dc];
			ich <- match(char_dependencies[dependent_chars[dc]],all_chars)
			# gateway states: controlling-character states observed while the
			# dependent character is actually scored
			keyst <- unique(ucombos[!ucombos[,c(ich,dch)][,2] %in% INAP,ich]);
			keyst <- keyst[keyst != INAP];
			# keep combos where (gateway state & dependent scored) or
			# (non-gateway state & dependent inapplicable)
			keepers1 <- (1:a_p_c)[all_poss_combos[,ich] %in% keyst & !all_poss_combos[,dch] %in% INAP];
			keepers2 <- (1:a_p_c)[!all_poss_combos[,ich] %in% keyst & all_poss_combos[,dch] %in% INAP];
			keepers <- sort(c(keepers1,keepers2));
			all_poss_combos <- all_poss_combos[keepers,];
		}
	}
	return(all_poss_combos);
}
accersi_hierarchical_multistate <- function(all_poss_combos,obs_combos) {
	# Condense a hierarchical additive coding scheme into one multistate
	# character: each taxon's observed combination is mapped to its row index
	# (minus 1) in all_poss_combos; unmatched taxa become "?" or polymorphics.
	#   all_poss_combos: matrix of allowed state combinations (rows labelled).
	#   obs_combos: taxon-by-character matrix of observed combinations.
	# Returns a vector with one (possibly letter- or "{...}"-coded) state per taxon.
	# NOTE(review): notu, ochars, UNKNOWN, INAP, letter_states,
	# more_letter_states, match_vector_to_matrix_row, and row.match are all
	# free names resolved outside this function (globals / other packages) —
	# confirm they are in scope wherever this is called.
	new_multistate <- vector(length=notu);
	for (nn in 1:notu)	new_multistate[nn] <- match_vector_to_matrix_row(test_vector=obs_combos[nn,],test_matrix=all_poss_combos)-1;
	all_states <- c(0:9,letter_states,more_letter_states);
	# >10 combinations: switch from digits to letter codes
	if (nrow(all_poss_combos)>10)	if (is.numeric(new_multistate))	new_multistate[!is.na(new_multistate)] <- all_states[1+new_multistate[!is.na(new_multistate)]];
	hstates <- count_states(all_poss_combos);
	# look for uncoded examples; these will be either polymorphs or unknowns
	prob_child <- (1:notu)[is.na(new_multistate)];
	#combos <- chmatrix[,c(ind_char,dep_chars)];
	pc <- 0;
	polymorphs <- unique(obs_combos[obs_combos<0])
	polymorphs <- polymorphs[!polymorphs %in% c(UNKNOWN,INAP)];
	while (pc < length(prob_child))	{
		pc <- pc+1;
		pcc <- prob_child[pc];
		#obs_combos[pcc,]
		if (obs_combos[pcc,1]==UNKNOWN)	{
			# independent character unknown: whole combination is unknown
			new_multistate[pcc] <- "?";
	#		new_multistate[pcc] <- UNKNOWN;
		} else {
			# make it polymorphic for all possible states
	#		this_combo <- obs_combos[pcc,(0:dchars)+1];
			this_combo <- obs_combos[pcc,];
	#		doofi <- (1:length(this_combo))[this_combo==UNKNOWN];
			# this if/else probably is unneeded now!
			if (sum(this_combo %in% polymorphs)==0)	{
				# must be case where 1+ dependent is unknown
				set_chars <- (1:length(this_combo))[this_combo!=UNKNOWN];
				set_states <- this_combo[set_chars];
				# winnow the possible combinations down to those matching the
				# characters that ARE scored
				poss_combos <- all_poss_combos;
				ss <- 0;
				while (ss < length(set_chars))	{
					ss <- ss+1;
					poss_combos <- subset(poss_combos,poss_combos[,set_chars[ss]]==set_states[ss]);
				}
				polymorph <- 0;
				if (nrow(all_poss_combos)>10)	polymorph <- c();
				for (cp in 1:nrow(poss_combos))	{
					this_state <- (row.match(poss_combos[cp,],as.data.frame(all_poss_combos))-1);
					if (nrow(all_poss_combos)>10)	{
						polymorph <- c(polymorph,all_states[this_state]);
	#					base64encode(this_state);
	#					for (i in 1:5)	print(base64encode(10^(i-1)));
					} else	{
						# digit-encode: each consistent combo adds a digit
						polymorph <- polymorph-((10^(cp-1))*this_state);
					}
				}
			} else {
				# some involved characters are themselves polymorphic
				set_chars <- (1:ochars)[!this_combo %in% c(UNKNOWN,polymorphs)];	unset_chars <- (1:ochars)[this_combo %in% c(UNKNOWN,polymorphs)];
				un <- 0;
				missing <- unset_chars[this_combo[unset_chars]==UNKNOWN];
				polys <- unset_chars[!this_combo[unset_chars] %in% UNKNOWN];
				# recode each unknown as a polymorph over all of its states
				while (un < length(missing))	{
					un <- un+1;
					mchar <- missing[un];
					rstates <- (1:hstates[mchar])-1;
					this_combo[mchar] <- 0;
					for (rs in 1:length(rstates))
						this_combo[mchar] <- this_combo[rs]-rstates[rs]*(10^(rs-1));
				}
	#			set_chars <- set_chars[set_chars>1];
				set_states <- this_combo[set_chars];
				poss_combos <- all_poss_combos;
	#			print(nrow(poss_combos));
				# reduce the possible combinations to those consistent
				sc <- length(set_chars);
	#			for (sc in 1:length(set_chars))	{
				while (sc > 1)	{
					poss_combos <- subset(poss_combos,poss_combos[,set_chars[sc]]==set_states[sc]);
	#				print(nrow(poss_combos));
					sc <- sc-1;
				}
				uc <- 0;
				# further restrict by the states allowed in each polymorph
				while (uc < length(polys))	{
					uc <- uc+1;
					poss_combos <- poss_combos[poss_combos[,polys[uc]] %in% unravel_polymorph_badass(this_combo[polys[uc]]),];
				}
				pstates <- match(rownames(poss_combos),rownames(all_poss_combos));
				polymorph <- 0;
				if (nrow(all_poss_combos)>10)	polymorph <- c();
				for (cp in 1:nrow(poss_combos))	{
					this_state <- (row.match(poss_combos[cp,],as.data.frame(all_poss_combos))-1);
					if (nrow(all_poss_combos)>10)	{
						polymorph <- c(polymorph,all_states[this_state]);
	#					base64encode(this_state);
	#					for (i in 1:5)	print(base64encode(10^(i-1)));
					} else	{
						polymorph <- polymorph-((10^(cp-1))*this_state);
					}
				}
			}
	#		if (nrow(unq_combos)>10)	{
	#			if (is.numeric(new_multistate))
	#				new_multistate[!is.na(new_multistate)] <- all_states[1+new_multistate[!is.na(new_multistate)]]
			# emit the polymorph in "{...}" notation, stripping minus signs
			new_multistate[pcc] <- paste("{",paste(polymorph,collapse=""),"}",sep="");
			new_multistate[pcc] <- gsub("-","",new_multistate[pcc]);
	#		} else {
	#			new_multistate[pcc] <- polymorph;
	#		}
		}
	}
	return(new_multistate);
}
accersi_key_states_for_independent_character_in_hierarchical <- function(all_poss_combos,char_dependencies,INAP=-22,UNKNOWN=-11) {
	# For each dependent character, find the "key" states of its controlling
	# character — the states under which the dependent character is scored.
	#   all_poss_combos: matrix of allowed state combinations (columns named
	#     with character numbers).
	#   char_dependencies: for each character, the character it depends upon.
	# Returns a matrix (one row per dependent character, padded with -1).
	# NOTE(review): the loop below reads `combos`, a free variable NOT defined
	# in this function (nor a parameter) — it resolves to whatever `combos`
	# exists in the calling/global environment. Confirm this is intentional;
	# it looks like it should be all_poss_combos.
	all_chars <- as.numeric(colnames(all_poss_combos));
	indies <- all_chars[match(unique(char_dependencies),all_chars)];
	dep_chars <- as.numeric(colnames(all_poss_combos))[as.numeric(colnames(all_poss_combos))!=char_dependencies];
	ostates <- count_states(all_poss_combos);
	keystates <- array(-1,dim=c(length(dep_chars),max(ostates[match(indies,all_chars)])));
	#unkeystates <- sort(unique(ucombos[,1]));
	for (dc in 1:length(dep_chars))	{
		kc <- match(char_dependencies[dc+1],all_chars)
		ks <- unique(combos[!combos[,dc+1] %in% c(INAP,UNKNOWN),kc]);
		ks <- ks[ks!=INAP];
		keystates[dc,1:length(ks)] <- ks;
	#	unkeystates <- unkeystates[unkeystates!=ks];
	}
	rownames(keystates) <- dep_chars;	# rownames(keystates) <- c(dep_chars,49);
	return(keystates);
}
accersi_key_character_for_hierarchical_combination <- function(all_poss_combos,char_dependencies,INAP=-22,UNKNOWN=-11) {
# For every permissible state combination (row of all_poss_combos), report the
#	highest-numbered controlling character among the characters that are
#	applicable (not INAP) in that combination; 0 when none has a controller.
# all_poss_combos: matrix of permissible state combinations.
# char_dependencies: for each character (column), the character it depends on.
# Returns: numeric vector, one entry per combination.
n_combos <- nrow(all_poss_combos);
combo_dependency <- vapply(seq_len(n_combos),function(cr) {
	applicable <- which(!all_poss_combos[cr,] %in% INAP);
	max(0,char_dependencies[applicable]);
	},numeric(1));
return(combo_dependency);
}
accersi_gateway_states_among_hierarchical_characters <- function(all_poss_combos,char_dependencies,hstates) {
# Collect, per independent ("gateway") character, the states that switch on
#	dependent characters in a hierarchical character complex.
# all_poss_combos: matrix of permissible state combinations; colnames are character numbers.
# char_dependencies: for each character, the character on which it depends.
# hstates: numbers of states per character.
# Returns: matrix (rows = independent characters) of gateway states, -1-padded.
all_chars <- as.numeric(colnames(all_poss_combos));
keystates <- accersi_key_states_for_independent_character_in_hierarchical(all_poss_combos = all_poss_combos,char_dependencies = char_dependencies)
key_combos <- array(-1,dim=c(length(unique(char_dependencies)),max(hstates[match(unique(char_dependencies),all_chars)])))
rownames(key_combos) <- unique(char_dependencies);
# pool the key states of all dependent characters sharing the same controller
for (i in 1:nrow(keystates)) {
j <- match(as.numeric(rownames(keystates))[i],all_chars); # get dependency
k <- match(char_dependencies[j],rownames(key_combos)); # match dependency
h <- keystates[i,keystates[i,]>=0]; # real key states (padding is -1)
h <- h[!h %in% key_combos[k,]]; # keep only states not already recorded
if (length(h)>0) {
g <- 1+sum(key_combos[k,]>-1); # first open cell in this row
g <- g:(g+length(h)-1);
key_combos[k,g] <- h;
}
}
mx_states <- as.array(colMaxs(key_combos)); # NOTE(review): colMaxs presumably from matrixStats — confirm it is loaded
new_dim <- orig_dim <- dim(key_combos);
new_dim[2] <- sum(mx_states>=0); # keep only columns that hold real states
# when trimming leaves a single row/column, rebuild as an array so that dim
#	and row names survive the subsetting
if (min(new_dim)==1) {
orig_rownames <- rownames(key_combos);
key_combos <- array(key_combos[,1:new_dim[2]],dim=c(new_dim[1],new_dim[2]));
rownames(key_combos) <- orig_rownames;
colnames(key_combos) <- letters[1:new_dim[2]];
}
return(key_combos);
}
#### MODIFY & WRITE NEXUS FILES ####
# routine to "clean" character matrix (e.g., remove gaps in coding, standardize minimum states, etc.)
mundify_character_matrix <- function(chmatrix,minst=0,UNKNOWN=-11,INAP=-22) {
# "Clean" a character matrix: close gaps in state numbering (e.g., 0,2 -> 0,1)
#	and shift every character so that its lowest state equals minst.
# chmatrix: taxon (rows) x character (columns) matrix; negative numbers other
#	than UNKNOWN/INAP are "raveled" polymorphic scorings.
# minst: desired minimum state number (usually 0).
# Returns: the recoded matrix.
notu <- nrow(chmatrix); # replaces spc to standardize coding.
nchars <- ncol(chmatrix);
min_states <- minimum_state(chmatrix,UNKNOWN=UNKNOWN,INAP=INAP);
max_states <- maximum_state(chmatrix,UNKNOWN=UNKNOWN,INAP=INAP);
for (ch in 1:nchars) {
# rows with no real scoring for this character
rem <- c((1:notu)[chmatrix[,ch]==UNKNOWN],(1:notu)[chmatrix[,ch]==INAP]);
if (length(rem)>0) {
test <- chmatrix[-rem,ch]
} else test <- chmatrix[,ch];
# check polymorphics for anything that needs to be changed
if (length(rem) < notu) {
polys <- sum(test<0); # taxa with polymorphic scores
coded <- sort(unique(test[test>=0]));
if (polys>0) {
# fold the states hidden inside polymorphic scorings into 'coded'
# examps <- test[test<0]
polycoded <- sort(unique(test[test<0]))
for (i in 1:length(polycoded)) {
polystates <- unravel_polymorph(polycoded[i])
coded <- sort(unique(c(coded,polystates)))
# if (min(polystates)<minstch) minstch <- min(polystates)
}
} else {
polycoded <- c();
}
minstch <- min(coded); # lowest state actually used by this character
# eliminate gaps in states
if (sum(!min(coded):max(coded) %in% coded)>0) {
# new_codes <- match(coded,coded)-(1-minst);
new_codes <- match(coded,coded)-(1-minstch);
for (st in 1:length(coded)) {
if (coded[st]!=new_codes[st]) {
rec <- (1:notu)[chmatrix[,ch]==coded[st]];
chmatrix[rec,ch] <- new_codes[st];
# renumber this state inside every polymorphic scoring, too
redo_poly <- 1;
while (redo_poly <= length(polycoded)) {
polystates <- unravel_polymorph(polycoded[redo_poly]);
polystates[polystates==coded[st]] <- new_codes[st]
newpolystates <- ravel_polymorph(polystates);
testp <- (1:notu)[chmatrix[,ch]==polycoded[redo_poly]];
# if (newpolystates != polycoded[redo_poly]) {
chmatrix[testp,ch] <- newpolystates;
# }
# polycoded[redo_poly] <- chmatrix[,ch][chmatrix[,ch] %in% polycoded[redo_poly]] <- newpolystates
polycoded[redo_poly] <- newpolystates;
redo_poly <- redo_poly+1;
}
coded[st] <- new_codes[st];
}
}
}
# standardize minimum state
# simple cheat: subtract 1111 to polymorphics
if (minstch!=minst) {
adj <- minst-minstch;
test2 <- (1:notu)[chmatrix[,ch]>=0];
chmatrix[test2,ch] <- chmatrix[test2,ch]+adj;
if (polys>0) {
examps2 <- polycoded;
for (i in 1:length(polycoded)) {
testp <- (1:notu)[chmatrix[,ch]==polycoded[i]];
# shift every digit of the raveled polymorphic by adj at once (…111 * adj)
examps2[i] <- examps2[i]-(adj*floor(10^floor(log10(abs(polycoded[i])))*10/9));
chmatrix[testp,ch] <- examps2[i];
}
polycoded <- examps2;
} # end rescoring of polytomies
} # end rescaling stats
} # end case where rem < notu
} # end search of characters;
return(chmatrix);
}
# routine to remove invariant and/or unscored characters from matrix
remove_invariants_from_character_matrix <- function(chmatrix,minst=0,UNKNOWN=-11,INAP=-22) {
# Strip characters (columns) that are invariant or entirely unscored.
# chmatrix: taxon (rows) x character (columns) matrix.
# minst: unused here; kept for signature compatibility with sibling routines.
# UNKNOWN/INAP: codes for unknown & inapplicable scorings (ignored when
#	judging variability).
# Returns: chmatrix without the invariant/unscored characters.
notu <- nrow(chmatrix) # replaces spc to standardize coding.
ncharss <- ncol(chmatrix)
rem_char <- c()
for (ch in 1:ncharss) {
rem <- c((1:notu)[chmatrix[,ch]==UNKNOWN],(1:notu)[chmatrix[,ch]==INAP])
if (length(rem)>0) {
test <- chmatrix[-rem,ch]
} else test <- chmatrix[,ch]
# fewer than 2 distinct real scores = invariant (or wholly unscored)
if (length(unique(test))<2) rem_char <- c(rem_char,ch)
}
# BUG FIX: when nothing is invariant, rem_char is NULL and the original
#	chmatrix[,-rem_char] fails; return the matrix unaltered instead.
if (length(rem_char)>0) return(chmatrix[,-rem_char])
return(chmatrix)
}
# generate composite score from 2+ scored taxa
accersi_composite_scores <- function(mini_matrix,return_polymorph=TRUE,UNKNOWN=-11,INAP=-22) {
# Collapse the scores of 2+ taxa (rows of mini_matrix) into one composite row.
# Per character: a single unique score is kept as-is; otherwise INAP and then
#	UNKNOWN are discarded, and any remaining conflict is raveled into a
#	polymorphic scoring.
# return_polymorph: kept for signature compatibility (not consulted here).
# Returns: numeric vector with one composite score per character.
score_one_character <- function(obs_states) {
	# obs_states: the unique scores observed for one character
	if (length(obs_states)==1) return(obs_states);
	obs_states <- obs_states[obs_states!=INAP];
	if (length(obs_states)<=1) return(obs_states);
	obs_states <- obs_states[obs_states!=UNKNOWN];
	if (length(obs_states)<=1) return(obs_states);
	poly_entries <- obs_states[obs_states<0];	# already-raveled polymorphics
	if (length(poly_entries)>0) {
		expanded <- sapply(poly_entries,unravel_polymorph);
		obs_states <- sort(unique(c(expanded,obs_states[obs_states>=0])));
		}
	return(ravel_polymorph(obs_states));
	}
composite_score <- c();
for (ch in seq_len(ncol(mini_matrix)))
	composite_score <- c(composite_score,score_one_character(unique(mini_matrix[,ch])));
return(composite_score);
}
scribio_nexus_file_from_chmatrix <- function(ch_matrix,new_file_name,UNKNOWN=-11,INAP=-22) {
# Write a numeric character matrix as a NEXUS file, appending a canned MrBayes
#	command block.
# ch_matrix: taxon x character matrix (rownames = taxon names); polymorphics
#	are raveled negative numbers; UNKNOWN -> "?", INAP -> "-".
# new_file_name: path of the NEXUS file to write.
notu <- nrow(ch_matrix);
taxon_names <- rownames(ch_matrix);
nchars <- ncol(ch_matrix);
nstates <- count_states(chmatrix = ch_matrix);
nexus_file_content <- c();
nexus_file_content <- rbind("#NEXUS","","BEGIN DATA;")
nexus_file_content <- rbind(nexus_file_content,paste(" DIMENSIONS NTAX=",notu," NCHAR=",nchars,";",sep=""));
# build the SYMBOLS list: digits only, or digits plus letters (skipping I & O)
if (max(nstates)<10) {
state_symbols <- " ";
for (st in 1:max(nstates))
state_symbols <- paste(state_symbols,st-1,sep=" ");
} else {
mxl <- max(nstates)-10;
letter_states <- LETTERS[!LETTERS %in% c("I","O")][1:mxl]
# NOTE(review): all_states exists only on this branch, yet the >9 case below
#	uses it; safe only because states >9 imply max(nstates)>=10 — confirm.
all_states <- c(0:9,letter_states);
state_symbols <- paste(all_states,collapse=" ");
}
nexus_file_content <- rbind(nexus_file_content,paste(" FORMAT DATATYPE = STANDARD RESPECTCASE GAP = - MISSING = ? SYMBOLS = \"",state_symbols,"\";"));
nexus_file_content <- rbind(nexus_file_content," MATRIX");
string_to_count <- taxon_names;
name_lengths <- sapply(string_to_count,count_characters_in_string);
max_name_length <- max(name_lengths);
need_quotes <- c(".","(",")","[","]"); # characters forcing a quoted taxon name
for (nn in 1:notu) {
test_name <- strsplit(taxon_names[nn],split="",fixed=TRUE)[[1]]
if (sum(test_name %in% need_quotes)==0) {
taxon <- gsub(" ","_",taxon_names[nn]);
} else {
taxon <- paste("\"",taxon_names[nn],"\"",sep="");
name_lengths[nn] <- name_lengths[nn]+2;
}
# pad names so the coding columns line up
this_line <- paste("\t",taxon,paste(rep(" ",(5+(max_name_length-name_lengths[nn]))),collapse=""),sep="");
otu_code <- c();
for (ch in 1:nchars) {
if (ch_matrix[nn,ch]>=0 && ch_matrix[nn,ch]<=9) {
otu_code <- paste(otu_code,ch_matrix[nn,ch],sep="");
} else if (ch_matrix[nn,ch]>9) {
otu_code <- paste(otu_code,all_states[1+ch_matrix[nn,ch]],sep=""); # note: we need +1 because of state 0
} else if (ch_matrix[nn,ch]==UNKNOWN) {
otu_code <- paste(otu_code,"?",sep="");
} else if (ch_matrix[nn,ch]==INAP) {
otu_code <- paste(otu_code,"-",sep="");
} else if (ch_matrix[nn,ch]<0) {
# remaining negatives are raveled polymorphics; print as "(013)" etc.
polystates <- strsplit(as.character(ch_matrix[nn,ch]),split="",fixed=TRUE)[[1]];
polystates <- as.numeric(polystates[polystates!="-"]);
otu_code <- paste(otu_code,ravel_polymorph_for_file(polystates),sep="");
}
}
nexus_file_content <- rbind(nexus_file_content,paste(this_line,otu_code,sep=""));
}
nexus_file_content <- rbind(nexus_file_content,";");
nexus_file_content <- rbind(nexus_file_content,"END;");
# canned MrBayes block: partitioned std model, 10^8 generations, 25% burn-in
nexus_file_content <- rbind(nexus_file_content,"begin mrbayes;");
nexus_file_content <- rbind(nexus_file_content," set autoclose=yes nowarn=yes;");
nexus_file_content <- rbind(nexus_file_content," lset nst=6 rates=invgamma;");
nexus_file_content <- rbind(nexus_file_content," unlink statefreq=(all) revmat=(all) shape=(all) pinvar=(all); ");
nexus_file_content <- rbind(nexus_file_content," prset applyto=(all) ratepr=variable;");
nexus_file_content <- rbind(nexus_file_content," mcmcp ngen= 100000000 relburnin=yes burninfrac=0.25 printfreq=10000 samplefreq=10000 nchains=4 savebrlens=yes;");
nexus_file_content <- rbind(nexus_file_content," mcmc;");
nexus_file_content <- rbind(nexus_file_content," sumt;");
nexus_file_content <- rbind(nexus_file_content,"end;");
write(nexus_file_content,file=new_file_name);
}
# write nexus file from chmatrix that already is converted to character
scribio_nexus_file_from_chmatrix_character <- function(ch_matrix_ch,new_file_name,max_states,unknown="?",inap="-") {
# Write a NEXUS file from a matrix whose cells are ALREADY printable symbols
#	(see convert_character_matrix_to_character), plus a canned MrBayes block.
# ch_matrix_ch: taxon x character matrix of symbol strings (rownames = taxa).
# new_file_name: path of the NEXUS file to write.
# max_states: most states over all characters (sets the SYMBOLS list).
# unknown/inap: symbols used for missing & gap scorings.
notu <- nrow(ch_matrix_ch);
taxon_names <- rownames(ch_matrix_ch);
nchars <- ncol(ch_matrix_ch);
nexus_file_content <- c();
nexus_file_content <- rbind("#NEXUS","","BEGIN DATA;")
nexus_file_content <- rbind(nexus_file_content,paste(" DIMENSIONS NTAX=",notu," NCHAR=",nchars,";",sep=""));
if (max_states<10) {
state_symbols <- " ";
for (st in 1:max_states)
state_symbols <- paste(state_symbols,st-1,sep=" ");
} else {
# mxl <- max_states-10;
# NOTE(review): letter_states & more_letter_states are globals defined
#	elsewhere in this file — confirm they are in scope.
all_states <- c(0:9,letter_states,more_letter_states);
state_symbols <- paste(all_states[1:max_states],collapse=" ");
}
nexus_file_content <- rbind(nexus_file_content,paste(" FORMAT DATATYPE = STANDARD RESPECTCASE GAP = ",inap," MISSING = ",unknown," SYMBOLS = \"",state_symbols,"\";"));
nexus_file_content <- rbind(nexus_file_content," MATRIX");
string_to_count <- taxon_names;
name_lengths <- sapply(string_to_count,count_characters_in_string);
max_name_length <- max(name_lengths);
need_quotes <- c(".","(",")","[","]"); # characters forcing a quoted taxon name
for (nn in 1:notu) {
test_name <- strsplit(taxon_names[nn],split="",fixed=TRUE)[[1]]
if (sum(test_name %in% need_quotes)==0) {
taxon <- gsub(" ","_",taxon_names[nn]);
} else {
taxon <- paste("\"",taxon_names[nn],"\"",sep="");
name_lengths[nn] <- name_lengths[nn]+2;
}
# pad names so the coding columns line up, then append the whole row
this_line <- paste("\t",taxon,paste(rep(" ",(5+(max_name_length-name_lengths[nn]))),collapse=""),sep="");
this_line <- paste(this_line,paste(ch_matrix_ch[nn,],collapse=""),sep="");
nexus_file_content <- rbind(nexus_file_content,this_line);
}
nexus_file_content <- rbind(nexus_file_content,";");
nexus_file_content <- rbind(nexus_file_content,"END;");
# canned MrBayes block
nexus_file_content <- rbind(nexus_file_content,"begin mrbayes;");
nexus_file_content <- rbind(nexus_file_content," set autoclose=yes nowarn=yes;");
nexus_file_content <- rbind(nexus_file_content," lset nst=6 rates=invgamma;");
nexus_file_content <- rbind(nexus_file_content," unlink statefreq=(all) revmat=(all) shape=(all) pinvar=(all); ");
nexus_file_content <- rbind(nexus_file_content," prset applyto=(all) ratepr=variable;");
nexus_file_content <- rbind(nexus_file_content," mcmcp ngen= 100000000 relburnin=yes burninfrac=0.25 printfreq=10000 samplefreq=10000 nchains=4 savebrlens=yes;");
nexus_file_content <- rbind(nexus_file_content," mcmc;");
nexus_file_content <- rbind(nexus_file_content," sumt;");
nexus_file_content <- rbind(nexus_file_content,"end;");
write(nexus_file_content,file=new_file_name);
}
ravel_polymorph_for_file <- function(polystates) {
# Format a polymorphic observation for a NEXUS matrix: the states in
#	ascending order, concatenated inside parentheses, e.g. c(2,0) -> "(20".."(02)".
ordered_states <- sort(polystates);
paste0("(",paste(ordered_states,collapse=""),")");
}
convert_character_matrix_to_character <- function(chmatrix,UNKNOWN=-11,INAP=-22,missing="?",gap="-") {
# Convert a numeric character matrix to printable symbols: states 0-9 then
#	letters (globals letter_states/more_letter_states, defined elsewhere);
#	UNKNOWN -> 'missing', gap (INAP) -> 'gap', polymorphics -> "(013)"-style.
# Returns: matrix of the same shape holding character strings.
chmatrix_char <- chmatrix;
symbol_set <- c(0:9,letter_states,more_letter_states);
for (col_no in seq_len(ncol(chmatrix))) {
	col_scores <- chmatrix[,col_no];
	scored_rows <- which(col_scores>=0);
	inap_rows <- which(col_scores==INAP);
	unkn_rows <- which(col_scores==UNKNOWN);
	# whatever is left must be a raveled polymorphic scoring
	poly_rows <- setdiff(seq_len(nrow(chmatrix)),c(scored_rows,inap_rows,unkn_rows));
	chmatrix_char[scored_rows,col_no] <- as.character(symbol_set[1+col_scores[scored_rows]]);
	chmatrix_char[unkn_rows,col_no] <- missing;
	chmatrix_char[inap_rows,col_no] <- gap;
	for (pr in poly_rows)
		chmatrix_char[pr,col_no] <- ravel_polymorph_for_file(symbol_set[1+sort(unravel_polymorph_badass(col_scores[pr]))]);
	}
return(chmatrix_char);
}
#### READ NEWICK FILES ####
#### convert (1,(2,3)) to vector_tree = 4 5 5 -1 4
read_newick_tree_from_chosen_file <- function() {
# Interactively choose a file holding a newick tree of numbered taxa and
#	return a "vector tree": element i gives the htu (clade) number to which
#	taxon/htu i belongs, with -1 marking the root; "(1,(2,3))" -> 4 5 5 -1 4.
newicktree_file <- file.choose();
newick_tree <- scan(file=newicktree_file,what=character(),sep="\n");
nexus_string <- strsplit(newick_tree,split="",fixed=TRUE)[[1]]
nodes <- 0
for (i in 1:length(nexus_string)) if (nexus_string[i]=="(") nodes <- nodes+1
# get clades
clades <- vector(length=nodes)
for (c in 1:nodes) clades[c] <- c
# get taxa
notu <- p <- 0
for (i in 1:length(nexus_string)) {
if (nexus_string[i]>="0" && nexus_string[i]<="9") {
# NOTE(review): otu*(10^p) garbles numbers of 3+ digits ("123" -> 1203);
#	the sibling read_newick_tree_from_file uses otu*10 — confirm & fix.
otu <- as.numeric(nexus_string[i])+(otu * (10^p))
p <- p+1
if (otu>notu) notu <- otu
} else {
p <- otu <- 0
}
}
vector_tree <- vector(length=notu+max(clades))
for (c in 1:nodes) clades[c] <- -1
cl <- c <- 0
i <- 1
for (i in 1:length(nexus_string)) {
if (nexus_string[i]=="(") {
sp <- p <- 0
cl <- cl+1
if (cl>1) {
vector_tree[notu+cl] <- clades[c]+notu # parent of the new clade
} else vector_tree[notu+1] <- -1 # first clade is the root
c <- c+1
clades[c] <- cl
} else if (nexus_string[i]==")") {
c <- c-1 # close the current clade
sp <- p <- 0
} else if (nexus_string[i]==",") {
sp <- p <- 0
} else if (nexus_string[i]>="0" && nexus_string[i]<="9") {
sp <- as.numeric(nexus_string[i])+(sp*10)
p <- p+1
# NOTE(review): the second test checks the CURRENT digit (always FALSE);
#	presumably it should be nexus_string[i+1]>"9" — confirm.
if (nexus_string[i+1]<"0" || nexus_string[i]>"9") vector_tree[sp] <- notu+clades[c]
}
}
return(vector_tree)
}
read_newick_tree_from_file <- function(newicktree_file) {
# Read a newick tree of numbered taxa, e.g. "(1,(2,3));", and return a
#	"vector tree": element i gives the htu (clade) number to which taxon/htu i
#	belongs, with -1 marking the root; "(1,(2,3))" -> c(4,5,5,-1,4).
# newicktree_file: path to a file whose first line is the newick string.
newick_tree <- scan(file=newicktree_file,what=character(),sep="\n")
nexus_string <- strsplit(newick_tree,split="",fixed=TRUE)[[1]]
# number of clades = number of left parentheses
nodes <- sum(nexus_string=="(")
clades <- 1:nodes;
# first pass: highest taxon number observed = number of otus
notu <- otu <- 0;
for (i in 1:length(nexus_string)) {
if (nexus_string[i]>="0" && nexus_string[i]<="9") {
otu <- as.numeric(nexus_string[i])+(otu * 10)
if (otu>notu) notu <- otu
} else {
otu <- 0
}
}
vector_tree <- vector(length=notu+max(clades))
for (c in 1:nodes) clades[c] <- -1
cl <- c <- 0
# second pass: walk the parentheses, assigning each taxon/htu to its clade
for (i in 1:length(nexus_string)) {
if (nexus_string[i]=="(") {
sp <- 0
cl <- cl+1 # clade counter, in order of appearance
if (cl>1) {
vector_tree[notu+cl] <- clades[c]+notu # parent of the new clade
} else vector_tree[notu+1] <- -1 # first clade is the root
c <- c+1
clades[c] <- cl
} else if (nexus_string[i]==")") {
c <- c-1 # close the current clade
sp <- 0
} else if (nexus_string[i]==",") {
sp <- 0
} else if (nexus_string[i]>="0" && nexus_string[i]<="9") {
sp <- as.numeric(nexus_string[i])+(sp*10);
# BUG FIX: the original tested nexus_string[i] (the current digit, never
#	> "9") instead of the NEXT character for the end of the number; also
#	guard the final position so nexus_string[i+1] cannot be NA.
if (i==length(nexus_string) || nexus_string[i+1]<"0" || nexus_string[i+1]>"9")
vector_tree[sp] <- notu+clades[c];
}
}
return(vector_tree)
}
#(1,(2,4,(9,(7,(10,((18,(26,(33,36)),(34,40)),(19,27))),(13,25,(14,16)))),(3,15,(5,(11,17,24,(21,29),(22,31)),(28,(20,(30,32))))),(6,(8,39,(12,23,35,37,38)))))
#### convert vector_tree = 4 5 5 -1 4 to (1,(2,3))
#### where number is the htu number of the clade to which a species or htu belong
#### does not work yet
write_newick_string_with_taxon_names_from_vector_tree <- function(vector_tree,otu_names) {
# Convert a vector tree (element i = htu of taxon/htu i; -1 = root) to a
#	newick string with taxon names substituted for numbers. Flagged "does not
#	work yet" by the author: gsub() on bare node numbers can match inside
#	longer numbers/names.
mat_tree <- transform_vector_tree_to_matrix_tree(vector_tree);
notu <- match(-1,vector_tree)-1;
nNodes <- length(vector_tree)-notu;
all_names <- c(otu_names,(1:nNodes)+notu);
newick_string <- paste(match(-1,vector_tree),";",sep="");
# expand each node number into "(daughter,daughter,...)" in place
for (nd in 1:nNodes) {
this_node <- notu+nd;
all_names[mat_tree[nd,mat_tree[nd,]>0]] # NOTE(review): result unused — dead expression?
newick_string <- gsub(this_node,paste("(",paste(all_names[mat_tree[nd,mat_tree[nd,]>0]],collapse=","),")",sep=""),newick_string);
# newick_string <- gsub(this_node,paste("(",paste(mat_tree[nd,mat_tree[nd,]>0],collapse=","),")",sep=""),newick_string)
}
return(newick_string);
}
write_newick_string_from_vector_tree <- function(vector_tree) {
# Convert a vector tree (element i = htu of taxon/htu i; -1 at the root) to a
#	numbered newick string, e.g. c(4,5,5,-1,4) -> "(1,(2,3));".
mat_tree <- transform_vector_tree_to_matrix_tree(vector_tree);
notu <- match(-1,vector_tree)-1;
nNodes <- length(vector_tree)-notu;
newick_string <- paste(match(-1,vector_tree),";",sep="");
# expand each node number into "(daughter,daughter,...)" in place
for (nd in 1:nNodes) {
this_node <- notu+nd;
# BUG FIX: replace the node number only where it stands as a COMPLETE
#	number; a bare gsub("4",...) would also hit the "4" inside "14",
#	corrupting trees once taxon/node numbers share digit strings.
node_pattern <- paste0("(?<![0-9])",this_node,"(?![0-9])");
clade_text <- paste("(",paste(mat_tree[nd,mat_tree[nd,]>0],collapse=","),")",sep="");
newick_string <- gsub(node_pattern,clade_text,newick_string,perl=TRUE);
}
return(newick_string);
}
read_newick_string <- function(newick_string) {
# Convert a newick string of numbered taxa, e.g. "(1,(2,3));", to a "vector
#	tree": element i gives the htu (clade) number holding taxon/htu i, with -1
#	marking the root; "(1,(2,3))" -> c(4,5,5,-1,4). Accepts either one string
#	or an already-atomized character vector.
if (length(newick_string)==1) newick_string <- strsplit(newick_string,split="",fixed=TRUE)[[1]];
# number of clades = number of left parentheses
nodes <- sum(newick_string=="(");
clades <- 1:nodes;
# first pass: highest taxon number observed = number of otus
notu <- otu <- 0
for (i in 1:length(newick_string)) {
if (newick_string[i]>="0" && newick_string[i]<="9") {
# BUG FIX: the original accumulated digits as otu*(10^p), which garbles
#	numbers of 3+ digits ("123" -> 1203); shift one decimal place instead.
otu <- as.numeric(newick_string[i])+(otu*10)
if (otu>notu) notu <- otu
} else {
otu <- 0
}
}
vector_tree <- vector(length=notu+max(clades))
for (c in 1:nodes) clades[c] <- -1
cl <- c <- 0
# second pass: walk the parentheses, assigning each taxon/htu to its clade
for (i in 1:length(newick_string)) {
if (newick_string[i]=="(") {
sp <- 0
cl <- cl+1 # clade counter, in order of appearance
if (cl>1) {
vector_tree[notu+cl] <- clades[c]+notu # parent of the new clade
} else vector_tree[notu+1] <- -1 # first clade is the root
c <- c+1
clades[c] <- cl
} else if (newick_string[i]==")") {
c <- c-1 # close the current clade
sp <- 0
} else if (newick_string[i]==",") {
sp <- 0
} else if (newick_string[i]>="0" && newick_string[i]<="9") {
sp <- as.numeric(newick_string[i])+(sp*10)
# BUG FIX: test the NEXT character (not the current digit) for the end of
#	a taxon number, and guard against running off the string.
if (i==length(newick_string) || newick_string[i+1]<"0" || newick_string[i+1]>"9")
vector_tree[sp] <- notu+clades[c]
}
}
return(vector_tree)
}
# written 2022-08-19
count_taxa_in_newick_string <- function(newick_string) {
# Count the taxa in a numbered newick string such as "(12,(7,(8,1)));":
#	strip the tree punctuation and count the comma-separated numbers.
stripped <- gsub(";","",newick_string);
stripped <- gsub("\\(","",stripped);
stripped <- gsub(")","",stripped);
taxon_numbers <- as.numeric(strsplit(stripped,",")[[1]]);
length(taxon_numbers);
}
# written for Cinctan project
transform_newick_string_to_venn_tree <- function(newick_string) {
# Convert a numbered newick string to a "venn tree": one row per clade, row
#	nn listing (sorted) all taxa inside clade nn, padded with 0s.
# newick_string should look like: "(12,(7,(8,1,3,(2,(5,4,9,(11,(10,6)))))));"
atomized_newick <- strsplit(newick_string,"")[[1]];
l_m <- length(atomized_newick);
clade_bounds_l <- (1:l_m)[atomized_newick %in% "("]; # clade bounds left
clade_bounds_e <- clade_bounds_r <- (1:l_m)[atomized_newick %in% ")"]; # clade bounds right
nNodes <- length(clade_bounds_l); # number of clades;
names(clade_bounds_l) <- names(clade_bounds_r) <- names(clade_bounds_e) <- 1:nNodes;
# get the first possible right paren ending this clade;
for (nn in 1:nNodes) clade_bounds_e[nn] <- sum(clade_bounds_r>clade_bounds_l[nn]);
clade_bounds_e_unq <- unique(clade_bounds_e);
clade_boundaries <- clade_boundaries_orig <- cbind(clade_bounds_l,clade_bounds_r,clade_bounds_e);
#1 <- nn <- 1
# iteratively pair each "(" with its matching ")", retiring the innermost
#	resolved group on every pass
while (length(clade_bounds_e_unq)>0) {
this_group <- sum(clade_boundaries_orig[,3]==clade_bounds_e_unq[1]);
if (length(clade_bounds_e_unq)>1) {
this_group_starts <- this_group-sum(clade_boundaries_orig[,2]<clade_boundaries_orig[this_group+1,1]);
} else {
this_group_starts <- 0;
}
clade_boundaries_orig[1:this_group,2] <- sort(clade_boundaries_orig[1:this_group,2],decreasing=T);
clade_boundaries[clade_boundaries[,1] %in% clade_boundaries_orig[,1],] <- clade_boundaries_orig;
clade_boundaries_orig <- clade_boundaries_orig[!(1:nrow(clade_boundaries_orig)) %in% ((this_group_starts+1):this_group),]
if (nrow(clade_boundaries_orig)>0) clade_boundaries_orig[,2] <- sort(clade_boundaries_orig[,2]);
n <- 0;
while (n < nrow(clade_boundaries_orig)) {
n <- n+1;
clade_boundaries_orig[n,3] <- sum(clade_boundaries_orig[,2]>clade_boundaries_orig[n,1]);
}
clade_bounds_e_unq <- unique(clade_boundaries_orig[,3]);
}
notu <- count_taxa_in_newick_string(newick_string);
venn_tree <- array(0,dim=c(nNodes,notu));
# read each clade's taxa off the substring between its matched parentheses
for (nn in 1:nNodes) {
lp <- clade_boundaries[nn,1];
rp <- clade_boundaries[nn,2];
this_clade <- paste(atomized_newick[lp:rp],collapse="");
# print(this_clade)
this_clade <- gsub("\\(","",this_clade);
this_clade <- gsub(")","",this_clade);
these_prog <- sort(as.numeric(strsplit(this_clade,",")[[1]]));
venn_tree[nn,1:length(these_prog)] <- these_prog;
}
return(venn_tree);
}
# written for Cinctan project
accersi_clade_boundaries_from_newick_string <- function(newick_string) {
# For each clade in a newick string, find the positions of its "(" and its
#	MATCHING ")". Returns a data frame (lp, rp) with one row per clade,
#	ordered by left parenthesis.
atomized_newick <- strsplit(newick_string,"")[[1]];
l_m <- length(atomized_newick);
clade_bounds_l <- (1:l_m)[atomized_newick %in% "("];
clade_bounds_e <- clade_bounds_r <- (1:l_m)[atomized_newick %in% ")"];
nNodes <- length(clade_bounds_l); # number of clades;
names(clade_bounds_l) <- names(clade_bounds_r) <- names(clade_bounds_e) <- 1:nNodes;
# get the first possible right paren ending this clade;
for (nn in 1:nNodes) clade_bounds_e[nn] <- sum(clade_bounds_r>clade_bounds_l[nn])
clade_bounds_e_unq <- unique(clade_bounds_e);
clade_boundaries <- clade_boundaries_orig <- cbind(clade_bounds_l,clade_bounds_r,clade_bounds_e);
#1 <- nn <- 1
# iteratively pair each "(" with its matching ")", retiring the innermost
#	resolved group on every pass
while (length(clade_bounds_e_unq)>0) {
this_group <- sum(clade_boundaries_orig[,3]==clade_bounds_e_unq[1]);
if (length(clade_bounds_e_unq)>1) {
this_group_starts <- this_group-sum(clade_boundaries_orig[,2]<clade_boundaries_orig[this_group+1,1]);
} else {
this_group_starts <- 0;
}
clade_boundaries_orig[1:this_group,2] <- sort(clade_boundaries_orig[1:this_group,2],decreasing=T);
clade_boundaries[clade_boundaries[,1] %in% clade_boundaries_orig[,1],] <- clade_boundaries_orig;
clade_boundaries_orig <- clade_boundaries_orig[!(1:nrow(clade_boundaries_orig)) %in% ((this_group_starts+1):this_group),]
if (nrow(clade_boundaries_orig)>0) clade_boundaries_orig[,2] <- sort(clade_boundaries_orig[,2]);
n <- 0;
while (n < nrow(clade_boundaries_orig)) {
n <- n+1;
clade_boundaries_orig[n,3] <- sum(clade_boundaries_orig[,2]>clade_boundaries_orig[n,1]);
}
clade_bounds_e_unq <- unique(clade_boundaries_orig[,3]);
}
clade_boundaries <- data.frame(lp=as.numeric(clade_boundaries[,1]),
rp=as.numeric(clade_boundaries[,2]))
return(clade_boundaries);
}
#### where number is the htu number of the clade to which a species or htu belong
# newick_string_ancestored <- newick_string_taxa_only_raw
# written for Cinctan project
# updated 2020-12-30 to allow outgroup to be ancestral
find_newick_ancestors <- function(newick_string_ancestored) {
# Flag sampled ancestors in a numbered newick string. An ancestor is written
#	as a taxon number immediately following a right parenthesis, e.g. taxon 4
#	in "(1,(2,3)4);". Returns a 0/1 array with one cell per taxon.
chars <- strsplit(newick_string_ancestored,"")[[1]];
n_chars <- length(chars);
digit_pos <- (1:n_chars)[!chars %in% c("(",")",",")];
digit_pos <- digit_pos[digit_pos!=n_chars];	# drop the terminal ";"
# each maximal run of consecutive digit positions is one taxon number
notu <- 1;
number_starts_after <- c();
for (k in 2:length(digit_pos)) {
	if ((digit_pos[k]-1)>digit_pos[k-1]) {
		notu <- notu+1;
		number_starts_after <- c(number_starts_after,digit_pos[k]-1);
		}
	}
sampled_ancestors <- array(0,dim=notu);
# numbers whose preceding character is ")" name sampled ancestors
anc_starts <- 1+number_starts_after[chars[number_starts_after]==")"];
for (dd in anc_starts) {
	run <- match(dd,digit_pos);
	anc_no <- as.numeric(chars[digit_pos[run]]);
	# accumulate the remaining digits of this (possibly multi-digit) number
	while (run<length(digit_pos) && digit_pos[run+1]==(digit_pos[run]+1)) {
		run <- run+1;
		anc_no <- (10*anc_no)+as.numeric(chars[digit_pos[run]]);
		}
	sampled_ancestors[anc_no] <- 1;
	}
return(sampled_ancestors);
}
# written for Cinctan project
fix_newick_ancestors <- function(newick_string_ancestored) {
# Rewrite sampled ancestors so that an ancestor written after a clade,
#	e.g. "(2,3)4", becomes the last daughter of that clade: "(2,3,4)".
chars <- strsplit(newick_string_ancestored,"")[[1]];
n_chars <- length(chars);
r_paren <- (1:n_chars)[chars==")"];
number_pos <- (1:n_chars)[!chars %in% c("(",")",",")];
number_pos <- number_pos[number_pos!=n_chars];	# drop the terminal ";"
for (rp in length(r_paren):1) {
	# an ancestor number starts right after this right parenthesis
	if (!is.na(match(r_paren[rp]+1,number_pos))) {
		digit_end <- r_paren[rp];
		anc_digits <- c();
		while (chars[digit_end+1] %in% as.character(0:9)) {
			anc_digits <- c(anc_digits,digit_end+1);
			digit_end <- digit_end+1;
			}
		# splice: "...)N..." -> "...,N)..."
		chars <- c(chars[1:(r_paren[rp]-1)],",",chars[anc_digits],")",chars[(digit_end+1):n_chars]);
		# positions shifted, so recompute the bookkeeping
		n_chars <- length(chars);
		r_paren <- (1:n_chars)[chars==")"];
		number_pos <- (1:n_chars)[!chars %in% c("(",")",",")];
		number_pos <- number_pos[number_pos!=n_chars];
		}
	}
return(paste(chars,collapse=""));
}
# written for Cinctan project
# heavily modified 2020-12
#read_newick_string_mcmc <- function(newick_string_full,otu_names) {
read_newick_string_mcmc <- function(newick_string_full,otu_names) {
# Parse an annotated newick string from a Bayesian (e.g., MrBayes/RevBayes)
#	summary tree: strips the [&...] metadata, converts taxon names to numbers,
#	and recovers the topology, branch durations, clade posteriors,
#	sampled-ancestor information & HPD intervals.
# newick_string_full: newick string with [&...] annotations & branch lengths.
# otu_names: taxon names in matrix order (spaces become "_" to match files).
# Returns a list: newick_modified, newick, vector_tree, clade_posteriors,
#	ancestral, prob_ancestor, hpd, branch_durations.
otu_names_nex <- gsub(" ","_",otu_names);
simple_newick_string <- molecularize <- strsplit(newick_string_full,split="")[[1]];
# blank out the [&...] metadata blocks, leaving taxa plus branch lengths
left_brackets <- (1:length(molecularize))[molecularize %in% "["];
right_brackets <- (1:length(molecularize))[molecularize %in% "]"];
for (br in 1:length(left_brackets)) molecularize[left_brackets[br]:right_brackets[br]] <- "";
newick_string_taxa_only_rawwest <- newick_string_taxa_raw <- newick_string_taxa_only <- paste(molecularize[molecularize!=""],collapse="");
notu <- length(otu_names);
branch_durations <- array(0,dim=(2*notu)-1);
# pull each taxon's branch duration from its "name:length"
for (nn in 1:notu) {
dummy_newick <- gsub(paste(as.character(otu_names_nex[nn]),":",sep=""),"•",newick_string_taxa_only);
dummy_newick <- strsplit(dummy_newick,split="")[[1]];
dd <- 1+match("•",dummy_newick);
b_d <- dummy_newick[dd];
dd <- dd+1;
while (dummy_newick[dd] %in% c(".",0:9)) {
b_d <- paste(b_d,dummy_newick[dd],sep="");
dd <- dd+1;
}
branch_durations[nn] <- as.numeric(b_d);
}
# reduce the string to pure topology with taxon numbers
for (i in 0:9) newick_string_taxa_only <- gsub(i,"",newick_string_taxa_only);
newick_string_taxa_only <- gsub(":","",newick_string_taxa_only);
newick_string_taxa_only <- gsub("-","",newick_string_taxa_only);
newick_string_taxa_only <- gsub("\\.","",newick_string_taxa_only);
for (nn in 1:notu) {
newick_string_taxa_only <- gsub(otu_names_nex[nn],as.character(nn),newick_string_taxa_only);
newick_string_taxa_raw <- gsub(otu_names_nex[nn],as.character(nn),newick_string_taxa_raw);
}
newick_string_taxa_only_atomized <- strsplit(newick_string_taxa_only,"")[[1]];
nstoa <- length(newick_string_taxa_only_atomized);
if (newick_string_taxa_only_atomized[nstoa]!=";") {
newick_string_taxa_only_atomized <- c(newick_string_taxa_only_atomized,";");
nstoa <- length(newick_string_taxa_only_atomized);
}
newick_string_taxa_only <- paste(newick_string_taxa_only_atomized,collapse="");
# locate sampled ancestors (numbers after ")") & rewrite them as daughters
ancestral <- find_newick_ancestors(newick_string_ancestored=newick_string_taxa_only);
names(ancestral) <- otu_names_nex;
newick_string_taxa_only_raw <- newick_string_taxa_only;
newick_string_taxa_only <- fix_newick_ancestors(newick_string_taxa_only);
newick_string_taxa_only_atomized <- strsplit(newick_string_taxa_only,"")[[1]];
nstoa <- length(newick_string_taxa_only_atomized);
if (newick_string_taxa_only_atomized[nstoa-1]!=")") {
newick_string_taxa_only_atomized[nstoa] <- ")";
newick_string_taxa_only_atomized <- c("(",newick_string_taxa_only_atomized,";");
newick_string_taxa_only <- paste(newick_string_taxa_only_atomized,collapse="");
}
vector_tree <- read_newick_string(newick_string_taxa_only);
mat_tree <- transform_vector_tree_to_matrix_tree(vector_tree);
# node (clade) branch durations follow "):"
newick_string_taxa_raw <- strsplit(newick_string_taxa_raw,split="")[[1]];
clade_ends <- (1:length(newick_string_taxa_raw))[newick_string_taxa_raw %in% ")"];
colons <- (1:length(newick_string_taxa_raw))[newick_string_taxa_raw %in% ":"];
names(clade_ends) <- length(clade_ends):1;
clade_colons <- 1+clade_ends[(clade_ends+1) %in% colons];
for (cc in 1:length(clade_colons)) {
n_node <- notu+as.numeric(names(clade_colons)[cc]);
dd <- clade_colons[cc]+1;
b_d <- newick_string_taxa_raw[dd];
dd <- dd+1;
while (newick_string_taxa_raw[dd] %in% c(".",0:9)) {
b_d <- paste(b_d,newick_string_taxa_raw[dd],sep="");
dd <- dd+1;
}
branch_durations[n_node] <- as.numeric(b_d);
}
nNodes <- length(clade_ends);
newick_string_taxa_raw <- paste(newick_string_taxa_raw,collapse="");
# zero-padded node labels keep alphanumeric ordering
if (nNodes<10) {
node_names <- paste("node_",1:nNodes,sep="")
} else if (nNodes<100) {
node_names <- c(paste("node_0",1:9,sep=""),
paste("node_",10:nNodes,sep=""));
} else {
node_names <- c(paste("node_00",1:9,sep=""),
paste("node_0",10:99,sep=""),
paste("node_",100:nNodes,sep=""));
}
names(branch_durations) <- c(otu_names_nex,node_names);
#vector_tree_raw <- read_newick_string(newick_string_taxa_only_raw);
#vector_tree <- read_newick_string(newick_string_taxa_only);
molecularize <- strsplit(newick_string_taxa_only,split="")[[1]];
venn_tree_newick <- transform_newick_string_to_venn_tree(newick_string = newick_string_taxa_only);
# have: newick_string_taxa_only,newick_string_taxa_only_raw,vector_tree,ancestral,branch_durations);
# need: clade_posteriors, prob_ancestor,hpd;
# use mat_tree and newick_string_taxa_only to figure out which node is what number
# mark the metadata fields with unique sentinel characters
newick_rem_info <- gsub("sampled_ancestor=","•",newick_string_full);
if (gsub("age_95%_HPD=","§",newick_rem_info)!=newick_rem_info) {
newick_rem_info <- gsub("age_95%_HPD=","§",newick_rem_info);
} else {
newick_rem_info <- gsub("brlen_95%_HPD=","§",newick_rem_info);
}
newick_rem_info <- gsub("posterior=","¶",newick_rem_info);
hpd <- data.frame(lb=as.numeric(rep(0,notu+nNodes)),ub=as.numeric(rep(0,notu+nNodes)));
molecularized <- strsplit(newick_rem_info,"")[[1]];
molecules <- length(molecularized);
panc_boundaries <- (1:molecules)[molecularized=="•"];
post_boundaries <- (1:molecules)[molecularized=="¶"];
hpd_boundaries <- (1:molecules)[molecularized=="§"];
# locate every taxon name & every clade inside the annotated string
taxon_boundaries <- array(0,dim=c(notu,2));
for (nn in 1:notu) {
taxon_dummy <- gsub(otu_names_nex[nn],"£",newick_rem_info);
taxon_boundaries[nn,1] <- (1:length(strsplit(taxon_dummy,"")[[1]]))[strsplit(taxon_dummy,"")[[1]]=="£"];
taxon_boundaries[nn,2] <- taxon_boundaries[nn,1]+length(strsplit(otu_names_nex[nn],"")[[1]])-1;
}
clade_boundaries <- accersi_clade_boundaries_from_newick_string(newick_rem_info);
colnames(taxon_boundaries) <- colnames(clade_boundaries);
tu_boundaries <- rbind(taxon_boundaries,clade_boundaries);
rownames(tu_boundaries) <- all_names <- c(otu_names_nex,node_names);
tu_boundaries <- tu_boundaries[order(tu_boundaries$rp),];
# HPD intervals sit inside {lb,ub} after each "§"; attribute each to the
#	taxon/clade whose right boundary precedes it
brackets_l <- (1:molecules)[molecularized=="{"];
brackets_r <- (1:molecules)[molecularized=="}"];
for (hp in 1:length(hpd_boundaries)) {
tx <- sum(tu_boundaries$rp<hpd_boundaries[hp]);
txn <- match(rownames(tu_boundaries)[tx],all_names);
i <- brackets_l[1+sum(hpd_boundaries[hp]>brackets_l)]+1;
j <- brackets_r[1+sum(hpd_boundaries[hp]>brackets_r)]-1;
hpd[txn,] <- as.numeric(strsplit(paste(molecularized[i:j],collapse=""),",")[[1]]);
}
rownames(hpd) <- all_names;
# a sampled ancestor inherits the HPD of the node it sits on
hpd[vector_tree[(1:notu)[ancestral==1]],] <- hpd[(1:notu)[ancestral==1],];
rownames(taxon_boundaries) <- otu_names_nex;
taxon_boundaries <- taxon_boundaries[order(taxon_boundaries[,1]),];
brackets_l <- (1:molecules)[molecularized=="["];
brackets_r <- (1:molecules)[molecularized=="]"];
# probability that each otu is a sampled ancestor (digits after each "•")
prob_ancestor <- array(0,dim=notu);
pb <- 0;
while (pb < length(panc_boundaries)) {
# BUG FIX: pb was never incremented, so the loop never terminated whenever
#	"sampled_ancestor=" annotations were present (cf. the commented-out for
#	loop below and the 'ab' loop pattern in find_newick_ancestors).
pb <- pb+1;
#for (pb in 1:length(panc_boundaries)) {
tx <- sum(taxon_boundaries[,1]<panc_boundaries[pb]);
txn <- match(rownames(taxon_boundaries)[tx],otu_names_nex);
i <- panc_boundaries[pb]+1;
pranc <- c();
while (!molecularized[i] %in% c(",","]")) {
pranc <- paste(pranc,molecularized[i],sep="");
i <- i+1;
}
prob_ancestor[txn] <- as.numeric(pranc);
}
names(prob_ancestor) <- otu_names_nex;
# clade posterior probabilities (digits after each "¶")
clade_posteriors <- array(0,dim=nNodes);
rownames(clade_boundaries) <- names(clade_posteriors) <- node_names;
clade_boundaries <- clade_boundaries[order(clade_boundaries$rp),];
for (pp in 1:length(post_boundaries)) {
cl <- sum(clade_boundaries$rp<post_boundaries[pp]);
cld <- match(rownames(clade_boundaries)[cl],node_names);
i <- post_boundaries[pp]+1;
postp <- c();
while (!molecularized[i] %in% c(",","]")) {
postp <- paste(postp,molecularized[i],sep="");
i <- i+1;
}
clade_posteriors[cld] <- as.numeric(postp);
# molecularized[i:i+-1:10]
}
#sum(strsplit(newick_rem_info,"")[[1]]=="•")
#sum(strsplit(newick_rem_info,"")[[1]]=="§")
#sum(strsplit(newick_rem_info,"")[[1]]=="¶")
output <- list(newick_string_taxa_only,newick_string_taxa_only_raw,vector_tree,clade_posteriors,ancestral,prob_ancestor,hpd,branch_durations);
names(output) <- c("newick_modified","newick","vector_tree","clade_posteriors","ancestral","prob_ancestor","hpd","branch_durations");
return(output);
}
#### MODIFY TREES STORED IN MEMORY ####
transform_matrix_tree_to_vector_tree <- function (matrix_tree) {
  # Convert a node-by-immediate-descendants matrix tree into an
  # ancestor-pointer ("vector") tree: element i holds the htu number of
  # taxon/node i's ancestor, with -1 marking the root (the first htu).
  #
  # matrix_tree: one row per node; entries are the otu/htu numbers of that
  #   node's immediate descendants (0-padded rows are tolerated: R silently
  #   ignores zero indices on assignment).
  # returns: numeric vector of length max(matrix_tree).
  n_nodes <- nrow(matrix_tree);
  total_tus <- max(matrix_tree);
  n_otus <- total_tus - n_nodes;
  ancestor <- numeric(total_tus);
  ancestor[n_otus + 1] <- -1;  # the first htu is the root: no ancestor
  for (nd in seq(n_nodes, 1)) {
    # everything listed on row nd descends from htu (nd + n_otus)
    ancestor[matrix_tree[nd, ]] <- nd + n_otus;
  }
  return(ancestor);
}
transform_matrix_tree_to_venn_tree <- function(matrix_tree) {
# Convert a node-by-immediate-descendants matrix tree into a "venn" tree:
# an nNodes x notu matrix in which row nd lists ALL otus ultimately
# contained in clade nd (its total progeny), not just immediate
# descendants.  Works from the highest-numbered node back toward the root,
# so a daughter clade's row is always fully expanded before its htu number
# is replaced in the ancestor's row.
nNodes <- nrow(matrix_tree);
notu <- max(matrix_tree)-nNodes;
venn_tree <- array(0,dim=c(nNodes,notu));
venn_tree[,1:ncol(matrix_tree)] <- matrix_tree;
#for (nd in nNodes:16) {
nd <- nNodes;
while (nd > 0) {
# any entry > notu is a daughter clade's htu number still to be expanded
if (max(venn_tree[nd,])>notu) {
daughter_clades <- venn_tree[nd,venn_tree[nd,]>notu];
daughter_nodes <- daughter_clades-notu;
dclades <- length(daughter_clades);
for (dc in dclades:1) {
# f1c: column of row nd currently holding the daughter clade's htu
f1c <- match(daughter_clades[dc],venn_tree[nd,]);
# daughter_div: number of otus in the (already expanded) daughter clade
daughter_div <- sum(venn_tree[daughter_nodes[dc],]>0);
replace_cells <- f1c:(f1c+daughter_div-1);
grandkids <- venn_tree[daughter_nodes[dc],venn_tree[daughter_nodes[dc],]>0];
# entries to the right of f1c must be shifted over to make room for
# the daughter clade's otus before they are written in
displaced_cuz <- venn_tree[nd,(1+f1c):notu][venn_tree[nd,(1+f1c):notu]>0]
if (length(displaced_cuz)>0) {
displaced_cells <- (f1c+daughter_div-1)+1:length(displaced_cuz);
venn_tree[nd,displaced_cells] <- displaced_cuz;
}
venn_tree[nd,replace_cells] <- grandkids;
#venn_tree[,1:6]
#venn_tree[nd,]f1c:notu] <- c(venn_tree[daughter_nodes[dc],1:daughter_div],venn_tree[nd,(f1c+1):(notu+1-daughter_div)]);
}
}
nd <- nd-1;
}
return(venn_tree);
}
# routine to extract vector tree from matrix giving total progeny of a node
transform_venn_tree_to_vector_tree <- function (venn_tree) {
# Rebuild an ancestor-pointer ("vector") tree from a venn (total-progeny)
# tree.  An otu's ancestor is the least inclusive clade containing it,
# i.e. the highest-numbered venn row in which it occurs; a node's ancestor
# is the highest-numbered EARLIER row containing that node's first otu.
# Assumes row 1 is the root clade (contains every otu) and that each row's
# ancestor appears among the rows above it.
n_Nodes <- nrow(venn_tree);
notus <- ncol(venn_tree);
max_otus <- max(venn_tree);
base <- max_otus+1
new_vector_tree <- vector(length=(max_otus+n_Nodes));
otus <- sort(as.integer(venn_tree[1,]));
for (s in 1:notus) {
spc <- otus[s];
# smallest clade containing spc = last venn row in which spc occurs
new_vector_tree[spc] <- max_otus+sort(which(venn_tree==spc,arr.ind=TRUE)[,1],decreasing=TRUE)[1]
}
new_vector_tree[base] <- -1   # row 1 is the root clade
# NOTE(review): node 2's ancestor is hard-wired to the root -- confirm
# this always holds for the venn trees fed in here.
new_vector_tree[base+1] <- base
for (n in 3:n_Nodes) {
htu <- max_otus+n
lead <- venn_tree[n,1]
# ancestor = least inclusive earlier clade also containing this node's
# first otu
new_vector_tree[htu] <- max_otus+sort(which(venn_tree[1:(n-1),]==lead,arr.ind=TRUE)[,1],decreasing=TRUE)[1]
}
return(new_vector_tree)
}
transform_venn_tree_to_matrix_tree <- function(venn_tree) {
  # Convenience wrapper: convert a venn (total-progeny) tree to a
  # node-by-descendants matrix tree, going by way of the vector-tree form.
  vector_form <- transform_venn_tree_to_vector_tree(venn_tree);
  return(transform_vector_tree_to_matrix_tree(vector_form));
}
transform_vector_tree_to_matrix_tree <- function(vector_tree) {
  # Convert an ancestor-pointer ("vector") tree into a matrix tree: one row
  # per node, listing that node's immediate descendants, zero-padded out to
  # the width of the largest observed polytomy.
  #
  # vector_tree: element i = htu number of i's ancestor; -1 marks the root.
  # returns: Nnodes x maxtomy numeric matrix of descendant otu/htu numbers.
  ancestor_htus <- sort(unique(vector_tree[vector_tree>0]))
  n_nodes <- length(ancestor_htus)
  # widest row needed = maximum count of any single ancestor value
  maxtomy <- max((hist(vector_tree[vector_tree>1],breaks=((min(vector_tree[vector_tree>1])-1):max(vector_tree[vector_tree>1])),plot=FALSE)$counts))
  fill <- numeric(n_nodes)   # running descendant count per node row
  matrix_tree <- matrix(0,n_nodes,maxtomy)
  for (tu in seq_along(vector_tree)) {
    row_for_tu <- match(vector_tree[tu],ancestor_htus)
    if (!is.na(row_for_tu)) {   # root's -1 (and zeros) map to no row
      fill[row_for_tu] <- fill[row_for_tu]+1
      matrix_tree[row_for_tu,fill[row_for_tu]] <- tu
    }
  }
  return(matrix_tree)
}
transform_vector_tree_to_venn_tree <- function(vector_tree) {
# Convert an ancestor-pointer ("vector") tree into a venn (total-progeny)
# tree: one row per htu (root first), listing every otu in that clade.
# The -1 entry marks the root htu; otus are numbered 1..(base-1).
ohtu <- length(vector_tree);
base <- match(-1,vector_tree);   # htu number of the root
otu <- base-1
htu <- ohtu-otu
venn_tree <- matrix(0,ohtu,otu)
for (i in 1:otu) venn_tree[base,i] <- i   # root clade contains every otu
node_rich <- vector(length=ohtu)
# count each node's otu children, then roll the counts up toward the root
# (descending nd so descendants are totalled before their ancestors)
for (sp in otu:1) if (vector_tree[sp]!=0) node_rich[vector_tree[sp]] <- node_rich[vector_tree[sp]]+1
for (nd in ohtu:(base+1)) if (vector_tree[nd]>0) node_rich[vector_tree[nd]] <- node_rich[vector_tree[nd]]+node_rich[nd]
node_div <- vector(length=ohtu)
# seed each node's row with its immediate otu children...
for (sp in 1:otu) {
node_div[vector_tree[sp]] <- node_div[vector_tree[sp]]+1
venn_tree[vector_tree[sp],node_div[vector_tree[sp]]] <- sp
}
# ...then pass each node's accumulated otus up to its ancestor, working
# from the youngest nodes back so every clade ends up complete
for (nd in ohtu:(base+1)) {
anc <- vector_tree[nd]
for (i in 1:node_div[nd]) {
node_div[anc] <- node_div[anc]+1
venn_tree[anc,node_div[anc]] <- venn_tree[nd,i]
}
}
#venn_tree[base:ohtu,1:15]
# rows 1..otu of the working matrix are unused; return only the htu rows
return(venn_tree[base:ohtu,])
}
create_phylo_class_from_nexus_tree <- function(vector_tree,tip.label) {
  # Wrap an ancestor-pointer ("vector") tree as an ape-style "phylo"
  # object: an edge matrix of ancestor->descendant pairs, the tip labels,
  # and the node count.  The root (the -1 entry) gets no edge.
  #
  # vector_tree: element i = htu number of i's ancestor; -1 marks the root
  # tip.label: character vector of tip names, in otu-number order
  # returns: list with elements edge, tip.label, Nnode; class "phylo"
  first_htu <- min(vector_tree[vector_tree > 0])
  n_nodes <- 1 + (max(vector_tree) - first_htu)
  n_tips <- first_htu - 1
  edge_matrix <- matrix(0, n_tips + n_nodes - 1, 2)
  edge_row <- 0
  for (tu in seq_along(vector_tree)) {
    if (vector_tree[tu] != -1) {
      edge_row <- edge_row + 1
      edge_matrix[edge_row, ] <- c(vector_tree[tu], tu)  # ancestor -> descendant
    }
  }
  output_tree <- list(edge = edge_matrix, tip.label = tip.label, Nnode = n_nodes)
  class(output_tree) <- "phylo"
  return(output_tree)
}
create_phylo_class_from_nexus_tree_file <- function(nexustreefile,tip.label) {
  # Read an ancestor-pointer tree from a tree file and wrap it as an
  # ape-style "phylo" object.
  #
  # nexustreefile: path to the tree file (parsed by
  #   read_newick_tree_from_file, defined elsewhere in this file)
  # tip.label: character vector of tip names, in otu-number order
  # returns: list with elements edge, tip.label, Nnode; class "phylo"
  #
  # This previously duplicated the body of create_phylo_class_from_nexus_tree
  # line-for-line; it now delegates so the conversion logic lives in one place.
  vector_tree <- read_newick_tree_from_file(nexustreefile)
  return(create_phylo_class_from_nexus_tree(vector_tree, tip.label))
}
transmogrify_additive_dependents_to_multistate_old <- function(ind_char,dep_chars,chmatrix,secondary_dependencies,INAP=-22,UNKNOWN=-11,theoretical=T) {
# Recode an additive set of dependent characters (plus the independent
# "key" character that controls them) as a single multistate character,
# and build the corresponding Q (rate) matrix.
#
# ind_char: column number of the independent character in chmatrix
# dep_chars: column numbers of the dependent characters
# chmatrix: notu x nchar character matrix (negative codes = polymorphs)
# secondary_dependencies: for each character, the character it depends on
# INAP / UNKNOWN: numeric stand-ins for "-" (gap) and "?"
# theoretical: if TRUE, then Q matrix & recoding allow for all theoretically
#   possible state combinations, not just the observed ones
# returns: list(unique_combinations, Q, new_character)
#
# NOTE(review): relies on helpers defined elsewhere in this file/project
# (count_states, rowMaxs/rowMins, unravel_polymorph_badass,
# find_independent_character, pairwise_differences_discrete,
# match_vector_to_matrix_row, row.match).
# unknown_inap_sep: code unknowns separately; this is a bad idea that I regret and that we'll cut
notu <- nrow(chmatrix);
# --- phase 1: collect observed state combinations, drop unusable rows ---
combos <- chmatrix[,c(ind_char,dep_chars)];
rstates <- count_states(combos,UNKNOWN,INAP)
combos <- combos[!(rowMaxs(combos)==UNKNOWN & rowMins(combos)==UNKNOWN),];
combos <- combos[combos[,2]!=INAP,];
# get rid of missing examples
combos <- combos[!(1:nrow(combos)) %in% unique(which(combos==UNKNOWN,arr.ind = T)[,1]),];
# get secondary dependents
secondaries <- unique(which(combos==INAP,arr.ind = T)[,2]);
semi_indies <- secondary_dependencies[secondary_dependencies!=ind_char];
nchars <- ncol(combos);
dchars <- length(dep_chars);
missing_combos <- unique(combos)[sort(unique(which(unique(combos)==UNKNOWN,arr.ind = T)[,1])),];
nstates <- count_states(combos);
nstates[secondaries] <- nstates[secondaries]+1;
#combos <- combos[order(combos[,1],combos[,2],combos[,3]),];
for (cn in ncol(combos):1) combos <- combos[order(combos[,cn]),];
all_obs_complex_combos <- unique(combos);
all_obs_complex_combos <- all_obs_complex_combos[all_obs_complex_combos[,1]>=0,];
ind_states <- sort(unique(chmatrix[,ind_char][chmatrix[,ind_char]>=0]))
key_states <- unique(all_obs_complex_combos[,1]);
# "wrong" states of the independent character are those never seen with
# any dependent-state combination
wrong_states <- ind_states[!ind_states %in% key_states];
# --- phase 2: enumerate the combination space (observed or theoretical) ---
if (!theoretical) {
all_poss_combos <- unique(combos);
for (cu in ncol(all_poss_combos):1) all_poss_combos <- all_poss_combos[order(abs(all_poss_combos[,cu])),];
all_poss_combos <- all_poss_combos[(1:nrow(all_poss_combos))[!(1:nrow(all_poss_combos)) %in% which(all_poss_combos==UNKNOWN,arr.ind=T)[,1]],];
} else {
#	unq_combos <- array(0,dim=c(prod(nstates),length(nstates)));
#	unq_combos[,1] <- unique(unique(combos)[,1]);
# make sure that secondary dependents get inapplicable in the combos
# should the Q-matrix for the "wrong" independent character reflect this?
#which(unique(combos)==INAP,arr.ind=T);
#	for (ds in 1:dchars)	{
# build the cross-product of per-character state sets, last character first
dc <- nchars+1;
while (dc >1)	{
dc <- dc-1;
char_states <- sort(unique(combos[,dc]));
char_states <- char_states[!char_states %in% c(UNKNOWN,INAP)];
cs <- 1;
# unravel any polymorph codes (negative values) into their component states
while (char_states[cs]<0)	{
char_states <- c(char_states,unravel_polymorph_badass(char_states[cs]))
char_states <- unique(sort(char_states[2:length(char_states)]));
}
#		if (dc %in% secondaries) unq_combos[unq_combos[,dc]==(nstates[dc]-1),dc] <- INAP;
if (dc %in% secondaries) char_states <- c(char_states,INAP);
nstates_ch <- length(char_states);
if (dc==nchars)	{
all_poss_combos <- array(char_states,dim=c(nstates_ch,1));
} else	{
cs <- 1;
existing_combos <- nrow(all_poss_combos)
added_char_states <- rep(char_states[cs],existing_combos);
unq_combos_orig <- all_poss_combos;
while (cs<nstates_ch)	{
cs <- cs+1;
all_poss_combos <- rbind(all_poss_combos,unq_combos_orig);
added_char_states <- c(added_char_states,rep(char_states[cs],existing_combos));
}
all_poss_combos <- cbind(added_char_states,all_poss_combos);
}
}
#	while (dc > 1)	{
#		dc <- dc-1;
#		if (dc<nchars)	{
#			this_char_states <- c();
#			for (dss in 1:nstates[dc])	#{
#				this_char_states <- c(this_char_states,rep(dss-1,prod(nstates[(dc+1):nchars])));
#				if (dss==nstates[dc] && (dc %in% secondaries))	{
#					this_char_states <- c(this_char_states,rep(INAP,prod(nstates[(dc+1):dchars])));
#					this_char_states <- c(this_char_states,rep(INAP,nstates[dc-1]));
#					}	else	{
#					this_char_states <- c(this_char_states,rep(dss-1,nstates[dc-1]));
#					}
#				}
#			}	else	{
#			this_char_states <- (1:nstates[dc])-1;
#			}
#		ttl_code <- round(nrow(unq_combos)/length(this_char_states),0);
#		unq_combos[,dc] <- rep(this_char_states,ttl_code);
#		}
}
all_poss_combos <- all_poss_combos[all_poss_combos[,1] %in% key_states,];
# alternative for secondary independents: replace all pairs with -22 & then reduce via unique(all_poss_combos);
# --- phase 3: prune combinations inconsistent with secondary dependencies ---
sc <- 0;
while (sc < length(secondaries))	{
sc <- sc+1;
independents <- 2:ncol(all_poss_combos);
semi_indy <- find_independent_character(dchar=secondaries[sc],independents=2:ncol(all_poss_combos),chmatrix=combos,UNKNOWN,INAP);
this_pair <- unique(combos[,c(semi_indy,secondaries[sc])]);
semi_key_state <- unique(this_pair[this_pair[,2]!=INAP,][,1]);
semi_wrn_state <- unique(this_pair[,1][!this_pair[,1] %in% semi_key_state]);
# get rid of keystate:inapplicable pairs
zz <- (1:nrow(all_poss_combos));
xx <- zz[all_poss_combos[,semi_indy] %in% semi_key_state]
yy <- zz[all_poss_combos[,secondaries[sc]]==INAP];
xx <- xx[xx %in% yy];
all_poss_combos <- all_poss_combos[!zz %in% xx,];
# get rid of nonkeystate:state-pairs
zz <- (1:nrow(all_poss_combos));
xx <- zz[all_poss_combos[,semi_indy] %in% semi_wrn_state]
yy <- zz[all_poss_combos[,secondaries[sc]]!=INAP];
xx <- xx[xx %in% yy];
all_poss_combos <- all_poss_combos[!zz %in% xx,];
}
# prepend the "wrong" independent states (all dependents inapplicable)
wrstates <- length(wrong_states);
null_combos <- cbind(wrong_states,array(INAP,dim=c(wrstates,ncol(combos)-1)));
colnames(null_combos) <- colnames(all_poss_combos)
all_poss_combos <- rbind(null_combos,all_poss_combos);
ucombos <- rcombos <- ttl_states <- nrow(all_poss_combos);
#if (unknown_inap_sep)	{
#	unq_combos <- rbind(unq_combos,missing_combos);
#	ttl_states <- nrow(unq_combos);
#	rcombos <- ttl_states-(wrstates+length(missing_combos));
#	ucombos <- ttl_states-length(missing_combos);
#	} else	{
rcombos <- ttl_states-wrstates;
#	}
# --- phase 4: name each combination with a compact state string ---
state_combos <- rep("",ttl_states);
for (uc in 1:ttl_states)	{
this_combo <- paste(all_poss_combos[uc,],collapse="");
this_combo <- gsub(as.character(INAP),"-",this_combo);
this_combo <- gsub(as.character(UNKNOWN),"?",this_combo);
state_combos[uc] <- this_combo;
}
state_combos <- gsub(as.character(INAP),"",state_combos);
rownames(all_poss_combos) <- state_combos;
colnames(all_poss_combos) <- c(ind_char,dep_chars);
# make sure that secondaries dependencies are weeded out.
# if we see only •2-, then make sure that •20 & •21 are eliminated
# prepare Q-Matrix
# --- phase 5: build the Q matrix from pairwise state-combo distances ---
if (nrow(all_poss_combos)>100)	{
print("Getting basic distances among state combinations")
Q <- pairwise_differences_discrete(all_poss_combos,UNKNOWN=UNKNOWN,INAP=INAP,progress_bar=T);
} else	{
Q <- pairwise_differences_discrete(all_poss_combos,UNKNOWN=UNKNOWN,INAP=INAP,progress_bar=F);
}
# if there are secondaries, the figure out how to weight them here!!!
colnames(Q) <- rownames(Q) <- state_combos;
# NOTE(review): indexing a single row here -- if several "wrong" states
# exist, Q[1:wrstates,...] may have been intended; confirm.
Q[wrstates,(wrstates+1):ttl_states] <- 1/rcombos;
si <- 0;
while (si < length(semi_indies))	{
si <- si+1;
sc <- match(semi_indies[si],c(ind_char,dep_chars));
cs <- (1:length(c(ind_char,dep_chars)))[secondary_dependencies==semi_indies[si]];
relv_combos <- unique(all_poss_combos[,c(sc,cs)]);
relv_combos <- relv_combos[(1:nrow(relv_combos))[!(1:nrow(relv_combos)) %in% unique(which(relv_combos==INAP,arr.ind=T)[,1])],];
relv_combos_all <- all_poss_combos[,c(sc,cs)];
if (length(cs)>1)	{
key_combos <- which(all_poss_combos[,cs]==INAP,arr.ind=T)[,1]
} else	{
key_combos <- as.numeric(which(all_poss_combos[,cs]==INAP,arr.ind=T))
}
# fix revl_combos to get all of the right matches & not just those for one possibility
key_combos <- key_combos[!key_combos %in% key_combos[all_poss_combos[,sc]==INAP]];
relv_combos_2 <- all_poss_combos[,c(sc,cs)];
relv_combos_2 <- relv_combos_2[!(1:ttl_states) %in% which(relv_combos_2==INAP,arr.ind=T)[,1],];
combos_key <- match(rownames(relv_combos_2),rownames(Q));
#	Q[key_combos,combos_key]==1;
for (i in 1:length(key_combos))
Q[key_combos[i],combos_key][Q[key_combos[i],combos_key]==1] <- 1/nrow(relv_combos);
#	1/nrow(relv_combos)
}
# zero out multi-step transitions, then normalize each row; diagonal = -1
Q[Q>1] <- 0;
for (qq in 1:ttl_states)	{
Q[qq,] <- Q[qq,]/sum(Q[qq,1:ucombos]);
Q[qq,qq] <- -1;
#	Q[qq,qq] <- -sum(Q[qq,1:ucombos])
}
#write.csv(Q,"Q.csv",row.names = T)
# --- phase 6: recode every otu as a single multistate score ---
new_multistate <- vector(length=notu);
for (nn in 1:notu)
new_multistate[nn] <- match_vector_to_matrix_row(test_vector=chmatrix[nn,c(ind_char,dep_chars)],test_matrix=all_poss_combos)-1;
all_states <- c(0:9,letter_states,more_letter_states);
# with >10 combined states, switch from digits to letter symbols
if (nrow(all_poss_combos)>10)
if (is.numeric(new_multistate))
new_multistate[!is.na(new_multistate)] <- all_states[1+new_multistate[!is.na(new_multistate)]]
#cbind(new_multistate,chmatrix[,c(ind_char,dep_chars)])
#if (unknown_inap_sep)	{
#	new_multistate[is.na(new_multistate)] <- UNKNOWN;
#	} else	{
# --- phase 7: otus with no exact match become unknown or polymorphic ---
prob_child <- (1:notu)[is.na(new_multistate)];
combos <- chmatrix[,c(ind_char,dep_chars)];
pc <- 0;
polymorphs <- unique(combos[combos<0])
polymorphs <- polymorphs[!polymorphs %in% c(UNKNOWN,INAP)];
while (pc < length(prob_child))	{
pc <- pc+1;
pcc <- prob_child[pc];
if (combos[pcc,1]==UNKNOWN)	{
new_multistate[pcc] <- UNKNOWN;
} else	{
# make it polymorphic for all possible states
this_combo <- combos[pcc,(0:dchars)+1];
#	doofi <- (1:length(this_combo))[this_combo==UNKNOWN];
#	this if/else probably is unneeded now!
if (sum(this_combo %in% polymorphs)==0)	{
set_chars <- (1:length(this_combo))[this_combo!=UNKNOWN];
#		set_chars <- set_chars[set_chars>1];
set_states <- this_combo[set_chars];
poss_combos <- all_poss_combos[all_poss_combos[,1] %in% key_states,];
ss <- 0;
# keep only combinations consistent with the scored characters
while (ss < length(set_chars))	{
ss <- ss+1;
poss_combos <- subset(poss_combos,poss_combos[,set_chars[ss]]==set_states[ss]);
}
polymorph <- 0;
if (nrow(all_poss_combos)>10)	polymorph <- c();
for (cp in 1:nrow(poss_combos))	{
this_state <- (row.match(poss_combos[cp,],as.data.frame(all_poss_combos))-1);
if (nrow(all_poss_combos)>10)	{
polymorph <- c(polymorph,all_states[this_state]);
#	base64encode(this_state);
#	for (i in 1:5)	print(base64encode(10^(i-1)));
} else	{
polymorph <- polymorph-((10^(cp-1))*this_state);
}
}
} else	{
set_chars <- (1:nchars)[!this_combo %in% c(UNKNOWN,polymorphs)];	unset_chars <- (1:nchars)[this_combo %in% c(UNKNOWN,polymorphs)];
un <- 0;
missing <- unset_chars[this_combo[unset_chars]==UNKNOWN];
polys <- unset_chars[!this_combo[unset_chars] %in% UNKNOWN];
while (un < length(missing))	{
un <- un+1;
mchar <- missing[un];
rstates <- (1:nstates[mchar])-1;
this_combo[mchar] <- 0;
for (rs in 1:length(rstates))
# NOTE(review): this_combo[rs] looks like it should be this_combo[mchar]
# (accumulating a polymorph code for the missing character) -- confirm.
this_combo[mchar] <- this_combo[rs]-rstates[rs]*(10^(rs-1));
}
#		set_chars <- set_chars[set_chars>1];
set_states <- this_combo[set_chars];
poss_combos <- all_poss_combos;
#		print(nrow(poss_combos));
# reduce the possible combinations to those consistent
sc <- length(set_chars);
#		for (sc in 1:length(set_chars))	{
while (sc > 1)	{
poss_combos <- subset(poss_combos,poss_combos[,set_chars[sc]]==set_states[sc]);
#			print(nrow(poss_combos));
sc <- sc-1;
}
uc <- 0;
while (uc < length(polys))	{
uc <- uc+1;
poss_combos <- poss_combos[poss_combos[,polys[uc]] %in% unravel_polymorph_badass(this_combo[polys[uc]]),];
}
pstates <- match(rownames(poss_combos),rownames(all_poss_combos));
polymorph <- 0;
if (nrow(all_poss_combos)>10)	polymorph <- c();
for (cp in 1:nrow(poss_combos))	{
this_state <- (row.match(poss_combos[cp,],as.data.frame(all_poss_combos))-1);
if (nrow(all_poss_combos)>10)	{
polymorph <- c(polymorph,all_states[this_state]);
#	base64encode(this_state);
#	for (i in 1:5)	print(base64encode(10^(i-1)));
} else	{
polymorph <- polymorph-((10^(cp-1))*this_state);
}
}
}
#	if (nrow(unq_combos)>10)	{
#		if (is.numeric(new_multistate))
#			new_multistate[!is.na(new_multistate)] <- all_states[1+new_multistate[!is.na(new_multistate)]]
new_multistate[pcc] <- paste("(",paste(polymorph,collapse=""),")",sep="");
#		}	else	{
#		new_multistate[pcc] <- polymorph;
#		}
}
}
#	}
#new_multistate[new_multistate==UNKNOWN] <- "?";
#cbind(new_multistate,chmatrix[,c(ind_char,dep_chars)])
output <- list(all_poss_combos,Q,new_multistate);
names(output) <- c("unique_combinations","Q","new_character");
return(output);
}
|
/R/Nexus_File_Routines.r
|
no_license
|
PeterJWagner3/PaleoDB_for_RevBayes_Webinar
|
R
| false
| false
| 107,935
|
r
|
#### SETUP ####
# Latin verbs used in function names throughout this file:
# accersi: fetch/summon
# divido: divide!
# expello: banish
# mundus: clean
# percursant: scour
# revelare: reveal
### updated with routines written for first phylogenetics course at UNL
# largest representable double; used as an effective "infinity"
MAXNO <- 1.797693e+308;
# characters not allowed inside newick taxon names
newick_verbotten <- c(".","?","\"","\'");
# single-character state symbols beyond 0-9: A-Z then a-z, minus the
# easily-misread I and O
letter_states <- LETTERS[!LETTERS %in% c("I","O")];
more_letter_states <- tolower(letter_states);
# pause (seconds) before interactive file-chooser prompts
zzzz <- 0.25;
#### HOUSE-CLEANING ####
clear_matrix_na_with_another_cell_value <- function(data,j, k) {
  # Fill NA entries in column j with the corresponding entries of column k.
  #
  # data: matrix or data frame
  # j: index of the column whose NAs should be filled
  # k: index of the donor column
  # returns: data with data[i,j] <- data[i,k] wherever data[i,j] was NA
  #
  # Vectorized replacement for the original row-by-row loop; unlike
  # `for (i in 1:nrow(data))`, this also behaves correctly when data has
  # zero rows.
  needs_fill <- is.na(data[, j]);
  data[needs_fill, j] <- data[needs_fill, k];
  return(data)
}
count_characters_in_string <- function(string_to_count) {
  # Return the number of characters in (the first element of) a string.
  #
  # string_to_count: character scalar (only the first element of a vector
  #   is counted, matching the original strsplit(...)[[1]] behavior)
  # returns: integer character count (0 for the empty string)
  #
  # Uses the standard-library nchar() instead of splitting the string into
  # single characters and counting them.
  return(nchar(string_to_count[1]));
}
mundify_nexus_text <- function(nexus_line) {
# Clean one line of nexus text: strip or transliterate the mis-encoded
# bytes and accented characters that MacClade/Mesquite exports leave
# behind, so downstream parsing sees plain text.  Substitutions fall into
# three groups: raw byte escapes from legacy (Mac-roman style) encodings,
# typographic quote/dash repairs, and accented-letter transliterations
# (both literal characters and double-encoded forms).  Order matters for
# the multi-byte escape patterns; duplicated substitutions further down
# are harmless (later gsub calls simply find nothing left to replace).
# -- raw byte escapes and multi-byte encoding residue --
nexus_line <- gsub("\xd4","",nexus_line);
nexus_line <- gsub("\xd5","",nexus_line);
nexus_line <- gsub("\x87","a",nexus_line);
nexus_line <- gsub("\xfc\xbe\x8d\xa3\xa4\xbc","n",nexus_line);
nexus_line <- gsub("\xfc\xbe\x98\x93\xa0\xbc","ae",nexus_line);
nexus_line <- gsub("\xfc\xbe\x99\x83\xa0\xbc","c",nexus_line);
nexus_line <- gsub("\xfc\xbe\x98\x96\x8c\xbc","",nexus_line);
nexus_line <- gsub("\xfc\xbe\x8c\x93\xa4\xbc","\'",nexus_line);
nexus_line <- gsub("\xfc\xbe\x98\xb3\xa0\xbc","",nexus_line);
# -- quote normalization --
nexus_line <- gsub("\'\'\'","\'\"",nexus_line);
nexus_line <- gsub("\'\' ","\" ",nexus_line);
nexus_line <- gsub("\xac","",nexus_line);
nexus_line <- gsub("\xa0","†",nexus_line);
nexus_line <- gsub("\x80","",nexus_line);
nexus_line <- gsub("\xd1","",nexus_line);
nexus_line <- gsub("\xc9","?",nexus_line);
nexus_line <- gsub("\xe1","a",nexus_line);
nexus_line <- gsub("\xe9","e",nexus_line);
nexus_line <- gsub("\x8e","e",nexus_line);
nexus_line <- gsub("\x8f","e",nexus_line);
nexus_line <- gsub("\x92","i",nexus_line);
nexus_line <- gsub("\xbf","o",nexus_line);
nexus_line <- gsub("\x9a","o",nexus_line);
nexus_line <- gsub("\x97","o",nexus_line);
nexus_line <- gsub("\xf6","ö",nexus_line);
nexus_line <- gsub("\xfc","ue",nexus_line);
nexus_line <- gsub("\xb0","˚",nexus_line);
nexus_line <- gsub("\xba","˚",nexus_line);
nexus_line <- gsub("\xfc\xbe\x8e\x93\xa4\xbc","o",nexus_line);
nexus_line <- gsub("\x9f","ue",nexus_line);
nexus_line <- gsub("\xd0","-",nexus_line);
nexus_line <- gsub("\xd2","\"",nexus_line);
nexus_line <- gsub("\xd3","\"",nexus_line);
nexus_line <- gsub("\xfc\xbe\x8d\x86\x90\xbc","\'",nexus_line);
nexus_line <- gsub("\xfc\xbe\x8d\x86\x8c\xbc","ƒ",nexus_line);
nexus_line <- gsub("\xdf","ß",nexus_line);
nexus_line <- gsub("\xa7","ß",nexus_line);
nexus_line <- gsub("\xfc\xbe\x8c\xa6\x88\xbc","≤",nexus_line);
nexus_line <- gsub("\xb3","≥",nexus_line);
nexus_line <- gsub("\xfc\xbe\x8d\x96\x8c\xbc","≈",nexus_line);
nexus_line <- gsub("\xfc\xbe\x98\xa6\x98\xbc","˚",nexus_line);
nexus_line <- gsub("\xb6","∂",nexus_line);
nexus_line <- gsub("\xc6","∆",nexus_line);
nexus_line <- gsub("\xfc\xbe\x8d\xb6\x88\xbc","∑",nexus_line);
nexus_line <- gsub("\xfc\xbe\x99\x86\x88\xbc","Ω",nexus_line);
nexus_line <- gsub("\xa5"," ",nexus_line);
# -- literal accented-letter transliterations --
nexus_line <- gsub("Á","A",nexus_line);
nexus_line <- gsub("Ä","A",nexus_line);
nexus_line <- gsub("ä","a",nexus_line);
nexus_line <- gsub("á","a",nexus_line);
nexus_line <- gsub("å","a",nexus_line);
nexus_line <- gsub("Ç","C",nexus_line);
nexus_line <- gsub("ç","c",nexus_line);
nexus_line <- gsub("č","c",nexus_line);
nexus_line <- gsub("é","e",nexus_line);
nexus_line <- gsub("è","e",nexus_line);
nexus_line <- gsub("ê","e",nexus_line);
nexus_line <- gsub("ė","e",nexus_line);
nexus_line <- gsub("î","i",nexus_line);
nexus_line <- gsub("Î","I",nexus_line);
nexus_line <- gsub("ñ","n",nexus_line);
nexus_line <- gsub("Ö","O",nexus_line);
nexus_line <- gsub("Ø","O",nexus_line);
nexus_line <- gsub("ø","o",nexus_line);
nexus_line <- gsub("ó","o",nexus_line);
nexus_line <- gsub("ö","o",nexus_line);
nexus_line <- gsub("õ","o",nexus_line);
nexus_line <- gsub("Š","S",nexus_line);
nexus_line <- gsub("š","s",nexus_line);
nexus_line <- gsub("ů","u",nexus_line);
nexus_line <- gsub("ü","u",nexus_line);
nexus_line <- gsub("’","’",nexus_line);
nexus_line <- gsub("\x88","a",nexus_line);
# -- double-encoded (UTF-8 read as Latin-1) transliterations --
nexus_line <- gsub("Ä","A",nexus_line);
nexus_line <- gsub("Á","A",nexus_line);
nexus_line <- gsub("á","a",nexus_line);
nexus_line <- gsub("ä","a",nexus_line);
nexus_line <- gsub("Ã¥","a",nexus_line);
nexus_line <- gsub("ç","c",nexus_line);
nexus_line <- gsub("é","e",nexus_line);
nexus_line <- gsub("è","e",nexus_line);
nexus_line <- gsub("ñ","n",nexus_line);
nexus_line <- gsub("Ö","O",nexus_line);
nexus_line <- gsub("ø","o",nexus_line);
nexus_line <- gsub("ö","o",nexus_line);
nexus_line <- gsub("õ","o",nexus_line);
nexus_line <- gsub("ô","o",nexus_line);
nexus_line <- gsub("ü","u",nexus_line);
nexus_line <- gsub("î","i",nexus_line);
nexus_line <- gsub("Š","S",nexus_line);
nexus_line <- gsub("š","s",nexus_line);
nexus_line <- gsub("å","a",nexus_line);
nexus_line <- gsub("ů","u",nexus_line);
nexus_line <- gsub("α","α",nexus_line);
return(nexus_line);
}
# Turn Lophospira sp. or Lophospira sp. A to Lophospira
reduce_genus_sp_to_genus <- function(taxon_name) {
taxon_names <- strsplit(taxon_name," ")[[1]];
indet_species <- c("sp.");
indet_species <- c(indet_species,paste("sp.",LETTERS));
indet_species <- c(indet_species,paste("sp.",letters));
indet_species <- c(indet_species,"spp.");
indet_species <- c(indet_species,paste("spp.",LETTERS));
indet_species <- c(indet_species,paste("spp.",letters));
for (i in 1:100) indet_species <- c(indet_species,paste("sp.",i));
for (i in 1:100) indet_species <- c(indet_species,paste("nov.",i));
for (i in 1:100) indet_species <- c(indet_species,paste("sp. nov.",i));
for (i in 1:100) indet_species <- c(indet_species,paste("indet.",i));
if (sum(taxon_names %in% indet_species)>0)
taxon_names <- taxon_names[(1:length(taxon_names))[!taxon_names %in% indet_species]];
taxon_name <- paste(taxon_names,collapse=" ");
return(taxon_name);
}
accersi_study_name <- function(nexus_file_name) {
  # Derive a human-readable study name of the form "Taxon (Author Year)"
  # from a nexus file name such as "Phacopidae_Snow_2000.nex" (any leading
  # directory path and the ".nex" suffix are stripped first).
  path_parts <- simplify2array(strsplit(nexus_file_name, "/"));
  base_name <- gsub("\\.nex", "", path_parts[length(path_parts)]);
  name_parts <- simplify2array(strsplit(base_name, "_"));
  # figure out where the author citation begins within the underscored parts
  if (sum(name_parts %in% "&") == 1) {
    author_start <- match("&", name_parts) - 1;
  } else if (sum(name_parts %in% c("et", "al")) == 2) {
    author_start <- match("et", name_parts) - 1;
  } else {
    author_start <- length(name_parts) - 2;   # assume "Taxon_Author_Year"
  }
  # NB: when author_start is 1, 1:(author_start-1) is c(1,0), which still
  # selects the first element -- this quirk is deliberately preserved.
  study_group <- name_parts[1:(author_start - 1)];
  citation <- paste("(", paste(name_parts[!name_parts %in% study_group], collapse = " "), ")", sep = "");
  return(paste(study_group, citation));
}
#### ROUTINES TO READ CHARACTER MATRIX INFORMATION IN NEXUS FILE ####
# routine to read a nexus file in Mesquite or MacClade format & return important information
accersi_data_from_nexus_file <- function(nexus_file_name, polymorphs=T, UNKNOWN=-11, INAP=-22, rate_partitions="", trend_partitions="") {
  # Read a nexus file (Mesquite/MacClade format) and return its character
  # matrix plus associated information.
  #
  # nexus_file_name: name of nexus file (e.g., "Phacopidae_Snow_2000.nex")
  # polymorphs: if TRUE, recode e.g. "1,2" as "-21"; otherwise treat as unknown
  # UNKNOWN: numeric value substituting for "?"
  # INAP: numeric value substituting for gap ("-")
  # rate_partitions: name of the CHARPARTITION used to divide characters
  #   into general rate classes
  # trend_partitions: name of the CHARPARTITION used for trend classes
  nexus_lines <- scan(file=nexus_file_name,what=character(),sep="\n");
  return(accersi_data_from_nexus_vector(nexus=nexus_lines,polymorphs=polymorphs,UNKNOWN=UNKNOWN,INAP=INAP,rate_partitions=rate_partitions,trend_partitions=trend_partitions));
}
# routine to prompt the user for a nexus file in Mesquite or MacClade format & return important information
accersi_data_from_chosen_nexus_file <- function(polymorphs=T, UNKNOWN=-11, INAP=-22, rate_partitions="", trend_partitions="") {
  # Interactively prompt the user for a nexus file (Mesquite/MacClade
  # format), then read it and return its character matrix plus associated
  # information.  Same parameters as accersi_data_from_nexus_file, minus
  # the file name, which is picked via file.choose().
  #
  # polymorphs: if TRUE, recode e.g. "1,2" as "-21"; otherwise treat as unknown
  # UNKNOWN / INAP: numeric stand-ins for "?" and the gap ("-")
  # rate_partitions / trend_partitions: CHARPARTITION names for rate and
  #   trend classes
  print("Choose the nexus file that you wish to analyze: ");
  flush.console();
  Sys.sleep(zzzz);   # brief pause so the prompt renders before the dialog
  chosen_file <- file.choose();
  nexus_lines <- scan(file=chosen_file,what=character(),sep="\n");
  return(accersi_data_from_nexus_vector(nexus=nexus_lines,polymorphs=polymorphs,UNKNOWN=UNKNOWN,INAP=INAP,rate_partitions=rate_partitions,trend_partitions=trend_partitions));
}
# routine to read nexus information in Mesquite or MacClade format & return important information
accersi_data_from_nexus_vector <- function(nexus, polymorphs=T, UNKNOWN=-11, INAP=-22, rate_partitions="", trend_partitions="") {
# nexus_file_name: name of nexus file (e.g., "Phacopidae_Snow_2000.nex")
# polymorphs: boolean, if TRUE, then recode "1,2" as "-21"; otherwise, treat as unknown
# UNKNOWN: value substituting for "?"
# INAP: value substituting for gap ("-")
# rate_partitions: nameof CHARPARTITION that you want to use for dividing characters into general rate classes.
ml <- 0;
#i <- 1
for (i in 1:length(nexus)) {
nexus[i] <- mundify_nexus_text(nexus_line = nexus[i]);
# j <- simplify2array(strsplit(nexus[i],split="",fixed=TRUE))[,1];
j <- strsplit(nexus[i],split="",fixed=TRUE)[[1]];
# strsplit(string=nexus[i],pattern="")
if (length(j)>ml) ml <- length(j);
}
ml <- ml+1; # LENGTH OF LONGEST LINE
# file is now a vector of characters. Turn it into a matrix with one char per cell
nexusfile <- matrix("\n",length(nexus),ml)
for (i in 1:length(nexus)) {
j <- strsplit(nexus[i],split="",fixed=TRUE)[[1]];
for (k in 1:length(j)) nexusfile[i,k] <- j[k]
if ((length(j)+2)<ml)
for (k in (length(j)+2):ml) nexusfile[i,k] <- ""
}
top <- match("matrix",tolower(nexus));
if (is.na(top)) top <- match("\tmatrix",tolower(nexus));
if (is.na(top)) {
top <- 0;
ln <- 1; # this is the row with the word "Matrix": character data starts next.
while (top==0) {
em_nexus <- gsub("\t","",nexus[ln]);
nexus_words <- simplify2array(strsplit(em_nexus," ")[[1]]);
if (!is.na(match("matrix",tolower(nexus_words)))) {
top <- ln;
} else ln <- ln+1;
}
}
top <- top+1; # this will give the first row of data
# skip the comment text denoting character numbers (if present)
while(nexusfile[top,1]=="[" || nexusfile[top,1]==" ") top <- top+1;
# get character & state informations
all_states <- c();
missing <- "?";
gap <- "-";
notu <- nchars <- strat <- range <- geog <- 0;
for (i in 2:top) {
while ((nexusfile[i,1]=="[" || nexusfile[i,1]=="\n") && i<top) i <- i+1;
em_nexus <- gsub("\t","",nexus[i]);
em_nexus <- gsub("="," = ",em_nexus);
em_nexus <- gsub(";"," ; ",em_nexus);
nexus_words <- simplify2array(strsplit(em_nexus," ")[[1]]);
nexus_words <- nexus_words[nexus_words!=""];
if (!is.na(match("ntax",tolower(nexus_words))) || !is.na(match("ntaxa",tolower(nexus_words)))) {
j <- 1+match("ntax",tolower(nexus_words));
if (is.na(j)) j <- 1+match("ntaxa",tolower(nexus_words));
while(nexus_words[j]=="=") j <- j+1;
notu <- as.numeric(nexus_words[j]);
}
if (!is.na(match("nchar",tolower(nexus_words))) || !is.na(match("nchars",tolower(nexus_words)))) {
j <- 1+match("nchar",tolower(nexus_words));
if (is.na(j)) j <- 1+match("nchars",tolower(nexus_words));
while(nexus_words[j]=="=") j <- j+1;
nchars <- as.numeric(nexus_words[j]);
}
if (!is.na(match("gap",tolower(nexus_words)))) {
if (nexus_words[match("gap",tolower(nexus_words))+1]=="=") {
j <- 1+match("gap",tolower(nexus_words));
while(nexus_words[j]=="=") j <- j+1;
gap <- nexus_words[j];
}
}
if (!is.na(match("missing",tolower(nexus_words)))) {
if (nexus_words[match("missing",tolower(nexus_words))+1]=="=") {
j <- 1+match("missing",tolower(nexus_words));
while(nexus_words[j]=="=") j <- j+1;
missing <- nexus_words[j];
}
}
if (!is.na(match("symbols",tolower(nexus_words)))) {
j <- match("symbols",tolower(nexus_words))+1;
while (nexus_words[j] %in% c("=","\"")) j <- j+1;
jj <- min(((j+1):length(nexus_words))[tolower(nexus_words)[((j+1):length(nexus_words))] %in% c("missing","gap",";")]-1);
# jj <- j+match(";",nexus_words[(j+1):length(nexus_words)])-1;
all_states <- gsub("\"","",nexus_words[j:jj]);
# if (tolower(all_states) %in% "")
}
if (!is.na(match("fa",tolower(nexus_words))) || !is.na(match("fka",tolower(nexus_words)))) {
nexus_words[tolower(nexus_words)=="fka"] <- "fa";
strat <- as.numeric(nexus_words[match("fa",tolower(nexus_words))-1]);
}
if (!is.na(match("la",tolower(nexus_words))) || !is.na(match("lka",tolower(nexus_words)))) {
nexus_words[tolower(nexus_words)=="lka"] <- "la";
range <- as.numeric(nexus_words[match("la",tolower(nexus_words))-1]);
}
if (!is.na(match("geog",tolower(nexus_words)))) {
geog <- as.numeric(nexus_words[match("geog",tolower(nexus_words))-1]);
} else if (!is.na(match("geography",tolower(nexus_words)))) {
geog <- as.numeric(nexus_words[match("geography",tolower(nexus_words))-1]);
}
}
if (is.null(all_states)) all_states <- 0:9;
extra <- 0;
if (strat>0) {
if (range>0) {
nchars <- nchars-2
extra <- 2
} else {
nchars <- nchars-1
extra <- 1
}
strat_ranges <- matrix(0,notu,2)
}
if (geog>0) {
nchars <- nchars-1
geography <- vector(length=notu)
extra <- extra+1
}
taxa <- vector(length=notu);
nstates <- array(0,dim=nchars);
chmatrix <- matrix(0,notu,nchars);
tx <- 1;
# look for outgroup designation
exclude <- outgroup <- -1;
if (!is.na(match("BEGIN SETS;",nexus))) {
tx_pt <- match("BEGIN SETS;",nexus); # look at taxon partitions
look_for_outgroup <- TRUE;
while (look_for_outgroup) {
tx_pt <- 1+tx_pt;
yyy <- paste(nexusfile[tx_pt,], collapse = "");
yyy <- gsub("-"," - ",yyy);
yyy <- gsub("- "," - ",yyy);
yyy <- gsub(" - "," - ",yyy);
yyy <- gsub(";","",yyy);
yyy <- gsub(","," ,",yyy);
yyy <- gsub("\n","",yyy);
yyy <- gsub("\r","",yyy);
yyy <- gsub("\t","",yyy);
xxx <- tolower(strsplit(yyy," ")[[1]]);
xxx <- xxx[xxx!=""];
if (!is.na(match("outgroup",tolower(xxx)))) {
ttl_ln <- length(xxx);
jj <- 1+match("outgroup",tolower(xxx));
while (xxx[jj]==":" || xxx[jj]=="=") jj <- jj+1;
outgroup <- c();
while (xxx[jj]!="," && jj<=ttl_ln) {
if (xxx[jj]=="-") {
jj <- jj+1;
outgroup <- c(outgroup,((as.numeric(outgroup[length(outgroup)])+1):as.numeric(xxx[jj])));
} else {
outgroup <- c(outgroup,xxx[jj]);
}
jj <- jj+1;
}
look_for_outgroup <- FALSE;
} else {
if (tolower(nexus[tx_pt])=="end;" || tolower(nexus[tx_pt])=="\tend;")
look_for_outgroup <- FALSE;
}
}
# look for characters to exclude
tx_pt <- match("BEGIN SETS;",nexus);
xxx <- strsplit(paste(nexusfile[tx_pt-1,],collapse = "")," ");
while(tolower(xxx[1])!="end") {
tx_pt <- tx_pt+1;
yyy <- paste(nexusfile[tx_pt,], collapse = "");
yyy <- gsub("- "," - ",yyy);
yyy <- gsub(";","",yyy);
yyy <- gsub(","," ,",yyy);
yyy <- gsub("\n","",yyy);
yyy <- gsub("\r","",yyy);
yyy <- gsub("\t","",yyy);
xxx <- tolower(strsplit(yyy," ")[[1]]);
xxx <- xxx[xxx!=""];
if (length(xxx)==0 || is.na(xxx[1])) xxx <- "";
# if (!is.na(xxx) && !is.null(xxx) && xxx!="") {
if (xxx[1]=="charpartition") {
if (xxx[1]=="charpartition" && !is.na(match("exclude",tolower(xxx)))) {
ttl_ln <- length(xxx);
jj <- 1+match("exclude",tolower(xxx));
while (xxx[jj]==":") jj <- jj+1;
exclude <- c();
while (xxx[jj]!="," && jj<ttl_ln) {
if (xxx[jj]=="-") {
jj <- jj+1;
exclude <- c(exclude,((as.numeric(exclude[length(exclude)])+1):as.numeric(xxx[jj])));
} else {
exclude <- c(exclude,as.numeric(xxx[jj]));
}
jj <- jj+1;
}
}
}
# xxx[1];
# tx_pt;
}
}
if (rate_partitions!="") {
ln <- match("BEGIN SETS;",nexus);
got_splits <- F;
while (!got_splits) {
ln <- ln+1;
breakup_this_line <- strsplit(nexus[ln],split=" ")[[1]];
if (!is.na(match(rate_partitions,breakup_this_line))) {
nexus[ln] <- gsub("-"," - ",nexus[ln]); # Mesquite often puts dashes immediately after character or taxon numbers.....
nexus[ln] <- gsub(" -"," -",nexus[ln]);
nexus[ln] <- gsub("- ","- ",nexus[ln]);
breakup_this_line <- strsplit(nexus[ln],split=" ")[[1]];
breakup_this_line <- gsub(",","",breakup_this_line);
breakup_this_line <- gsub(";","",breakup_this_line);
breakup_this_line <- breakup_this_line[breakup_this_line!=""];
breakup_this_line <- breakup_this_line[match(rate_partitions,breakup_this_line):length(breakup_this_line)];
kk <- (1:length(breakup_this_line))[breakup_this_line %in% ":"];
partition_names <- breakup_this_line[kk-1];
kk <- c(kk,length(breakup_this_line)+1); # add last numberso that we can end the partion search easily below
character_rate_partitions <- rep("",nchars);
for (pn in 1:length(partition_names)) {
ll <- kk[pn]+1;
this_part <- as.numeric(breakup_this_line[ll]);
ll <- ll+1;
# while (ll<(kk[pn+1]-1)) {
if (pn < length(partition_names)) {
break_cell <- kk[pn+1]-1;
} else {
break_cell <- kk[pn+1];
}
while (ll<break_cell) {
if (breakup_this_line[ll]=="-") {
ll <- ll+1;
this_part <- c(this_part,as.numeric(breakup_this_line[ll-2]:as.numeric(breakup_this_line[ll])));
} else {
this_part <- c(this_part,as.numeric(breakup_this_line[ll]));
}
ll <- ll+1;
}
character_rate_partitions[this_part] <- partition_names[pn];
}
got_splits<- T;
}
}
} else character_rate_partitions <- rep("imagine",nchars);
if (trend_partitions!="") {
ln <- match("BEGIN SETS;",nexus);
got_splits <- F;
while (!got_splits) {
ln <- ln+1;
breakup_this_line <- strsplit(nexus[ln],split=" ")[[1]];
if (!is.na(match(trend_partitions,breakup_this_line))) {
nexus[ln] <- gsub("-"," - ",nexus[ln]); # Mesquite often puts dashes immediately after character or taxon numbers.....
nexus[ln] <- gsub(" -"," -",nexus[ln]);
nexus[ln] <- gsub("- ","- ",nexus[ln]);
breakup_this_line <- strsplit(nexus[ln],split=" ")[[1]];
breakup_this_line <- gsub(",","",breakup_this_line);
breakup_this_line <- gsub(";","",breakup_this_line);
breakup_this_line <- breakup_this_line[breakup_this_line!=""];
breakup_this_line <- breakup_this_line[match(trend_partitions,breakup_this_line):length(breakup_this_line)];
kk <- (1:length(breakup_this_line))[breakup_this_line %in% ":"];
partition_names <- breakup_this_line[kk-1];
kk <- c(kk,length(breakup_this_line)+1); # add last numberso that we can end the partion search easily below
character_trend_partitions <- rep("",nchars);
for (pn in 1:length(partition_names)) {
ll <- kk[pn]+1;
this_part <- as.numeric(breakup_this_line[ll]);
ll <- ll+1;
# while (ll<(kk[pn+1]-1)) {
if (pn < length(partition_names)) {
break_cell <- kk[pn+1]-1;
} else {
break_cell <- kk[pn+1];
}
while (ll<break_cell) {
if (breakup_this_line[ll]=="-") {
ll <- ll+1;
this_part <- c(this_part,as.numeric(breakup_this_line[ll-2]:as.numeric(breakup_this_line[ll])));
} else {
this_part <- c(this_part,as.numeric(breakup_this_line[ll]));
}
ll <- ll+1;
}
character_trend_partitions[this_part] <- partition_names[pn];
}
got_splits<- T;
}
}
} else character_trend_partitions <- rep("square",nchars);
state_orders <- rep("unordered",nchars);
if (!is.na(match("BEGIN ASSUMPTIONS;",nexus))) {
tx_pt <- 1+match("BEGIN ASSUMPTIONS;",nexus); # look at taxon partitions
while (tolower(nexus[tx_pt])!="end;") {
# yyy <- paste(nexusfile[tx_pt,], collapse = "");
yyy <- gsub("- "," - ",nexus[tx_pt]);
yyy <- gsub(";","",yyy);
yyy <- gsub(","," ,",yyy);
yyy <- gsub("\n","",yyy);
yyy <- gsub("\r","",yyy);
yyy <- gsub("\t","",yyy);
xxx <- tolower(strsplit(yyy," ")[[1]]);
xxx <- xxx[xxx!=""];
if (!is.na(match("ord:",tolower(xxx)))) {
ttl_ln <- length(xxx);
jj <- 1+match("ord:",xxx);
while (xxx[jj]==":") jj <- jj+1;
ordered <- c();
while (xxx[jj]!="," && jj<=ttl_ln) {
if (xxx[jj]=="-") {
jj <- jj+1;
ordered <- c(ordered,((as.numeric(ordered[length(ordered)])+1):as.numeric(xxx[jj])));
} else {
ordered <- c(ordered,as.numeric(xxx[jj]));
}
jj <- jj+1;
}
state_orders[ordered] <- "ordered";
}
tx_pt <- 1+tx_pt;
}
}
mxln <- length(nexusfile[top,]);
s <- top;
# te all of the taxon names
for (tx in 1:notu) {
# first, read taxon name
#### look for quotations###
s <- top+tx-1;
endline <- match("\n",nexusfile[s,]);
if (is.na(endline)) endline <- length(nexusfile[s,]);
if (nexusfile[s,1]=="'" || nexusfile[s,2]=="'") {
jj <- ((1:length(nexusfile[s,]))[nexusfile[s,] %in% "'"]);
i <- max((1:length(nexusfile[s,]))[nexusfile[s,] %in% "'"])
taxa[tx] <- pracma::strcat(nexusfile[s,(jj[1]+1):(jj[2]-1)])
i <- i+1
while (nexusfile[s,i]==" " && i<ncol(nexusfile)) i <- i+1
} else {
i <- 1;
if (nexusfile[s,1]!="\"") {
while (nexusfile[s,i]=="\t") i <- i+1;
taxa[tx] <- nexusfile[s,i]
i <- i+1
while (nexusfile[s,i]!=" " && nexusfile[s,i]!='\t' && i<ncol(nexusfile)) {
if (nexusfile[s,i]!="_") {
taxa[tx] <- paste0(taxa[tx],as.character(nexusfile[s,i]))
} else {
taxa[tx] <- paste0(taxa[tx]," ")
}
i <- i+1
}
} else {
taxa[tx] <- nexusfile[s,2];
i <- 3;
# while (nexusfile[s,i]!=" " && nexusfile[s,i+1]!=" " && i<ncol(nexusfile)) {
while (nexusfile[s,i]!=" " && i<ncol(nexusfile)) {
if (as.character(nexusfile[s,i])!="\"")
taxa[tx] <- paste0(taxa[tx],as.character(nexusfile[s,i]))
i <- i+1;
#print(taxa[tx]);
}
}
# now, get to characters
i <- (i:endline)[!nexusfile[s,i:endline] %in% c(" ","\t")][1];
# while ((nexusfile[s,i]==" " || nexusfile[s,i]=="\t") && i<ncol(nexusfile))
# i <- i+1
}
k <- i;
if ((endline-k)==(nchars+extra)) {
# true if there are no polymorphic characters for the taxon
dummy <- nexusfile[s,k:(endline-1)];
dummy[dummy==missing] <- UNKNOWN;
dummy[dummy==gap] <- INAP;
letterstate <- dummy[!dummy %in% c(UNKNOWN,INAP)];
dummy[!dummy %in% c(UNKNOWN,INAP)] <- sapply(letterstate,switch_letter_state_to_numeric,all_states);
chmatrix[tx,] <- as.numeric(dummy[1:nchars]);
if (strat>0) {
strat_ranges[tx,1] <- strat_ranges[tx,2] <- as.numeric(dummy[strat])
if (range>0) strat_ranges[tx,2] <- as.numeric(dummy[range])
}
if (geog>0) geography[tx]=as.numeric(nexusfile[geog,i])
for (c in 1:nchars) {
if ((chmatrix[tx,c]+1)>nstates[c]) nstates[c] <- chmatrix[tx,c]+1
}
} else {
# for (c in 1:(nchars+extra)) {
c <- 0;
while (c < (nchars+extra)) {
c <- c+1;
#print(c);
if (c<=nchars) {
if (nexusfile[s,i]=="(" || nexusfile[s,i]=="{") {
if (polymorphs==TRUE || polymorphs==1) {
# added 2020-11-28: sometimes polymorphics come in out-of-order
riteparens <- (i:endline)[nexusfile[s,i:endline] %in% c(")","}")];
ddd <- (i+1):(riteparens[1]-1);
polysites <- ddd[!nexusfile[s,ddd] %in% c(",","&"," ")]
polystates <- nexusfile[s,polysites];
for (ps in 1:length(polystates))
if (!polystates[ps] %in% 0:9)
polystates[ps] <- switch_letter_state_to_numeric(polystates[ps],all_states=all_states);
nexusfile[s,polysites] <- sort(as.numeric(polystates));
# nexusfile[s,(i+1):(riteparens[1]-1)] <- sort(nexusfile[s,(i+1):(riteparens[1]-1)]);
i <- i+1;
w <- as.numeric(nexusfile[s,i])
chmatrix[tx,c] <- -1*as.numeric(nexusfile[s,i])
if ((1+w)>nstates[c]) nstates[c] <- 1+w;
i <- i+1
j <- 1
while (nexusfile[s,i]!=")" && nexusfile[s,i]!="}" && i<ncol(nexusfile)) {
if (nexusfile[s,i]!="," && nexusfile[s,i]!=" ") {
w <- as.numeric(nexusfile[s,i])
if ((w+1)>nstates[c]) nstates[c] <- w+1
chmatrix[tx,c] <- chmatrix[tx,c]-((10^j)*w)
i <- i+1
j <- j+1
} else {
i <- i+1
}
}
} else {
chmatrix[tx,c] <- UNKNOWN;
while (nexusfile[s,i]!=')' && nexusfile[s,i]!="}") i <- i+1;
}
} else if (nexusfile[s,i]==missing) {
chmatrix[tx,c] <- UNKNOWN;
} else if (nexusfile[s,i]==gap) {
chmatrix[tx,c] <- INAP;
} else if (nexusfile[s,i]>="A" && nexusfile[s,i]<="Z") {
chmatrix[tx,c] <- switch_letter_state_to_numeric(nexusfile[s,i],all_states=all_states);
} else if (nexusfile[s,i]>="0" && nexusfile[s,i]<="9") {
chmatrix[tx,c] <- as.numeric(nexusfile[s,i]);
}
if ((chmatrix[tx,c]+1)>nstates[c]) nstates[c] <- chmatrix[tx,c]+1;
if (i < (endline-1)) i <- i+1;
} else {
if (c==strat) {
if (nexusfile[s,i]>="0" && nexusfile[s,i]<='9') {
strat_ranges[tx,1]=as.numeric(nexusfile[s,i])
} else if (nexusfile[s,i]>="A" && nexusfile[s,i]<="Z") {
strat_ranges[tx,1]=switch_letter_state_to_numeric(nexusfile[s,i],all_states = all_states);
}
if (range==0) strat_ranges[tx,2] <- strat_ranges[tx,1]
i <- i+1
} else if (c==range) {
if (nexusfile[s,i]>="0" && nexusfile[s,i]<='9') {
strat_ranges[tx,2]=as.numeric(nexusfile[s,i])
} else if (nexusfile[s,i]>="A" && nexusfile[s,i]<="Z") {
strat_ranges[tx,2]=switch_letter_state_to_numeric(nexusfile[s,i],all_states = all_states);
}
i <- i+1
} else if (c==geog) {
if (nexusfile[s,i]>="0" && nexusfile[s,i]<='9') {
geography[tx]=as.numeric(nexusfile[s,i])
} else if (nexusfile[s,i]>="A" && nexusfile[s,i]<="Z") {
geography[tx]=switch_letter_state_to_numeric(nexusfile[s,i],all_states = all_states);
}
}
} # end non-morphological data
# print(nexusfile[s,k:83]);
# print(chmatrix[tx,])
if (nexusfile[s,i+1]=="\n" || i==(mxln-1)) c <- nchars+extra;
}
}
# chmatrix[tx,];
# tx <- tx+1;
# s <- s+1
}
#x <- list(taxa,chmatrix,strat_ranges,geography)
#return (list(taxa,chmatrix,strat_ranges,geography))
chmatrix <- mundify_character_matrix(chmatrix,minst=0,UNKNOWN,INAP); # clean up coding
nstates <- count_states(chmatrix,UNKNOWN,INAP);
tree_found <- 0;
while (s<length(nexus) && tree_found==0) {
while (nexus[s]!= "BEGIN TREES; " && s<length(nexus)) s <- s+1;
if (s<length(nexus)) {
while (tree_found==0 && s<length(nexus)) {
s <- s+1
jj <- strsplit(nexus[s],split=c("\t"," "),fixed=TRUE)[[1]];
jj <- paste(jj,collapse="")
jj <- strsplit(jj,split=" ",fixed=TRUE)[[1]];
if (sum(jj=="TREE")>0 || sum(jj=="tree")>0) tree_found <- 1;
}
# s <- s+notu;
# while (jj[i]=="") jj[i] <- NULL;
# while (j[1]=="\t") j <- j[2:length(j)];
# if (j[1]=="T" && j[2]=="R" && j[3]=="E") {
# while (j!="(") j <- j[2:length(j)];
# }
newick_string <- jj[length(jj)];
newick_string <- fix_newick_ancestors(jj[length(jj)])
tree <- read_newick_string(newick_string);
tree_found <- 1
s <- length(nexus);
}
}
row.names(chmatrix) <- taxa;
unscored_taxa <- c();
for (n in 1:notu) {
if (sum(chmatrix[n,]==UNKNOWN)==nchars)
unscored_taxa <- c(unscored_taxa,n);
}
if (nchars<10) {
colnames(chmatrix) <- 1:nchars;
} else if (nchars<100) {
colnames(chmatrix) <- c(paste(0,(1:9),sep=""),10:nchars);
} else if (nchars<1000) {
colnames(chmatrix) <- c(paste(00,(1:9),sep=""),paste(0,(10:99),sep=""),100:nchars);
}
if (exclude[1]!=-1) {
keepers <- (1:nchars)[!(1:nchars) %in% exclude];
chmatrix <- chmatrix[,keepers];
nstates <- nstates[keepers];
state_orders <- state_orders[keepers];
character_rate_partitions <- character_rate_partitions[keepers];
}
if (strat!=0 && geog!=0 && tree_found==1) {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,strat_ranges,geography,tree,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Stratigraphic_Ranges","Geography","Tree","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
} else if (strat!=0) {
if (geog!=0) {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,strat_ranges,geography,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Stratigraphic_Ranges","Geography","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
} else if (tree_found!=0) {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,strat_ranges,tree,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Stratigraphic_Ranges","Tree","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
} else {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,strat_ranges,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Stratigraphic_Ranges","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
}
} else if (geog!=0) {
if (tree_found!=0) {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,geography,tree,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Geography","Tree","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
} else {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,geography,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Geography","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
}
} else if (tree_found!=0) {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,tree,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Tree","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
} else {
output <- list(taxa,chmatrix,as.numeric(nstates),state_orders,as.numeric(outgroup),unscored_taxa,character_rate_partitions,character_trend_partitions);
names(output) <- c("OTUs","Matrix","States","State_Types","Outgroup","Unscored_Taxa","Rate_Partitions","Trend_Partitions");
}
return(output)
}
#nexus_name <- "Anopliidae_Hanger_&_Strong_2000.nex";
# Locate a matrix inside the character database: reduce every glossary
# pathway to its terminal (matrix) name and return the index matching
# matrix_name (NA if absent).
accersi_matrix_location_from_RData <- function(matrix_name,character_database) {
  all_matrix_names <- pbapply::pbsapply(character_database$Glossary,accersi_nexus_name_in_RData);
  match(matrix_name,all_matrix_names);
}
# Fetch a matrix from the character database by walking its "$"-separated
# glossary pathway, stopping at whichever level carries matrix_name.
accersi_matrix_data_from_RData <- function(matrix_name,character_database) {
  loc <- accersi_matrix_location_from_RData(matrix_name,character_database);
  steps <- strsplit(character_database$Glossary[loc],"\\$")[[1]];
  lvl1 <- match(steps[1],names(character_database));
  lvl2 <- match(steps[2],names(character_database[[lvl1]]));
  if (steps[2]==matrix_name)
    return(unlist(character_database[[lvl1]][lvl2]));
  lvl3 <- match(steps[3],names(character_database[[lvl1]][[lvl2]]));
  if (steps[3]==matrix_name)
    return(unlist(character_database[[lvl1]][[lvl2]][lvl3]));
  # NOTE(review): the deepest lookup reuses steps[3] (as the original did);
  # confirm whether a fourth pathway component was intended here.
  lvl4 <- match(steps[3],names(character_database[[lvl1]][[lvl2]][[lvl3]]));
  return(unlist(character_database[[lvl1]][[lvl2]][[lvl3]][lvl4]));
}
# Return the final "$"-separated component of a glossary pathway, i.e. the
# nexus matrix name itself (e.g. "a$b$Phacopidae.nex" -> "Phacopidae.nex").
accersi_nexus_name_in_RData <- function(glossary) {
  pathway_parts <- strsplit(glossary,"\\$")[[1]];
  pathway_parts[length(pathway_parts)];
}
# Pull a named matrix out of an RData character database and parse it just as
# if it had been read from a nexus file.
#   polymorphs: if TRUE, recode polymorphic scores such as "1,2" as "-21";
#     otherwise treat them as unknown.
#   UNKNOWN / INAP: numeric codes substituted for "?" and the gap symbol ("-").
#   rate_partitions / trend_partitions: names of CHARPARTITIONs for rate /
#     trend classes ("" = none).
accersi_data_from_RData <- function(matrix_name, character_database, polymorphs=T, UNKNOWN=-11, INAP=-22, rate_partitions="", trend_partitions="") {
  raw_nexus <- accersi_matrix_data_from_RData(matrix_name=matrix_name,character_database);
  accersi_data_from_nexus_vector(nexus=as.character(raw_nexus), polymorphs=polymorphs, UNKNOWN=UNKNOWN, INAP=INAP, rate_partitions=rate_partitions, trend_partitions=trend_partitions);
}
#### DEAL WITH TRICKY CHARACTERS ####
# Convert a state symbol to its numeric value via its position in all_states
# (default: digits 0-9 then letters, skipping I and O). match() coerces to
# character, so numeric states pass through unchanged; subtracting 1 makes
# symbol "0" map to zero (and "A" to 10 under the default symbol set).
switch_letter_state_to_numeric <- function(state,all_states=c(0:9,LETTERS[!LETTERS %in% c("I","O")])) {
  position <- match(state,all_states);
  position-1;
}
# Legacy version: numeric states <= 9 pass through unchanged; letter states
# are mapped to 10+ by their position among the letters (I and O excluded).
switch_letter_state_to_numeric_old <- function(state) {
  if (state <= 9) return(state);
  letter_states <- toupper(letters[!letters %in% c("i","o")]);
  9+match(toupper(state),letter_states);
}
# Convert a numeric state back to its symbol: values <= 9 pass through
# unchanged, 10+ become letters (I and O excluded), so 10 -> "A", 11 -> "B".
switch_numeric_state_to_letter <- function(state) {
  if (state <= 9) return(state);
  letter_states <- toupper(letters[!letters %in% c("i","o")]);
  letter_states[state-9];
}
# Decompose a polymorphic score (stored as a negative integer, e.g. -21 for
# states {2,1}) back into its component states.
#   poly: the negative polymorph code.
#   minst: minimum legal state; merged entries below this are dropped.
# Handles multi-digit states (10+) by pairing digits when the digit string
# cannot represent distinct descending single-digit states.
# Fixed: the original fed a length>1 vector comparison directly into &&,
# which is an error as of R 4.3; all() expresses the intended whole-vector
# test. The three byte-identical trailing else-branches are also collapsed.
unravel_polymorph_badass <- function(poly,minst=0) {
  combo <- -1*poly;
  state_test <- as.numeric(strsplit(x=as.character(combo),split="")[[1]]);
  if (all(state_test==sort(state_test,decreasing = T)) && length(unique(state_test))==length(state_test)) {
    # digits are strictly descending & unique: one digit per state
    sts <- 1+floor(log10(abs(combo)))
    polymorphics <- vector(length=sts)
    base <- 10^(sts-1)
    for (s in 1:sts) {
      polymorphics[s] <- floor(abs(combo)/base)
      combo <- combo%%base
      base <- base/10
    }
  } else {
    breakpt <- match(max(state_test),state_test);
    if (breakpt > 2) {
      # merge digit pairs only up to the position of the largest digit
      i <- 1;
      while (i < breakpt) {
        j <- i+1;
        state_test[i] <- (10*state_test[i])+state_test[j];
        state_test[j] <- -1;
        i <- j+1;
      }
      polymorphics <- state_test[state_test>=minst];
    } else {
      # all remaining cases (sub-minimum digits, even digit count, or odd
      # fallback) pair digits across the whole string; the original spelled
      # these out as three identical branches.
      i <- 1;
      while (i < length(state_test)) {
        j <- i+1;
        state_test[i] <- (10*state_test[i])+state_test[j];
        state_test[j] <- -1;
        i <- j+1;
      }
      polymorphics <- state_test[state_test>=minst];
    }
  }
  return (polymorphics);
}
# Split a polymorphic code (negative integer) into one state per digit:
# -21 -> c(2, 1). Assumes every component state is a single digit.
unravel_polymorph <- function(poly) {
  combo <- -1*poly;
  n_states <- 1+floor(log10(abs(combo)));
  states <- vector(length=n_states);
  divisor <- 10^(n_states-1);
  for (k in 1:n_states) {
    states[k] <- floor(abs(combo)/divisor);
    combo <- combo%%divisor;
    divisor <- divisor/10;
  }
  return (states);
}
# Pack a set of states into a single negative polymorph code: c(1,2) -> -21.
# States are sorted descending so the largest digit leads.
# Fixed: the original looped over 2:length(polystates), which for a
# single-state input evaluates to c(2,1) and produced NA; seq-safe
# iteration now returns -state for length-1 input.
ravel_polymorph <- function(polystates) {
  polystates <- sort(polystates,decreasing = TRUE);
  polym <- polystates[1];
  for (st in seq_along(polystates)[-1]) polym <- (10*polym)+polystates[st]
  return(-1*polym)
}
# Format a polymorphic state set for writing to a nexus file, states sorted
# ascending inside parentheses: c(2,1) -> "(12)".
ravel_polymorph_for_file <- function(polystates) {
  sorted_states <- sort(polystates,decreasing = FALSE);
  return(paste0("(",paste(sorted_states,collapse=""),")"));
}
#### SUMMARIZE CHARACTER DATA ####
# For each OTU (row), count how many characters are scored with something
# other than missing (UNKNOWN) or inapplicable (INAP).
#   chmatrix: taxa x characters matrix.
# Returns one count per taxon.
# Fixed: the original subtracted the unscored tally from notu (the number of
# taxa) instead of nch (the number of characters), which is wrong whenever
# the matrix is not square.
count_scored_characters_per_otu <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
  nch <- ncol(chmatrix);
  notu <- nrow(chmatrix);
  scored <- vector(length=notu);
  for (s in 1:notu)
    scored[s] <- nch - (sum(chmatrix[s,]==UNKNOWN)+sum(chmatrix[s,]==INAP));
  return(scored);
}
# For each character (column), count how many OTUs are scored with something
# other than missing (UNKNOWN) or inapplicable (INAP). A plain vector is
# treated as a single-character matrix.
count_scored_otus_per_character <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
  if (!is.matrix(chmatrix))
    chmatrix <- matrix(chmatrix,ncol=1);
  notu <- nrow(chmatrix);
  scored <- c();
  for (ch in seq_len(ncol(chmatrix))) {
    unscored <- sum(chmatrix[,ch]==UNKNOWN)+sum(chmatrix[,ch]==INAP);
    scored <- c(scored,notu-unscored);
  }
  return(scored);
}
# Tally, for each character, how many OTUs are scored with each state
# (state st occupies column st+1 of the result). Polymorphic (negative)
# scorings are not counted toward any individual state.
count_scored_otus_per_character_state <- function(chmatrix,chstates,UNKNOWN=-11,INAP=-22) {
  nch <- ncol(chmatrix);
  tallies <- array(0,dim=c(nch,max(chstates)));
  for (ch in 1:nch)
    for (st in 1:chstates[ch])
      tallies[ch,st] <- sum(chmatrix[,ch]==(st-1));
  return(tallies);
}
# Count how many characters possess at least one autapomorphic state
# (a state scored for exactly one OTU).
count_autapomorphic_characters <- function(chmatrix,chstates,UNKNOWN=-11,INAP=-22) {
  notus_per_chstate <- count_scored_otus_per_character_state(chmatrix,chstates);
  autaps <- 0;
  for (ch in seq_len(ncol(chmatrix)))
    if (sum(notus_per_chstate[ch,1:chstates[ch]]==1)>0)
      autaps <- autaps+1;
  return(autaps);
}
# Count the total number of states (summed over characters) that are scored
# for exactly one OTU.
count_autapomorphic_states <- function(chmatrix,chstates,UNKNOWN=-11,INAP=-22) {
  notus_per_chstate <- count_scored_otus_per_character_state(chmatrix,chstates);
  autaps <- 0;
  for (ch in seq_len(ncol(chmatrix)))
    autaps <- autaps+sum(notus_per_chstate[ch,1:chstates[ch]]==1);
  return(autaps);
}
# List every character possessing at least one autapomorphic state
# (a state scored for exactly one OTU).
list_autapomorphic_characters <- function(chmatrix,chstates,UNKNOWN=-11,INAP=-22) {
  notus_per_chstate <- count_scored_otus_per_character_state(chmatrix,chstates);
  autaps <- c();
  for (ch in seq_len(ncol(chmatrix)))
    if (sum(notus_per_chstate[ch,1:chstates[ch]]==1)>0)
      autaps <- c(autaps,ch);
  return(autaps);
}
# For each character with an autapomorphic state, append a row giving the
# character number and its state count.
# NOTE(review): the second column is chstates[c] — the character's number of
# states — not the autapomorphic state itself; confirm this is intended.
list_autapomorphic_states <- function(chmatrix,chstates,UNKNOWN=-11,INAP=-22) {
  notus_per_chstate <- count_scored_otus_per_character_state(chmatrix,chstates);
  autaps <- c();
  for (ch in seq_len(ncol(chmatrix)))
    if (sum(notus_per_chstate[ch,1:chstates[ch]]==1)>0)
      autaps <- rbind(autaps,c(ch,chstates[ch]));
  return(autaps);
}
# Per character, count scorings that are polymorphic (negative codes other
# than UNKNOWN/INAP). At most one polymorphic scoring can exist per taxon
# per character. A plain vector is treated as one character.
count_polymorphic_scorings <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
  if (!is.matrix(chmatrix))
    chmatrix <- data.frame(ch=chmatrix);
  nchars <- ncol(chmatrix);
  polymorphs <- vector(length=nchars);
  for (ch in 1:nchars) {
    observed <- chmatrix[,ch];
    observed <- observed[observed!=UNKNOWN];
    observed <- observed[observed!=INAP];
    polymorphs[ch] <- length(observed[observed<0]);
  }
  return(polymorphs);
}
# Count how many characters have at least one polymorphic scoring.
count_polymorphic_characters <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
  if (!is.matrix(chmatrix))
    chmatrix <- data.frame(ch=chmatrix);
  per_char <- count_polymorphic_scorings(chmatrix,UNKNOWN,INAP);
  return(sum(per_char>0));
}
# Per character, count the number of distinct states that appear in at least
# one polymorphic scoring.
# Fixed: the original assembled the unique set of states involved in
# polymorphisms (poly_states) but then stored length(polys) — the number of
# distinct polymorphic codes — rather than the number of states.
count_polymorphic_states <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
  if (!is.matrix(chmatrix))
    chmatrix <- data.frame(ch=chmatrix);
  nchars <- ncol(chmatrix);
  polymorphic_states <- vector(length=nchars);
  for (ch in 1:nchars) {
    char_states <- sort(unique(chmatrix[,ch]));
    char_states <- char_states[char_states!=UNKNOWN];
    char_states <- char_states[char_states!=INAP];
    polys <- char_states[char_states<0];  # remaining negatives are polymorph codes
    poly_states <- c();
    pp <- 0;
    while (pp < length(polys)) {
      pp <- pp+1;
      poly_states <- unique(c(poly_states,unravel_polymorph_badass(polys[pp])));
    }
    polymorphic_states[ch] <- length(poly_states);
  }
  return(polymorphic_states);
}
# Number of distinct states observed for each character. When
# include_polymorphs is TRUE, states appearing only inside polymorphic
# scorings (negative codes) are counted as well. A plain vector is treated
# as one character.
count_states <- function(chmatrix,UNKNOWN=-11,INAP=-22,include_polymorphs=T) {
  if (!is.matrix(chmatrix))
    chmatrix <- data.frame(ch=chmatrix);
  nstates <- c();
  for (ch in seq_len(ncol(chmatrix))) {
    observed <- sort(unique(chmatrix[,ch]));
    observed <- observed[observed!=UNKNOWN];
    observed <- observed[observed!=INAP];
    plain_states <- observed[observed>=0];
    if (sum(observed<0)>0 && include_polymorphs) {
      poly_codes <- observed[observed<0];
      unraveled <- c();
      for (pc in poly_codes)
        unraveled <- c(unraveled,unravel_polymorph(poly=pc));
      plain_states <- sort(unique(c(plain_states,unraveled)));
    }
    nstates <- c(nstates,length(plain_states));
  }
  return(nstates);
}
# As count_states(), but return the sorted state vector itself for each
# character (a list with one element per character). States found only in
# polymorphic scorings are included when include_polymorphs is TRUE.
# Improved: a preallocated base-R list replaces rlist::list.append, removing
# the external dependency and the O(n^2) list copying.
list_states <- function(chmatrix,UNKNOWN=-11,INAP=-22,include_polymorphs=T) {
  if (!is.matrix(chmatrix)) {
    chmatrix <- data.frame(ch=chmatrix);
  }
  nchars <- ncol(chmatrix);
  state_list <- vector(mode="list",length=nchars);
  for (ch in 1:nchars) {
    char_states <- sort(unique(chmatrix[,ch]));
    char_states <- char_states[char_states!=UNKNOWN];
    char_states <- char_states[char_states!=INAP];
    unique_states <- char_states[char_states>=0]
    if (sum(char_states<0)>0 && include_polymorphs) {
      polys <- char_states[char_states<0];
      polystates <- c();
      for (pp in polys) {
        polystates <- c(polystates,unravel_polymorph(poly=pp));
      }
      unique_states <- sort(unique(c(unique_states,polystates)));
    }
    state_list[[ch]] <- sort(unique_states);
  }
  return(state_list);
}
# Legacy state counter: polymorphic codes (negatives) are always unraveled
# into their component states before counting.
# 2020-09-01: fixed breakdown of polymorphics
count_states_old <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
  nstates <- c();
  for (ch in seq_len(ncol(chmatrix))) {
    observed <- sort(unique(chmatrix[,ch]));
    observed <- observed[observed!=UNKNOWN];
    observed <- observed[observed!=INAP];
    if (sum(observed<0)>0) {
      # negatives sort first: keep expanding the leading polymorph code
      while (observed[1]<0)
        observed <- sort(unique(c(observed[2:length(observed)],unravel_polymorph(observed[1]))));
    }
    nstates <- c(nstates,length(observed));
  }
  return(nstates);
}
# Highest observed state per character, with polymorphic codes unraveled
# first; a character scored only as missing/inapplicable reports 0.
# 2020-09-01: fixed breakdown of polymorphics
maximum_state <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
  maxstates <- c();
  for (ch in seq_len(ncol(chmatrix))) {
    observed <- sort(unique(chmatrix[,ch]));
    observed <- observed[observed!=UNKNOWN];
    observed <- observed[observed!=INAP];
    if (length(observed)==0) {
      observed <- 0;
    } else if (sum(observed<0)>0) {
      # negatives sort first: keep expanding the leading polymorph code
      while (observed[1]<0)
        observed <- sort(unique(c(observed[2:length(observed)],unravel_polymorph(observed[1]))));
    }
    maxstates <- c(maxstates,max(observed));
  }
  return(maxstates);
}
# Lowest observed state per character, with polymorphic codes unraveled
# first; a character scored only as missing/inapplicable reports 0.
# 2020-09-01: fixed breakdown of polymorphics
minimum_state <- function(chmatrix,UNKNOWN=-11,INAP=-22) {
  minstates <- c();
  for (ch in seq_len(ncol(chmatrix))) {
    observed <- sort(unique(chmatrix[,ch]));
    observed <- observed[observed!=UNKNOWN];
    observed <- observed[observed!=INAP];
    if (length(observed)==0) {
      observed <- 0;
    } else if (sum(observed<0)>0) {
      # negatives sort first: keep expanding the leading polymorph code
      while (observed[1]<0)
        observed <- sort(unique(c(observed[2:length(observed)],unravel_polymorph(observed[1]))));
    }
    minstates <- c(minstates,min(observed));
  }
  return(minstates);
}
#### FIND & RECODE DEPENDENT - INDEPENDENT SETS ####
# modified 2022-02-05
# Given a dependent character (dchar), search backwards through the earlier
# candidate independent characters for the "parent" whose state controls
# whether dchar is applicable. Returns the parent character number, or 0 if
# no plausible parent is found.
#   dchar: column number of the dependent character.
#   independents: candidate independent character numbers.
#   chmatrix: taxa x characters matrix.
#   UNKNOWN / INAP: codes for missing ("?") and inapplicable ("-") scorings.
find_independent_character <- function(dchar,independents,chmatrix,UNKNOWN,INAP) {
# number of distinct non-negative (i.e. non-polymorphic) scorings observed for dchar
dstates <- sum(unique(chmatrix[,dchar])>=0)
# only characters preceding dchar are considered as possible parents
pre_indies <- independents[independents < dchar];
pi <- p_inds <- length(pre_indies);
ind_char <- 0;
# walk candidates from nearest to farthest
# NOTE(review): if pre_indies is empty this is 0:1 — assumed non-empty by callers
for (pi in p_inds:1) {
	ic <- pre_indies[pi];
	# observed (candidate parent, dependent) state pairs
	unique_combos <- unique(chmatrix[,c(ic,dchar)]);
	# drop pairs where the candidate parent itself is unscored/inapplicable
	unique_combos_2 <- unique_combos[!unique_combos[,1] %in% c(INAP,UNKNOWN),];
	# exclude -- or ?1 pairings
	u_c <- nrow(unique_combos);
	u_c2 <- nrow(unique_combos_2);
	# a single remaining pair loses its matrix class; treat it as one row
	if (is.null(u_c2)) if (length(unique_combos_2)>0) {u_c2 <- 1;} else {u_c2 <- 0;}
	# rows where the candidate is UNKNOWN but the dependent is scored
	missing_match <- (1:u_c)[unique_combos[,1] %in% UNKNOWN][!(1:u_c)[unique_combos[,1] %in% UNKNOWN] %in% (1:u_c)[unique_combos[,2] %in% UNKNOWN]];
	# rows where the candidate is INAP but the dependent is scored
	inap_match <- (1:u_c)[unique_combos[,1] %in% INAP][!(1:u_c)[unique_combos[,1] %in% INAP] %in% (1:u_c)[unique_combos[,2] %in% c(UNKNOWN,INAP)]]
#	sum((unique_combos[,1] %in% INAP) * (unique_combos[,2] %in% INAP));
#	double_inap <- double_inap + sum((unique_combos[,1] %in% UNKNOWN) * (!unique_combos[,2] %in% c(UNKNOWN,INAP)));
	if (u_c2>=dstates && (length(missing_match)>0 || length(inap_match)>0)) {
		# parent states that only ever co-occur with an unscored dependent
		scored_matches <- unique(unique_combos_2[,1])[!unique(unique_combos_2[,1]) %in% unique(unique_combos_2[!unique_combos_2[,2] %in% c(INAP,UNKNOWN),1])];
		# exactly one such non-negative parent state: treat ic as the parent
		if (length(scored_matches)==1 && sum(scored_matches >= 0)) {
			ind_char <- ic;
			pi <- 1;
			return(ind_char);
			}
		} else if (u_c2>1) {
		# otherwise test whether parent states split cleanly into "dependent
		# applicable" vs "dependent inapplicable" sets
		unique_combos <- unique_combos_2;
		if (!is.matrix(unique_combos)) unique_combos <- array(unique_combos,dim=c(1,2));
		unique_combos <- unique_combos[!unique_combos[,2] %in% UNKNOWN,];
		if (!is.matrix(unique_combos)) unique_combos <- array(unique_combos,dim=c(1,2));
		# set aside polymorphic (negative) parent scorings
		poly_combos <- subset(unique_combos,unique_combos[,1]<0);
		unique_combos <- unique_combos[unique_combos[,1] >= 0,];
		unique_combos_ap <- unique_combos[!unique_combos[,2] %in% INAP,];	# dependent applicable
		unique_combos_inap <- unique_combos[unique_combos[,2] %in% INAP,];	# dependent inapplicable
		# re-matrix single rows; an empty set becomes a dummy that overlaps itself
		if (!is.matrix(unique_combos_ap)) {
			if (length(unique_combos_ap)==2) {
				unique_combos_ap <- array(unique_combos_ap,dim=c(1,2));
				} else {
				unique_combos_ap <- cbind(0:5,0:5);
				}
			}
		if (!is.matrix(unique_combos_inap)) {
			if (length(unique_combos_inap)==2) {
				unique_combos_inap <- array(unique_combos_inap,dim=c(1,2));
				} else {
				unique_combos_inap <- cbind(0:5,0:5);
				}
			}
		if (nrow(unique_combos_ap)>0 && nrow(unique_combos_inap)>0) {
			# disjoint parent-state sets: ic gates dchar's applicability
			if (sum(unique_combos_ap[,1] %in% unique_combos_inap[,1])==0) {
				ind_char <- ic;
				pi <- 1;
				return(ind_char);
				}
			} # end case where ic might be the "parent" character
		} else if (unique_combos[1]>=0 && unique_combos[2]==INAP) {
		# lone pair: parent scored & dependent inapplicable also marks a parent
		ind_char <- ic;
		pi <- 1;
		return(ind_char);
		}
	}
return(ind_char);
}
#transmogrify_additive_dependents_to_multistate <- function(ind_char,dep_chars,chmatrix,INAP,UNKNOWN,multichanges=F,theoretical=F,unknown_inap_sep=F) {
#transmogrify_additive_dependents_to_multistate (ind_char,dep_chars,chmatrix,char_dependencies,INAP,UNKNOWN,theoretical=T)
transmogrify_additive_dependents_to_multistate <- function(ind_char,dep_chars,chmatrix,char_dependencies,INAP=-22,UNKNOWN=-11) {
# Recode an independent character (ind_char) together with the characters that
# depend on it (dep_chars) as a single multistate character, and build the
# matching Q (transition-rate) matrix.
# ind_char: column number of the controlling character in chmatrix.
# dep_chars: column numbers of the dependent characters.
# chmatrix: taxon-by-character matrix using UNKNOWN/INAP sentinels.
# char_dependencies: for each involved character, the character it depends on.
# Returns a list: unique_combinations, Q, new_character (per-taxon recoding).
# NOTE(review): rowMaxs/rowMins, count_states and the construct_Q_* /
# accersi_* helpers are defined elsewhere in this file or its packages --
# confirm they are in scope before calling this in isolation.
# theortical: if T, then Q matrix & recoding allows for all theoretically possible combinations
# unknown_inap_sep: code unknowns separately; this is a bad idea that I regret and that we'll cut
notu <- nrow(chmatrix);
all_chars <- c(ind_char,dep_chars);
if (is.null(names(char_dependencies)) || is.na(names(char_dependencies[1])))
	names(char_dependencies) <- all_chars;
#indies <- all_chars[match(unique(char_dependencies),all_chars)];
combos <- chmatrix[,all_chars];
#combos[1,2] <- -10;
# drop taxa that are unknown for every involved character
combos <- combos[!(rowMaxs(combos)==UNKNOWN & rowMins(combos)==UNKNOWN),];
hchars <- ncol(combos);
hstates <- count_states(combos);
names(hstates) <- all_chars;
#combos <- combos[combos[,2]!=INAP,];
ucombos <- unique(combos);
# set aside rows holding polymorphic (negative, non-INAP) scores
polyms <- unique(which(ucombos < 0 & !ucombos %in% INAP,arr.ind = T)[,1]);
ucombos <- ucombos[!(1:nrow(ucombos)) %in% polyms,];
all_poss_combos <- accersi_all_theoretical_character_state_combinations(obs_combinations=ucombos,char_dependencies = char_dependencies,INAP=INAP);
a_p_c <- nrow(all_poss_combos);
#combo_dependency <- accersi_key_character_for_hierarchical_combination(all_poss_combos,char_dependencies);
#keystates <- accersi_key_states_for_independent_character_in_hierarchical(all_poss_combos = all_poss_combos,char_dependencies = char_dependencies)
key_combos <- accersi_gateway_states_among_hierarchical_characters(all_poss_combos,char_dependencies,hstates=hstates)
ind_states <- sort(unique(ucombos[,1]));
if (ncol(key_combos)>1) {
	# several gateway states: dependents diverge, so use the divergent Q
	Q <- construct_Q_matrix_for_divergent_hierarchical_multistate(all_poss_combos,char_dependencies,UNKNOWN=UNKNOWN,INAP=INAP);
	# start here!!!! (why (-21)?)
	new_multistate <- accersi_hierarchical_multistate(all_poss_combos,obs_combos=chmatrix[,all_chars]);
	} else if (length(all_poss_combos)>0 & ncol(key_combos)==1) {
	# get rid of missing examples
	Q <- construct_Q_matrix_for_hierarchical_multistate(all_poss_combos,char_dependencies,UNKNOWN=UNKNOWN,INAP=INAP);
#	Q <- construct_Q_matrix_for_divergent_hierarchical_multistate(all_poss_combos)
	new_multistate <- accersi_hierarchical_multistate(all_poss_combos,obs_combos=chmatrix[,all_chars]);
	#cbind(new_multistate,chmatrix[,c(ind_char,dep_chars)])
	} else {
	# only one dependent state exists, making this just a normal character
	combos <- chmatrix[,c(ind_char,dep_chars)];
	combos <- combos[!(rowMaxs(combos)==UNKNOWN & rowMins(combos)==UNKNOWN),];
	combos <- unique(combos);
	all_poss_combos <- combos;
	new_multistate <- chmatrix[,ind_char];
	k <- max(2,nrow(combos));
	Q <- construct_Q_matrix_unordered(k);
	}
output <- list(all_poss_combos,Q,new_multistate);
names(output) <- c("unique_combinations","Q","new_character");
return(output);
}
#obs_combinations <- ucombos; char_dependencies <- secondary_dependencies;
accersi_all_theoretical_character_state_combinations <- function(obs_combinations,char_dependencies=c(),INAP=-22,UNKNOWN=-11) {
	# Enumerate every theoretically possible combination of states over a set of
	# characters, then prune combinations that violate character dependencies:
	# a dependent character must be inapplicable (INAP) exactly when its
	# controlling character is not in one of the observed "gateway" states.
	#
	# obs_combinations: matrix of observed state combinations; columns named with
	#   the original character numbers.
	# char_dependencies: for each column (by position), the character number it
	#   depends on; the independent character maps to itself.
	# Returns: matrix of allowed combinations; rownames are the concatenated
	#   per-character state strings with INAP printed as "-".
	ucombos <- unique(obs_combinations);
	ochars <- ncol(ucombos);	# no. original characters involved
	all_chars <- as.numeric(colnames(ucombos));
	# Build the full Cartesian product of observed states, rightmost character
	# first so that column order is preserved.
	all_poss_combos <- c();
	for (nch in ncol(ucombos):1) {
		obs_states <- sort(unique(ucombos[,nch]));
		if (is.null(all_poss_combos)) {
			all_poss_combos <- array(obs_states,dim=c(length(obs_states),1));
		} else {
			prior_combos <- all_poss_combos;
			all_poss_combos <- cbind(rep(obs_states[1],nrow(prior_combos)),prior_combos);
			for (ns in 2:length(obs_states))
				all_poss_combos <- rbind(all_poss_combos,cbind(rep(obs_states[ns],nrow(prior_combos)),prior_combos));
		}
	}
	colnames(all_poss_combos) <- colnames(ucombos);
	# Label every row with its state string, writing INAP (-22) as "-".
	state_combos <- all_poss_combos[,1];
	for (dc in 2:ncol(all_poss_combos)) {
		dummyc <- gsub("-22","-",as.character(all_poss_combos[,dc]));
		for (sc in 1:length(state_combos)) state_combos[sc] <- paste(state_combos[sc],dummyc[sc],sep="");
	}
	rownames(all_poss_combos) <- state_combos;
	if (length(char_dependencies)>0) {
		# Characters that are ever scored inapplicable are the dependent ones.
		# (bug fix: removed dead `ind_char_2 <- match(ind_char,all_chars)`, which
		# read an undefined global and errored whenever `ind_char` was absent.)
		dependent_chars <- c();
		for (nch in 1:ochars) if (sum(ucombos[,nch] %in% INAP)>0) dependent_chars <- c(dependent_chars,nch);
		# seq_along() guards the legitimate no-dependents case, where the old
		# 1:length() loop would index with NA.
		for (dc in seq_along(dependent_chars)) {
			a_p_c <- nrow(all_poss_combos);
			dch <- dependent_chars[dc];
			ich <- match(char_dependencies[dch],all_chars);	# column of the controlling character
			# Gateway states: controller states under which the dependent is
			# observed applicable (non-INAP).
			keyst <- unique(ucombos[!ucombos[,c(ich,dch)][,2] %in% INAP,ich]);
			keyst <- keyst[keyst != INAP];
			# Keep rows where (controller gated on & dependent applicable) or
			# (controller gated off & dependent inapplicable).
			keepers1 <- (1:a_p_c)[all_poss_combos[,ich] %in% keyst & !all_poss_combos[,dch] %in% INAP];
			keepers2 <- (1:a_p_c)[!all_poss_combos[,ich] %in% keyst & all_poss_combos[,dch] %in% INAP];
			all_poss_combos <- all_poss_combos[sort(c(keepers1,keepers2)),];
		}
	}
	return(all_poss_combos);
}
accersi_hierarchical_multistate <- function(all_poss_combos,obs_combos) {
# condense hierarchical additive scheme into one multistate
# Each row of all_poss_combos is one allowed combination of the original
# (independent + dependent) characters; each taxon row of obs_combos is mapped
# to the matching row, giving states 0,1,2,... in row order (letters past 9).
# Taxa matching no row (unknowns / polymorphs) are rewritten as "?" or as a
# "{...}" polymorph over every combination consistent with their scores.
# NOTE(review): notu, ochars, UNKNOWN, INAP, letter_states, more_letter_states,
# match_vector_to_matrix_row, row.match (prodlim) and unravel_polymorph_badass
# are all taken from the enclosing environment -- confirm they are defined
# before this is called in isolation.
new_multistate <- vector(length=notu);
for (nn in 1:notu) new_multistate[nn] <- match_vector_to_matrix_row(test_vector=obs_combos[nn,],test_matrix=all_poss_combos)-1;
all_states <- c(0:9,letter_states,more_letter_states);
# with more than 10 combinations, switch from digits to the 0-9+letter alphabet
if (nrow(all_poss_combos)>10) if (is.numeric(new_multistate)) new_multistate[!is.na(new_multistate)] <- all_states[1+new_multistate[!is.na(new_multistate)]];
hstates <- count_states(all_poss_combos);
# look for uncoded examples; these will be either polymorphs or unknowns
prob_child <- (1:notu)[is.na(new_multistate)];
#combos <- chmatrix[,c(ind_char,dep_chars)];
pc <- 0;
# negative scores other than the UNKNOWN/INAP sentinels are packed polymorphs
polymorphs <- unique(obs_combos[obs_combos<0])
polymorphs <- polymorphs[!polymorphs %in% c(UNKNOWN,INAP)];
while (pc < length(prob_child)) {
	pc <- pc+1;
	pcc <- prob_child[pc];
	#obs_combos[pcc,]
	if (obs_combos[pcc,1]==UNKNOWN) {
		# independent character itself unknown: whole combination is unknown
		new_multistate[pcc] <- "?";
#		new_multistate[pcc] <- UNKNOWN;
		} else {
		# make it polymorphic for all possible states
#		this_combo <- obs_combos[pcc,(0:dchars)+1];
		this_combo <- obs_combos[pcc,];
#		doofi <- (1:length(this_combo))[this_combo==UNKNOWN];
		# this if/else probably is unneeded now!
		if (sum(this_combo %in% polymorphs)==0) {
			# must be case where 1+ dependent is unknown
			set_chars <- (1:length(this_combo))[this_combo!=UNKNOWN];
			set_states <- this_combo[set_chars];
			poss_combos <- all_poss_combos;
			ss <- 0;
			# keep only combinations agreeing with the characters that ARE scored
			while (ss < length(set_chars)) {
				ss <- ss+1;
				poss_combos <- subset(poss_combos,poss_combos[,set_chars[ss]]==set_states[ss]);
				}
			polymorph <- 0;
			if (nrow(all_poss_combos)>10) polymorph <- c();
			for (cp in 1:nrow(poss_combos)) {
				this_state <- (row.match(poss_combos[cp,],as.data.frame(all_poss_combos))-1);
				if (nrow(all_poss_combos)>10) {
					polymorph <- c(polymorph,all_states[this_state]);
#					base64encode(this_state);
#					for (i in 1:5) print(base64encode(10^(i-1)));
					} else {
					# pack the matching states as a negative concatenated-digit code
					polymorph <- polymorph-((10^(cp-1))*this_state);
					}
				}
			} else {
			# 1+ of the original characters is scored as polymorphic
			set_chars <- (1:ochars)[!this_combo %in% c(UNKNOWN,polymorphs)]; unset_chars <- (1:ochars)[this_combo %in% c(UNKNOWN,polymorphs)];
			un <- 0;
			missing <- unset_chars[this_combo[unset_chars]==UNKNOWN];
			polys <- unset_chars[!this_combo[unset_chars] %in% UNKNOWN];
			while (un < length(missing)) {
				un <- un+1;
				mchar <- missing[un];
				rstates <- (1:hstates[mchar])-1;
				this_combo[mchar] <- 0;
				# NOTE(review): the next line indexes this_combo by rs (the state
				# counter) rather than by mchar, so it can read/overwrite other
				# characters; this looks like a bug -- confirm the intended
				# packing of unknowns before relying on this branch.
				for (rs in 1:length(rstates))
					this_combo[mchar] <- this_combo[rs]-rstates[rs]*(10^(rs-1));
				}
#			set_chars <- set_chars[set_chars>1];
			set_states <- this_combo[set_chars];
			poss_combos <- all_poss_combos;
#			print(nrow(poss_combos));
			# reduce the possible combinations to those consistent
			# NOTE(review): the while loop stops at sc>1, so set_chars[1] is never
			# used to filter poss_combos -- confirm that skipping it is deliberate.
			sc <- length(set_chars);
#			for (sc in 1:length(set_chars)) {
			while (sc > 1) {
				poss_combos <- subset(poss_combos,poss_combos[,set_chars[sc]]==set_states[sc]);
#				print(nrow(poss_combos));
				sc <- sc-1;
				}
			uc <- 0;
			# restrict each polymorphic character to its listed states
			while (uc < length(polys)) {
				uc <- uc+1;
				poss_combos <- poss_combos[poss_combos[,polys[uc]] %in% unravel_polymorph_badass(this_combo[polys[uc]]),];
				}
			pstates <- match(rownames(poss_combos),rownames(all_poss_combos));
			polymorph <- 0;
			if (nrow(all_poss_combos)>10) polymorph <- c();
			for (cp in 1:nrow(poss_combos)) {
				this_state <- (row.match(poss_combos[cp,],as.data.frame(all_poss_combos))-1);
				if (nrow(all_poss_combos)>10) {
					polymorph <- c(polymorph,all_states[this_state]);
#					base64encode(this_state);
#					for (i in 1:5) print(base64encode(10^(i-1)));
					} else {
					polymorph <- polymorph-((10^(cp-1))*this_state);
					}
				}
			}
#		if (nrow(unq_combos)>10) {
#			if (is.numeric(new_multistate))
#				new_multistate[!is.na(new_multistate)] <- all_states[1+new_multistate[!is.na(new_multistate)]]
		# write the polymorph as "{...}" and strip the packing minus signs
		new_multistate[pcc] <- paste("{",paste(polymorph,collapse=""),"}",sep="");
		new_multistate[pcc] <- gsub("-","",new_multistate[pcc]);
#		} else {
#			new_multistate[pcc] <- polymorph;
#		}
		}
	}
return(new_multistate);
}
accersi_key_states_for_independent_character_in_hierarchical <- function(all_poss_combos,char_dependencies,INAP=-22,UNKNOWN=-11) {
# For each dependent character, list the states of its controlling character
# under which it is applicable (scored as something other than INAP/UNKNOWN).
# Returns a matrix with one row per dependent character (rownames = character
# numbers); unused cells are left as -1.
# NOTE(review): the loop body reads `combos` from the enclosing environment
# rather than all_poss_combos, and indexes both columns and char_dependencies
# by dc+1 (i.e. it assumes the independent character sits in column 1) --
# confirm both before reusing this outside its original call site.
all_chars <- as.numeric(colnames(all_poss_combos));
indies <- all_chars[match(unique(char_dependencies),all_chars)];
dep_chars <- as.numeric(colnames(all_poss_combos))[as.numeric(colnames(all_poss_combos))!=char_dependencies];
ostates <- count_states(all_poss_combos);
keystates <- array(-1,dim=c(length(dep_chars),max(ostates[match(indies,all_chars)])));
#unkeystates <- sort(unique(ucombos[,1]));
for (dc in 1:length(dep_chars)) {
	kc <- match(char_dependencies[dc+1],all_chars)
	# controller states observed alongside an applicable dependent score
	ks <- unique(combos[!combos[,dc+1] %in% c(INAP,UNKNOWN),kc]);
	ks <- ks[ks!=INAP];
	keystates[dc,1:length(ks)] <- ks;
#	unkeystates <- unkeystates[unkeystates!=ks];
	}
rownames(keystates) <- dep_chars;	# rownames(keystates) <- c(dep_chars,49);
return(keystates);
}
accersi_key_character_for_hierarchical_combination <- function(all_poss_combos,char_dependencies,INAP=-22,UNKNOWN=-11) {
	# For each state combination (row of all_poss_combos), report the largest
	# dependency among the applicable (non-INAP) characters; 0 when none apply.
	# all_poss_combos: matrix of state combinations (characters in columns).
	# char_dependencies: dependency (character number) for each column, by position.
	# Returns: numeric vector, one dependency per combination.
	# (fix: removed unused local all_chars, which also demanded non-NULL colnames)
	ochars <- ncol(all_poss_combos);
	hchars <- nrow(all_poss_combos);
	combo_dependency <- vector(length=hchars);
	for (hc in seq_len(hchars))
		combo_dependency[hc] <- max(0,char_dependencies[(1:ochars)[!all_poss_combos[hc,] %in% INAP]]);
	return(combo_dependency);
}
accersi_gateway_states_among_hierarchical_characters <- function(all_poss_combos,char_dependencies,hstates) {
# Collapse the per-dependent-character key states (from
# accersi_key_states_for_independent_character_in_hierarchical) into one row
# per controlling character: the "gateway" states that switch its dependent
# characters on. Unused cells remain -1; all-unused columns are trimmed.
# NOTE(review): colMaxs is not base R (matrixStats or a file-local helper) --
# confirm it is in scope.
all_chars <- as.numeric(colnames(all_poss_combos));
keystates <- accersi_key_states_for_independent_character_in_hierarchical(all_poss_combos = all_poss_combos,char_dependencies = char_dependencies)
key_combos <- array(-1,dim=c(length(unique(char_dependencies)),max(hstates[match(unique(char_dependencies),all_chars)])))
rownames(key_combos) <- unique(char_dependencies);
for (i in 1:nrow(keystates)) {
	j <- match(as.numeric(rownames(keystates))[i],all_chars);	# get dependency
	k <- match(char_dependencies[j],rownames(key_combos));	# match dependency
	h <- keystates[i,keystates[i,]>=0];
	h <- h[!h %in% key_combos[k,]];	# keep only states not already recorded
	if (length(h)>0) {
		g <- 1+sum(key_combos[k,]>-1);	# first free cell in row k
		g <- g:(g+length(h)-1);
		key_combos[k,g] <- h;
		}
	}
mx_states <- as.array(colMaxs(key_combos));
new_dim <- orig_dim <- dim(key_combos);
new_dim[2] <- sum(mx_states>=0);	# columns actually holding gateway states
if (min(new_dim)==1) {
	# rebuild as an explicit array so single-row/column results keep dimnames
	orig_rownames <- rownames(key_combos);
	key_combos <- array(key_combos[,1:new_dim[2]],dim=c(new_dim[1],new_dim[2]));
	rownames(key_combos) <- orig_rownames;
	colnames(key_combos) <- letters[1:new_dim[2]];
	}
return(key_combos);
}
#### MODIFY & WRITE NEXUS FILES ####
# routine to "clean" character matrix (e.g., remove gaps in coding, standarize minimum states, etc.)
mundify_character_matrix <- function(chmatrix,minst=0,UNKNOWN=-11,INAP=-22) {
# "Clean" a character matrix: close gaps in each character's state numbering
# (e.g. states 0,2,3 -> 0,1,2) and shift every character so its lowest state
# equals minst. Polymorphic scores (negative packed-digit codes) are rewritten
# to stay consistent with the renumbered states.
# chmatrix: taxon-by-character matrix using UNKNOWN/INAP sentinels.
# Returns: the recoded matrix.
# NOTE(review): minimum_state/maximum_state results are computed but never
# used below; unravel_polymorph/ravel_polymorph come from elsewhere in file.
notu <- nrow(chmatrix);	# replaces spc to standardize coding.
nchars <- ncol(chmatrix);
min_states <- minimum_state(chmatrix,UNKNOWN=UNKNOWN,INAP=INAP);
max_states <- maximum_state(chmatrix,UNKNOWN=UNKNOWN,INAP=INAP);
for (ch in 1:nchars) {
	rem <- c((1:notu)[chmatrix[,ch]==UNKNOWN],(1:notu)[chmatrix[,ch]==INAP]);
	if (length(rem)>0) {
		test <- chmatrix[-rem,ch]
		} else test <- chmatrix[,ch];
	# check polymorphics for anything that needs to be changed
	if (length(rem) < notu) {
		polys <- sum(test<0);	# taxa with polymorphic scores
		coded <- sort(unique(test[test>=0]));
		if (polys>0) {
#			examps <- test[test<0]
			# fold the states inside each polymorph into the set of coded states
			polycoded <- sort(unique(test[test<0]))
			for (i in 1:length(polycoded)) {
				polystates <- unravel_polymorph(polycoded[i])
				coded <- sort(unique(c(coded,polystates)))
#				if (min(polystates)<minstch) minstch <- min(polystates)
				}
			} else {
			polycoded <- c();
			}
		minstch <- min(coded);
		# eliminate gaps in states
		if (sum(!min(coded):max(coded) %in% coded)>0) {
#			new_codes <- match(coded,coded)-(1-minst);
			new_codes <- match(coded,coded)-(1-minstch);
			for (st in 1:length(coded)) {
				if (coded[st]!=new_codes[st]) {
					rec <- (1:notu)[chmatrix[,ch]==coded[st]];
					chmatrix[rec,ch] <- new_codes[st];
					# rewrite every polymorphic code that contains the old state
					redo_poly <- 1;
					while (redo_poly <= length(polycoded)) {
						polystates <- unravel_polymorph(polycoded[redo_poly]);
						polystates[polystates==coded[st]] <- new_codes[st]
						newpolystates <- ravel_polymorph(polystates);
						testp <- (1:notu)[chmatrix[,ch]==polycoded[redo_poly]];
#						if (newpolystates != polycoded[redo_poly]) {
						chmatrix[testp,ch] <- newpolystates;
#						}
#						polycoded[redo_poly] <- chmatrix[,ch][chmatrix[,ch] %in% polycoded[redo_poly]] <- newpolystates
						polycoded[redo_poly] <- newpolystates;
						redo_poly <- redo_poly+1;
						}
					coded[st] <- new_codes[st];
					}
				}
			}
		# standardize minimum state
		# simple cheat: subtract 1111 to polymorphics
		if (minstch!=minst) {
			adj <- minst-minstch;
			test2 <- (1:notu)[chmatrix[,ch]>=0];
			chmatrix[test2,ch] <- chmatrix[test2,ch]+adj;
			if (polys>0) {
				examps2 <- polycoded;
				for (i in 1:length(polycoded)) {
					testp <- (1:notu)[chmatrix[,ch]==polycoded[i]];
					# shift every digit of the packed polymorph by adj at once:
					# the floor(...) factor is the repunit 11...1 matching its width
					examps2[i] <- examps2[i]-(adj*floor(10^floor(log10(abs(polycoded[i])))*10/9));
					chmatrix[testp,ch] <- examps2[i];
					}
				polycoded <- examps2;
				}	# end rescoring of polytomies
			}	# end rescaling stats
		}	# end case where rem < notu
	}	# end search of characters;
return(chmatrix);
}
# routine to remove invariant and/or unscored characters from matrix
remove_invariants_from_character_matrix <- function(chmatrix,minst=0,UNKNOWN=-11,INAP=-22) {
	# Drop characters (columns) with fewer than two distinct scores once
	# unknown (UNKNOWN) and inapplicable (INAP) sentinels are ignored.
	# chmatrix: taxon-by-character matrix.  minst is unused but kept so the
	#   signature matches the sibling matrix utilities.
	# Returns: chmatrix without invariant/unscored characters.
	notu <- nrow(chmatrix);     # number of taxa
	nchars <- ncol(chmatrix);
	rem_char <- c();
	for (ch in seq_len(nchars)) {
		rem <- c((1:notu)[chmatrix[,ch]==UNKNOWN],(1:notu)[chmatrix[,ch]==INAP]);
		if (length(rem)>0) {
			scored <- chmatrix[-rem,ch];
		} else {
			scored <- chmatrix[,ch];
		}
		if (length(unique(scored))<2) rem_char <- c(rem_char,ch);
	}
	# bug fix: when nothing is invariant, rem_char is NULL and the old
	# chmatrix[,-rem_char] raised "invalid argument to unary operator";
	# only subset when there is something to remove.
	if (length(rem_char)>0) return(chmatrix[,-rem_char]);
	return(chmatrix);
}
# generate composite score from 2+ scored taxa
accersi_composite_scores <- function(mini_matrix,return_polymorph=TRUE,UNKNOWN=-11,INAP=-22) {
	# Merge the rows of mini_matrix (2+ scored taxa) into a single composite
	# score per character: identical scores pass through; INAP then UNKNOWN are
	# discarded when other scores exist; remaining disagreements are packed into
	# one polymorphic code via ravel_polymorph (expanding any observed
	# polymorphs with unravel_polymorph first).
	composite_score <- c();
	for (col_no in seq_len(ncol(mini_matrix))) {
		obs <- unique(mini_matrix[,col_no]);
		if (length(obs)>1) {
			# drop inapplicable scores when anything else was observed
			obs <- obs[obs!=INAP];
			if (length(obs)>1) {
				# drop unknowns when real scores remain
				obs <- obs[obs!=UNKNOWN];
				if (length(obs)>1) {
					neg_scores <- obs[obs<0];
					if (length(neg_scores)>0) {
						# expand packed polymorphs into their component states
						expanded <- sapply(neg_scores,unravel_polymorph);
						obs <- sort(unique(c(expanded,obs[obs>=0])));
					}
					obs <- ravel_polymorph(obs);
				}
			}
		}
		composite_score <- c(composite_score,obs);
	}
	return(composite_score);
}
scribio_nexus_file_from_chmatrix <- function(ch_matrix,new_file_name,UNKNOWN=-11,INAP=-22) {
# Write a numeric character matrix to new_file_name as a NEXUS data file with
# a canned MrBayes command block appended. Scores 0-9 print as digits,
# UNKNOWN as "?", INAP as "-", other negatives as packed polymorphs "(..)",
# and (when some character has 10+ states) higher states as letters
# (I and O are skipped).
# NOTE(review): all_states is only defined in the max(nstates)>=10 branch,
# so the ch_matrix[nn,ch]>9 case below would fail if a score exceeded 9 while
# max(nstates)<10 -- presumably unreachable, but confirm.
notu <- nrow(ch_matrix);
taxon_names <- rownames(ch_matrix);
nchars <- ncol(ch_matrix);
nstates <- count_states(chmatrix = ch_matrix);
nexus_file_content <- c();
nexus_file_content <- rbind("#NEXUS","","BEGIN DATA;")
nexus_file_content <- rbind(nexus_file_content,paste("	DIMENSIONS NTAX=",notu," NCHAR=",nchars,";",sep=""));
if (max(nstates)<10) {
	# digit-only symbol list "0 1 2 ..."
	state_symbols <- " ";
	for (st in 1:max(nstates))
		state_symbols <- paste(state_symbols,st-1,sep=" ");
	} else {
	# extend the alphabet with letters past state 9 (I and O excluded)
	mxl <- max(nstates)-10;
	letter_states <- LETTERS[!LETTERS %in% c("I","O")][1:mxl]
	all_states <- c(0:9,letter_states);
	state_symbols <- paste(all_states,collapse=" ");
	}
nexus_file_content <- rbind(nexus_file_content,paste("	FORMAT DATATYPE = STANDARD RESPECTCASE GAP = - MISSING = ? SYMBOLS = \"",state_symbols,"\";"));
nexus_file_content <- rbind(nexus_file_content,"	MATRIX");
string_to_count <- taxon_names;
name_lengths <- sapply(string_to_count,count_characters_in_string);
max_name_length <- max(name_lengths);
need_quotes <- c(".","(",")","[","]");
for (nn in 1:notu) {
	test_name <- strsplit(taxon_names[nn],split="",fixed=TRUE)[[1]]
	# quote names holding NEXUS-special punctuation; otherwise blanks -> "_"
	if (sum(test_name %in% need_quotes)==0) {
		taxon <- gsub(" ","_",taxon_names[nn]);
		} else {
		taxon <- paste("\"",taxon_names[nn],"\"",sep="");
		name_lengths[nn] <- name_lengths[nn]+2;
		}
	# pad so that every row of scores starts in the same column
	this_line <- paste("\t",taxon,paste(rep(" ",(5+(max_name_length-name_lengths[nn]))),collapse=""),sep="");
	otu_code <- c();
	for (ch in 1:nchars) {
		if (ch_matrix[nn,ch]>=0 && ch_matrix[nn,ch]<=9) {
			otu_code <- paste(otu_code,ch_matrix[nn,ch],sep="");
			} else if (ch_matrix[nn,ch]>9) {
			otu_code <- paste(otu_code,all_states[1+ch_matrix[nn,ch]],sep="");	# note: we need +1 because of state 0
			} else if (ch_matrix[nn,ch]==UNKNOWN) {
			otu_code <- paste(otu_code,"?",sep="");
			} else if (ch_matrix[nn,ch]==INAP) {
			otu_code <- paste(otu_code,"-",sep="");
			} else if (ch_matrix[nn,ch]<0) {
			# packed polymorph: split the digits out of the negative code
			polystates <- strsplit(as.character(ch_matrix[nn,ch]),split="",fixed=TRUE)[[1]];
			polystates <- as.numeric(polystates[polystates!="-"]);
			otu_code <- paste(otu_code,ravel_polymorph_for_file(polystates),sep="");
			}
		}
	nexus_file_content <- rbind(nexus_file_content,paste(this_line,otu_code,sep=""));
	}
nexus_file_content <- rbind(nexus_file_content,";");
nexus_file_content <- rbind(nexus_file_content,"END;");
nexus_file_content <- rbind(nexus_file_content,"begin mrbayes;");
nexus_file_content <- rbind(nexus_file_content,"	set autoclose=yes nowarn=yes;");
nexus_file_content <- rbind(nexus_file_content,"	lset nst=6 rates=invgamma;");
nexus_file_content <- rbind(nexus_file_content,"	unlink statefreq=(all) revmat=(all) shape=(all) pinvar=(all); ");
nexus_file_content <- rbind(nexus_file_content,"	prset applyto=(all) ratepr=variable;");
nexus_file_content <- rbind(nexus_file_content,"	mcmcp ngen= 100000000 relburnin=yes burninfrac=0.25 printfreq=10000 samplefreq=10000 nchains=4 savebrlens=yes;");
nexus_file_content <- rbind(nexus_file_content,"	mcmc;");
nexus_file_content <- rbind(nexus_file_content,"	sumt;");
nexus_file_content <- rbind(nexus_file_content,"end;");
write(nexus_file_content,file=new_file_name);
}
# write nexus file from chmatrix that already is converted to character
scribio_nexus_file_from_chmatrix_character <- function(ch_matrix_ch,new_file_name,max_states,unknown="?",inap="-") {
# Write a matrix already converted to printable strings (entries like "0",
# "A", "?", "(01)") to new_file_name as a NEXUS file with a canned MrBayes
# block. max_states sets the SYMBOLS list; unknown/inap set MISSING and GAP.
# NOTE(review): letter_states/more_letter_states (used when max_states>=10)
# and count_characters_in_string come from the enclosing environment --
# confirm they are defined before calling.
notu <- nrow(ch_matrix_ch);
taxon_names <- rownames(ch_matrix_ch);
nchars <- ncol(ch_matrix_ch);
nexus_file_content <- c();
nexus_file_content <- rbind("#NEXUS","","BEGIN DATA;")
nexus_file_content <- rbind(nexus_file_content,paste("	DIMENSIONS NTAX=",notu," NCHAR=",nchars,";",sep=""));
if (max_states<10) {
	# digit-only symbol list "0 1 2 ..."
	state_symbols <- " ";
	for (st in 1:max_states)
		state_symbols <- paste(state_symbols,st-1,sep=" ");
	} else {
#	mxl <- max_states-10;
	all_states <- c(0:9,letter_states,more_letter_states);
	state_symbols <- paste(all_states[1:max_states],collapse=" ");
	}
nexus_file_content <- rbind(nexus_file_content,paste("	FORMAT DATATYPE = STANDARD RESPECTCASE GAP = ",inap," MISSING = ",unknown," SYMBOLS = \"",state_symbols,"\";"));
nexus_file_content <- rbind(nexus_file_content,"	MATRIX");
string_to_count <- taxon_names;
name_lengths <- sapply(string_to_count,count_characters_in_string);
max_name_length <- max(name_lengths);
need_quotes <- c(".","(",")","[","]");
for (nn in 1:notu) {
	test_name <- strsplit(taxon_names[nn],split="",fixed=TRUE)[[1]]
	# quote names holding NEXUS-special punctuation; otherwise blanks -> "_"
	if (sum(test_name %in% need_quotes)==0) {
		taxon <- gsub(" ","_",taxon_names[nn]);
		} else {
		taxon <- paste("\"",taxon_names[nn],"\"",sep="");
		name_lengths[nn] <- name_lengths[nn]+2;
		}
	# pad so that every row of scores starts in the same column
	this_line <- paste("\t",taxon,paste(rep(" ",(5+(max_name_length-name_lengths[nn]))),collapse=""),sep="");
	this_line <- paste(this_line,paste(ch_matrix_ch[nn,],collapse=""),sep="");
	nexus_file_content <- rbind(nexus_file_content,this_line);
	}
nexus_file_content <- rbind(nexus_file_content,";");
nexus_file_content <- rbind(nexus_file_content,"END;");
nexus_file_content <- rbind(nexus_file_content,"begin mrbayes;");
nexus_file_content <- rbind(nexus_file_content,"	set autoclose=yes nowarn=yes;");
nexus_file_content <- rbind(nexus_file_content,"	lset nst=6 rates=invgamma;");
nexus_file_content <- rbind(nexus_file_content,"	unlink statefreq=(all) revmat=(all) shape=(all) pinvar=(all); ");
nexus_file_content <- rbind(nexus_file_content,"	prset applyto=(all) ratepr=variable;");
nexus_file_content <- rbind(nexus_file_content,"	mcmcp ngen= 100000000 relburnin=yes burninfrac=0.25 printfreq=10000 samplefreq=10000 nchains=4 savebrlens=yes;");
nexus_file_content <- rbind(nexus_file_content,"	mcmc;");
nexus_file_content <- rbind(nexus_file_content,"	sumt;");
nexus_file_content <- rbind(nexus_file_content,"end;");
write(nexus_file_content,file=new_file_name);
}
ravel_polymorph_for_file <- function(polystates) {
	# Format a set of states as a NEXUS-style polymorphism,
	# e.g. c(2,0,1) -> "(012)".
	ordered_states <- sort(polystates,decreasing=FALSE);
	paste("(",paste(ordered_states,collapse=""),")",sep="")
}
convert_character_matrix_to_character <- function(chmatrix,UNKNOWN=-11,INAP=-22,missing="?",gap="-") {
	# Convert a numeric character matrix to its printable character form:
	# states 0-9 then letters (via the file-level letter_states vectors),
	# UNKNOWN -> `missing`, INAP -> `gap`, and negative packed polymorph codes
	# -> "(..)" strings (via unravel_polymorph_badass / ravel_polymorph_for_file).
	notu <- nrow(chmatrix);
	nchars <- ncol(chmatrix);
	chmatrix_char <- chmatrix;
	allstates <- c(0:9,letter_states,more_letter_states);
	for (ch in seq_len(nchars)) {
		col_scores <- chmatrix[,ch];
		rows_coded <- (1:notu)[col_scores>=0];
		rows_gap <- (1:notu)[col_scores==INAP];
		rows_missing <- (1:notu)[col_scores==UNKNOWN];
		# whatever is left must carry a packed polymorphic code
		rows_poly <- (1:notu)[!(1:notu) %in% c(rows_coded,rows_gap,rows_missing)];
		chmatrix_char[rows_coded,ch] <- as.character(allstates[1+chmatrix[rows_coded,ch]]);
		chmatrix_char[rows_missing,ch] <- missing;
		chmatrix_char[rows_gap,ch] <- gap;
		for (pr in seq_along(rows_poly)) {
			poly_row <- rows_poly[pr];
			chmatrix_char[poly_row,ch] <- ravel_polymorph_for_file(allstates[1+sort(unravel_polymorph_badass(chmatrix[poly_row,ch]))]);
		}
	}
	return(chmatrix_char);
}
#### READ NEWICK FILES ####
#### convert (1,(2,3)) to vector_tree = 4 5 5 -1 4
read_newick_tree_from_chosen_file <- function() {
	# Interactively pick a file holding a Newick tree of numbered taxa, e.g.
	# "(1,(2,3))", and return a "vector tree": element i gives the htu (clade)
	# number to which taxon/htu i is attached; the root is flagged with -1.
	newicktree_file <- file.choose();
	newick_tree <- scan(file=newicktree_file,what=character(),sep="\n");
	nexus_string <- strsplit(newick_tree,split="",fixed=TRUE)[[1]];
	nodes <- sum(nexus_string=="(");	# one "(" per clade
	clades <- 1:nodes;
	# find the highest taxon number; bug fixes: otu is now initialized (a tree
	# string starting with a digit no longer errors) and digits accumulate as
	# otu*10+digit -- the old otu*(10^p) mangled taxa of three or more digits
	notu <- p <- otu <- 0;
	for (i in 1:length(nexus_string)) {
		if (nexus_string[i]>="0" && nexus_string[i]<="9") {
			otu <- as.numeric(nexus_string[i])+(otu*10);
			p <- p+1;
			if (otu>notu) notu <- otu;
		} else {
			p <- otu <- 0;
		}
	}
	vector_tree <- vector(length=notu+max(clades));
	for (c in 1:nodes) clades[c] <- -1;
	cl <- c <- 0;
	for (i in 1:length(nexus_string)) {
		if (nexus_string[i]=="(") {
			# open a new clade: attach it to the enclosing clade (root gets -1)
			sp <- p <- 0;
			cl <- cl+1;
			if (cl>1) {
				vector_tree[notu+cl] <- clades[c]+notu;
			} else vector_tree[notu+1] <- -1;
			c <- c+1;
			clades[c] <- cl;
		} else if (nexus_string[i]==")") {
			c <- c-1;	# close the current clade
			sp <- p <- 0;
		} else if (nexus_string[i]==",") {
			sp <- p <- 0;
		} else if (nexus_string[i]>="0" && nexus_string[i]<="9") {
			sp <- as.numeric(nexus_string[i])+(sp*10);
			p <- p+1;
			# assign the taxon once its last digit is reached; bug fix: the
			# second test used to re-check nexus_string[i] (always a digit)
			# instead of the following character
			if (i==length(nexus_string) || nexus_string[i+1]<"0" || nexus_string[i+1]>"9") vector_tree[sp] <- notu+clades[c];
		}
	}
	return(vector_tree);
}
read_newick_tree_from_file <- function(newicktree_file) {
	# Read a Newick tree of numbered taxa, e.g. "(1,(2,3))", from a file and
	# return a "vector tree": element i gives the htu (clade) number to which
	# taxon/htu i is attached; the root is flagged with -1.
	newick_tree <- scan(file=newicktree_file,what=character(),sep="\n");
	nexus_string <- strsplit(newick_tree,split="",fixed=TRUE)[[1]];
	nodes <- sum(nexus_string=="(");	# one "(" per clade
	clades <- 1:nodes;
	# find the highest taxon number; bug fix: otu is now initialized, so a
	# tree string beginning with a digit no longer errors
	notu <- p <- otu <- 0;
	for (i in 1:length(nexus_string)) {
		if (nexus_string[i]>="0" && nexus_string[i]<="9") {
			otu <- as.numeric(nexus_string[i])+(otu*10);
			p <- p+1;
			if (otu>notu) notu <- otu;
		} else {
			p <- otu <- 0;
		}
	}
	vector_tree <- vector(length=notu+max(clades));
	for (c in 1:nodes) clades[c] <- -1;
	cl <- c <- 0;
	for (i in 1:length(nexus_string)) {
		if (nexus_string[i]=="(") {
			# open a new clade: attach it to the enclosing clade (root gets -1)
			sp <- p <- 0;
			cl <- cl+1;
			if (cl>1) {
				vector_tree[notu+cl] <- clades[c]+notu;
			} else vector_tree[notu+1] <- -1;
			c <- c+1;
			clades[c] <- cl;
		} else if (nexus_string[i]==")") {
			c <- c-1;	# close the current clade
			sp <- p <- 0;
		} else if (nexus_string[i]==",") {
			sp <- p <- 0;
		} else if (nexus_string[i]>="0" && nexus_string[i]<="9") {
			sp <- as.numeric(nexus_string[i])+(sp*10);
			p <- p+1;
			# assign the taxon once its last digit is reached; bug fix: the
			# second test used to re-check nexus_string[i] (always a digit)
			# instead of the following character
			if (i==length(nexus_string) || nexus_string[i+1]<"0" || nexus_string[i+1]>"9") vector_tree[sp] <- notu+clades[c];
		}
	}
	return(vector_tree);
}
#(1,(2,4,(9,(7,(10,((18,(26,(33,36)),(34,40)),(19,27))),(13,25,(14,16)))),(3,15,(5,(11,17,24,(21,29),(22,31)),(28,(20,(30,32))))),(6,(8,39,(12,23,35,37,38)))))
#### convert vector_tree = 4 5 5 -1 4 to (1,(2,3))
#### where number is the htu number of the clade to which a species or htu belong
#### does not work yet
write_newick_string_with_taxon_names_from_vector_tree <- function(vector_tree,otu_names) {
	# Convert a vector tree (element i = htu number of the clade holding i,
	# root flagged with -1) into a Newick string using the supplied taxon names.
	mat_tree <- transform_vector_tree_to_matrix_tree(vector_tree);
	notu <- match(-1,vector_tree)-1;	# taxa occupy positions 1..notu
	nNodes <- length(vector_tree)-notu;
	all_names <- c(otu_names,(1:nNodes)+notu);
	newick_string <- paste(match(-1,vector_tree),";",sep="");
	for (nd in 1:nNodes) {
		this_node <- notu+nd;
		progeny <- paste("(",paste(all_names[mat_tree[nd,mat_tree[nd,]>0]],collapse=","),")",sep="");
		# bug fix: anchor the htu number with word boundaries so that e.g. node
		# "12" is no longer replaced inside "112" or "120"; also removed a
		# dangling no-op expression from the original loop
		newick_string <- gsub(paste("\\b",this_node,"\\b",sep=""),progeny,newick_string);
		# NOTE(review): taxon names containing a bare number that collides with
		# an htu number could still be corrupted -- confirm against the names.
	}
	return(newick_string);
}
write_newick_string_from_vector_tree <- function(vector_tree) {
	# Convert a vector tree (element i = htu number of the clade holding i,
	# root flagged with -1) into a numbered-taxon Newick string, e.g. "(1,(2,3));".
	mat_tree <- transform_vector_tree_to_matrix_tree(vector_tree);
	notu <- match(-1,vector_tree)-1;	# taxa occupy positions 1..notu
	nNodes <- length(vector_tree)-notu;
	newick_string <- paste(match(-1,vector_tree),";",sep="");
	for (nd in 1:nNodes) {
		this_node <- notu+nd;
		progeny <- paste("(",paste(mat_tree[nd,mat_tree[nd,]>0],collapse=","),")",sep="");
		# bug fix: \b anchors stop one htu number from matching inside another
		# (e.g. htu 11 inside 110), which scrambled larger trees
		newick_string <- gsub(paste("\\b",this_node,"\\b",sep=""),progeny,newick_string);
	}
	return(newick_string);
}
read_newick_string <- function(newick_string) {
	# Parse a Newick string of numbered taxa, e.g. "(1,(2,3))", into a vector
	# tree: element i gives the htu (clade) number of the clade holding
	# taxon/htu i; the root is flagged with -1.  Accepts either the whole
	# string or its character-split form.
	if (length(newick_string)==1) newick_string <- strsplit(newick_string,split="",fixed=TRUE)[[1]];
	nodes <- sum(newick_string=="(");	# one "(" per clade
	clades <- 1:nodes;
	# highest taxon number; bug fix: digits accumulate as otu*10+digit -- the
	# old otu*(10^p) mangled taxon numbers of three or more digits
	notu <- p <- otu <- 0;
	for (i in 1:length(newick_string)) {
		if (newick_string[i]>="0" && newick_string[i]<="9") {
			otu <- as.numeric(newick_string[i])+(otu*10);
			p <- p+1;
			if (otu>notu) notu <- otu;
		} else {
			p <- otu <- 0;
		}
	}
	vector_tree <- vector(length=notu+max(clades));
	for (c in 1:nodes) clades[c] <- -1;
	cl <- c <- 0;
	for (i in 1:length(newick_string)) {
		if (newick_string[i]=="(") {
			# open a new clade: attach it to the enclosing clade (root gets -1)
			sp <- p <- 0;
			cl <- cl+1;
			if (cl>1) {
				vector_tree[notu+cl] <- clades[c]+notu;
			} else vector_tree[notu+1] <- -1;
			c <- c+1;
			clades[c] <- cl;
		} else if (newick_string[i]==")") {
			c <- c-1;	# close the current clade
			sp <- p <- 0;
		} else if (newick_string[i]==",") {
			sp <- p <- 0;
		} else if (newick_string[i]>="0" && newick_string[i]<="9") {
			sp <- as.numeric(newick_string[i])+(sp*10);
			p <- p+1;
			# taxon complete when the next character is not a digit; bug fix:
			# the second test used to re-check newick_string[i] (always a digit)
			if (i==length(newick_string) || newick_string[i+1]<"0" || newick_string[i+1]>"9") vector_tree[sp] <- notu+clades[c];
		}
	}
	return(vector_tree);
}
# written 2022-08-19
count_taxa_in_newick_string <- function(newick_string) {
	# newick_string should look like: "(12,(7,(8,1,3,(2,(5,4,9,(11,(10,6)))))));"
	# Count the taxa in a numbered-taxon Newick string: strip the punctuation,
	# split on commas, and count the resulting taxon numbers.
	stripped <- gsub(";","",newick_string);
	stripped <- gsub("\\(","",stripped);
	stripped <- gsub(")","",stripped);
	taxon_numbers <- sort(as.numeric(strsplit(stripped,",")[[1]]));
	return(length(taxon_numbers));
}
# written for Cinctan project
transform_newick_string_to_venn_tree <- function(newick_string) {
# newick_string should look like: "(12,(7,(8,1,3,(2,(5,4,9,(11,(10,6)))))));"
# Convert a numbered-taxon Newick string into a "venn tree": a matrix with one
# row per clade listing the taxa between that clade's parentheses, right-padded
# with 0s out to notu columns.
atomized_newick <- strsplit(newick_string,"")[[1]];
l_m <- length(atomized_newick);
clade_bounds_l <- (1:l_m)[atomized_newick %in% "("];	# clade bounds left
clade_bounds_e <- clade_bounds_r <- (1:l_m)[atomized_newick %in% ")"];	# clade bounds right
nNodes <- length(clade_bounds_l);	# number of clades;
names(clade_bounds_l) <- names(clade_bounds_r) <- names(clade_bounds_e) <- 1:nNodes;
# get the first possible right paren ending this clade;
for (nn in 1:nNodes) clade_bounds_e[nn] <- sum(clade_bounds_r>clade_bounds_l[nn]);
clade_bounds_e_unq <- unique(clade_bounds_e);
clade_boundaries <- clade_boundaries_orig <- cbind(clade_bounds_l,clade_bounds_r,clade_bounds_e);
#1 <- nn <- 1
# iteratively pair each "(" with its matching ")" -- resolve the innermost
# group each pass, write it into clade_boundaries, and peel it off the
# working copy until no unresolved clades remain
while (length(clade_bounds_e_unq)>0) {
	this_group <- sum(clade_boundaries_orig[,3]==clade_bounds_e_unq[1]);
	if (length(clade_bounds_e_unq)>1) {
		this_group_starts <- this_group-sum(clade_boundaries_orig[,2]<clade_boundaries_orig[this_group+1,1]);
		} else {
		this_group_starts <- 0;
		}
	clade_boundaries_orig[1:this_group,2] <- sort(clade_boundaries_orig[1:this_group,2],decreasing=T);
	clade_boundaries[clade_boundaries[,1] %in% clade_boundaries_orig[,1],] <- clade_boundaries_orig;
	clade_boundaries_orig <- clade_boundaries_orig[!(1:nrow(clade_boundaries_orig)) %in% ((this_group_starts+1):this_group),]
	if (nrow(clade_boundaries_orig)>0) clade_boundaries_orig[,2] <- sort(clade_boundaries_orig[,2]);
	n <- 0;
	# recount how many right parens remain past each unresolved left paren
	while (n < nrow(clade_boundaries_orig)) {
		n <- n+1;
		clade_boundaries_orig[n,3] <- sum(clade_boundaries_orig[,2]>clade_boundaries_orig[n,1]);
		}
	clade_bounds_e_unq <- unique(clade_boundaries_orig[,3]);
	}
notu <- count_taxa_in_newick_string(newick_string);
venn_tree <- array(0,dim=c(nNodes,notu));
# list the taxa that fall between each clade's parentheses
for (nn in 1:nNodes) {
	lp <- clade_boundaries[nn,1];
	rp <- clade_boundaries[nn,2];
	this_clade <- paste(atomized_newick[lp:rp],collapse="");
#	print(this_clade)
	this_clade <- gsub("\\(","",this_clade);
	this_clade <- gsub(")","",this_clade);
	these_prog <- sort(as.numeric(strsplit(this_clade,",")[[1]]));
	venn_tree[nn,1:length(these_prog)] <- these_prog;
	}
return(venn_tree);
}
# written for Cinctan project
accersi_clade_boundaries_from_newick_string <- function(newick_string) {
# Locate every clade in a Newick string: pair each "(" with its matching ")"
# and return a data.frame with one row per clade giving the left (lp) and
# right (rp) parenthesis positions within the string.
# (Same pairing algorithm as transform_newick_string_to_venn_tree.)
atomized_newick <- strsplit(newick_string,"")[[1]];
l_m <- length(atomized_newick);
clade_bounds_l <- (1:l_m)[atomized_newick %in% "("];
clade_bounds_e <- clade_bounds_r <- (1:l_m)[atomized_newick %in% ")"];
nNodes <- length(clade_bounds_l);	# number of clades;
names(clade_bounds_l) <- names(clade_bounds_r) <- names(clade_bounds_e) <- 1:nNodes;
# get the first possible right paren ending this clade;
for (nn in 1:nNodes) clade_bounds_e[nn] <- sum(clade_bounds_r>clade_bounds_l[nn])
clade_bounds_e_unq <- unique(clade_bounds_e);
clade_boundaries <- clade_boundaries_orig <- cbind(clade_bounds_l,clade_bounds_r,clade_bounds_e);
#1 <- nn <- 1
# iteratively resolve the innermost group each pass, record it, and peel it
# off the working copy until no unresolved clades remain
while (length(clade_bounds_e_unq)>0) {
	this_group <- sum(clade_boundaries_orig[,3]==clade_bounds_e_unq[1]);
	if (length(clade_bounds_e_unq)>1) {
		this_group_starts <- this_group-sum(clade_boundaries_orig[,2]<clade_boundaries_orig[this_group+1,1]);
		} else {
		this_group_starts <- 0;
		}
	clade_boundaries_orig[1:this_group,2] <- sort(clade_boundaries_orig[1:this_group,2],decreasing=T);
	clade_boundaries[clade_boundaries[,1] %in% clade_boundaries_orig[,1],] <- clade_boundaries_orig;
	clade_boundaries_orig <- clade_boundaries_orig[!(1:nrow(clade_boundaries_orig)) %in% ((this_group_starts+1):this_group),]
	if (nrow(clade_boundaries_orig)>0) clade_boundaries_orig[,2] <- sort(clade_boundaries_orig[,2]);
	n <- 0;
	# recount how many right parens remain past each unresolved left paren
	while (n < nrow(clade_boundaries_orig)) {
		n <- n+1;
		clade_boundaries_orig[n,3] <- sum(clade_boundaries_orig[,2]>clade_boundaries_orig[n,1]);
		}
	clade_bounds_e_unq <- unique(clade_boundaries_orig[,3]);
	}
clade_boundaries <- data.frame(lp=as.numeric(clade_boundaries[,1]),
								rp=as.numeric(clade_boundaries[,2]))
return(clade_boundaries);
}
#### where number is the htu number of the clade to which a species or htu belong
# newick_string_ancestored <- newick_string_taxa_only_raw
# written for Cinctan project
# updated 2020-12-30 to allow outgroup to be ancestral
find_newick_ancestors <- function(newick_string_ancestored) {
  # Identify sampled-ancestor taxa in a numeric newick string.
  # Convention in the input: tips are otu numbers, and an ancestor is
  # written immediately AFTER the ")" of the clade it is ancestral to,
  # e.g. "(1,2)3" codes taxon 3 as ancestral to clade (1,2).
  #
  # newick_string_ancestored: newick string whose tips are otu numbers
  #   (assumed to end with a non-digit terminator such as ";").
  # Returns a 0/1 vector over the otus: 1 = coded as a sampled ancestor.
  atomic_ancestral <- strsplit(newick_string_ancestored,"")[[1]];
  a_a <- length(atomic_ancestral);
  l_paren <- (1:a_a)[atomic_ancestral=="("];
  r_paren <- (1:a_a)[atomic_ancestral==")"];
  sisters <- (1:a_a)[atomic_ancestral==","];
  # every non-punctuation character is a digit of some otu number
  otu_nos <- (1:a_a)[!(1:a_a) %in% c(l_paren,r_paren,sisters)];
  otu_nos <- otu_nos[otu_nos!=length(atomic_ancestral)]; # drop trailing ";"
  # count the otus: each gap between consecutive digit runs separates two
  # otu numbers; breaks records the character just before each new run
  # NOTE(review): assumes at least two digit runs — 2:length(otu_nos)
  # counts down if there is only one; confirm for degenerate trees.
  notu <- 1;
  breaks <- c();
  for (i in 2:length(otu_nos)) {
    if ((otu_nos[i]-1)>otu_nos[i-1]) {
      notu <- notu+1;
      breaks <- c(breaks,otu_nos[i]-1);
    }
  }
  sampled_ancestors <- array(0,dim=notu);
  # digit runs that begin right after a ")" are the sampled ancestors
  ancestral_starts <- 1+breaks[atomic_ancestral[breaks]==")"];
  ab <- 0;
  while (ab < length(ancestral_starts)) {
    ab <- ab+1;
    dd <- ancestral_starts[ab];
    st_hr <- match(dd,otu_nos);
    # rebuild the (possibly multi-digit) otu number digit by digit
    this_anc <- as.numeric(atomic_ancestral[otu_nos[st_hr]]);
    while(st_hr < length(otu_nos) && otu_nos[st_hr+1]==(otu_nos[st_hr]+1)) {
      st_hr <- st_hr+1;
      this_anc <- (10*this_anc)+as.numeric(atomic_ancestral[otu_nos[st_hr]]);
    }
    sampled_ancestors[this_anc] <- 1;
  }
  return(sampled_ancestors);
}
# written for Cinctan project
fix_newick_ancestors <- function(newick_string_ancestored) {
  # Rewrite sampled-ancestor notation into conventional newick form: a
  # taxon written directly after a clade's ")" — e.g. "(1,2)3", taxon 3
  # ancestral to (1,2) — is moved inside the clade as an extra daughter,
  # yielding "(1,2,3)".
  #
  # newick_string_ancestored: newick string whose tips are otu numbers.
  # Returns the revised newick string with every such ancestor relocated.
  atomic_ancestral <- strsplit(newick_string_ancestored,"")[[1]];
  a_a <- length(atomic_ancestral);
  l_paren <- (1:a_a)[atomic_ancestral=="("];
  r_paren <- (1:a_a)[atomic_ancestral==")"];
  sisters <- (1:a_a)[atomic_ancestral==","];
  otu_nos <- (1:a_a)[!(1:a_a) %in% c(l_paren,r_paren,sisters)];
  otu_nos <- otu_nos[otu_nos!=length(atomic_ancestral)]; # drop trailing ";"
  # scan the ")"s right-to-left so already-scanned positions stay valid
  for (rp in length(r_paren):1) {
    # a digit immediately following this ")" flags a sampled ancestor
    if (!is.na(match(1,otu_nos-r_paren[rp]))) {
      an <- r_paren[rp];
      an_no <- c();
      # collect every digit of the ancestor's otu number
      while (atomic_ancestral[an+1] %in% as.character(0:9)) {
        an_no <- c(an_no,an+1);
        an <- an+1;
      }
      # splice the ancestor in as a final daughter: "...,(anc))" + rest
      atomic_ancestral <- c(atomic_ancestral[1:(r_paren[rp]-1)],
                            ",",
                            atomic_ancestral[an_no],
                            ")",
                            atomic_ancestral[(an+1):a_a]);
      # the splice shifted every landmark position: recompute them all
      a_a <- length(atomic_ancestral);
      l_paren <- (1:a_a)[atomic_ancestral=="("];
      r_paren <- (1:a_a)[atomic_ancestral==")"];
      sisters <- (1:a_a)[atomic_ancestral==","];
      otu_nos <- (1:a_a)[!(1:a_a) %in% c(l_paren,r_paren,sisters)];
      otu_nos <- otu_nos[otu_nos!=length(atomic_ancestral)];
    }
  }
  revised_newick_string <- paste(atomic_ancestral,collapse="");
  return(revised_newick_string);
}
# written for Cinctan project
# heavily modified 2020-12
#read_newick_string_mcmc <- function(newick_string_full,otu_names) {
read_newick_string_mcmc <- function(newick_string_full,otu_names) {
  # Parse one fully annotated newick string from an MCMC posterior tree
  # sample (RevBayes/MrBayes-style "[&...]" metadata).
  #
  # newick_string_full: newick string with taxon names, branch lengths and
  #   bracketed metadata fields (posterior=, sampled_ancestor=,
  #   age_95%_HPD= or brlen_95%_HPD=).
  # otu_names: taxon names in otu-number order (spaces allowed; converted
  #   to the underscored form used inside the tree string).
  # Returns a list:
  #   newick_modified: numeric newick with sampled ancestors rewritten as
  #     daughters of their clades;
  #   newick: numeric newick as read (ancestors still in suffix notation);
  #   vector_tree, clade_posteriors, ancestral (0/1 per otu),
  #   prob_ancestor, hpd (lb/ub per otu & node), branch_durations.
  #
  # BUG FIX vs. original: the sampled-ancestor probability loop below never
  # advanced its counter (pb), so it indexed at 0 and never terminated.
  otu_names_nex <- gsub(" ","_",otu_names);
  # strip the [&...] metadata to leave a plain newick string
  simple_newick_string <- molecularize <- strsplit(newick_string_full,split="")[[1]];
  left_brackets <- (1:length(molecularize))[molecularize %in% "["];
  right_brackets <- (1:length(molecularize))[molecularize %in% "]"];
  for (br in 1:length(left_brackets)) molecularize[left_brackets[br]:right_brackets[br]] <- "";
  newick_string_taxa_only_rawwest <- newick_string_taxa_raw <- newick_string_taxa_only <- paste(molecularize[molecularize!=""],collapse="");
  notu <- length(otu_names);
  branch_durations <- array(0,dim=(2*notu)-1);
  # each otu's branch length: the ".0123456789" run following "taxon:"
  for (nn in 1:notu) {
    dummy_newick <- gsub(paste(as.character(otu_names_nex[nn]),":",sep=""),"•",newick_string_taxa_only);
    dummy_newick <- strsplit(dummy_newick,split="")[[1]];
    dd <- 1+match("•",dummy_newick);
    b_d <- dummy_newick[dd];
    dd <- dd+1;
    while (dummy_newick[dd] %in% c(".",0:9)) {
      b_d <- paste(b_d,dummy_newick[dd],sep="");
      dd <- dd+1;
    }
    branch_durations[nn] <- as.numeric(b_d);
  }
  # reduce to topology only: drop branch lengths, then number the taxa
  for (i in 0:9) newick_string_taxa_only <- gsub(i,"",newick_string_taxa_only);
  newick_string_taxa_only <- gsub(":","",newick_string_taxa_only);
  newick_string_taxa_only <- gsub("-","",newick_string_taxa_only);
  newick_string_taxa_only <- gsub("\\.","",newick_string_taxa_only);
  for (nn in 1:notu) {
    newick_string_taxa_only <- gsub(otu_names_nex[nn],as.character(nn),newick_string_taxa_only);
    newick_string_taxa_raw <- gsub(otu_names_nex[nn],as.character(nn),newick_string_taxa_raw);
  }
  newick_string_taxa_only_atomized <- strsplit(newick_string_taxa_only,"")[[1]];
  nstoa <- length(newick_string_taxa_only_atomized);
  if (newick_string_taxa_only_atomized[nstoa]!=";") {
    newick_string_taxa_only_atomized <- c(newick_string_taxa_only_atomized,";");
    nstoa <- length(newick_string_taxa_only_atomized);
  }
  newick_string_taxa_only <- paste(newick_string_taxa_only_atomized,collapse="");
  # flag sampled ancestors, then rewrite them as daughters of their clades
  ancestral <- find_newick_ancestors(newick_string_ancestored=newick_string_taxa_only);
  names(ancestral) <- otu_names_nex;
  newick_string_taxa_only_raw <- newick_string_taxa_only;
  newick_string_taxa_only <- fix_newick_ancestors(newick_string_taxa_only);
  newick_string_taxa_only_atomized <- strsplit(newick_string_taxa_only,"")[[1]];
  nstoa <- length(newick_string_taxa_only_atomized);
  if (newick_string_taxa_only_atomized[nstoa-1]!=")") {
    # make sure the whole tree is wrapped in a basal clade
    newick_string_taxa_only_atomized[nstoa] <- ")";
    newick_string_taxa_only_atomized <- c("(",newick_string_taxa_only_atomized,";");
    newick_string_taxa_only <- paste(newick_string_taxa_only_atomized,collapse="");
  }
  vector_tree <- read_newick_string(newick_string_taxa_only);
  mat_tree <- transform_vector_tree_to_matrix_tree(vector_tree);
  # node (clade) branch lengths follow "):"; clades are numbered so that
  # the LAST ")" in the string closes node 1
  newick_string_taxa_raw <- strsplit(newick_string_taxa_raw,split="")[[1]];
  clade_ends <- (1:length(newick_string_taxa_raw))[newick_string_taxa_raw %in% ")"];
  colons <- (1:length(newick_string_taxa_raw))[newick_string_taxa_raw %in% ":"];
  names(clade_ends) <- length(clade_ends):1;
  clade_colons <- 1+clade_ends[(clade_ends+1) %in% colons];
  for (cc in 1:length(clade_colons)) {
    n_node <- notu+as.numeric(names(clade_colons)[cc]);
    dd <- clade_colons[cc]+1;
    b_d <- newick_string_taxa_raw[dd];
    dd <- dd+1;
    while (newick_string_taxa_raw[dd] %in% c(".",0:9)) {
      b_d <- paste(b_d,newick_string_taxa_raw[dd],sep="");
      dd <- dd+1;
    }
    branch_durations[n_node] <- as.numeric(b_d);
  }
  nNodes <- length(clade_ends);
  newick_string_taxa_raw <- paste(newick_string_taxa_raw,collapse="");
  # zero-padded node names so they sort lexicographically
  if (nNodes<10) {
    node_names <- paste("node_",1:nNodes,sep="")
  } else if (nNodes<100) {
    node_names <- c(paste("node_0",1:9,sep=""),
                    paste("node_",10:nNodes,sep=""));
  } else {
    node_names <- c(paste("node_00",1:9,sep=""),
                    paste("node_0",10:99,sep=""),
                    paste("node_",100:nNodes,sep=""));
  }
  names(branch_durations) <- c(otu_names_nex,node_names);
  molecularize <- strsplit(newick_string_taxa_only,split="")[[1]];
  venn_tree_newick <- transform_newick_string_to_venn_tree(newick_string = newick_string_taxa_only);
  # have: newick_string_taxa_only, newick_string_taxa_only_raw, vector_tree,
  #   ancestral, branch_durations
  # need: clade_posteriors, prob_ancestor, hpd
  # mark each metadata field with a unique single-character sentinel
  newick_rem_info <- gsub("sampled_ancestor=","•",newick_string_full);
  if (gsub("age_95%_HPD=","§",newick_rem_info)!=newick_rem_info) {
    newick_rem_info <- gsub("age_95%_HPD=","§",newick_rem_info);
  } else {
    newick_rem_info <- gsub("brlen_95%_HPD=","§",newick_rem_info);
  }
  newick_rem_info <- gsub("posterior=","¶",newick_rem_info);
  hpd <- data.frame(lb=as.numeric(rep(0,notu+nNodes)),ub=as.numeric(rep(0,notu+nNodes)));
  molecularized <- strsplit(newick_rem_info,"")[[1]];
  molecules <- length(molecularized);
  panc_boundaries <- (1:molecules)[molecularized=="•"];
  post_boundaries <- (1:molecules)[molecularized=="¶"];
  hpd_boundaries <- (1:molecules)[molecularized=="§"];
  # locate every taxon name and clade in the annotated string so each
  # sentinel can be attributed to the taxon/node it immediately follows
  taxon_boundaries <- array(0,dim=c(notu,2));
  for (nn in 1:notu) {
    taxon_dummy <- gsub(otu_names_nex[nn],"£",newick_rem_info);
    taxon_boundaries[nn,1] <- (1:length(strsplit(taxon_dummy,"")[[1]]))[strsplit(taxon_dummy,"")[[1]]=="£"];
    taxon_boundaries[nn,2] <- taxon_boundaries[nn,1]+length(strsplit(otu_names_nex[nn],"")[[1]])-1;
  }
  clade_boundaries <- accersi_clade_boundaries_from_newick_string(newick_rem_info);
  colnames(taxon_boundaries) <- colnames(clade_boundaries);
  tu_boundaries <- rbind(taxon_boundaries,clade_boundaries);
  rownames(tu_boundaries) <- all_names <- c(otu_names_nex,node_names);
  tu_boundaries <- tu_boundaries[order(tu_boundaries$rp),];
  # HPD intervals sit inside {lb,ub} braces after each "§"
  brackets_l <- (1:molecules)[molecularized=="{"];
  brackets_r <- (1:molecules)[molecularized=="}"];
  for (hp in 1:length(hpd_boundaries)) {
    tx <- sum(tu_boundaries$rp<hpd_boundaries[hp]);
    txn <- match(rownames(tu_boundaries)[tx],all_names);
    i <- brackets_l[1+sum(hpd_boundaries[hp]>brackets_l)]+1;
    j <- brackets_r[1+sum(hpd_boundaries[hp]>brackets_r)]-1;
    hpd[txn,] <- as.numeric(strsplit(paste(molecularized[i:j],collapse=""),",")[[1]]);
  }
  rownames(hpd) <- all_names;
  # a sampled ancestor shares its HPD with the node it is ancestral to
  hpd[vector_tree[(1:notu)[ancestral==1]],] <- hpd[(1:notu)[ancestral==1],];
  rownames(taxon_boundaries) <- otu_names_nex;
  taxon_boundaries <- taxon_boundaries[order(taxon_boundaries[,1]),];
  brackets_l <- (1:molecules)[molecularized=="["];
  brackets_r <- (1:molecules)[molecularized=="]"];
  # P(sampled ancestor) per otu: the number following each "•" sentinel
  prob_ancestor <- array(0,dim=notu);
  pb <- 0;
  while (pb < length(panc_boundaries)) {
    pb <- pb+1; # advance counter (missing in the original: pb stayed 0, so the loop indexed at 0 and never terminated)
    tx <- sum(taxon_boundaries[,1]<panc_boundaries[pb]);
    txn <- match(rownames(taxon_boundaries)[tx],otu_names_nex);
    i <- panc_boundaries[pb]+1;
    pranc <- c();
    while (!molecularized[i] %in% c(",","]")) {
      pranc <- paste(pranc,molecularized[i],sep="");
      i <- i+1;
    }
    prob_ancestor[txn] <- as.numeric(pranc);
  }
  names(prob_ancestor) <- otu_names_nex;
  # clade posterior probabilities: the number following each "¶" sentinel
  clade_posteriors <- array(0,dim=nNodes);
  rownames(clade_boundaries) <- names(clade_posteriors) <- node_names;
  clade_boundaries <- clade_boundaries[order(clade_boundaries$rp),];
  for (pp in 1:length(post_boundaries)) {
    cl <- sum(clade_boundaries$rp<post_boundaries[pp]);
    cld <- match(rownames(clade_boundaries)[cl],node_names);
    i <- post_boundaries[pp]+1;
    postp <- c();
    while (!molecularized[i] %in% c(",","]")) {
      postp <- paste(postp,molecularized[i],sep="");
      i <- i+1;
    }
    clade_posteriors[cld] <- as.numeric(postp);
  }
  output <- list(newick_string_taxa_only,newick_string_taxa_only_raw,vector_tree,clade_posteriors,ancestral,prob_ancestor,hpd,branch_durations);
  names(output) <- c("newick_modified","newick","vector_tree","clade_posteriors","ancestral","prob_ancestor","hpd","branch_durations");
  return(output);
}
#### MODIFY TREES STORED IN MEMORY ####
transform_matrix_tree_to_vector_tree <- function (matrix_tree) {
  # Convert a matrix tree (row n = children of node n, 0-padded; node n's
  # id is n + number-of-tips) into a vector tree where entry i holds the
  # ancestor id of taxon/node i and the basal node holds -1.
  n_nodes <- nrow(matrix_tree)
  total_units <- max(matrix_tree)
  n_tips <- total_units - n_nodes
  vector_tree <- vector(length = total_units)
  vector_tree[n_tips + 1] <- -1  # basal node has no ancestor
  for (node in seq(n_nodes, 1)) {
    vector_tree[matrix_tree[node, ]] <- node + n_tips
  }
  return(vector_tree)
}
transform_matrix_tree_to_venn_tree <- function(matrix_tree) {
  # Expand a matrix tree (row n = immediate children of node n, where ids
  # > notu refer to daughter clades) into a venn tree: row n lists every
  # otu that ultimately descends from node n, right-padded with 0s.
  # NOTE(review): assumes daughter nodes are numbered higher than their
  # parents (so tipward rows are fully expanded first) — confirm this
  # holds for every tree source feeding this function.
  nNodes <- nrow(matrix_tree);
  notu <- max(matrix_tree)-nNodes;
  venn_tree <- array(0,dim=c(nNodes,notu));
  venn_tree[,1:ncol(matrix_tree)] <- matrix_tree;
  #for (nd in nNodes:16) {
  # work from the last node toward the root, splicing each daughter
  # clade's (already expanded) otus in place of its clade id
  nd <- nNodes;
  while (nd > 0) {
    if (max(venn_tree[nd,])>notu) { # row still holds unexpanded clade ids
      daughter_clades <- venn_tree[nd,venn_tree[nd,]>notu];
      daughter_nodes <- daughter_clades-notu;
      dclades <- length(daughter_clades);
      for (dc in dclades:1) {
        f1c <- match(daughter_clades[dc],venn_tree[nd,]); # cell holding the clade id
        daughter_div <- sum(venn_tree[daughter_nodes[dc],]>0); # otus in that clade
        replace_cells <- f1c:(f1c+daughter_div-1);
        grandkids <- venn_tree[daughter_nodes[dc],venn_tree[daughter_nodes[dc],]>0];
        # entries after the clade id shift right to make room first
        displaced_cuz <- venn_tree[nd,(1+f1c):notu][venn_tree[nd,(1+f1c):notu]>0]
        if (length(displaced_cuz)>0) {
          displaced_cells <- (f1c+daughter_div-1)+1:length(displaced_cuz);
          venn_tree[nd,displaced_cells] <- displaced_cuz;
        }
        venn_tree[nd,replace_cells] <- grandkids;
        #venn_tree[,1:6]
        #venn_tree[nd,]f1c:notu] <- c(venn_tree[daughter_nodes[dc],1:daughter_div],venn_tree[nd,(f1c+1):(notu+1-daughter_div)]);
      }
    }
    nd <- nd-1;
  }
  return(venn_tree);
}
# routine to extract vector tree from matrix giving total progeny of a node
transform_venn_tree_to_vector_tree <- function (venn_tree) {
  # Convert a venn tree into a vector tree.
  #
  # venn_tree: nNodes x notus matrix; row n lists the otus belonging to
  #   clade n (0-padded). Row 1 is the basal clade (all otus) and every
  #   later row is nested within an earlier one.
  # Returns a numeric vector of length notus + nNodes: entry i = ancestor
  #   htu of taxon/node i; the basal node holds -1.
  n_Nodes <- nrow(venn_tree);
  notus <- ncol(venn_tree);
  max_otus <- max(venn_tree);
  base <- max_otus+1
  new_vector_tree <- vector(length=(max_otus+n_Nodes));
  otus <- sort(as.integer(venn_tree[1,]));
  # each otu's ancestor is the LAST (least inclusive) clade containing it
  for (s in 1:notus) {
    spc <- otus[s];
    new_vector_tree[spc] <- max_otus+sort(which(venn_tree==spc,arr.ind=TRUE)[,1],decreasing=TRUE)[1]
  }
  new_vector_tree[base] <- -1
  # node 2 (if present) necessarily descends from the basal node
  if (n_Nodes >= 2) new_vector_tree[base+1] <- base
  # each later node's ancestor: the last EARLIER clade containing that
  # node's first otu.
  # BUG FIX: the original used a bare `for (n in 3:n_Nodes)`, which counts
  # DOWN (3,2) when n_Nodes == 2 in R and indexed past the matrix; guard so
  # the loop only runs when there are three or more nodes.
  if (n_Nodes >= 3) {
    for (n in 3:n_Nodes) {
      htu <- max_otus+n
      lead <- venn_tree[n,1]
      new_vector_tree[htu] <- max_otus+sort(which(venn_tree[1:(n-1),]==lead,arr.ind=TRUE)[,1],decreasing=TRUE)[1]
    }
  }
  return(new_vector_tree)
}
transform_venn_tree_to_matrix_tree <- function(venn_tree) {
  # Venn tree -> matrix tree, going through the vector-tree representation.
  vector_form <- transform_venn_tree_to_vector_tree(venn_tree)
  return(transform_vector_tree_to_matrix_tree(vector_form))
}
transform_vector_tree_to_matrix_tree <- function(vector_tree) {
  # Convert a vector tree (entry i = ancestor of taxon/node i; basal node
  # = -1) into a matrix tree: one row per ancestor id (ascending order),
  # listing that ancestor's children and right-padded with 0s.
  node_ids <- sort(unique(vector_tree[vector_tree > 0]))
  n_nodes <- length(node_ids)
  # column count = widest polytomy, i.e. the largest brood any one
  # ancestor has among the recorded parent assignments
  parents <- vector_tree[vector_tree > 1]
  widest <- max(hist(parents, breaks = (min(parents) - 1):max(parents), plot = FALSE)$counts)
  matrix_tree <- matrix(0, n_nodes, widest)
  filled <- vector(length = n_nodes)  # children recorded so far, per row
  for (child in 1:length(vector_tree)) {
    row_no <- match(vector_tree[child], node_ids)
    if (!is.na(row_no)) {  # skips the basal node's -1 entry
      filled[row_no] <- filled[row_no] + 1
      matrix_tree[row_no, filled[row_no]] <- child
    }
  }
  return(matrix_tree)
}
transform_vector_tree_to_venn_tree <- function(vector_tree) {
  # Expand a vector tree (entry i = ancestor of taxon/node i; basal node
  # = -1) into a venn tree: one row per node, row k listing every otu
  # descending from node (base + k - 1), right-padded with 0s.
  # NOTE(review): assumes daughter nodes are numbered higher than their
  # parents and that there are at least two nodes (the ohtu:(base+1) loops
  # count down otherwise) — confirm for degenerate trees.
  ohtu <- length(vector_tree);
  base <- match(-1,vector_tree); # basal node id; otus are 1:(base-1)
  otu <- base-1
  htu <- ohtu-otu
  venn_tree <- matrix(0,ohtu,otu)
  # seed the basal row with every otu (normally fully overwritten below)
  for (i in 1:otu) venn_tree[base,i] <- i
  # node_rich: otu richness per node — count direct otu daughters, then
  # roll each node's total rootward into its ancestor
  node_rich <- vector(length=ohtu)
  for (sp in otu:1) if (vector_tree[sp]!=0) node_rich[vector_tree[sp]] <- node_rich[vector_tree[sp]]+1
  for (nd in ohtu:(base+1)) if (vector_tree[nd]>0) node_rich[vector_tree[nd]] <- node_rich[vector_tree[nd]]+node_rich[nd]
  # fill each node's row: first its direct otu daughters...
  node_div <- vector(length=ohtu)
  for (sp in 1:otu) {
    node_div[vector_tree[sp]] <- node_div[vector_tree[sp]]+1
    venn_tree[vector_tree[sp],node_div[vector_tree[sp]]] <- sp
  }
  # ...then, working tipward-to-rootward, append each node's accumulated
  # otus to its ancestor's row
  for (nd in ohtu:(base+1)) {
    anc <- vector_tree[nd]
    for (i in 1:node_div[nd]) {
      node_div[anc] <- node_div[anc]+1
      venn_tree[anc,node_div[anc]] <- venn_tree[nd,i]
    }
  }
  #venn_tree[base:ohtu,1:15]
  # rows 1:otu were never used; return only the node rows
  return(venn_tree[base:ohtu,])
}
create_phylo_class_from_nexus_tree <- function(vector_tree,tip.label) {
  # Build an ape-style "phylo" object (edge matrix + tip labels + node
  # count) from a vector tree (entry i = ancestor of taxon/node i; basal
  # node = -1).
  first_htu <- min(vector_tree[vector_tree > 0])
  Nnode <- 1 + (max(vector_tree) - first_htu)
  n_tips <- first_htu - 1
  edges <- matrix(0, n_tips + Nnode - 1, 2)
  # one edge (ancestor, descendant) per non-basal taxon/node
  edge_row <- 0
  for (unit in 1:length(vector_tree)) {
    if (vector_tree[unit] != -1) {
      edge_row <- edge_row + 1
      edges[edge_row, ] <- c(vector_tree[unit], unit)
    }
  }
  output_tree <- structure(
    list(edge = edges, tip.label = tip.label, Nnode = Nnode),
    class = "phylo"
  )
  return(output_tree)
}
create_phylo_class_from_nexus_tree_file <- function(nexustreefile,tip.label) {
  # Read a vector tree from a nexus tree file and build an ape-style
  # "phylo" object from it.
  #
  # nexustreefile: path to the nexus tree file.
  # tip.label: taxon names in otu-number order.
  # Returns a "phylo" object (edge matrix, tip.label, Nnode).
  #
  # The original duplicated the body of create_phylo_class_from_nexus_tree
  # verbatim; delegate to it instead so the conversion logic lives in one
  # place.
  vector_tree <- read_newick_tree_from_file(nexustreefile)
  return(create_phylo_class_from_nexus_tree(vector_tree, tip.label))
}
transmogrify_additive_dependents_to_multistate_old <- function(ind_char,dep_chars,chmatrix,secondary_dependencies,INAP=-22,UNKNOWN=-11,theoretical=T) {
  # Collapse one independent character plus its additively dependent
  # characters into a single multistate character, together with a
  # transition (Q) matrix over the combined states.
  #
  # ind_char: column of chmatrix holding the independent character.
  # dep_chars: columns whose applicability depends on ind_char's state.
  # chmatrix: taxa x characters state matrix.
  # secondary_dependencies: per involved character, the character on which
  #   it depends (ind_char itself for primary dependents).
  # INAP / UNKNOWN: codes for inapplicable / unknown scores.
  # theoretical: if TRUE, enumerate all theoretically possible state
  #   combinations; if FALSE, use only observed combinations.
  # Returns a list: unique_combinations (state-combination matrix, rownames
  #   are the combined-state labels), Q (row-normalized transition matrix),
  #   new_character (recoded multistate scores per taxon; taxa whose
  #   combination is ambiguous get a polymorphic "(...)" code).
  #
  # NOTE(review): relies on helpers defined elsewhere in this file or its
  # attached packages (count_states, rowMaxs/rowMins,
  # unravel_polymorph_badass, find_independent_character,
  # pairwise_differences_discrete, match_vector_to_matrix_row, row.match,
  # letter_states, more_letter_states).
  # theortical: if T, then Q matrix & recoding allows for all theoretically possible combinations
  # unknown_inap_sep: code unknowns separately; this is a bad idea that I regret and that we'll cut
  notu <- nrow(chmatrix);
  combos <- chmatrix[,c(ind_char,dep_chars)];
  rstates <- count_states(combos,UNKNOWN,INAP)
  # drop taxa scored unknown for every involved character
  combos <- combos[!(rowMaxs(combos)==UNKNOWN & rowMins(combos)==UNKNOWN),];
  combos <- combos[combos[,2]!=INAP,];
  # get rid of missing examples
  combos <- combos[!(1:nrow(combos)) %in% unique(which(combos==UNKNOWN,arr.ind = T)[,1]),];
  # get secondary dependents
  secondaries <- unique(which(combos==INAP,arr.ind = T)[,2]);
  semi_indies <- secondary_dependencies[secondary_dependencies!=ind_char];
  nchars <- ncol(combos);
  dchars <- length(dep_chars);
  missing_combos <- unique(combos)[sort(unique(which(unique(combos)==UNKNOWN,arr.ind = T)[,1])),];
  nstates <- count_states(combos);
  # secondary dependents get an extra pseudo-state for "inapplicable"
  nstates[secondaries] <- nstates[secondaries]+1;
  #combos <- combos[order(combos[,1],combos[,2],combos[,3]),];
  for (cn in ncol(combos):1) combos <- combos[order(combos[,cn]),];
  all_obs_complex_combos <- unique(combos);
  all_obs_complex_combos <- all_obs_complex_combos[all_obs_complex_combos[,1]>=0,];
  ind_states <- sort(unique(chmatrix[,ind_char][chmatrix[,ind_char]>=0]))
  # key_states: independent states under which the dependents apply;
  # wrong_states: independent states never seen with applicable dependents
  key_states <- unique(all_obs_complex_combos[,1]);
  wrong_states <- ind_states[!ind_states %in% key_states];
  if (!theoretical) {
    # observed combinations only
    all_poss_combos <- unique(combos);
    for (cu in ncol(all_poss_combos):1) all_poss_combos <- all_poss_combos[order(abs(all_poss_combos[,cu])),];
    all_poss_combos <- all_poss_combos[(1:nrow(all_poss_combos))[!(1:nrow(all_poss_combos)) %in% which(all_poss_combos==UNKNOWN,arr.ind=T)[,1]],];
  } else {
    # unq_combos <- array(0,dim=c(prod(nstates),length(nstates)));
    # unq_combos[,1] <- unique(unique(combos)[,1]);
    # make sure that secondary dependents get inapplicable in the combos
    # should the Q-matrix for the "wrong" independent character reflect this?
    #which(unique(combos)==INAP,arr.ind=T);
    # for (ds in 1:dchars) {
    # build the cartesian product of the characters' states, working from
    # the last character leftward and prepending one column per pass
    dc <- nchars+1;
    while (dc >1) {
      dc <- dc-1;
      char_states <- sort(unique(combos[,dc]));
      char_states <- char_states[!char_states %in% c(UNKNOWN,INAP)];
      # expand any polymorphic codes (<0) into their component states
      cs <- 1;
      while (char_states[cs]<0) {
        char_states <- c(char_states,unravel_polymorph_badass(char_states[cs]))
        char_states <- unique(sort(char_states[2:length(char_states)]));
      }
      # if (dc %in% secondaries) unq_combos[unq_combos[,dc]==(nstates[dc]-1),dc] <- INAP;
      if (dc %in% secondaries) char_states <- c(char_states,INAP);
      nstates_ch <- length(char_states);
      if (dc==nchars) {
        all_poss_combos <- array(char_states,dim=c(nstates_ch,1));
      } else {
        # replicate the existing rows once per state of this character
        cs <- 1;
        existing_combos <- nrow(all_poss_combos)
        added_char_states <- rep(char_states[cs],existing_combos);
        unq_combos_orig <- all_poss_combos;
        while (cs<nstates_ch) {
          cs <- cs+1;
          all_poss_combos <- rbind(all_poss_combos,unq_combos_orig);
          added_char_states <- c(added_char_states,rep(char_states[cs],existing_combos));
        }
        all_poss_combos <- cbind(added_char_states,all_poss_combos);
      }
    }
    # while (dc > 1) {
    # dc <- dc-1;
    # if (dc<nchars) {
    # this_char_states <- c();
    # for (dss in 1:nstates[dc]) #{
    # this_char_states <- c(this_char_states,rep(dss-1,prod(nstates[(dc+1):nchars])));
    # if (dss==nstates[dc] && (dc %in% secondaries)) {
    # this_char_states <- c(this_char_states,rep(INAP,prod(nstates[(dc+1):dchars])));
    # this_char_states <- c(this_char_states,rep(INAP,nstates[dc-1]));
    # } else {
    # this_char_states <- c(this_char_states,rep(dss-1,nstates[dc-1]));
    # }
    # }
    # } else {
    # this_char_states <- (1:nstates[dc])-1;
    # }
    # ttl_code <- round(nrow(unq_combos)/length(this_char_states),0);
    # unq_combos[,dc] <- rep(this_char_states,ttl_code);
    # }
  }
  # keep only combinations whose independent state allows the dependents
  all_poss_combos <- all_poss_combos[all_poss_combos[,1] %in% key_states,];
  # alternative for secondary independents: replace all pairs with -22 & then reduce via unique(all_poss_combos);
  # prune combinations that violate a secondary dependency: the dependent
  # must be INAP exactly when its controlling character is in a non-key
  # state
  sc <- 0;
  while (sc < length(secondaries)) {
    sc <- sc+1;
    independents <- 2:ncol(all_poss_combos);
    semi_indy <- find_independent_character(dchar=secondaries[sc],independents=2:ncol(all_poss_combos),chmatrix=combos,UNKNOWN,INAP);
    this_pair <- unique(combos[,c(semi_indy,secondaries[sc])]);
    semi_key_state <- unique(this_pair[this_pair[,2]!=INAP,][,1]);
    semi_wrn_state <- unique(this_pair[,1][!this_pair[,1] %in% semi_key_state]);
    # get rid of keystate:inapplicable pairs
    zz <- (1:nrow(all_poss_combos));
    xx <- zz[all_poss_combos[,semi_indy] %in% semi_key_state]
    yy <- zz[all_poss_combos[,secondaries[sc]]==INAP];
    xx <- xx[xx %in% yy];
    all_poss_combos <- all_poss_combos[!zz %in% xx,];
    # get rid of nonkeystate:state-pairs
    zz <- (1:nrow(all_poss_combos));
    xx <- zz[all_poss_combos[,semi_indy] %in% semi_wrn_state]
    yy <- zz[all_poss_combos[,secondaries[sc]]!=INAP];
    xx <- xx[xx %in% yy];
    all_poss_combos <- all_poss_combos[!zz %in% xx,];
  }
  # combinations for the "wrong" independent states: all dependents INAP
  wrstates <- length(wrong_states);
  null_combos <- cbind(wrong_states,array(INAP,dim=c(wrstates,ncol(combos)-1)));
  colnames(null_combos) <- colnames(all_poss_combos)
  all_poss_combos <- rbind(null_combos,all_poss_combos);
  ucombos <- rcombos <- ttl_states <- nrow(all_poss_combos);
  #if (unknown_inap_sep) {
  # unq_combos <- rbind(unq_combos,missing_combos);
  # ttl_states <- nrow(unq_combos);
  # rcombos <- ttl_states-(wrstates+length(missing_combos));
  # ucombos <- ttl_states-length(missing_combos);
  # } else {
  rcombos <- ttl_states-wrstates;
  # }
  # human-readable labels per combination: digits with "-" for INAP and
  # "?" for UNKNOWN
  state_combos <- rep("",ttl_states);
  for (uc in 1:ttl_states) {
    this_combo <- paste(all_poss_combos[uc,],collapse="");
    this_combo <- gsub(as.character(INAP),"-",this_combo);
    this_combo <- gsub(as.character(UNKNOWN),"?",this_combo);
    state_combos[uc] <- this_combo;
  }
  state_combos <- gsub(as.character(INAP),"",state_combos);
  rownames(all_poss_combos) <- state_combos;
  colnames(all_poss_combos) <- c(ind_char,dep_chars);
  # make sure that secondaries dependencies are weeded out.
  # if we see only •2-, then make sure that •20 & •21 are eliminated
  # prepare Q-Matrix
  if (nrow(all_poss_combos)>100) {
    print("Getting basic distances among state combinations")
    Q <- pairwise_differences_discrete(all_poss_combos,UNKNOWN=UNKNOWN,INAP=INAP,progress_bar=T);
  } else {
    Q <- pairwise_differences_discrete(all_poss_combos,UNKNOWN=UNKNOWN,INAP=INAP,progress_bar=F);
  }
  # if there are secondaries, the figure out how to weight them here!!!
  colnames(Q) <- rownames(Q) <- state_combos;
  # transitions out of the "wrong" states spread evenly over real combos
  Q[wrstates,(wrstates+1):ttl_states] <- 1/rcombos;
  # re-weight transitions involving semi-independent characters
  si <- 0;
  while (si < length(semi_indies)) {
    si <- si+1;
    sc <- match(semi_indies[si],c(ind_char,dep_chars));
    cs <- (1:length(c(ind_char,dep_chars)))[secondary_dependencies==semi_indies[si]];
    relv_combos <- unique(all_poss_combos[,c(sc,cs)]);
    relv_combos <- relv_combos[(1:nrow(relv_combos))[!(1:nrow(relv_combos)) %in% unique(which(relv_combos==INAP,arr.ind=T)[,1])],];
    relv_combos_all <- all_poss_combos[,c(sc,cs)];
    if (length(cs)>1) {
      key_combos <- which(all_poss_combos[,cs]==INAP,arr.ind=T)[,1]
    } else {
      key_combos <- as.numeric(which(all_poss_combos[,cs]==INAP,arr.ind=T))
    }
    # fix revl_combos to get all of the right matches & not just those for one possibility
    key_combos <- key_combos[!key_combos %in% key_combos[all_poss_combos[,sc]==INAP]];
    relv_combos_2 <- all_poss_combos[,c(sc,cs)];
    relv_combos_2 <- relv_combos_2[!(1:ttl_states) %in% which(relv_combos_2==INAP,arr.ind=T)[,1],];
    combos_key <- match(rownames(relv_combos_2),rownames(Q));
    # Q[key_combos,combos_key]==1;
    for (i in 1:length(key_combos))
      Q[key_combos[i],combos_key][Q[key_combos[i],combos_key]==1] <- 1/nrow(relv_combos);
    # 1/nrow(relv_combos)
  }
  # distances > 1 are multi-step: no direct transition allowed
  Q[Q>1] <- 0;
  # normalize each row into transition probabilities; diagonal set to -1
  for (qq in 1:ttl_states) {
    Q[qq,] <- Q[qq,]/sum(Q[qq,1:ucombos]);
    Q[qq,qq] <- -1;
    # Q[qq,qq] <- -sum(Q[qq,1:ucombos])
  }
  #write.csv(Q,"Q.csv",row.names = T)
  # recode each taxon: its row index (0-based) in all_poss_combos
  new_multistate <- vector(length=notu);
  for (nn in 1:notu)
    new_multistate[nn] <- match_vector_to_matrix_row(test_vector=chmatrix[nn,c(ind_char,dep_chars)],test_matrix=all_poss_combos)-1;
  # with >10 states, map 10+ onto letter symbols
  all_states <- c(0:9,letter_states,more_letter_states);
  if (nrow(all_poss_combos)>10)
    if (is.numeric(new_multistate))
      new_multistate[!is.na(new_multistate)] <- all_states[1+new_multistate[!is.na(new_multistate)]]
  #cbind(new_multistate,chmatrix[,c(ind_char,dep_chars)])
  #if (unknown_inap_sep) {
  # new_multistate[is.na(new_multistate)] <- UNKNOWN;
  # } else {
  # taxa that matched no combination exactly (unknown and/or polymorphic
  # scores): code them as polymorphic over all consistent combinations
  prob_child <- (1:notu)[is.na(new_multistate)];
  combos <- chmatrix[,c(ind_char,dep_chars)];
  pc <- 0;
  polymorphs <- unique(combos[combos<0])
  polymorphs <- polymorphs[!polymorphs %in% c(UNKNOWN,INAP)];
  while (pc < length(prob_child)) {
    pc <- pc+1;
    pcc <- prob_child[pc];
    if (combos[pcc,1]==UNKNOWN) {
      new_multistate[pcc] <- UNKNOWN;
    } else {
      # make it polymorphic for all possible states
      this_combo <- combos[pcc,(0:dchars)+1];
      # doofi <- (1:length(this_combo))[this_combo==UNKNOWN];
      # this if/else probably is unneeded now!
      if (sum(this_combo %in% polymorphs)==0) {
        # only unknowns: filter combinations by the known scores
        set_chars <- (1:length(this_combo))[this_combo!=UNKNOWN];
        # set_chars <- set_chars[set_chars>1];
        set_states <- this_combo[set_chars];
        poss_combos <- all_poss_combos[all_poss_combos[,1] %in% key_states,];
        ss <- 0;
        while (ss < length(set_chars)) {
          ss <- ss+1;
          poss_combos <- subset(poss_combos,poss_combos[,set_chars[ss]]==set_states[ss]);
        }
        polymorph <- 0;
        if (nrow(all_poss_combos)>10) polymorph <- c();
        for (cp in 1:nrow(poss_combos)) {
          this_state <- (row.match(poss_combos[cp,],as.data.frame(all_poss_combos))-1);
          if (nrow(all_poss_combos)>10) {
            polymorph <- c(polymorph,all_states[this_state]);
            # base64encode(this_state);
            # for (i in 1:5) print(base64encode(10^(i-1)));
          } else {
            polymorph <- polymorph-((10^(cp-1))*this_state);
          }
        }
      } else {
        # mixture of unknowns and polymorphic codes
        set_chars <- (1:nchars)[!this_combo %in% c(UNKNOWN,polymorphs)]; unset_chars <- (1:nchars)[this_combo %in% c(UNKNOWN,polymorphs)];
        un <- 0;
        missing <- unset_chars[this_combo[unset_chars]==UNKNOWN];
        polys <- unset_chars[!this_combo[unset_chars] %in% UNKNOWN];
        # turn each unknown score into a polymorph over all of its states
        # NOTE(review): `this_combo[rs]` (and the rs-based powers) below
        # looks suspect — mchar appears intended; confirm before reuse.
        while (un < length(missing)) {
          un <- un+1;
          mchar <- missing[un];
          rstates <- (1:nstates[mchar])-1;
          this_combo[mchar] <- 0;
          for (rs in 1:length(rstates))
            this_combo[mchar] <- this_combo[rs]-rstates[rs]*(10^(rs-1));
        }
        # set_chars <- set_chars[set_chars>1];
        set_states <- this_combo[set_chars];
        poss_combos <- all_poss_combos;
        # print(nrow(poss_combos));
        # reduce the possible combinations to those consistent
        sc <- length(set_chars);
        # for (sc in 1:length(set_chars)) {
        while (sc > 1) {
          poss_combos <- subset(poss_combos,poss_combos[,set_chars[sc]]==set_states[sc]);
          # print(nrow(poss_combos));
          sc <- sc-1;
        }
        # then to those consistent with each polymorphic score
        uc <- 0;
        while (uc < length(polys)) {
          uc <- uc+1;
          poss_combos <- poss_combos[poss_combos[,polys[uc]] %in% unravel_polymorph_badass(this_combo[polys[uc]]),];
        }
        pstates <- match(rownames(poss_combos),rownames(all_poss_combos));
        polymorph <- 0;
        if (nrow(all_poss_combos)>10) polymorph <- c();
        for (cp in 1:nrow(poss_combos)) {
          this_state <- (row.match(poss_combos[cp,],as.data.frame(all_poss_combos))-1);
          if (nrow(all_poss_combos)>10) {
            polymorph <- c(polymorph,all_states[this_state]);
            # base64encode(this_state);
            # for (i in 1:5) print(base64encode(10^(i-1)));
          } else {
            polymorph <- polymorph-((10^(cp-1))*this_state);
          }
        }
      }
      # if (nrow(unq_combos)>10) {
      # if (is.numeric(new_multistate))
      # new_multistate[!is.na(new_multistate)] <- all_states[1+new_multistate[!is.na(new_multistate)]]
      new_multistate[pcc] <- paste("(",paste(polymorph,collapse=""),")",sep="");
      # } else {
      # new_multistate[pcc] <- polymorph;
      # }
    }
  }
  # }
  #new_multistate[new_multistate==UNKNOWN] <- "?";
  #cbind(new_multistate,chmatrix[,c(ind_char,dep_chars)])
  output <- list(all_poss_combos,Q,new_multistate);
  names(output) <- c("unique_combinations","Q","new_character");
  return(output);
}
|
# Unit tests for the autoplot() methods on yardstick curve/metric objects:
# roc_curve, pr_curve, gain_curve, lift_curve, and conf_mat.
# NOTE(review): two_class_example and hpc_cv are presumably yardstick's
# bundled example data sets made available by the package under test -- confirm.
# Throughout, `expect_error(expr, NA)` is the classic-testthat idiom for
# "expect NO error", and ggplot_build() is used to inspect the rendered
# layer data of each plot.
context("Autoplot")
library(testthat)
library(dplyr)
library(ggplot2)
# As of R 3.6, cannot rely on old sample() results to be the same.
# Pre R 3.6, they were generated like this, and we have saved them
# as static values to be more reproducible
# set.seed(123)
# resample_idx <- replicate(
# n = 10,
# expr = sample.int(
# n = nrow(two_class_example),
# size = 300,
# replace = TRUE
# ),
# simplify = FALSE
# )
# saveRDS(object = resample_idx, file = testthat::test_path("test_autoplot.rds"))
# Pre-computed bootstrap indices (10 resamples of 300 rows each).
resample_idx <- readRDS(testthat::test_path("test_autoplot.rds"))
two_class_resamples <- bind_rows(
lapply(resample_idx, function(idx) two_class_example[idx,]),
.id = "Resample"
) %>%
group_by(Resample)
# make it smaller, and order it in the same order as what ggplot2 displays
hpc_cv2 <- filter(hpc_cv, Resample %in% c("Fold06", "Fold07", "Fold08", "Fold09", "Fold10")) %>%
as_tibble() %>%
group_by(Resample) %>%
arrange(as.character(obs)) %>%
ungroup()
# ROC --------------------------------------------------------------------------
test_that("ROC Curve - two class", {
res <- roc_curve(two_class_example, truth, Class1)
# NA second argument = "expect no error" (testthat classic idiom).
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# x and y data
expect_equal(1 - res$specificity, .plot_data$data[[1]]$x)
expect_equal(res$sensitivity, .plot_data$data[[1]]$y)
# 45 degree line
expect_equal(.plot_data$data[[2]]$intercept, 0)
expect_equal(.plot_data$data[[2]]$slope, 1)
})
test_that("ROC Curve - two class, with resamples", {
res <- roc_curve(two_class_resamples, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
expect_equal(1 - res$specificity, .plot_data$data[[1]]$x)
expect_equal(res$sensitivity, .plot_data$data[[1]]$y)
# number of unique colors
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 10)
})
test_that("ROC Curve - multi class", {
res <- roc_curve(hpc_cv2, obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(nrow(.plot_data$data[[2]]), 4)
})
test_that("ROC Curve - multi class, with resamples", {
res <- roc_curve(group_by(hpc_cv2, Resample), obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
expect_true("Resample" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(nrow(.plot_data$data[[2]]), 4)
})
# PR ---------------------------------------------------------------------------
test_that("PR Curve - two class", {
res <- pr_curve(two_class_example, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# x and y data
expect_equal(res$recall, .plot_data$data[[1]]$x)
expect_equal(res$precision, .plot_data$data[[1]]$y)
})
test_that("PR Curve - two class, with resamples", {
res <- pr_curve(two_class_resamples, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
expect_equal(res$recall, .plot_data$data[[1]]$x)
expect_equal(res$precision, .plot_data$data[[1]]$y)
# number of unique colors
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 10)
})
test_that("PR Curve - multi class", {
res <- pr_curve(hpc_cv2, obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
})
test_that("PR Curve - multi class, with resamples", {
res <- pr_curve(group_by(hpc_cv2, Resample), obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
expect_true("Resample" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
# 5 resamples
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 5)
})
# Gain -------------------------------------------------------------------------
test_that("Gain Curve - two class", {
res <- gain_curve(two_class_example, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# x and y data
expect_equal(res$.percent_tested, .plot_data$data[[1]]$x)
expect_equal(res$.percent_found, .plot_data$data[[1]]$y)
# polygon "perfect" corner
expect_equal(.plot_data$data[[2]]$x[2], 51.6)
})
test_that("Gain Curve - two class, with resamples", {
res <- gain_curve(two_class_resamples, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
expect_equal(res$.percent_tested, .plot_data$data[[1]]$x)
expect_equal(res$.percent_found, .plot_data$data[[1]]$y)
# number of unique colors
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 10)
# polygon "perfect" corner (min of the resamples)
expect_equal(.plot_data$data[[2]]$x[2], 43 + 2/3)
})
test_that("Gain Curve - multi class", {
res <- gain_curve(hpc_cv2, obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
# polygon "perfect" corner (one per level)
corners <- c(2, 5, 8, 11)
corner_vals <- c(31.0623556581986, 5.94688221709007, 11.9515011547344, 51.0392609699769)
expect_equal(.plot_data$data[[2]]$x[corners], corner_vals)
})
test_that("Gain Curve - multi class, with resamples", {
res <- gain_curve(group_by(hpc_cv2, Resample), obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
expect_true("Resample" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
# 5 resamples
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 5)
# polygon "perfect" corner (one per level)
corners <- c(2, 5, 8, 11)
corner_vals <- c(30.9248554913295, 5.78034682080925, 11.8155619596542, 50.8620689655172)
expect_equal(.plot_data$data[[2]]$x[corners], corner_vals)
})
# Lift -------------------------------------------------------------------------
test_that("Lift Curve - two class", {
res <- lift_curve(two_class_example, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# first row has NA and is removed
res <- res[-1,]
# 1 row removed
expect_equal(nrow(.plot_data$data[[1]]), 500)
# x and y data
expect_equal(res$.percent_tested, .plot_data$data[[1]]$x)
expect_equal(res$.lift, .plot_data$data[[1]]$y)
# horizontal line
expect_equal(.plot_data$data[[2]]$x, c(0, 100))
})
test_that("Lift Curve - two class, with resamples", {
res <- lift_curve(two_class_resamples, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# 10 rows removed (the 0 event rows)
expect_equal(nrow(.plot_data$data[[1]]), nrow(res) - 10)
# 0 event rows are removed before plotting
res <- filter(res, .n_events != 0)
expect_equal(res$.percent_tested, .plot_data$data[[1]]$x)
expect_equal(res$.lift, .plot_data$data[[1]]$y)
# number of unique colors
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 10)
})
test_that("Lift Curve - multi class", {
res <- lift_curve(hpc_cv2, obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
})
test_that("Lift Curve - multi class, with resamples", {
res <- lift_curve(group_by(hpc_cv2, Resample), obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
expect_true("Resample" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
# 5 resamples
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 5)
})
# Confusion Matrix ------------------------------------------------------------
test_that("Confusion Matrix - type argument", {
res <- conf_mat(two_class_example, truth, predicted)
expect_error(.plot <- autoplot(res, type = "wrong"), "type")
})
test_that("Confusion Matrix - two class - heatmap", {
res <- conf_mat(two_class_example, truth, predicted)
expect_error(.plot <- autoplot(res, type = "heatmap"), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# 4 panes
expect_equal(nrow(.plot_data$data[[1]]), length(res$table))
})
test_that("Confusion Matrix - multi class - heatmap", {
res <- hpc_cv %>%
filter(Resample == "Fold01") %>%
conf_mat(obs, pred)
expect_error(.plot <- autoplot(res, type = "heatmap"), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# panes
expect_equal(nrow(.plot_data$data[[1]]), length(res$table))
# check dimensions
expect_equal(rlang::expr_label(.plot$mapping[["x"]]), "`~Truth`")
expect_equal(rlang::expr_label(.plot$mapping[["y"]]), "`~Prediction`")
expect_equal(rlang::expr_label(.plot$mapping[["fill"]]), "`~Freq`")
})
test_that("Confusion Matrix - mosaic - non-standard labels (#157)", {
res <- hpc_cv %>%
filter(Resample == "Fold01") %>%
conf_mat(obs, pred, dnn = c("Pred", "True"))
expect_error(.plot <- autoplot(res, type = "heatmap"), NA)
# Overridden with default labels
expect_identical(.plot$labels$x, "Truth")
expect_identical(.plot$labels$y, "Prediction")
})
test_that("Confusion Matrix - two class - mosaic", {
res <- conf_mat(two_class_example, truth, predicted)
expect_error(.plot <- autoplot(res, type = "mosaic"), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# 4 panes
expect_equal(nrow(.plot_data$data[[1]]), length(res$table))
})
test_that("Confusion Matrix - multi class - mosaic", {
res <- hpc_cv %>%
filter(Resample == "Fold01") %>%
conf_mat(obs, pred)
expect_error(.plot <- autoplot(res, type = "mosaic"), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# panes
expect_equal(nrow(.plot_data$data[[1]]), length(res$table))
})
/tests/testthat/test_autoplot.R
|
no_license
|
PalimPalimPalim/yardstick
|
R
| false
| false
| 10,630
|
r
|
# Unit tests for the autoplot() methods on yardstick curve/metric objects:
# roc_curve, pr_curve, gain_curve, lift_curve, and conf_mat.
# NOTE(review): two_class_example and hpc_cv are presumably yardstick's
# bundled example data sets made available by the package under test -- confirm.
# Throughout, `expect_error(expr, NA)` is the classic-testthat idiom for
# "expect NO error", and ggplot_build() is used to inspect the rendered
# layer data of each plot.
context("Autoplot")
library(testthat)
library(dplyr)
library(ggplot2)
# As of R 3.6, cannot rely on old sample() results to be the same.
# Pre R 3.6, they were generated like this, and we have saved them
# as static values to be more reproducible
# set.seed(123)
# resample_idx <- replicate(
# n = 10,
# expr = sample.int(
# n = nrow(two_class_example),
# size = 300,
# replace = TRUE
# ),
# simplify = FALSE
# )
# saveRDS(object = resample_idx, file = testthat::test_path("test_autoplot.rds"))
# Pre-computed bootstrap indices (10 resamples of 300 rows each).
resample_idx <- readRDS(testthat::test_path("test_autoplot.rds"))
two_class_resamples <- bind_rows(
lapply(resample_idx, function(idx) two_class_example[idx,]),
.id = "Resample"
) %>%
group_by(Resample)
# make it smaller, and order it in the same order as what ggplot2 displays
hpc_cv2 <- filter(hpc_cv, Resample %in% c("Fold06", "Fold07", "Fold08", "Fold09", "Fold10")) %>%
as_tibble() %>%
group_by(Resample) %>%
arrange(as.character(obs)) %>%
ungroup()
# ROC --------------------------------------------------------------------------
test_that("ROC Curve - two class", {
res <- roc_curve(two_class_example, truth, Class1)
# NA second argument = "expect no error" (testthat classic idiom).
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# x and y data
expect_equal(1 - res$specificity, .plot_data$data[[1]]$x)
expect_equal(res$sensitivity, .plot_data$data[[1]]$y)
# 45 degree line
expect_equal(.plot_data$data[[2]]$intercept, 0)
expect_equal(.plot_data$data[[2]]$slope, 1)
})
test_that("ROC Curve - two class, with resamples", {
res <- roc_curve(two_class_resamples, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
expect_equal(1 - res$specificity, .plot_data$data[[1]]$x)
expect_equal(res$sensitivity, .plot_data$data[[1]]$y)
# number of unique colors
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 10)
})
test_that("ROC Curve - multi class", {
res <- roc_curve(hpc_cv2, obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(nrow(.plot_data$data[[2]]), 4)
})
test_that("ROC Curve - multi class, with resamples", {
res <- roc_curve(group_by(hpc_cv2, Resample), obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
expect_true("Resample" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(nrow(.plot_data$data[[2]]), 4)
})
# PR ---------------------------------------------------------------------------
test_that("PR Curve - two class", {
res <- pr_curve(two_class_example, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# x and y data
expect_equal(res$recall, .plot_data$data[[1]]$x)
expect_equal(res$precision, .plot_data$data[[1]]$y)
})
test_that("PR Curve - two class, with resamples", {
res <- pr_curve(two_class_resamples, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
expect_equal(res$recall, .plot_data$data[[1]]$x)
expect_equal(res$precision, .plot_data$data[[1]]$y)
# number of unique colors
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 10)
})
test_that("PR Curve - multi class", {
res <- pr_curve(hpc_cv2, obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
})
test_that("PR Curve - multi class, with resamples", {
res <- pr_curve(group_by(hpc_cv2, Resample), obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
expect_true("Resample" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
# 5 resamples
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 5)
})
# Gain -------------------------------------------------------------------------
test_that("Gain Curve - two class", {
res <- gain_curve(two_class_example, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# x and y data
expect_equal(res$.percent_tested, .plot_data$data[[1]]$x)
expect_equal(res$.percent_found, .plot_data$data[[1]]$y)
# polygon "perfect" corner
expect_equal(.plot_data$data[[2]]$x[2], 51.6)
})
test_that("Gain Curve - two class, with resamples", {
res <- gain_curve(two_class_resamples, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
expect_equal(res$.percent_tested, .plot_data$data[[1]]$x)
expect_equal(res$.percent_found, .plot_data$data[[1]]$y)
# number of unique colors
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 10)
# polygon "perfect" corner (min of the resamples)
expect_equal(.plot_data$data[[2]]$x[2], 43 + 2/3)
})
test_that("Gain Curve - multi class", {
res <- gain_curve(hpc_cv2, obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
# polygon "perfect" corner (one per level)
corners <- c(2, 5, 8, 11)
corner_vals <- c(31.0623556581986, 5.94688221709007, 11.9515011547344, 51.0392609699769)
expect_equal(.plot_data$data[[2]]$x[corners], corner_vals)
})
test_that("Gain Curve - multi class, with resamples", {
res <- gain_curve(group_by(hpc_cv2, Resample), obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
expect_true("Resample" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
# 5 resamples
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 5)
# polygon "perfect" corner (one per level)
corners <- c(2, 5, 8, 11)
corner_vals <- c(30.9248554913295, 5.78034682080925, 11.8155619596542, 50.8620689655172)
expect_equal(.plot_data$data[[2]]$x[corners], corner_vals)
})
# Lift -------------------------------------------------------------------------
test_that("Lift Curve - two class", {
res <- lift_curve(two_class_example, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# first row has NA and is removed
res <- res[-1,]
# 1 row removed
expect_equal(nrow(.plot_data$data[[1]]), 500)
# x and y data
expect_equal(res$.percent_tested, .plot_data$data[[1]]$x)
expect_equal(res$.lift, .plot_data$data[[1]]$y)
# horizontal line
expect_equal(.plot_data$data[[2]]$x, c(0, 100))
})
test_that("Lift Curve - two class, with resamples", {
res <- lift_curve(two_class_resamples, truth, Class1)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# 10 rows removed (the 0 event rows)
expect_equal(nrow(.plot_data$data[[1]]), nrow(res) - 10)
# 0 event rows are removed before plotting
res <- filter(res, .n_events != 0)
expect_equal(res$.percent_tested, .plot_data$data[[1]]$x)
expect_equal(res$.lift, .plot_data$data[[1]]$y)
# number of unique colors
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 10)
})
test_that("Lift Curve - multi class", {
res <- lift_curve(hpc_cv2, obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
})
test_that("Lift Curve - multi class, with resamples", {
res <- lift_curve(group_by(hpc_cv2, Resample), obs, VF:L)
expect_error(.plot <- autoplot(res), NA)
expect_is(.plot, "gg")
expect_true(".level" %in% colnames(res))
expect_true("Resample" %in% colnames(res))
.plot_data <- ggplot_build(.plot)
# 4 panels
expect_equal(length(unique(.plot_data$data[[1]]$PANEL)), 4)
# 5 resamples
expect_equal(length(unique(.plot_data$data[[1]]$colour)), 5)
})
# Confusion Matrix ------------------------------------------------------------
test_that("Confusion Matrix - type argument", {
res <- conf_mat(two_class_example, truth, predicted)
expect_error(.plot <- autoplot(res, type = "wrong"), "type")
})
test_that("Confusion Matrix - two class - heatmap", {
res <- conf_mat(two_class_example, truth, predicted)
expect_error(.plot <- autoplot(res, type = "heatmap"), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# 4 panes
expect_equal(nrow(.plot_data$data[[1]]), length(res$table))
})
test_that("Confusion Matrix - multi class - heatmap", {
res <- hpc_cv %>%
filter(Resample == "Fold01") %>%
conf_mat(obs, pred)
expect_error(.plot <- autoplot(res, type = "heatmap"), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# panes
expect_equal(nrow(.plot_data$data[[1]]), length(res$table))
# check dimensions
expect_equal(rlang::expr_label(.plot$mapping[["x"]]), "`~Truth`")
expect_equal(rlang::expr_label(.plot$mapping[["y"]]), "`~Prediction`")
expect_equal(rlang::expr_label(.plot$mapping[["fill"]]), "`~Freq`")
})
test_that("Confusion Matrix - mosaic - non-standard labels (#157)", {
res <- hpc_cv %>%
filter(Resample == "Fold01") %>%
conf_mat(obs, pred, dnn = c("Pred", "True"))
expect_error(.plot <- autoplot(res, type = "heatmap"), NA)
# Overridden with default labels
expect_identical(.plot$labels$x, "Truth")
expect_identical(.plot$labels$y, "Prediction")
})
test_that("Confusion Matrix - two class - mosaic", {
res <- conf_mat(two_class_example, truth, predicted)
expect_error(.plot <- autoplot(res, type = "mosaic"), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# 4 panes
expect_equal(nrow(.plot_data$data[[1]]), length(res$table))
})
test_that("Confusion Matrix - multi class - mosaic", {
res <- hpc_cv %>%
filter(Resample == "Fold01") %>%
conf_mat(obs, pred)
expect_error(.plot <- autoplot(res, type = "mosaic"), NA)
expect_is(.plot, "gg")
.plot_data <- ggplot_build(.plot)
# panes
expect_equal(nrow(.plot_data$data[[1]]), length(res$table))
})
|
# Define project functions ------------------------------------------------
# Double the input.
#
# Args:
#   x: a numeric value or vector.
# Returns:
#   x multiplied by two (same length as x).
foo <- function(x){
  x * 2
}
# Square the input.
#
# Args:
#   x: a numeric value or vector.
# Returns:
#   x raised to the second power (same length as x).
bar <- function(x){
  squared <- x^2
  squared
}
# Reduce a data frame to two columns and give them standard names.
#
# Args:
#   x:  column to expose as `density` (tidyselect expression / column name).
#   y:  column to expose as `category` (tidyselect expression / column name).
#   df: a data frame (or tibble) containing those columns.
# Returns:
#   df restricted to the two selected columns, renamed to category/density.
# NOTE(review): `x` and `y` are used inside select()/rename() without
# embracing ({{ }}); how they resolve depends on dplyr's tidy-evaluation
# fallback to the caller's values -- confirm what form callers pass
# (bare names vs strings) before restructuring this.
shiny_df <- function(x,y,df){
df <- df %>%
select(y, x)
df <- df %>% rename(density=x, category=y)
return(df)
}
|
/R/99_project_functions.R
|
no_license
|
HCEhlers/2021_group21_final_project
|
R
| false
| false
| 272
|
r
|
# Define project functions ------------------------------------------------
# Double the input.
#
# Args:
#   x: a numeric value or vector.
# Returns:
#   x multiplied by two (same length as x).
foo <- function(x){
  x * 2
}
# Square the input.
#
# Args:
#   x: a numeric value or vector.
# Returns:
#   x raised to the second power (same length as x).
bar <- function(x){
  squared <- x^2
  squared
}
# Reduce a data frame to two columns and give them standard names.
#
# Args:
#   x:  column to expose as `density` (tidyselect expression / column name).
#   y:  column to expose as `category` (tidyselect expression / column name).
#   df: a data frame (or tibble) containing those columns.
# Returns:
#   df restricted to the two selected columns, renamed to category/density.
# NOTE(review): `x` and `y` are used inside select()/rename() without
# embracing ({{ }}); how they resolve depends on dplyr's tidy-evaluation
# fallback to the caller's values -- confirm what form callers pass
# (bare names vs strings) before restructuring this.
shiny_df <- function(x,y,df){
df <- df %>%
select(y, x)
df <- df %>% rename(density=x, category=y)
return(df)
}
|
# Recursive selection sort: repeatedly extract the minimum element and
# prepend it to the sorted remainder.
#
# Args:
#   x: a numeric vector to sort (which.min requires numeric input).
# Returns:
#   A vector with the elements of x in non-decreasing order; vectors of
#   length 0 or 1 are returned unchanged.
selectionsort.rec <- function(x)
{
  if(length(x) > 1)
  {
    mini <- which.min(x)
    # BUG FIX: the original recursed into an undefined `selectionsort`
    # (the function is named `selectionsort.rec`). Recall() invokes the
    # currently-executing function, so the recursion is self-contained
    # and survives renaming/copying.
    c(x[mini], Recall(x[-mini]))
  } else x
}
|
/Programming Language Detection/Experiment-2/Dataset/Train/R/sorting-algorithms-selection-sort-2.r
|
no_license
|
dlaststark/machine-learning-projects
|
R
| false
| false
| 144
|
r
|
# Recursive selection sort: repeatedly extract the minimum element and
# prepend it to the sorted remainder.
#
# Args:
#   x: a numeric vector to sort (which.min requires numeric input).
# Returns:
#   A vector with the elements of x in non-decreasing order; vectors of
#   length 0 or 1 are returned unchanged.
selectionsort.rec <- function(x)
{
  if(length(x) > 1)
  {
    mini <- which.min(x)
    # BUG FIX: the original recursed into an undefined `selectionsort`
    # (the function is named `selectionsort.rec`). Recall() invokes the
    # currently-executing function, so the recursion is self-contained
    # and survives renaming/copying.
    c(x[mini], Recall(x[-mini]))
  } else x
}
|
# format_coxme_pre
# Extract parameters from a fitted coxme model, transform them to tidy
# format, then write the combined table as CSV.
library(broom)
library(ehahelper)
library(tidyverse)
# Loads the fitted model object `coxme_pre_match` into the workspace.
load(".cache/coxme_pre_match.rda")
# Coefficient table; exp = TRUE exponentiates the estimates (hazard ratios).
c_t <- tidy(coxme_pre_match, exp = TRUE)
# Model-level statistics reshaped to long (term, estimate) pairs.
c_g <- glance(coxme_pre_match) %>%
gather(term, estimate)
# random_sd_mid is exponentiated here -- presumably stored on the log
# scale by glance(); confirm against the ehahelper/broom version in use.
c_g$estimate[c_g$term == "random_sd_mid"] <- exp(c_g$estimate[c_g$term == "random_sd_mid"])
# NOTE(review): the row selection c(1,2,4,5,6,9,10) is positional and
# assumes a fixed ordering of glance() output for this model; it will
# silently pick the wrong statistics if that ordering changes.
c_out <- c_t %>%
bind_rows(c_g[c(1,2,4,5,6,9,10), ]) %>%
dplyr::select(term, estimate, std.error, p.value) %>%
mutate_if(is.numeric, round, 3)
write_csv(c_out, path = "data/coxme_pre_match.csv")
|
/scripts/format_coxme_pre.R
|
no_license
|
junkka/hlcs2018
|
R
| false
| false
| 580
|
r
|
# format_coxme_pre
# Extract parameters from a fitted coxme model, transform them to tidy
# format, then write the combined table as CSV.
library(broom)
library(ehahelper)
library(tidyverse)
# Loads the fitted model object `coxme_pre_match` into the workspace.
load(".cache/coxme_pre_match.rda")
# Coefficient table; exp = TRUE exponentiates the estimates (hazard ratios).
c_t <- tidy(coxme_pre_match, exp = TRUE)
# Model-level statistics reshaped to long (term, estimate) pairs.
c_g <- glance(coxme_pre_match) %>%
gather(term, estimate)
# random_sd_mid is exponentiated here -- presumably stored on the log
# scale by glance(); confirm against the ehahelper/broom version in use.
c_g$estimate[c_g$term == "random_sd_mid"] <- exp(c_g$estimate[c_g$term == "random_sd_mid"])
# NOTE(review): the row selection c(1,2,4,5,6,9,10) is positional and
# assumes a fixed ordering of glance() output for this model; it will
# silently pick the wrong statistics if that ordering changes.
c_out <- c_t %>%
bind_rows(c_g[c(1,2,4,5,6,9,10), ]) %>%
dplyr::select(term, estimate, std.error, p.value) %>%
mutate_if(is.numeric, round, 3)
write_csv(c_out, path = "data/coxme_pre_match.csv")
|
# Recode "select all that apply" indicator columns to 1/0.
#
# Args:
#   data1:       survey data frame; select-all responses live in columns
#                named "<group>/<choice>".
#   agg_method1: lookup table with (at least) columns `aggmethod` and
#                `gname`; rows with aggmethod == "SEL_ALL" identify the
#                select-all question groups.
# Returns:
#   A data.frame copy of data1 in which, for every column whose name
#   starts with "<gname>/", values greater than 1 are capped at 1;
#   all other values are left untouched.
# NOTE(review): conv_num() is an external helper -- presumably coerces a
# column to numeric; confirm its NA handling before relying on this.
select_all_score2zo <- function(data1, agg_method1) {
print(paste0("Recode select all values to 1/0"))
### First we provide attribute label to variable name
#data.label <- as.data.frame(names(data))
#data<-as.data.frame(data,stringsAsFactors=FALSE,check.names=FALSE)
data_names<-names(data1)
#-select all the field headers for select one
agg_m_sall<-filter(agg_method1,aggmethod=="SEL_ALL")
#--loop through all the rows or take all value
agg_m_sall_headers<-distinct(as.data.frame(agg_m_sall[,"gname"]))
data_rec<-as.data.frame(data1) # dont see any reason to do it
for(i in 1:nrow(agg_m_sall_headers)){
i_headername<-agg_m_sall_headers[i,1]
#column index from the data
col_ind<-which(str_detect(data_names, paste0(i_headername,"/")) %in% TRUE)
#Replace only if header is found in the main data table
if (length(col_ind)>0){
#loop through each index
for (i_lt in col_ind){
#i_lt=2
d_i_lt<-conv_num(data_rec[,i_lt])
# Cap multi-count responses at 1 so the column is a pure 1/0 indicator.
data_rec[,i_lt]<-ifelse(d_i_lt>1,1,data_rec[,i_lt])
}
}
}#finish recoding of select one ORDINAL
return(data_rec)
}
NULL
# Recode "select top 3" / "select top 4" score columns to 1/0 indicators.
#
# Args:
#   data1:       survey data frame; per-choice scores live in columns
#                named "<group>/<choice>".
#   agg_method1: lookup table with columns `aggmethod` and `gname`;
#                rows with aggmethod "SEL_3" or "SEL_4" identify groups.
# Returns:
#   A data.frame copy of data1 where, within each matched group, the
#   choices ranked in the top 3 (or top 4 for SEL_4) by score become 1
#   and the rest become 0.
# NOTE(review): ranks are computed with rank(-x, ties.method = "min"),
# so higher scores rank first and ties share the better rank; a score of
# 0 is forced to the worst rank. The "True"/"TRUE" string comparisons
# below rely on logical -> character coercion inside ifelse() -- fragile
# but intentional here; do not "simplify" without testing.
select_upto_n_score2zo <- function(data1, agg_method1) {
print(paste0("Recode select top 3/top 4 values to 1/0"))
### First we provide attribute label to variable name
#data.label <- as.data.frame(names(data))
#data<-as.data.frame(data,stringsAsFactors=FALSE,check.names=FALSE)
data_names<-names(data1)
#-select all the field headers for select one
agg_m3<-filter(agg_method1,aggmethod=="SEL_3" | aggmethod=="SEL_4")
#--loop through all the rows or take all value
agg_m3_headers<-distinct(as.data.frame(agg_m3[,c("gname","aggmethod")]))
data_rec<-as.data.frame(data1) # dont see any reason to do it
for(i in 1:nrow(agg_m3_headers)){
i_headername<-agg_m3_headers[i,1]
i_type<-agg_m3_headers[i,2]
#column index from the data
col_ind<-which(str_detect(data_names, paste0(i_headername,"/")) %in% TRUE)
#Replace only if header is found in the main data table
if (length(col_ind)>0){
#loop through each index
list_rnk<-data_rec[,col_ind]
for (i_list in 1:ncol(list_rnk)){
list_rnk[,i_list]<-as.numeric(as.character(list_rnk[,i_list]))
}
rank3<-t(apply(list_rnk,1,function(x) rank(-x,na.last="keep", ties.method = "min")))
rank3<-as.data.frame(rank3)
#Zero removed - ZERO in the main table is substituted with maximum rank value
for(ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(list_rnk[,ir]==0,ncol(rank3),rank3[,ir])
}
#Now select based on SEL_3 or SEL_4
if(i_type=="SEL_4"){
for(ir in 1:ncol(rank3)){rank3[,ir]<- rank3[,ir]<=4}
}else{
for(ir in 1:ncol(rank3)){rank3[,ir]<- rank3[,ir]<=3}
}
#change true false to 1/0
for (ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(rank3[,ir]=="True"|rank3[,ir]=="TRUE",1,ifelse(rank3[,ir]=="False"|rank3[,ir]=="FALSE",0,rank3[,ir]))
}
#Replace values in the main table
data_rec[,col_ind]<-rank3
# count<-0
# for (i_lt in col_ind){
# count<-count+1
# data_rec[,i_lt]<-rank3[,count]
# }
}
}#finish recoding of select one ORDINAL
return(data_rec)
}
NULL
# Convert per-choice rank *scores* into actual rankings and write the
# concatenated choice labels back into the main data table.
#
# Args:
#   data1:       survey data frame; score columns are named
#                "<qrankgroup>/RANK..." and the text target columns are
#                identified by `gname` in the lookup table.
#   agg_method1: lookup table with columns `aggmethod`, `qrankgroup`,
#                `gname`, and `qrankscore`; rows with aggmethod RANK1 /
#                RANK3 / RANK4 identify ranking question groups and how
#                many rank levels to keep (1, 3, or 4).
# Returns:
#   A data.frame copy of data1 where the RANK score columns hold the
#   computed rank (NA beyond the kept depth) and each gname column holds
#   the "; "-joined choice labels for one rank level.
# NOTE(review): rank_level is derived as nrow(lookup_table) - qrankscore + 1,
# i.e. the highest qrankscore maps to rank level 1 -- confirm that
# qrankscore values always run 1..nrow(lookup_table) per group.
select_rank_score2rank <- function(data1, agg_method1) {
print(paste0("Recode select rank score to actual ranking"))
### First we provide attribute label to variable name
#data.label <- as.data.frame(names(data))
#data<-as.data.frame(data,stringsAsFactors=FALSE,check.names=FALSE)
data_names<-names(data1)
#-select all the field headers for select one
agg_rank<-filter(agg_method1,aggmethod=="RANK1"|aggmethod=="RANK3" | aggmethod=="RANK4")
#--loop through all the rows or take all value
agg_rank_headers<-distinct(as.data.frame(agg_rank[,c("qrankgroup","aggmethod")]))
data_rec<-as.data.frame(data1) # dont see any reason to do it
for(i in 1:nrow(agg_rank_headers)){
i_headername<-agg_rank_headers[i,1]
i_type<-agg_rank_headers[i,2]
#lookup table
lookup_table<-filter(agg_rank,qrankgroup==i_headername)
#column index from the data
col_ind<-which(str_detect(data_names, paste0(i_headername,"/","RANK")) %in% TRUE)
#Replace only if header is found in the main data table
if (length(col_ind)>0){
#loop through each index
list_rnk<-data_rec[,col_ind]
for (i_list in 1:ncol(list_rnk)){
list_rnk[,i_list]<-as.numeric(as.character(list_rnk[,i_list]))
}
# Higher score = better rank; ties share the better (minimum) rank.
rank3<-t(apply(list_rnk,1,function(x) rank(-x,na.last="keep", ties.method = "min")))
rank3<-as.data.frame(rank3)
#Zero removed - ZERO in the main table is substituted with maximum rank value
for(ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(list_rnk[,ir]==0,ncol(rank3),rank3[,ir])
}
# Keep only the requested rank depth; anything deeper becomes NA.
#Now select based on SEL_3 or SEL_4
if(i_type=="RANK4"){
for(ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(rank3[,ir]>4,NA,rank3[,ir])
}
}else if (i_type=="RANK1"){
for(ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(rank3[,ir]>1,NA,rank3[,ir])
}
}else{
for(ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(rank3[,ir]>3,NA,rank3[,ir])
}
}
#Replace values in the main table
data_rec[,col_ind]<-rank3
#time to extract the concatenated actual text response
txt_list_rank<-rank3
txt_list_rank1<-concat_multiresponse(txt_list_rank,1) #first rank
txt_list_rank2<-concat_multiresponse(txt_list_rank,2) #second rank
txt_list_rank3<-concat_multiresponse(txt_list_rank,3) #third rank
txt_list_rank4<-concat_multiresponse(txt_list_rank,4) #fourth rank
#Replace '_' by '/' this is an original replacement
txt_list_rank1<-gsub("_","/",txt_list_rank1)
txt_list_rank2<-gsub("_","/",txt_list_rank2)
txt_list_rank3<-gsub("_","/",txt_list_rank3)
txt_list_rank4<-gsub("_","/",txt_list_rank4)
#now find out replacement column in the main database
for (ir in 1:nrow(lookup_table)){
rank_gname<-lookup_table$gname[ir]
rank_score<-as.numeric(lookup_table$qrankscore[ir])
rank_level<-nrow(lookup_table)-rank_score+1
if(rank_level==1){data_rec[,which(data_names==rank_gname)]<-txt_list_rank1}
if(rank_level==2){data_rec[,which(data_names==rank_gname)]<-txt_list_rank2}
if(rank_level==3){data_rec[,which(data_names==rank_gname)]<-txt_list_rank3}
if(rank_level==4){data_rec[,which(data_names==rank_gname)]<-txt_list_rank4}
}#done replacement in the main column
}
}#finish recoding of select one ORDINAL
return(data_rec)
}
NULL
# Concatenate the choice labels that received rank `rnk` in each row.
#
# Args:
#   db_rnk: data frame of per-choice rank values, one column per choice;
#           the choice label is the last "/"-separated token of the
#           column name (extracted via split_heading_get_varname).
#   rnk:    the rank level to extract (1 = first choice, etc.).
# Returns:
#   A character vector, one entry per row, with the matching choice
#   labels joined by "; " (empty string when no column has that rank).
# NOTE(review): ifelse() over data-frame columns plus the repeated
# as.data.frame() inside the loop is coercion-sensitive; behavior with
# factor columns has not been verified -- test before refactoring.
concat_multiresponse<-function(db_rnk,rnk){
txt_list_rank<-db_rnk
for(ir in 1:ncol(db_rnk)){
txt_list_rank[,ir]<-ifelse(db_rnk[,ir]==rnk,split_heading_get_varname(names(db_rnk),ir,"/"),NA)
txt_list_rank<-as.data.frame(txt_list_rank)
}
#txt_list_rank1$result<-apply(txt_list_rank1,1, function(x) toString(na.omit(x)))
concat_result<-apply(txt_list_rank,1, function(x) {paste(x[which(!is.na(x))],collapse="; ")})
return(concat_result)
}
NULL
# Return the last `sep`-separated token of the ind-th header name.
#
# Args:
#   headername: character vector of column headers.
#   ind:        index of the header to split.
#   sep:        separator pattern (passed to stringr::str_split).
# Returns:
#   The final token of headername[ind], as a length-1 character vector.
split_heading_get_varname<-function(headername,ind,sep){
pieces <- str_split(headername[ind], sep)[[1]]
pieces[length(pieces)]
}
NULL
|
/R/r_func_ps_select_multiple_score2zo.R
|
no_license
|
achalak/kobohrtoolbox
|
R
| false
| false
| 7,609
|
r
|
# Recode "select all that apply" indicator columns to 1/0.
#
# Args:
#   data1:       survey data frame; select-all responses live in columns
#                named "<group>/<choice>".
#   agg_method1: lookup table with (at least) columns `aggmethod` and
#                `gname`; rows with aggmethod == "SEL_ALL" identify the
#                select-all question groups.
# Returns:
#   A data.frame copy of data1 in which, for every column whose name
#   starts with "<gname>/", values greater than 1 are capped at 1;
#   all other values are left untouched.
# NOTE(review): conv_num() is an external helper -- presumably coerces a
# column to numeric; confirm its NA handling before relying on this.
select_all_score2zo <- function(data1, agg_method1) {
print(paste0("Recode select all values to 1/0"))
### First we provide attribute label to variable name
#data.label <- as.data.frame(names(data))
#data<-as.data.frame(data,stringsAsFactors=FALSE,check.names=FALSE)
data_names<-names(data1)
#-select all the field headers for select one
agg_m_sall<-filter(agg_method1,aggmethod=="SEL_ALL")
#--loop through all the rows or take all value
agg_m_sall_headers<-distinct(as.data.frame(agg_m_sall[,"gname"]))
data_rec<-as.data.frame(data1) # dont see any reason to do it
for(i in 1:nrow(agg_m_sall_headers)){
i_headername<-agg_m_sall_headers[i,1]
#column index from the data
col_ind<-which(str_detect(data_names, paste0(i_headername,"/")) %in% TRUE)
#Replace only if header is found in the main data table
if (length(col_ind)>0){
#loop through each index
for (i_lt in col_ind){
#i_lt=2
d_i_lt<-conv_num(data_rec[,i_lt])
# Cap multi-count responses at 1 so the column is a pure 1/0 indicator.
data_rec[,i_lt]<-ifelse(d_i_lt>1,1,data_rec[,i_lt])
}
}
}#finish recoding of select one ORDINAL
return(data_rec)
}
NULL
# Recode "select top 3" / "select top 4" score columns to 1/0 indicators.
#
# Args:
#   data1:       survey data frame; per-choice scores live in columns
#                named "<group>/<choice>".
#   agg_method1: lookup table with columns `aggmethod` and `gname`;
#                rows with aggmethod "SEL_3" or "SEL_4" identify groups.
# Returns:
#   A data.frame copy of data1 where, within each matched group, the
#   choices ranked in the top 3 (or top 4 for SEL_4) by score become 1
#   and the rest become 0.
# NOTE(review): ranks are computed with rank(-x, ties.method = "min"),
# so higher scores rank first and ties share the better rank; a score of
# 0 is forced to the worst rank. The "True"/"TRUE" string comparisons
# below rely on logical -> character coercion inside ifelse() -- fragile
# but intentional here; do not "simplify" without testing.
select_upto_n_score2zo <- function(data1, agg_method1) {
print(paste0("Recode select top 3/top 4 values to 1/0"))
### First we provide attribute label to variable name
#data.label <- as.data.frame(names(data))
#data<-as.data.frame(data,stringsAsFactors=FALSE,check.names=FALSE)
data_names<-names(data1)
#-select all the field headers for select one
agg_m3<-filter(agg_method1,aggmethod=="SEL_3" | aggmethod=="SEL_4")
#--loop through all the rows or take all value
agg_m3_headers<-distinct(as.data.frame(agg_m3[,c("gname","aggmethod")]))
data_rec<-as.data.frame(data1) # dont see any reason to do it
for(i in 1:nrow(agg_m3_headers)){
i_headername<-agg_m3_headers[i,1]
i_type<-agg_m3_headers[i,2]
#column index from the data
col_ind<-which(str_detect(data_names, paste0(i_headername,"/")) %in% TRUE)
#Replace only if header is found in the main data table
if (length(col_ind)>0){
#loop through each index
list_rnk<-data_rec[,col_ind]
for (i_list in 1:ncol(list_rnk)){
list_rnk[,i_list]<-as.numeric(as.character(list_rnk[,i_list]))
}
rank3<-t(apply(list_rnk,1,function(x) rank(-x,na.last="keep", ties.method = "min")))
rank3<-as.data.frame(rank3)
#Zero removed - ZERO in the main table is substituted with maximum rank value
for(ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(list_rnk[,ir]==0,ncol(rank3),rank3[,ir])
}
#Now select based on SEL_3 or SEL_4
if(i_type=="SEL_4"){
for(ir in 1:ncol(rank3)){rank3[,ir]<- rank3[,ir]<=4}
}else{
for(ir in 1:ncol(rank3)){rank3[,ir]<- rank3[,ir]<=3}
}
#change true false to 1/0
for (ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(rank3[,ir]=="True"|rank3[,ir]=="TRUE",1,ifelse(rank3[,ir]=="False"|rank3[,ir]=="FALSE",0,rank3[,ir]))
}
#Replace values in the main table
data_rec[,col_ind]<-rank3
# count<-0
# for (i_lt in col_ind){
# count<-count+1
# data_rec[,i_lt]<-rank3[,count]
# }
}
}#finish recoding of select one ORDINAL
return(data_rec)
}
NULL
# Recode RANK1/RANK3/RANK4 "rank score" question groups into actual rank
# positions and write the concatenated option labels (1st..4th choice) back
# into the matching text columns of the main table.
#
# data1       - main response table; rank-score columns are named
#               "<qrankgroup>/RANK...".
# agg_method1 - lookup table with at least the columns aggmethod, qrankgroup,
#               gname and qrankscore.
# Returns data1 as a data.frame with the rank columns recoded and the
# per-rank text columns filled in.
select_rank_score2rank <- function(data1, agg_method1) {
print(paste0("Recode select rank score to actual ranking"))
### First we provide attribute label to variable name
#data.label <- as.data.frame(names(data))
#data<-as.data.frame(data,stringsAsFactors=FALSE,check.names=FALSE)
data_names<-names(data1)
#-select all the rows that use a rank-type aggregation method
agg_rank<-filter(agg_method1,aggmethod=="RANK1"|aggmethod=="RANK3" | aggmethod=="RANK4")
#--one row per distinct question group / method pair
agg_rank_headers<-distinct(as.data.frame(agg_rank[,c("qrankgroup","aggmethod")]))
data_rec<-as.data.frame(data1) # defensive copy/coercion of the input
for(i in 1:nrow(agg_rank_headers)){
i_headername<-agg_rank_headers[i,1]
i_type<-agg_rank_headers[i,2]
#lookup table restricted to this question group
lookup_table<-filter(agg_rank,qrankgroup==i_headername)
#column indices of the group's "<group>/RANK..." score columns
col_ind<-which(str_detect(data_names, paste0(i_headername,"/","RANK")) %in% TRUE)
#Replace only if header is found in the main data table
if (length(col_ind)>0){
#coerce the rank-score columns to numeric (they may be factor/character)
list_rnk<-data_rec[,col_ind]
for (i_list in 1:ncol(list_rnk)){
list_rnk[,i_list]<-as.numeric(as.character(list_rnk[,i_list]))
}
#row-wise ranking: highest score = rank 1; ties share the minimum rank;
#NAs are kept as NA
rank3<-t(apply(list_rnk,1,function(x) rank(-x,na.last="keep", ties.method = "min")))
rank3<-as.data.frame(rank3)
#a raw score of ZERO means "not chosen": substitute the worst rank value
for(ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(list_rnk[,ir]==0,ncol(rank3),rank3[,ir])
}
#keep only the first 4 (RANK4), 1 (RANK1) or 3 (default, i.e. RANK3)
#positions; anything beyond the cutoff becomes NA
if(i_type=="RANK4"){
for(ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(rank3[,ir]>4,NA,rank3[,ir])
}
}else if (i_type=="RANK1"){
for(ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(rank3[,ir]>1,NA,rank3[,ir])
}
}else{
for(ir in 1:ncol(rank3)){
rank3[,ir]<-ifelse(rank3[,ir]>3,NA,rank3[,ir])
}
}
#Replace values in the main table
data_rec[,col_ind]<-rank3
#build the concatenated option-label text for each rank level
txt_list_rank<-rank3
txt_list_rank1<-concat_multiresponse(txt_list_rank,1) #first rank
txt_list_rank2<-concat_multiresponse(txt_list_rank,2) #second rank
txt_list_rank3<-concat_multiresponse(txt_list_rank,3) #third rank
txt_list_rank4<-concat_multiresponse(txt_list_rank,4) #fourth rank
#Replace '_' by '/' to restore the original label separator
txt_list_rank1<-gsub("_","/",txt_list_rank1)
txt_list_rank2<-gsub("_","/",txt_list_rank2)
txt_list_rank3<-gsub("_","/",txt_list_rank3)
txt_list_rank4<-gsub("_","/",txt_list_rank4)
#write each rank's text into the column named by the lookup row's gname
for (ir in 1:nrow(lookup_table)){
rank_gname<-lookup_table$gname[ir]
rank_score<-as.numeric(lookup_table$qrankscore[ir])
#map score to rank level; assumes the highest qrankscore corresponds to
#rank 1 -- NOTE(review): confirm this encoding against the lookup data
rank_level<-nrow(lookup_table)-rank_score+1
if(rank_level==1){data_rec[,which(data_names==rank_gname)]<-txt_list_rank1}
if(rank_level==2){data_rec[,which(data_names==rank_gname)]<-txt_list_rank2}
if(rank_level==3){data_rec[,which(data_names==rank_gname)]<-txt_list_rank3}
if(rank_level==4){data_rec[,which(data_names==rank_gname)]<-txt_list_rank4}
}#done replacement in the main column
}
}#finish recoding of rank-type question groups
return(data_rec)
}
NULL
# Concatenate the option labels a respondent placed at a given rank level.
#
# db_rnk - data.frame of numeric ranks, one column per option; each column
#          name carries the option label as its last "/"-separated segment.
# rnk    - rank level to extract (1 = first choice, 2 = second, ...).
# Returns a character vector, one element per row, joining the labels of all
# options at that rank with "; " (ties at the same rank yield several labels;
# rows with no match yield "").
#
# Fix vs. the original: the as.data.frame() coercion ran on every loop
# iteration; it is hoisted out, and 1:ncol() is replaced with seq_len().
concat_multiresponse <- function(db_rnk, rnk) {
  txt_list_rank <- as.data.frame(db_rnk)
  for (ir in seq_len(ncol(db_rnk))) {
    # Keep the option's label (last path segment of the column name) where
    # the rank matches, NA elsewhere.
    txt_list_rank[, ir] <- ifelse(db_rnk[, ir] == rnk,
                                  split_heading_get_varname(names(db_rnk), ir, "/"),
                                  NA)
  }
  # Row-wise join of the non-NA labels.
  concat_result <- apply(txt_list_rank, 1, function(x) {
    paste(x[which(!is.na(x))], collapse = "; ")
  })
  return(concat_result)
}
NULL
# Return the last sep-separated segment of the ind-th header name,
# e.g. split_heading_get_varname(c("grp/RANK/opt_a"), 1, "/") -> "opt_a".
#
# headername - character vector of column headers.
# ind        - index of the header to split.
# sep        - separator, treated as a regular expression.
#
# Uses base strsplit instead of stringr::str_split: the split argument of
# both is a regular expression, so behavior is identical for the plain
# separators used here, and the helper no longer needs a package.
split_heading_get_varname <- function(headername, ind, sep) {
  parts <- strsplit(headername[ind], sep)[[1]]
  return(parts[length(parts)])
}
NULL
|
# Auto-extracted example script: displays the S4 class definition of
# "FaClassic" from the robustfa package.
library(robustfa)
### Name: FaClassic-class
### Title: Class '"FaClassic"'
### Aliases: FaClassic-class
### Keywords: classes
### ** Examples
showClass("FaClassic")
|
/data/genthat_extracted_code/robustfa/examples/FaClassic-class.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 172
|
r
|
# Auto-extracted example script (duplicate copy): displays the S4 class
# definition of "FaClassic" from the robustfa package.
library(robustfa)
### Name: FaClassic-class
### Title: Class '"FaClassic"'
### Aliases: FaClassic-class
### Keywords: classes
### ** Examples
showClass("FaClassic")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comorbidity_map_icd9.R
\docType{data}
\name{comorbidity_map_icd9}
\alias{comorbidity_map_icd9}
\title{Comorbidity mapping using ICD-9-CM}
\format{A named list with a character vector of ICD-9-CM codes which
correspond to the comorbidities.}
\usage{
comorbidity_map_icd9
}
\description{
A list containing the ICD-9-CM codes for comorbidities included in APACHE
II, APACHE III, and SAPS II.
}
\keyword{datasets}
|
/man/comorbidity_map_icd9.Rd
|
no_license
|
bgulbis/icuriskr
|
R
| false
| true
| 493
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comorbidity_map_icd9.R
\docType{data}
\name{comorbidity_map_icd9}
\alias{comorbidity_map_icd9}
\title{Comorbidity mapping using ICD-9-CM}
\format{A named list with a character vector of ICD-9-CM codes which
correspond to the comorbidities.}
\usage{
comorbidity_map_icd9
}
\description{
A list containing the ICD-9-CM codes for comorbidities included in APACHE
II, APACHE III, and SAPS II.
}
\keyword{datasets}
|
# Verify check_tabular() recognizes TSV, CSV and space-separated files alike.
test_that("Check that a file is in tabular format", {
  ### read data
  path <- system.file("extdata", "eduAttainOkbay.txt", package = "MungeSumstats")
  sumstats_dt <- data.table::fread(path, nThread = 1)
  ### Test tsv
  header <- read_header(path = path)
  is_tabular_tsv <- check_tabular(header = header)
  ### Test csv
  path_csv <- tempfile()
  data.table::fwrite(sumstats_dt, path_csv, sep = ",")
  header <- read_header(path = path_csv)
  is_tabular_csv <- check_tabular(header = header)
  #### Test space-separated
  path_space <- tempfile()
  data.table::fwrite(sumstats_dt, path_space, sep = " ")
  header <- read_header(path = path_space)
  is_tabular_space <- check_tabular(header = header)
  # Compare each result to the TSV baseline explicitly. The original
  # `all.equal(a, b, c)` call passed the third value into `...` (where it is
  # consumed as a tolerance-style argument), so the space-separated result
  # was never actually checked.
  expect_equal(is_tabular_csv, is_tabular_tsv)
  expect_equal(is_tabular_space, is_tabular_tsv)
})
|
/tests/testthat/test-check_tabular.R
|
no_license
|
daklab/MungeSumstats
|
R
| false
| false
| 823
|
r
|
# Verify check_tabular() recognizes TSV, CSV and space-separated files alike.
test_that("Check that a file is in tabular format", {
  ### read data
  path <- system.file("extdata", "eduAttainOkbay.txt", package = "MungeSumstats")
  sumstats_dt <- data.table::fread(path, nThread = 1)
  ### Test tsv
  header <- read_header(path = path)
  is_tabular_tsv <- check_tabular(header = header)
  ### Test csv
  path_csv <- tempfile()
  data.table::fwrite(sumstats_dt, path_csv, sep = ",")
  header <- read_header(path = path_csv)
  is_tabular_csv <- check_tabular(header = header)
  #### Test space-separated
  path_space <- tempfile()
  data.table::fwrite(sumstats_dt, path_space, sep = " ")
  header <- read_header(path = path_space)
  is_tabular_space <- check_tabular(header = header)
  # Compare each result to the TSV baseline explicitly. The original
  # `all.equal(a, b, c)` call passed the third value into `...` (where it is
  # consumed as a tolerance-style argument), so the space-separated result
  # was never actually checked.
  expect_equal(is_tabular_csv, is_tabular_tsv)
  expect_equal(is_tabular_space, is_tabular_tsv)
})
|
# Train and evaluate a behaviour classifier on accelerometer feature data.
#
# featureData  - data.frame of features plus an "EventIds" label column and
#                identifier columns ("FileDate", "SealName", "nRows", "X").
# Model        - one of "XGB", "rPart", "RF", "SVM", "RLR", "GBM".
# codeTest     - TRUE uses small parameter grids, classMax = 200, K = 2 and a
#                20% training split for fast test runs.
# Parallel     - run cross-validation in parallel (passed to the CV helpers).
# K            - number of cross-validation folds.
# SAVE         - save the result list as an .RData file.
# Dummies      - merge seal characteristics from disk and dummy-encode the
#                categorical seal features for SVM/RLR/GBM.
# Cores        - worker count for parallel CV.
# printSummary - print class tables, confusion matrix and f-scores.
# classMax     - per-class cap used when down-sampling large classes.
# Kernel       - SVM kernel: "linear", "polynomial", "radial" or "sigmoid".
# Hz, epochs   - labels embedded in the saved file name for non-SVM models.
#                They are now explicit arguments with defaults: the original
#                read them as free variables from the calling environment and
#                errored when they were undefined.
#
# Returns a list with the final model, confusion matrix, predictions, the
# train/test splits, the best CV parameters, per-class f-scores and settings.
accRun <- function(featureData,
                   Model = "RF",
                   codeTest = TRUE,
                   Parallel = TRUE,
                   K = 10,
                   SAVE = TRUE,
                   Dummies = FALSE,
                   Cores = 4,
                   printSummary = FALSE,
                   classMax = 500,
                   Kernel = "linear",
                   Hz = NA,
                   epochs = NA) {
  library(caret)
  library(randomForest)
  library(glmnet)
  library(e1071)
  ###### remove NA's from the data
  featureData <- featureData[complete.cases(featureData), ]
  if (printSummary) {
    cat("##### raw data summary:\n")
    print(table(featureData$EventIds))
  }
  ##### remove class "Other" and missing / literal "NA" labels
  featureData <- featureData[featureData$EventIds != "Other", ]
  featureData <- featureData[!is.na(featureData$EventIds), ]
  featureData <- featureData[featureData$EventIds != "NA", ]
  ##### remove low length behaviours
  featureData <- featureData[featureData$nRows > 5, ]
  if (Dummies) {
    ##### add in the seal specific features
    sealCharacteristic <- read.csv("I:/Juvenile fur seal energetics/animal_details2.csv",
                                   stringsAsFactors = FALSE)
    featureData <- merge(featureData, sealCharacteristic[, 1:5], by = "SealName")
    featureData$Place <- as.numeric(as.factor(featureData$Place))
    featureData$Gender <- as.numeric(as.factor(featureData$Gender))
    featureData$species <- as.numeric(as.factor(featureData$species))
  } else {
    featureData$Place <- as.numeric(as.factor(featureData$Place))
  }
  ##### down-sample the large classes
  set.seed(123, "L'Ecuyer")
  featureData$EventIds <- as.character(featureData$EventIds)
  uEventIds <- unique(featureData$EventIds)
  if (codeTest) {
    # fast settings for code testing (the original's no-op
    # `classMax <- classMax` else-branch was removed)
    classMax <- 200
    K <- 2
  }
  # Cap each class at classMax rows. Pieces are collected in a list and bound
  # once instead of growing a data.frame with rbind() inside a loop; the
  # sequence of sample.int() calls (and hence the RNG stream) is unchanged.
  sampledData <- do.call(rbind, lapply(uEventIds, function(ev) {
    classData <- featureData[featureData$EventIds == ev, ]
    nr <- nrow(classData)
    if (nr > classMax) {
      classData <- classData[sample.int(n = nr, size = classMax), ]
    }
    classData
  }))
  if (printSummary) {
    cat("##### full training and testing data summary:\n")
    print(table(sampledData$EventIds))
  }
  ##### remove the identifier variables
  trainData <- sampledData[, !(names(sampledData) %in% c("FileDate", "SealName", "nRows", "X"))]
  if (codeTest) {
    ###### small partition for fast model training
    trainTestIdx <- createDataPartition(y = trainData$EventIds, times = 1, p = 0.2)
  } else {
    ##### full data training and testing
    trainTestIdx <- createDataPartition(y = trainData$EventIds, times = 1, p = 0.7)
  }
  trainDataSplit <- trainData[trainTestIdx$Resample1, ]
  testDataSplit <- trainData[-trainTestIdx$Resample1, ]
  if (printSummary) {
    cat("##### training split data summary:\n")
    print(table(trainDataSplit$EventIds))
    cat("##### testing split data summary:\n")
    print(table(testDataSplit$EventIds))
  }
  ##### accuracy of the final fit
  fittnessFun <- function(obs, pred) {
    sum(obs == pred) / length(pred)
  }
  switch(Model,
    "XGB" = {
      if (codeTest) {
        paramList <- expand.grid(eta = 10^-seq(from = 1, to = 2, by = 1),
                                 max.depth = 5,
                                 nrounds = 100,
                                 subsample = c(0.5),
                                 colsample_bytree = 1,
                                 max_delta_step = 0)
      } else {
        paramList <- expand.grid(eta = 10^-seq(from = 2, to = 4, by = 1),
                                 max.depth = 1:5,
                                 nrounds = 5000,
                                 subsample = c(0.7, 0.8),
                                 colsample_bytree = 1,
                                 max_delta_step = 0)
      }
      outputData <- parallelCVxgb(inputData = trainDataSplit,
                                  k = K,
                                  trainTargets = "EventIds",
                                  paramList = paramList,
                                  testDataSplit = testDataSplit)
      finalPreds <- outputData$testDataSplit$predsTest
      finalModel <- outputData$trainData$predsTrain
      fittnessCV <- outputData
    },
    "rPart" = {
      ##### model for plotting only
      require("rpart")
      require("ggplot2")
      fittnessCV <- NULL
      finalModel <- rpart(EventIds ~ ., method = "class", data = trainDataSplit)
      finalPreds <- predict(finalModel,
                            newdata = testDataSplit[, !names(testDataSplit) == "EventIds"],
                            type = "class")
      # NOTE(review): prp() comes from rpart.plot, which is not loaded here —
      # confirm it is attached by the caller.
      prp(finalModel, extra = 0,
          box.col = c(5:8)[finalModel$frame$yval])
    },
    "RF" = {
      source("r/models/parallelCV6.R")
      if (codeTest) {
        paramList <- expand.grid(mtry = seq(from = 1, to = 2, by = 1),
                                 ntree = seq(from = 1000, to = 1200, by = 100),
                                 nodesize = seq(from = 1, to = 2, by = 1))
      } else {
        paramList <- expand.grid(mtry = seq(from = 1, to = 12, by = 1),
                                 ntree = seq(from = 1000, to = 2400, by = 200),
                                 nodesize = seq(from = 1, to = 10, by = 1))
      }
      fittnessCV <- parallelCV(trainDataSplit,
                               k = K,
                               paramList = paramList,
                               Model = "RF",
                               Parallel = Parallel,
                               Cores = Cores,
                               printSummary = printSummary)
      finalModel <- do.call("randomForest",
                            c(list(x = trainDataSplit[, !names(trainDataSplit) == "EventIds"],
                                   y = factor(trainDataSplit$EventIds)),
                              fittnessCV$bestParams,
                              importance = TRUE))
      if (printSummary) {
        plot(finalModel)
      }
      finalPreds <- predict(finalModel,
                            newdata = testDataSplit[, !names(testDataSplit) == "EventIds"],
                            type = "response")
      finalFittness <- fittnessFun(obs = testDataSplit$EventIds,
                                   pred = finalPreds)
      if (printSummary) {
        varImpPlot(finalModel)
      }
    },
    "SVM" = {
      source("parallelCVSvm.r")
      # Kernel-specific hyper-parameter grids: coarse for code testing,
      # fine for full runs.
      if (codeTest) {
        switch(Kernel,
          "linear" = {
            paramList <- data.frame(cost = 10^seq(from = -5, to = 0, by = 5))
          },
          "polynomial" = {
            paramList <- expand.grid(degree = seq(from = 0, to = 2, by = 1),
                                     gamma = 10^seq(from = -5, to = 0, by = 5),
                                     coef0 = seq(from = 0, to = 1, by = 1),
                                     cost = 10^seq(from = -5, to = 0, by = 5))
          },
          "radial" = {
            paramList <- expand.grid(gamma = 10^seq(from = -5, to = 0, by = 5),
                                     cost = 10^seq(from = -5, to = 0, by = 5))
          },
          "sigmoid" = {
            paramList <- expand.grid(gamma = 10^seq(from = -5, to = 0, by = 5),
                                     coef0 = seq(from = 0, to = 1, by = 1),
                                     cost = 10^seq(from = -5, to = 0, by = 5))
          })
      } else {
        switch(Kernel,
          "linear" = {
            paramList <- data.frame(cost = 10^seq(from = -5, to = 5, by = 1))
          },
          "polynomial" = {
            paramList <- expand.grid(degree = seq(from = 0, to = 5, by = 1),
                                     gamma = 10^seq(from = -4, to = 5, by = 2),
                                     coef0 = seq(from = 0, to = 5, by = 1),
                                     cost = 10^seq(from = -4, to = 5, by = 2))
          },
          "radial" = {
            paramList <- expand.grid(gamma = 10^seq(from = -5, to = 5, by = 1),
                                     cost = 10^seq(from = -5, to = 5, by = 1))
          },
          "sigmoid" = {
            paramList <- expand.grid(gamma = 10^seq(from = -4, to = 5, by = 2),
                                     coef0 = seq(from = 0, to = 5, by = 1),
                                     cost = 10^seq(from = -4, to = 5, by = 2))
          })
      }
      fittnessCV <- parallelCVSvm(trainDataSplit,
                                  k = K,
                                  paramList = paramList,
                                  Model = "SVM",
                                  Parallel = Parallel,
                                  Cores = Cores,
                                  Dummies = Dummies,
                                  Kernel = Kernel)
      xx <- trainDataSplit[, !names(trainDataSplit) == "EventIds"]
      if (Dummies) {
        xx <- as.matrix(cbind(xx[, !(names(xx) %in% c("Place", "Gender", "species", "harsness"))],
                              model.matrix(~Place + Gender + species + harsness - 1, data = xx)))
      } else {
        xx <- as.matrix(xx)
      }
      finalModel <- do.call("svm",
                            c(list(x = xx,
                                   y = factor(trainDataSplit$EventIds)),
                              fittnessCV$bestParams,
                              kernel = Kernel,
                              type = "C-classification"))
      xxTest <- testDataSplit[, !names(testDataSplit) == "EventIds"]
      if (Dummies) {
        xxTest <- as.matrix(cbind(xxTest[, !(names(xxTest) %in% c("Place", "Gender", "species", "harsness"))],
                                  model.matrix(~Place + Gender + species + harsness - 1, data = xxTest)))
      } else {
        xxTest <- as.matrix(xxTest)
      }
      finalPreds <- predict(finalModel,
                            newdata = xxTest,
                            type = "response")
      finalFittness <- fittnessFun(obs = testDataSplit$EventIds,
                                   pred = finalPreds)
    },
    "RLR" = {
      source("parallelCvRlr.r")
      if (codeTest) {
        paramList <- list(param1 = seq(from = 0, to = 1, by = 0.5),
                          param2 = 10^seq(from = 0, to = -3, length.out = 5))
      } else {
        paramList <- list(param1 = seq(from = 0, to = 1, by = 0.01),
                          param2 = 10^seq(from = 0, to = -3, length.out = 50))
      }
      ##### train the model to find the best parameters
      fittnessCV <- parallelCvRlr(trainDataSplit,
                                  k = K,
                                  paramList = paramList,
                                  Model = "RLR",
                                  Parallel = FALSE,
                                  Cores = Cores,
                                  Dummies = Dummies,
                                  printSummary = printSummary)
      xx <- trainDataSplit[, !names(trainDataSplit) == "EventIds"]
      if (Dummies) {
        xx <- as.matrix(cbind(xx[, !(names(xx) %in% c("Place", "Gender", "species", "harsness"))],
                              model.matrix(~Place + Gender + species + harsness - 1, data = xx)))
      } else {
        xx <- as.matrix(xx)
      }
      ##### re-train the overall model at the best alpha
      finalModel <- glmnet(x = xx,
                           y = factor(trainDataSplit$EventIds),
                           family = "multinomial",
                           alpha = fittnessCV$bestParams[1],
                           lambda = paramList$param2)
      plot(finalModel)
      xxTest <- testDataSplit[, !names(testDataSplit) == "EventIds"]
      if (Dummies) {
        xxTest <- as.matrix(cbind(xxTest[, !(names(xxTest) %in% c("Place", "Gender", "species", "harsness"))],
                                  model.matrix(~Place + Gender + species + harsness - 1, data = xxTest)))
      } else {
        xxTest <- as.matrix(xxTest)
      }
      finalPreds <- predict(finalModel,
                            newx = xxTest,
                            type = "class",
                            s = fittnessCV$bestParams[2])
      finalFittness <- fittnessFun(obs = testDataSplit$EventIds,
                                   pred = finalPreds)
      coef(finalModel, s = fittnessCV$bestParams[2])
    },
    "GBM" = {
      source("parallelCVGBM.r")
      if (codeTest) {
        paramList <- expand.grid(n.trees = seq(from = 30, to = 45, by = 5),
                                 interaction.depth = seq(from = 1, to = 3, by = 2),
                                 shrinkage = 10^seq(from = -1, to = -2, by = -1),
                                 n.minobsinnode = seq(from = 7, to = 11, by = 2))
      } else {
        paramList <- expand.grid(interaction.depth = seq(from = 1, to = 6, by = 2),
                                 n.trees = seq(from = 800, to = 2400, by = 100),
                                 shrinkage = 10^seq(from = -1, to = -3, by = -1),
                                 n.minobsinnode = seq(from = 1, to = 17, by = 2))
      }
      ##### train the model
      fittnessCV <- parallelCV(trainDataSplit,
                               k = K,
                               paramList = paramList,
                               Model = "GBM",
                               Parallel = Parallel,
                               Cores = Cores,
                               printSummary = printSummary)
      if (Dummies) {
        # gbm expects categorical predictors as factors
        trainDataSplit$Place <- factor(trainDataSplit$Place)
        trainDataSplit$Gender <- factor(trainDataSplit$Gender)
        trainDataSplit$species <- factor(trainDataSplit$species)
        trainDataSplit$harsness <- factor(trainDataSplit$harsness)
        testDataSplit$Place <- factor(testDataSplit$Place)
        testDataSplit$Gender <- factor(testDataSplit$Gender)
        testDataSplit$species <- factor(testDataSplit$species)
        testDataSplit$harsness <- factor(testDataSplit$harsness)
      }
      ##### train the final overall model at the best parameters
      finalModel <- do.call("gbm.fit",
                            c(list(x = trainDataSplit[, !names(trainDataSplit) == "EventIds"],
                                   y = factor(trainDataSplit$EventIds)),
                              fittnessCV$bestParams,
                              distribution = "multinomial",
                              verbose = FALSE))
      # class probabilities -> predicted class label per row
      finalPreds <- predict(finalModel,
                            newdata = testDataSplit[, !names(testDataSplit) == "EventIds"],
                            n.trees = fittnessCV$bestParams$n.trees,
                            type = "response")[, , 1]
      behaviourNames <- colnames(finalPreds)
      finalPreds <- sapply(max.col(finalPreds), function(ii) {behaviourNames[ii]})
    })
  confusionMat <- confusionMatrix(data = finalPreds, reference = testDataSplit$EventIds)
  if (printSummary) {
    print(confusionMat)
    cat("Sensitivity == Recall, PPV == Precision\n")
  }
  # per-class F1 = 2 * recall * precision / (recall + precision)
  fScore <- 2 * confusionMat$byClass[, "Sensitivity"] * confusionMat$byClass[, "Pos Pred Value"] /
    (confusionMat$byClass[, "Sensitivity"] + confusionMat$byClass[, "Pos Pred Value"])
  if (printSummary) {
    cat("####f Score:\n")
    print(data.frame(fScore))
  }
  outputData <- list(finalModel = finalModel,
                     confusionMat = confusionMat,
                     finalPreds = finalPreds,
                     trainDataSplit = trainDataSplit,
                     testDataSplit = testDataSplit,
                     trainTestIdx = trainTestIdx,
                     bestParams = fittnessCV$bestParams,
                     fScore = fScore,
                     Dummies = Dummies,
                     Model = Model)
  if (Model == "SVM") {
    outputData$kernel <- Kernel
  }
  if (SAVE) {
    if (Model == "SVM") {
      fName <- paste0(Model, " Model with ", Kernel, " kernel, ", K, "-fold CV, on ", Sys.Date(), ".RData")
    } else {
      # Hz and epochs are function arguments now (previously undefined free
      # variables, which made this branch error for non-SVM models).
      fName <- paste0(Model, " Model, ", "Hz = ", Hz, ", epochs = ", epochs, ".RData")
    }
    cat("##### saving", fName, "\n")
    if (codeTest) {
      subDir <- "I:/Juvenile fur seal energetics/output/outputDirectory/test/"
    } else {
      subDir <- "I:/Juvenile fur seal energetics/output/outputDirectory/"
    }
    save(outputData, file = paste0(subDir, fName))
  }
  return(outputData)
}
|
/r/1Hz_models/accRun.r
|
no_license
|
MoniqueLadds/Time-energy_budgets_from_accelerometers
|
R
| false
| false
| 20,887
|
r
|
# Train and evaluate a behaviour classifier on accelerometer feature data.
#
# featureData  - data.frame of features plus an "EventIds" label column and
#                identifier columns ("FileDate", "SealName", "nRows", "X").
# Model        - one of "XGB", "rPart", "RF", "SVM", "RLR", "GBM".
# codeTest     - TRUE uses small parameter grids, classMax = 200, K = 2 and a
#                20% training split for fast test runs.
# Parallel     - run cross-validation in parallel (passed to the CV helpers).
# K            - number of cross-validation folds.
# SAVE         - save the result list as an .RData file.
# Dummies      - merge seal characteristics from disk and dummy-encode the
#                categorical seal features for SVM/RLR/GBM.
# Cores        - worker count for parallel CV.
# printSummary - print class tables, confusion matrix and f-scores.
# classMax     - per-class cap used when down-sampling large classes.
# Kernel       - SVM kernel: "linear", "polynomial", "radial" or "sigmoid".
# Hz, epochs   - labels embedded in the saved file name for non-SVM models.
#                They are now explicit arguments with defaults: the original
#                read them as free variables from the calling environment and
#                errored when they were undefined.
#
# Returns a list with the final model, confusion matrix, predictions, the
# train/test splits, the best CV parameters, per-class f-scores and settings.
accRun <- function(featureData,
                   Model = "RF",
                   codeTest = TRUE,
                   Parallel = TRUE,
                   K = 10,
                   SAVE = TRUE,
                   Dummies = FALSE,
                   Cores = 4,
                   printSummary = FALSE,
                   classMax = 500,
                   Kernel = "linear",
                   Hz = NA,
                   epochs = NA) {
  library(caret)
  library(randomForest)
  library(glmnet)
  library(e1071)
  ###### remove NA's from the data
  featureData <- featureData[complete.cases(featureData), ]
  if (printSummary) {
    cat("##### raw data summary:\n")
    print(table(featureData$EventIds))
  }
  ##### remove class "Other" and missing / literal "NA" labels
  featureData <- featureData[featureData$EventIds != "Other", ]
  featureData <- featureData[!is.na(featureData$EventIds), ]
  featureData <- featureData[featureData$EventIds != "NA", ]
  ##### remove low length behaviours
  featureData <- featureData[featureData$nRows > 5, ]
  if (Dummies) {
    ##### add in the seal specific features
    sealCharacteristic <- read.csv("I:/Juvenile fur seal energetics/animal_details2.csv",
                                   stringsAsFactors = FALSE)
    featureData <- merge(featureData, sealCharacteristic[, 1:5], by = "SealName")
    featureData$Place <- as.numeric(as.factor(featureData$Place))
    featureData$Gender <- as.numeric(as.factor(featureData$Gender))
    featureData$species <- as.numeric(as.factor(featureData$species))
  } else {
    featureData$Place <- as.numeric(as.factor(featureData$Place))
  }
  ##### down-sample the large classes
  set.seed(123, "L'Ecuyer")
  featureData$EventIds <- as.character(featureData$EventIds)
  uEventIds <- unique(featureData$EventIds)
  if (codeTest) {
    # fast settings for code testing (the original's no-op
    # `classMax <- classMax` else-branch was removed)
    classMax <- 200
    K <- 2
  }
  # Cap each class at classMax rows. Pieces are collected in a list and bound
  # once instead of growing a data.frame with rbind() inside a loop; the
  # sequence of sample.int() calls (and hence the RNG stream) is unchanged.
  sampledData <- do.call(rbind, lapply(uEventIds, function(ev) {
    classData <- featureData[featureData$EventIds == ev, ]
    nr <- nrow(classData)
    if (nr > classMax) {
      classData <- classData[sample.int(n = nr, size = classMax), ]
    }
    classData
  }))
  if (printSummary) {
    cat("##### full training and testing data summary:\n")
    print(table(sampledData$EventIds))
  }
  ##### remove the identifier variables
  trainData <- sampledData[, !(names(sampledData) %in% c("FileDate", "SealName", "nRows", "X"))]
  if (codeTest) {
    ###### small partition for fast model training
    trainTestIdx <- createDataPartition(y = trainData$EventIds, times = 1, p = 0.2)
  } else {
    ##### full data training and testing
    trainTestIdx <- createDataPartition(y = trainData$EventIds, times = 1, p = 0.7)
  }
  trainDataSplit <- trainData[trainTestIdx$Resample1, ]
  testDataSplit <- trainData[-trainTestIdx$Resample1, ]
  if (printSummary) {
    cat("##### training split data summary:\n")
    print(table(trainDataSplit$EventIds))
    cat("##### testing split data summary:\n")
    print(table(testDataSplit$EventIds))
  }
  ##### accuracy of the final fit
  fittnessFun <- function(obs, pred) {
    sum(obs == pred) / length(pred)
  }
  switch(Model,
    "XGB" = {
      if (codeTest) {
        paramList <- expand.grid(eta = 10^-seq(from = 1, to = 2, by = 1),
                                 max.depth = 5,
                                 nrounds = 100,
                                 subsample = c(0.5),
                                 colsample_bytree = 1,
                                 max_delta_step = 0)
      } else {
        paramList <- expand.grid(eta = 10^-seq(from = 2, to = 4, by = 1),
                                 max.depth = 1:5,
                                 nrounds = 5000,
                                 subsample = c(0.7, 0.8),
                                 colsample_bytree = 1,
                                 max_delta_step = 0)
      }
      outputData <- parallelCVxgb(inputData = trainDataSplit,
                                  k = K,
                                  trainTargets = "EventIds",
                                  paramList = paramList,
                                  testDataSplit = testDataSplit)
      finalPreds <- outputData$testDataSplit$predsTest
      finalModel <- outputData$trainData$predsTrain
      fittnessCV <- outputData
    },
    "rPart" = {
      ##### model for plotting only
      require("rpart")
      require("ggplot2")
      fittnessCV <- NULL
      finalModel <- rpart(EventIds ~ ., method = "class", data = trainDataSplit)
      finalPreds <- predict(finalModel,
                            newdata = testDataSplit[, !names(testDataSplit) == "EventIds"],
                            type = "class")
      # NOTE(review): prp() comes from rpart.plot, which is not loaded here —
      # confirm it is attached by the caller.
      prp(finalModel, extra = 0,
          box.col = c(5:8)[finalModel$frame$yval])
    },
    "RF" = {
      source("r/models/parallelCV6.R")
      if (codeTest) {
        paramList <- expand.grid(mtry = seq(from = 1, to = 2, by = 1),
                                 ntree = seq(from = 1000, to = 1200, by = 100),
                                 nodesize = seq(from = 1, to = 2, by = 1))
      } else {
        paramList <- expand.grid(mtry = seq(from = 1, to = 12, by = 1),
                                 ntree = seq(from = 1000, to = 2400, by = 200),
                                 nodesize = seq(from = 1, to = 10, by = 1))
      }
      fittnessCV <- parallelCV(trainDataSplit,
                               k = K,
                               paramList = paramList,
                               Model = "RF",
                               Parallel = Parallel,
                               Cores = Cores,
                               printSummary = printSummary)
      finalModel <- do.call("randomForest",
                            c(list(x = trainDataSplit[, !names(trainDataSplit) == "EventIds"],
                                   y = factor(trainDataSplit$EventIds)),
                              fittnessCV$bestParams,
                              importance = TRUE))
      if (printSummary) {
        plot(finalModel)
      }
      finalPreds <- predict(finalModel,
                            newdata = testDataSplit[, !names(testDataSplit) == "EventIds"],
                            type = "response")
      finalFittness <- fittnessFun(obs = testDataSplit$EventIds,
                                   pred = finalPreds)
      if (printSummary) {
        varImpPlot(finalModel)
      }
    },
    "SVM" = {
      source("parallelCVSvm.r")
      # Kernel-specific hyper-parameter grids: coarse for code testing,
      # fine for full runs.
      if (codeTest) {
        switch(Kernel,
          "linear" = {
            paramList <- data.frame(cost = 10^seq(from = -5, to = 0, by = 5))
          },
          "polynomial" = {
            paramList <- expand.grid(degree = seq(from = 0, to = 2, by = 1),
                                     gamma = 10^seq(from = -5, to = 0, by = 5),
                                     coef0 = seq(from = 0, to = 1, by = 1),
                                     cost = 10^seq(from = -5, to = 0, by = 5))
          },
          "radial" = {
            paramList <- expand.grid(gamma = 10^seq(from = -5, to = 0, by = 5),
                                     cost = 10^seq(from = -5, to = 0, by = 5))
          },
          "sigmoid" = {
            paramList <- expand.grid(gamma = 10^seq(from = -5, to = 0, by = 5),
                                     coef0 = seq(from = 0, to = 1, by = 1),
                                     cost = 10^seq(from = -5, to = 0, by = 5))
          })
      } else {
        switch(Kernel,
          "linear" = {
            paramList <- data.frame(cost = 10^seq(from = -5, to = 5, by = 1))
          },
          "polynomial" = {
            paramList <- expand.grid(degree = seq(from = 0, to = 5, by = 1),
                                     gamma = 10^seq(from = -4, to = 5, by = 2),
                                     coef0 = seq(from = 0, to = 5, by = 1),
                                     cost = 10^seq(from = -4, to = 5, by = 2))
          },
          "radial" = {
            paramList <- expand.grid(gamma = 10^seq(from = -5, to = 5, by = 1),
                                     cost = 10^seq(from = -5, to = 5, by = 1))
          },
          "sigmoid" = {
            paramList <- expand.grid(gamma = 10^seq(from = -4, to = 5, by = 2),
                                     coef0 = seq(from = 0, to = 5, by = 1),
                                     cost = 10^seq(from = -4, to = 5, by = 2))
          })
      }
      fittnessCV <- parallelCVSvm(trainDataSplit,
                                  k = K,
                                  paramList = paramList,
                                  Model = "SVM",
                                  Parallel = Parallel,
                                  Cores = Cores,
                                  Dummies = Dummies,
                                  Kernel = Kernel)
      xx <- trainDataSplit[, !names(trainDataSplit) == "EventIds"]
      if (Dummies) {
        xx <- as.matrix(cbind(xx[, !(names(xx) %in% c("Place", "Gender", "species", "harsness"))],
                              model.matrix(~Place + Gender + species + harsness - 1, data = xx)))
      } else {
        xx <- as.matrix(xx)
      }
      finalModel <- do.call("svm",
                            c(list(x = xx,
                                   y = factor(trainDataSplit$EventIds)),
                              fittnessCV$bestParams,
                              kernel = Kernel,
                              type = "C-classification"))
      xxTest <- testDataSplit[, !names(testDataSplit) == "EventIds"]
      if (Dummies) {
        xxTest <- as.matrix(cbind(xxTest[, !(names(xxTest) %in% c("Place", "Gender", "species", "harsness"))],
                                  model.matrix(~Place + Gender + species + harsness - 1, data = xxTest)))
      } else {
        xxTest <- as.matrix(xxTest)
      }
      finalPreds <- predict(finalModel,
                            newdata = xxTest,
                            type = "response")
      finalFittness <- fittnessFun(obs = testDataSplit$EventIds,
                                   pred = finalPreds)
    },
    "RLR" = {
      source("parallelCvRlr.r")
      if (codeTest) {
        paramList <- list(param1 = seq(from = 0, to = 1, by = 0.5),
                          param2 = 10^seq(from = 0, to = -3, length.out = 5))
      } else {
        paramList <- list(param1 = seq(from = 0, to = 1, by = 0.01),
                          param2 = 10^seq(from = 0, to = -3, length.out = 50))
      }
      ##### train the model to find the best parameters
      fittnessCV <- parallelCvRlr(trainDataSplit,
                                  k = K,
                                  paramList = paramList,
                                  Model = "RLR",
                                  Parallel = FALSE,
                                  Cores = Cores,
                                  Dummies = Dummies,
                                  printSummary = printSummary)
      xx <- trainDataSplit[, !names(trainDataSplit) == "EventIds"]
      if (Dummies) {
        xx <- as.matrix(cbind(xx[, !(names(xx) %in% c("Place", "Gender", "species", "harsness"))],
                              model.matrix(~Place + Gender + species + harsness - 1, data = xx)))
      } else {
        xx <- as.matrix(xx)
      }
      ##### re-train the overall model at the best alpha
      finalModel <- glmnet(x = xx,
                           y = factor(trainDataSplit$EventIds),
                           family = "multinomial",
                           alpha = fittnessCV$bestParams[1],
                           lambda = paramList$param2)
      plot(finalModel)
      xxTest <- testDataSplit[, !names(testDataSplit) == "EventIds"]
      if (Dummies) {
        xxTest <- as.matrix(cbind(xxTest[, !(names(xxTest) %in% c("Place", "Gender", "species", "harsness"))],
                                  model.matrix(~Place + Gender + species + harsness - 1, data = xxTest)))
      } else {
        xxTest <- as.matrix(xxTest)
      }
      finalPreds <- predict(finalModel,
                            newx = xxTest,
                            type = "class",
                            s = fittnessCV$bestParams[2])
      finalFittness <- fittnessFun(obs = testDataSplit$EventIds,
                                   pred = finalPreds)
      coef(finalModel, s = fittnessCV$bestParams[2])
    },
    "GBM" = {
      source("parallelCVGBM.r")
      if (codeTest) {
        paramList <- expand.grid(n.trees = seq(from = 30, to = 45, by = 5),
                                 interaction.depth = seq(from = 1, to = 3, by = 2),
                                 shrinkage = 10^seq(from = -1, to = -2, by = -1),
                                 n.minobsinnode = seq(from = 7, to = 11, by = 2))
      } else {
        paramList <- expand.grid(interaction.depth = seq(from = 1, to = 6, by = 2),
                                 n.trees = seq(from = 800, to = 2400, by = 100),
                                 shrinkage = 10^seq(from = -1, to = -3, by = -1),
                                 n.minobsinnode = seq(from = 1, to = 17, by = 2))
      }
      ##### train the model
      fittnessCV <- parallelCV(trainDataSplit,
                               k = K,
                               paramList = paramList,
                               Model = "GBM",
                               Parallel = Parallel,
                               Cores = Cores,
                               printSummary = printSummary)
      if (Dummies) {
        # gbm expects categorical predictors as factors
        trainDataSplit$Place <- factor(trainDataSplit$Place)
        trainDataSplit$Gender <- factor(trainDataSplit$Gender)
        trainDataSplit$species <- factor(trainDataSplit$species)
        trainDataSplit$harsness <- factor(trainDataSplit$harsness)
        testDataSplit$Place <- factor(testDataSplit$Place)
        testDataSplit$Gender <- factor(testDataSplit$Gender)
        testDataSplit$species <- factor(testDataSplit$species)
        testDataSplit$harsness <- factor(testDataSplit$harsness)
      }
      ##### train the final overall model at the best parameters
      finalModel <- do.call("gbm.fit",
                            c(list(x = trainDataSplit[, !names(trainDataSplit) == "EventIds"],
                                   y = factor(trainDataSplit$EventIds)),
                              fittnessCV$bestParams,
                              distribution = "multinomial",
                              verbose = FALSE))
      # class probabilities -> predicted class label per row
      finalPreds <- predict(finalModel,
                            newdata = testDataSplit[, !names(testDataSplit) == "EventIds"],
                            n.trees = fittnessCV$bestParams$n.trees,
                            type = "response")[, , 1]
      behaviourNames <- colnames(finalPreds)
      finalPreds <- sapply(max.col(finalPreds), function(ii) {behaviourNames[ii]})
    })
  confusionMat <- confusionMatrix(data = finalPreds, reference = testDataSplit$EventIds)
  if (printSummary) {
    print(confusionMat)
    cat("Sensitivity == Recall, PPV == Precision\n")
  }
  # per-class F1 = 2 * recall * precision / (recall + precision)
  fScore <- 2 * confusionMat$byClass[, "Sensitivity"] * confusionMat$byClass[, "Pos Pred Value"] /
    (confusionMat$byClass[, "Sensitivity"] + confusionMat$byClass[, "Pos Pred Value"])
  if (printSummary) {
    cat("####f Score:\n")
    print(data.frame(fScore))
  }
  outputData <- list(finalModel = finalModel,
                     confusionMat = confusionMat,
                     finalPreds = finalPreds,
                     trainDataSplit = trainDataSplit,
                     testDataSplit = testDataSplit,
                     trainTestIdx = trainTestIdx,
                     bestParams = fittnessCV$bestParams,
                     fScore = fScore,
                     Dummies = Dummies,
                     Model = Model)
  if (Model == "SVM") {
    outputData$kernel <- Kernel
  }
  if (SAVE) {
    if (Model == "SVM") {
      fName <- paste0(Model, " Model with ", Kernel, " kernel, ", K, "-fold CV, on ", Sys.Date(), ".RData")
    } else {
      # Hz and epochs are function arguments now (previously undefined free
      # variables, which made this branch error for non-SVM models).
      fName <- paste0(Model, " Model, ", "Hz = ", Hz, ", epochs = ", epochs, ".RData")
    }
    cat("##### saving", fName, "\n")
    if (codeTest) {
      subDir <- "I:/Juvenile fur seal energetics/output/outputDirectory/test/"
    } else {
      subDir <- "I:/Juvenile fur seal energetics/output/outputDirectory/"
    }
    save(outputData, file = paste0(subDir, fName))
  }
  return(outputData)
}
|
library(tidyquant)
library(quantmod)
library(tidyverse)
library(zoo)
library(DBI)
library(odbc)

source("functions.r")

# Connection String -------------------------------------------------------
con <- dbConnect(odbc::odbc(),
                 Driver = "SQL Server",
                 Server = "DESKTOP-EGAJP7S\\SQLEXPRESS",
                 Database = "SineNome",
                 Port = 1433)

# Top holdings table
# FIXME(review): the query is truncated ("FROM SineNome." names a schema but
# no table) and will fail as written -- fill in the intended table name.
top_holdings <- dbGetQuery(con, "SELECT * FROM SineNome.")

# List of stock tickers -- to be appended by AM
ticks <- c("SLB","AMZN","OZRK","DFS","AXP", "MSFT","GOOGL", "AXP", "FB", "FISV", "QCOM")

# Price history plus a 14-day RSI ("RS_14") and a buy/sell signal per ticker.
# Signal =  1 (buy)  when RSI crosses DOWN through an oversold level
#                    (20 / 30 / 50) with a 2x volume spike and a falling,
#                    accelerating price;
# Signal = -1 (sell) on the mirror-image overbought crossings (80 / 70 / 50);
# Signal =  0 otherwise.
#
# Fixes relative to the original version:
#   * dplyr::lag() takes its offset as `n`, not `k`. The original
#     `lag(x, k = 2)` passed `k` into `...` where it was ignored, so every
#     "two periods ago" comparison actually lagged by only 1. All two-period
#     offsets now use `n = 2`.
#   * The RSI saturation branch tested `Avg_Loss == 100`; the standard RSI
#     definition pins the indicator at 100 when the average loss is 0.
stock_data <- as.data.frame(do.call(rbind, (lapply(ticks, stock_call)))) %>%
  arrange(Ticker, Date) %>%
  group_by(Ticker) %>%
  mutate(Change = Close - lag(Close, default = first(NA)),
         Gain = if_else(Change > 0, Change, 0),
         Loss = if_else(Change <= 0, abs(Change), 0),
         Avg_Gain = rollapply(Gain, 14, mean, align = 'right', fill = NA),
         Avg_Loss = rollapply(Loss, 14, mean, align = 'right', fill = NA),
         RS = (Avg_Gain / Avg_Loss),
         RS_14 = if_else(Avg_Loss == 0, 100, 100 - (100 / (1 + RS))),
         Signal = if_else(((RS_14 < 20 & lag(RS_14) > 20 & lag(RS_14, n = 2) > 25 &
                              Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                              Close / lag(Close) < 1 & Close / lag(Close) < lag(Close) / lag(Close, n = 2))
                           | (RS_14 < 30 & lag(RS_14) > 30 & lag(RS_14, n = 2) > 35 &
                              Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                              Close / lag(Close) < 1 & (Close / lag(Close)) < (lag(Close) / lag(Close, n = 2)))
                           | (RS_14 < 50 & lag(RS_14) > 50 & lag(RS_14, n = 2) > 55 &
                              Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                              Close / lag(Close) < 1 & Close / lag(Close) < lag(Close) / lag(Close, n = 2))),
                          1,
                          if_else(((RS_14 > 80 & lag(RS_14) < 80 & lag(RS_14, n = 2) < 75 &
                                      Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                                      Close / lag(Close) > 1 & Close / lag(Close) > lag(Close) / lag(Close, n = 2))
                                   | (RS_14 > 70 & lag(RS_14) < 70 & lag(RS_14, n = 2) < 65 &
                                      Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                                      Close / lag(Close) > 1 & Close / lag(Close) > lag(Close) / lag(Close, n = 2))
                                   | (RS_14 > 50 & lag(RS_14) < 50 & lag(RS_14, n = 2) < 45 &
                                      Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                                      Close / lag(Close) > 1 & Close / lag(Close) > lag(Close) / lag(Close, n = 2))),
                                  -1, 0))
  )

# Performance Table: pair each recorded purchase with the following sale and
# sum per-trade percentage returns for each ticker (stock_data is still
# grouped by Ticker, so lag()/row_number()/summarize() operate per ticker).
# Buy rows evaluate to 0/0 = NaN in the Returns expression (the preceding
# sell row has Purchase_Price = 0) and are zeroed by the is.nan() guard.
performance <- stock_data %>%
  filter(Signal == 1 | Signal == -1) %>%
  mutate(Year = substring(Date, 1, 4),
         Purchase_Price = if_else((Signal == 1 & lag(Signal) != 1) | row_number() == min(row_number()), Close, 0),
         Sold_Price = if_else(Signal == -1 & lag(Signal) != -1, Close, 0)) %>%
  select(Date, Ticker, Year, Close, Signal, Purchase_Price, Sold_Price) %>%
  filter(!(Purchase_Price == 0 & Sold_Price == 0)) %>%
  mutate(Returns = (Sold_Price / (lag(Purchase_Price)) - 1) * 100) %>%
  mutate(Returns = if_else(is.na(Returns) | is.nan(Returns), 0, Returns)) %>%
  select(Ticker, Returns) %>%
  summarize(Returns = sum(Returns))
|
/techs.R
|
no_license
|
nalewellen/Sine-Nome
|
R
| false
| false
| 4,314
|
r
|
library(tidyquant)
library(quantmod)
library(tidyverse)
library(zoo)
library(DBI)
library(odbc)

source("functions.r")

# Connection String -------------------------------------------------------
con <- dbConnect(odbc::odbc(),
                 Driver = "SQL Server",
                 Server = "DESKTOP-EGAJP7S\\SQLEXPRESS",
                 Database = "SineNome",
                 Port = 1433)

# Top holdings table
# FIXME(review): the query is truncated ("FROM SineNome." names a schema but
# no table) and will fail as written -- fill in the intended table name.
top_holdings <- dbGetQuery(con, "SELECT * FROM SineNome.")

# List of stock tickers -- to be appended by AM
ticks <- c("SLB","AMZN","OZRK","DFS","AXP", "MSFT","GOOGL", "AXP", "FB", "FISV", "QCOM")

# Price history plus a 14-day RSI ("RS_14") and a buy/sell signal per ticker.
# Signal =  1 (buy)  when RSI crosses DOWN through an oversold level
#                    (20 / 30 / 50) with a 2x volume spike and a falling,
#                    accelerating price;
# Signal = -1 (sell) on the mirror-image overbought crossings (80 / 70 / 50);
# Signal =  0 otherwise.
#
# Fixes relative to the original version:
#   * dplyr::lag() takes its offset as `n`, not `k`. The original
#     `lag(x, k = 2)` passed `k` into `...` where it was ignored, so every
#     "two periods ago" comparison actually lagged by only 1. All two-period
#     offsets now use `n = 2`.
#   * The RSI saturation branch tested `Avg_Loss == 100`; the standard RSI
#     definition pins the indicator at 100 when the average loss is 0.
stock_data <- as.data.frame(do.call(rbind, (lapply(ticks, stock_call)))) %>%
  arrange(Ticker, Date) %>%
  group_by(Ticker) %>%
  mutate(Change = Close - lag(Close, default = first(NA)),
         Gain = if_else(Change > 0, Change, 0),
         Loss = if_else(Change <= 0, abs(Change), 0),
         Avg_Gain = rollapply(Gain, 14, mean, align = 'right', fill = NA),
         Avg_Loss = rollapply(Loss, 14, mean, align = 'right', fill = NA),
         RS = (Avg_Gain / Avg_Loss),
         RS_14 = if_else(Avg_Loss == 0, 100, 100 - (100 / (1 + RS))),
         Signal = if_else(((RS_14 < 20 & lag(RS_14) > 20 & lag(RS_14, n = 2) > 25 &
                              Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                              Close / lag(Close) < 1 & Close / lag(Close) < lag(Close) / lag(Close, n = 2))
                           | (RS_14 < 30 & lag(RS_14) > 30 & lag(RS_14, n = 2) > 35 &
                              Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                              Close / lag(Close) < 1 & (Close / lag(Close)) < (lag(Close) / lag(Close, n = 2)))
                           | (RS_14 < 50 & lag(RS_14) > 50 & lag(RS_14, n = 2) > 55 &
                              Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                              Close / lag(Close) < 1 & Close / lag(Close) < lag(Close) / lag(Close, n = 2))),
                          1,
                          if_else(((RS_14 > 80 & lag(RS_14) < 80 & lag(RS_14, n = 2) < 75 &
                                      Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                                      Close / lag(Close) > 1 & Close / lag(Close) > lag(Close) / lag(Close, n = 2))
                                   | (RS_14 > 70 & lag(RS_14) < 70 & lag(RS_14, n = 2) < 65 &
                                      Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                                      Close / lag(Close) > 1 & Close / lag(Close) > lag(Close) / lag(Close, n = 2))
                                   | (RS_14 > 50 & lag(RS_14) < 50 & lag(RS_14, n = 2) < 45 &
                                      Volume > 2 * lag(rollmean(x = Volume, 4, align = "right", fill = NA)) &
                                      Close / lag(Close) > 1 & Close / lag(Close) > lag(Close) / lag(Close, n = 2))),
                                  -1, 0))
  )

# Performance Table: pair each recorded purchase with the following sale and
# sum per-trade percentage returns for each ticker (stock_data is still
# grouped by Ticker, so lag()/row_number()/summarize() operate per ticker).
# Buy rows evaluate to 0/0 = NaN in the Returns expression (the preceding
# sell row has Purchase_Price = 0) and are zeroed by the is.nan() guard.
performance <- stock_data %>%
  filter(Signal == 1 | Signal == -1) %>%
  mutate(Year = substring(Date, 1, 4),
         Purchase_Price = if_else((Signal == 1 & lag(Signal) != 1) | row_number() == min(row_number()), Close, 0),
         Sold_Price = if_else(Signal == -1 & lag(Signal) != -1, Close, 0)) %>%
  select(Date, Ticker, Year, Close, Signal, Purchase_Price, Sold_Price) %>%
  filter(!(Purchase_Price == 0 & Sold_Price == 0)) %>%
  mutate(Returns = (Sold_Price / (lag(Purchase_Price)) - 1) * 100) %>%
  mutate(Returns = if_else(is.na(Returns) | is.nan(Returns), 0, Returns)) %>%
  select(Ticker, Returns) %>%
  summarize(Returns = sum(Returns))
|
library(semTools)
### Name: twostage-class
### Title: Class for the Results of 2-Stage Maximum Likelihood (TSML)
### Estimation for Missing Data
### Aliases: twostage-class show,twostage-method summary,twostage-method
### anova,twostage-method vcov,twostage-method coef,twostage-method
### fitted.values,twostage-method fitted,twostage-method
### residuals,twostage-method resid,twostage-method nobs,twostage-method
### ** Examples
# See the example from the twostage function
|
/data/genthat_extracted_code/semTools/examples/twostage-class.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 494
|
r
|
library(semTools)
### Name: twostage-class
### Title: Class for the Results of 2-Stage Maximum Likelihood (TSML)
### Estimation for Missing Data
### Aliases: twostage-class show,twostage-method summary,twostage-method
### anova,twostage-method vcov,twostage-method coef,twostage-method
### fitted.values,twostage-method fitted,twostage-method
### residuals,twostage-method resid,twostage-method nobs,twostage-method
### ** Examples
# See the example from the twostage function
|
# Mehaz: http://data-steve.github.io/d3-r-chord-diagram-of-white-house-petitions-data/
# The OpenData movement has the White House producing and releasing some novel datasets. One of them is the We The People petition site. I learned about this from Proofreader.com’s interesting python work using that data. From the petition site, you can see an interesting gallery of work done in different language and for different media/platform. One such example is yoni’s r pkg. In the spirit of the open data goodness, I thought I’d play around as well with this d3 chord diagram.
# Downloading data from github repo
# if (!require("pacman")) install.packages("pacman")
pacman::p_load_current_gh("mattflor/chorddiag")
pacman::p_load(dplyr, magrittr, ggplot2, tidyr, curl)
curl::curl_download(
"https://github.com/yoni/r_we_the_people/blob/master/data/petitions.RData?raw=true"
, destfile="~/R/devdat/d3-R_Chord_Diagram_White-House/petitions.RData")
# NOTE(review): the file is downloaded to an absolute path under ~/R/devdat,
# but load() below reads from the current working directory -- confirm the
# working directory is that folder, otherwise load() fails.
load("petitions.RData")
# Cleaning / Set up
# recover tag names and ids
p <- petitions # save some typing
# Stack the three (id, name) tag-column pairs into one lookup table of
# unique, non-missing tag id/name pairs.
ids_names <- rbind(
p[, c("issues1.id", "issues1.name")] %>% setNames(c("ids", "names"))
, p[, c("issues2.id", "issues2.name")] %>% setNames(c("ids", "names"))
, p[, c("issues3.id", "issues3.name")]%>% setNames(c("ids", "names"))
) %>%
unique() %>% na.omit()
# get only petitions with multi-tags
# Long format: one row per (petition id, tag), joined back to tag names.
# NOTE(review): tidyr::extract_numeric() is deprecated in current tidyr;
# readr::parse_number() is the drop-in replacement.
tag_count <- p %>%
select(id, issues1.id, issues2.id, issues3.id) %>%
tidyr::gather(order, cats, -id) %>%
filter(!is.na(cats)) %>%
mutate(order = tidyr::extract_numeric(order)) %>%
left_join(ids_names, by=c("cats"="ids"))
# Tag frequency table, most common first.
xtab_tag <- tag_count %>%
count(names) %>%
arrange(desc(n))
# Adjacency Matrix
#Now we build the matrix by registering whether a tag shows up for a specific petition and then creating the adjacency matrix to represent co-occurences of tags, which is what we need for the chord diagram.
# list of tags
tags <- sort(unique(ids_names$names))
# matrix to hold counts
# NOTE(review): mat is allocated with nrow(tag_count) rows, but the fill loop
# below compares against columns of p_id_nam, which has nrow(p) rows; the
# assignment then relies on recycling when those lengths differ -- verify the
# two row counts line up before trusting the counts.
mat <- matrix(0,nrow=nrow(tag_count),ncol=length(tags))
colnames(mat) <- tags
# Chord Diagram and choices
# I’ll save you the suspense regarding what choices I had to make in order to get chorddiag to work for me.
# get columns with tags from dataframe
# Back-fill issues1.name from the single-tag issues.name column, then replace
# remaining NAs with "" so the equality tests below never yield NA.
# NOTE(review): mutate_each()/funs() are deprecated; across() is the modern form.
p_id_nam <- p %>%
select(contains(".name")) %>%
mutate(issues1.name= ifelse(is.na(issues1.name), issues.name, issues1.name)) %>%
mutate_each(funs(ifelse(is.na(.), "", .)), starts_with("issues"))
# make matrix
# mat[, i] accumulates, per row, how many of the three tag columns equal tags[i].
for (i in seq_along(tags)) {
for (j in c(1,2,3)){ # 1,2,3 are columns I want
mat[,i] <- as.numeric(tags[i]==p_id_nam[[j]]) + mat[,i]
# NOTE(review): `is.na(mat[,i]) <- 0` assigns NA at position 0, i.e. a no-op.
# If the intent was "replace NAs with 0", it would need
# mat[is.na(mat[, i]), i] <- 0 -- though with the "" replacement above the
# comparison cannot produce NA anyway. Confirm before changing.
is.na(mat[,i]) <- 0
}
}
# Co-occurrence (tag x tag) matrix; the diagonal holds per-tag totals.
adjmat <- t(mat) %*% mat
# set number of colors needed
colorCount <- length(tags)
# makes function to create palette
getPalette <- grDevices::colorRampPalette(RColorBrewer::brewer.pal(9, "Set1"))
# manage use of diagonal cells in adj_mat
# Zero out the diagonal (and optionally the lower triangle) of a square matrix.
#
# mat      - numeric matrix (co-occurrence / adjacency counts)
# rm.lower - when TRUE (default), also blank the lower triangle so each
#            pair of groups is represented only once
# ...      - ignored; retained for call-site compatibility
#
# Returns the modified copy of `mat` (the input is not changed in place).
remove_diags <- function(mat, rm.lower = TRUE, ...) {
  cleaned <- mat
  diag(cleaned) <- 0
  if (isTRUE(rm.lower)) {
    cleaned[lower.tri(cleaned)] <- 0
  }
  cleaned
}
# ## order plot layering by smallest to largest so larges are on top
# remove_diags(adjmat, FALSE) zeroes only the diagonal, so each group is
# ranked by its total co-occurrence weight with all other tags.
ord <- order(rowSums(remove_diags(adjmat, FALSE)))
# Finally:
# with the diags means there's a return
# Draw the interactive chord diagram with groups ordered as computed above;
# the full adjmat (diagonal intact) is passed so self-links render.
chorddiag::chorddiag(adjmat[ord, ord], margin = 150, showTicks =FALSE
, groupnameFontsize = 8 # have to shrink font for web viewing
, groupnamePadding = 5
, groupThickness = .05
, chordedgeColor = "gray90"
, groupColors = getPalette(colorCount)
)
|
/d3-R_Chord_Diagram_WH.R
|
no_license
|
mutlay/d3-R_Chord_Diagram_White-House
|
R
| false
| false
| 3,534
|
r
|
# Mehaz: http://data-steve.github.io/d3-r-chord-diagram-of-white-house-petitions-data/
# The OpenData movement has the White House producing and releasing some novel datasets. One of them is the We The People petition site. I learned about this from Proofreader.com’s interesting python work using that data. From the petition site, you can see an interesting gallery of work done in different language and for different media/platform. One such example is yoni’s r pkg. In the spirit of the open data goodness, I thought I’d play around as well with this d3 chord diagram.
# Downloading data from github repo
# if (!require("pacman")) install.packages("pacman")
pacman::p_load_current_gh("mattflor/chorddiag")
pacman::p_load(dplyr, magrittr, ggplot2, tidyr, curl)
curl::curl_download(
"https://github.com/yoni/r_we_the_people/blob/master/data/petitions.RData?raw=true"
, destfile="~/R/devdat/d3-R_Chord_Diagram_White-House/petitions.RData")
# NOTE(review): the file is downloaded to an absolute path under ~/R/devdat,
# but load() below reads from the current working directory -- confirm the
# working directory is that folder, otherwise load() fails.
load("petitions.RData")
# Cleaning / Set up
# recover tag names and ids
p <- petitions # save some typing
# Stack the three (id, name) tag-column pairs into one lookup table of
# unique, non-missing tag id/name pairs.
ids_names <- rbind(
p[, c("issues1.id", "issues1.name")] %>% setNames(c("ids", "names"))
, p[, c("issues2.id", "issues2.name")] %>% setNames(c("ids", "names"))
, p[, c("issues3.id", "issues3.name")]%>% setNames(c("ids", "names"))
) %>%
unique() %>% na.omit()
# get only petitions with multi-tags
# Long format: one row per (petition id, tag), joined back to tag names.
# NOTE(review): tidyr::extract_numeric() is deprecated in current tidyr;
# readr::parse_number() is the drop-in replacement.
tag_count <- p %>%
select(id, issues1.id, issues2.id, issues3.id) %>%
tidyr::gather(order, cats, -id) %>%
filter(!is.na(cats)) %>%
mutate(order = tidyr::extract_numeric(order)) %>%
left_join(ids_names, by=c("cats"="ids"))
# Tag frequency table, most common first.
xtab_tag <- tag_count %>%
count(names) %>%
arrange(desc(n))
# Adjacency Matrix
#Now we build the matrix by registering whether a tag shows up for a specific petition and then creating the adjacency matrix to represent co-occurences of tags, which is what we need for the chord diagram.
# list of tags
tags <- sort(unique(ids_names$names))
# matrix to hold counts
# NOTE(review): mat is allocated with nrow(tag_count) rows, but the fill loop
# below compares against columns of p_id_nam, which has nrow(p) rows; the
# assignment then relies on recycling when those lengths differ -- verify the
# two row counts line up before trusting the counts.
mat <- matrix(0,nrow=nrow(tag_count),ncol=length(tags))
colnames(mat) <- tags
# Chord Diagram and choices
# I’ll save you the suspense regarding what choices I had to make in order to get chorddiag to work for me.
# get columns with tags from dataframe
# Back-fill issues1.name from the single-tag issues.name column, then replace
# remaining NAs with "" so the equality tests below never yield NA.
# NOTE(review): mutate_each()/funs() are deprecated; across() is the modern form.
p_id_nam <- p %>%
select(contains(".name")) %>%
mutate(issues1.name= ifelse(is.na(issues1.name), issues.name, issues1.name)) %>%
mutate_each(funs(ifelse(is.na(.), "", .)), starts_with("issues"))
# make matrix
# mat[, i] accumulates, per row, how many of the three tag columns equal tags[i].
for (i in seq_along(tags)) {
for (j in c(1,2,3)){ # 1,2,3 are columns I want
mat[,i] <- as.numeric(tags[i]==p_id_nam[[j]]) + mat[,i]
# NOTE(review): `is.na(mat[,i]) <- 0` assigns NA at position 0, i.e. a no-op.
# If the intent was "replace NAs with 0", it would need
# mat[is.na(mat[, i]), i] <- 0 -- though with the "" replacement above the
# comparison cannot produce NA anyway. Confirm before changing.
is.na(mat[,i]) <- 0
}
}
# Co-occurrence (tag x tag) matrix; the diagonal holds per-tag totals.
adjmat <- t(mat) %*% mat
# set number of colors needed
colorCount <- length(tags)
# makes function to create palette
getPalette <- grDevices::colorRampPalette(RColorBrewer::brewer.pal(9, "Set1"))
# manage use of diagonal cells in adj_mat
# Zero out the diagonal (and optionally the lower triangle) of a square matrix.
#
# mat      - numeric matrix (co-occurrence / adjacency counts)
# rm.lower - when TRUE (default), also blank the lower triangle so each
#            pair of groups is represented only once
# ...      - ignored; retained for call-site compatibility
#
# Returns the modified copy of `mat` (the input is not changed in place).
remove_diags <- function(mat, rm.lower = TRUE, ...) {
  cleaned <- mat
  diag(cleaned) <- 0
  if (isTRUE(rm.lower)) {
    cleaned[lower.tri(cleaned)] <- 0
  }
  cleaned
}
# ## order plot layering by smallest to largest so larges are on top
# remove_diags(adjmat, FALSE) zeroes only the diagonal, so each group is
# ranked by its total co-occurrence weight with all other tags.
ord <- order(rowSums(remove_diags(adjmat, FALSE)))
# Finally:
# with the diags means there's a return
# Draw the interactive chord diagram with groups ordered as computed above;
# the full adjmat (diagonal intact) is passed so self-links render.
chorddiag::chorddiag(adjmat[ord, ord], margin = 150, showTicks =FALSE
, groupnameFontsize = 8 # have to shrink font for web viewing
, groupnamePadding = 5
, groupThickness = .05
, chordedgeColor = "gray90"
, groupColors = getPalette(colorCount)
)
|
# Build metabolite sets around genes using a Recon metabolic network model.
# Loads a Recon reconstruction (v2.0 or v2.2), then, for each gene result
# file produced by the PathwayCommons step, derives its local metabolic
# environment via findMetabolicEnvironmentLocal().
reconVersion=2.0
path="./results"
src="./src"
source(paste(src, "Crossomics/sourceDir.R", sep="/"))
sourceDir(paste(src, "Crossomics/build_mets_set", sep="/"))
sourceDir(paste(src, "Crossomics", sep="/"))
# NOTE(review): comparing doubles with == works here only because both sides
# come from the same literal; isTRUE(all.equal(...)) would be safer.
if (reconVersion==2.2){
load(paste(src, "Recon_2.2_biomodels.RData", sep="/"))
load(paste(src, "recon2chebi_MODEL1603150001.RData", sep="/"))
rownames(recon2chebi)=recon2chebi[,1]
# assumes the loaded 2.2 model is an S4 object exposing a met_id slot -- TODO confirm
recon2chebi=recon2chebi[model@met_id,]
} else if (reconVersion==2.0){
load(paste(src, "Recon2.RData", sep="/"))
model=recon2$modelR204[,,1]
recon2chebi=NULL
}
# Report the stoichiometric matrix dimensions (message() pastes the numbers).
message(dim(model$S))
# Output folders for expansion steps 0-3; showWarnings = FALSE makes the
# calls idempotent. NOTE(review): paste(getwd(), src, "../results/...", sep="/")
# yields paths like "<cwd>/./src/../results/mss_0" -- confirm this resolves to
# the intended directory on the target machine.
dir.create(paste(getwd(),src, "../results/mss_0", sep="/"),showWarnings = FALSE)
dir.create(paste(getwd(),src, "../results/mss_1", sep="/"),showWarnings = FALSE)
dir.create(paste(getwd(),src, "../results/mss_2", sep="/"),showWarnings = FALSE)
dir.create(paste(getwd(),src, "../results/mss_3", sep="/"),showWarnings = FALSE)
files = list.files("./results/mss_PathwayCommons")
# HEXA i=1278
# grep("HEXA", files)
# for (i in 1:length(files)){
# NOTE(review): only the first 10 files are processed -- this looks like a
# debugging leftover; the commented-out loop above covers all files.
for (i in 1:10){
load(paste("./results/mss_PathwayCommons", files[i], sep="/"))
if (!is.null(rval)){
# Gene symbol = file name up to the first dot (e.g. "HEXA.RData" -> "HEXA").
hgnc = unlist(strsplit(files[i], split = ".", fixed = TRUE))[1]
# outdir=paste(path, "mss_0", sep="/")
# step = 0
findMetabolicEnvironmentLocal(hgnc, model, recon2chebi, src, rval)
}
}
#
# Ad-hoc export: write one gene/step result to an Excel workbook.
library("XLConnect")
hgnc="NGLY1"
step="3"
load(paste0("./results/metabolite_sets_step_0,1,2,3_1.1_filter_1.1/mss_", step, "/", hgnc, ".RData"))
genExcelFileShort(as.data.frame(result_mets_3), paste0("./results/MetsSets/", hgnc, "_step_", step, ".xls"))
#
|
/src_Metabolite_Set_Creation/old : unnecessary/findSurroundingsLocal.R
|
permissive
|
UMCUGenetics/Crossomics
|
R
| false
| false
| 1,690
|
r
|
# Build metabolite sets around genes using a Recon metabolic network model.
# Loads a Recon reconstruction (v2.0 or v2.2), then, for each gene result
# file produced by the PathwayCommons step, derives its local metabolic
# environment via findMetabolicEnvironmentLocal().
reconVersion=2.0
path="./results"
src="./src"
source(paste(src, "Crossomics/sourceDir.R", sep="/"))
sourceDir(paste(src, "Crossomics/build_mets_set", sep="/"))
sourceDir(paste(src, "Crossomics", sep="/"))
# NOTE(review): comparing doubles with == works here only because both sides
# come from the same literal; isTRUE(all.equal(...)) would be safer.
if (reconVersion==2.2){
load(paste(src, "Recon_2.2_biomodels.RData", sep="/"))
load(paste(src, "recon2chebi_MODEL1603150001.RData", sep="/"))
rownames(recon2chebi)=recon2chebi[,1]
# assumes the loaded 2.2 model is an S4 object exposing a met_id slot -- TODO confirm
recon2chebi=recon2chebi[model@met_id,]
} else if (reconVersion==2.0){
load(paste(src, "Recon2.RData", sep="/"))
model=recon2$modelR204[,,1]
recon2chebi=NULL
}
# Report the stoichiometric matrix dimensions (message() pastes the numbers).
message(dim(model$S))
# Output folders for expansion steps 0-3; showWarnings = FALSE makes the
# calls idempotent. NOTE(review): paste(getwd(), src, "../results/...", sep="/")
# yields paths like "<cwd>/./src/../results/mss_0" -- confirm this resolves to
# the intended directory on the target machine.
dir.create(paste(getwd(),src, "../results/mss_0", sep="/"),showWarnings = FALSE)
dir.create(paste(getwd(),src, "../results/mss_1", sep="/"),showWarnings = FALSE)
dir.create(paste(getwd(),src, "../results/mss_2", sep="/"),showWarnings = FALSE)
dir.create(paste(getwd(),src, "../results/mss_3", sep="/"),showWarnings = FALSE)
files = list.files("./results/mss_PathwayCommons")
# HEXA i=1278
# grep("HEXA", files)
# for (i in 1:length(files)){
# NOTE(review): only the first 10 files are processed -- this looks like a
# debugging leftover; the commented-out loop above covers all files.
for (i in 1:10){
load(paste("./results/mss_PathwayCommons", files[i], sep="/"))
if (!is.null(rval)){
# Gene symbol = file name up to the first dot (e.g. "HEXA.RData" -> "HEXA").
hgnc = unlist(strsplit(files[i], split = ".", fixed = TRUE))[1]
# outdir=paste(path, "mss_0", sep="/")
# step = 0
findMetabolicEnvironmentLocal(hgnc, model, recon2chebi, src, rval)
}
}
#
# Ad-hoc export: write one gene/step result to an Excel workbook.
library("XLConnect")
hgnc="NGLY1"
step="3"
load(paste0("./results/metabolite_sets_step_0,1,2,3_1.1_filter_1.1/mss_", step, "/", hgnc, ".RData"))
genExcelFileShort(as.data.frame(result_mets_3), paste0("./results/MetsSets/", hgnc, "_step_", step, ".xls"))
#
|
## pair of functions that cache the inverse of a matrix
## creates a special "matrix" object that can cache its inverse
## Build a cache-enabled wrapper around a matrix.
##
## Returns a list of four closures sharing one environment:
##   set(y)      -- store a new matrix and invalidate the cached inverse
##   get()       -- return the stored matrix
##   setinv(inv) -- record a computed inverse
##   getinv()    -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # stored matrix changed; drop the stale inverse
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## computes the inverse of the special "matrix" returned by
## makeCacheMatrix, or retrieve the inverse from the cache
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it with solve() on first use and serving the cached copy
## (with a message) on every call after that.  Extra arguments in `...`
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    fresh <- solve(x$get(), ...)
    x$setinv(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
|
/cachematrix.R
|
no_license
|
dk245g/ProgrammingAssignment2
|
R
| false
| false
| 800
|
r
|
## pair of functions that cache the inverse of a matrix
## creates a special "matrix" object that can cache its inverse
## Build a cache-enabled wrapper around a matrix.
##
## Returns a list of four closures sharing one environment:
##   set(y)      -- store a new matrix and invalidate the cached inverse
##   get()       -- return the stored matrix
##   setinv(inv) -- record a computed inverse
##   getinv()    -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # stored matrix changed; drop the stale inverse
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## computes the inverse of the special "matrix" returned by
## makeCacheMatrix, or retrieve the inverse from the cache
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it with solve() on first use and serving the cached copy
## (with a message) on every call after that.  Extra arguments in `...`
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    fresh <- solve(x$get(), ...)
    x$setinv(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
|
#' Downloads literature from SMML Literature DB
#'
#' Searches and downloads literature entries from the SMML Literature database
#'
#' @param x a vector of class \code{character} containing fungal or plant species names
#' @param spec_type a character string specifying the type of \code{spec}. Can be either
#' \code{"plant"} or \code{"fungus"}
#' @param process logical, if \code{TRUE} downloading and extraction process is displayed
#'
#' @return an object of class \code{list} with literature entries for \code{x}
#'
#' @author Franz-Sebastian Krah
#'
#' @examples
#' \dontrun{
#' x <- "Polyporus badius"
#' lit <- literature(x, process = TRUE, spec_type = "fungus")
#' lit
#' }
literature <- function(x, spec_type = c("plant", "fungus"), process = TRUE)
{
  # Fail fast when offline or when the source database is unreachable.
  # (The original `!url.exists(...) == TRUE` parsed as `!(... == TRUE)`
  # because `==` binds tighter than `!`; this is the same test, stated
  # plainly.)
  if (!url.exists("r-project.org")) stop( "Not connected to the internet. Please create a stable connection and try again." )
  if (!is.character(getURL("http://nt.ars-grin.gov/fungaldatabases/index.cfm"))) stop(" Database is not available : http://nt.ars-grin.gov/fungaldatabases/index.cfm")
  expect_match(spec_type, ("fungus|plant"))
  if (length(grep("\\sx\\s", x)) > 0) { stop(" no hybrids allowed ") }
  if (length(spec_type) == 2) stop(" 'spec_type' not specified. Please choose one of 'plant', 'fungus'")
  # Split binomials into genus/epithet; names may be separated by a space or
  # an underscore.  (Replaces the original scalar ifelse() that was used
  # purely for its assignment side effects -- an anti-pattern.)
  if (length(grep(" ", x)) > 0) {
    tax <- strsplit(x, " ")
  } else {
    tax <- strsplit(x, "_")
  }
  ## I. PARSE DATA ##
  ######################
  if (process == TRUE) { message("... retrieving data ... for:") }
  p <- foreach(i = seq_along(tax)) %do% getHF(tax[[i]], process = process, spec_type = spec_type)
  ## II. DATA CONDITIONS ##
  #########################
  taxa <- lapply(tax, function(x){paste(as.character(x[1]), as.character(x[2]))})
  co <- lapply(p, getCOND)
  ## IV. DATA EXTRACTION ##
  #########################
  i <- NULL  # silences R CMD check note about foreach's loop variable
  l <- foreach(i = seq_along(p)) %do%
  {
    # First line of the literature section in the parsed page.
    l.st <- grep("The Literature database has" , p[[i]])
    # Last line: either the start of the Specimens section or the
    # "no records" placeholder.  (Again replaces an ifelse() with
    # assignment side effects by an explicit conditional.)
    l.sp <- grep("The Specimens database has", p[[i]])
    if (length(l.sp) == 0) {
      l.sp <- grep(paste("There are no records for", taxa[[i]], "in the Specimens database"), p[[i]])
    }
    lit <- p[[i]][(l.st + 1):(l.sp - 1)]
    # Drop blank lines, if any; the (filtered) vector is the foreach result.
    empty <- which(nchar(lit) == 0)
    if (length(empty) > 0) lit[-empty] else lit
  }
  return(l)
}
|
/R/literature.R
|
no_license
|
nsm120/rusda
|
R
| false
| false
| 2,301
|
r
|
#' Downloads literature from SMML Literature DB
#'
#' Searches and downloads literature entries from the SMML Literature database
#'
#' @param x a vector of class \code{character} containing fungal or plant species names
#' @param spec_type a character string specifying the type of \code{spec}. Can be either
#' \code{"plant"} or \code{"fungus"}
#' @param process logical, if \code{TRUE} downloading and extraction process is displayed
#'
#' @return an object of class \code{list} with literature entries for \code{x}
#'
#' @author Franz-Sebastian Krah
#'
#' @examples
#' \dontrun{
#' x <- "Polyporus badius"
#' lit <- literature(x, process = TRUE, spec_type = "fungus")
#' lit
#' }
literature <- function(x, spec_type = c("plant", "fungus"), process = TRUE)
{
  # Fail fast when offline or when the source database is unreachable.
  # (The original `!url.exists(...) == TRUE` parsed as `!(... == TRUE)`
  # because `==` binds tighter than `!`; this is the same test, stated
  # plainly.)
  if (!url.exists("r-project.org")) stop( "Not connected to the internet. Please create a stable connection and try again." )
  if (!is.character(getURL("http://nt.ars-grin.gov/fungaldatabases/index.cfm"))) stop(" Database is not available : http://nt.ars-grin.gov/fungaldatabases/index.cfm")
  expect_match(spec_type, ("fungus|plant"))
  if (length(grep("\\sx\\s", x)) > 0) { stop(" no hybrids allowed ") }
  if (length(spec_type) == 2) stop(" 'spec_type' not specified. Please choose one of 'plant', 'fungus'")
  # Split binomials into genus/epithet; names may be separated by a space or
  # an underscore.  (Replaces the original scalar ifelse() that was used
  # purely for its assignment side effects -- an anti-pattern.)
  if (length(grep(" ", x)) > 0) {
    tax <- strsplit(x, " ")
  } else {
    tax <- strsplit(x, "_")
  }
  ## I. PARSE DATA ##
  ######################
  if (process == TRUE) { message("... retrieving data ... for:") }
  p <- foreach(i = seq_along(tax)) %do% getHF(tax[[i]], process = process, spec_type = spec_type)
  ## II. DATA CONDITIONS ##
  #########################
  taxa <- lapply(tax, function(x){paste(as.character(x[1]), as.character(x[2]))})
  co <- lapply(p, getCOND)
  ## IV. DATA EXTRACTION ##
  #########################
  i <- NULL  # silences R CMD check note about foreach's loop variable
  l <- foreach(i = seq_along(p)) %do%
  {
    # First line of the literature section in the parsed page.
    l.st <- grep("The Literature database has" , p[[i]])
    # Last line: either the start of the Specimens section or the
    # "no records" placeholder.  (Again replaces an ifelse() with
    # assignment side effects by an explicit conditional.)
    l.sp <- grep("The Specimens database has", p[[i]])
    if (length(l.sp) == 0) {
      l.sp <- grep(paste("There are no records for", taxa[[i]], "in the Specimens database"), p[[i]])
    }
    lit <- p[[i]][(l.st + 1):(l.sp - 1)]
    # Drop blank lines, if any; the (filtered) vector is the foreach result.
    empty <- which(nchar(lit) == 0)
    if (length(empty) > 0) lit[-empty] else lit
  }
  return(l)
}
|
#cost-sensitive SVM for HAE project
#Created on 18/11/2015
#Devloped by Hui Jin
# Prepares the HAE / non-HAE cohort data for cost-sensitive SVM modelling:
# loads the raw .RData files, dummy-codes gender/region, and writes the
# combined model matrices to CSV for the downstream modelling steps.
library(dplyr)
library(klaR)
library(ROCR)
library(PRROC)
#constance
# iter = number of resampling iterations; kfold = CV folds (used downstream)
inpath <- 'D:/Shire_project/02_data'
outpath <- 'D:/Shire_project/04_result'
iter <- 20
kfold <- 5
#1. reading the data
load(paste(inpath, "/dat_hae_1111_rf.RData", sep=''))
load(paste(inpath, "/dat_nonhae_1111_rf.RData", sep=''))
hae_data <- dat_hae_1111_rf
nonhae_data <- dat_nonhae_1111_rf
names(hae_data) <- tolower(names(hae_data))
names(nonhae_data) <- tolower(names(nonhae_data))
#1. tidy the data
# NOTE(review): the two tolower() calls below repeat the two above; harmless
# (idempotent) but one pair can be removed.
names(hae_data) <- tolower(names(hae_data))
names(nonhae_data) <- tolower(names(nonhae_data))
#add the iteration number for non-hae data
# rep(1:20, each = 10, 1233): the unnamed 1233 is matched positionally to
# `times`, giving each of 1..20 repeated 10x, with that 200-long pattern
# repeated 1233 times (246,600 values) -- this must equal nrow(nonhae_data);
# TODO confirm.
iters <- rep(1:20, each=10, 1233)
nonhae_data <- cbind(nonhae_data, iters)
#create dummy variables for region
# contrasts = F requests full (one-hot) dummy coding for each factor; [, -1]
# then drops the model matrix's intercept column.
gen_reg_haecol <- data.frame(gender=as.factor(hae_data$gender), region=as.factor(hae_data$region))
gen_reg_hae <- model.matrix(~gender + region, gen_reg_haecol, contrasts.arg = lapply(gen_reg_haecol, contrasts, contrasts=F))[, -1]
gen_reg_hae <- data.frame(gen_reg_hae)
gen_reg_nonhaecol <- data.frame(gender=as.factor(nonhae_data$gender), region=as.factor(nonhae_data$region))
gen_reg_nonhae <- model.matrix(~gender + region, gen_reg_nonhaecol, contrasts.arg = lapply(gen_reg_nonhaecol, contrasts, contrasts=F))[, -1]
gen_reg_nonhae <- data.frame(gen_reg_nonhae)
#combine to have the total model data
# Drop identifiers and the raw gender/region columns, append the dummy
# columns, and standardise the outcome column name to "response".
hae_data2 <- hae_data %>%
select(-c(patient_id, region, gender)) %>%
bind_cols(gen_reg_hae) %>%
rename(response = hae)
nonhae_data2 <- nonhae_data %>%
select(-c(hae_patient, region, gender)) %>%
bind_cols(gen_reg_nonhae) %>%
rename(response = hae)
#save the model data for the following steps
write.csv(hae_data2, paste(inpath, '/hae_model_data.csv', sep=''), quote = T, row.names = F)
write.csv(nonhae_data2, paste(inpath, '/nonhae_model_data.csv', sep=''), quote = T, row.names = F)
#reading in the model data
hae_model_data <- read.csv(paste(inpath, '/hae_model_data.csv', sep=''), header = T)
nonhae_model_data <- read.csv(paste(inpath, '/nonhae_model_data.csv', sep=''), header = T)
|
/Cost_sens_SVM_Hui.R
|
no_license
|
jzhao0802/ShireHAE
|
R
| false
| false
| 2,184
|
r
|
#cost-sensitive SVM for HAE project
#Created on 18/11/2015
#Devloped by Hui Jin
# Prepares the HAE / non-HAE cohort data for cost-sensitive SVM modelling:
# loads the raw .RData files, dummy-codes gender/region, and writes the
# combined model matrices to CSV for the downstream modelling steps.
library(dplyr)
library(klaR)
library(ROCR)
library(PRROC)
#constance
# iter = number of resampling iterations; kfold = CV folds (used downstream)
inpath <- 'D:/Shire_project/02_data'
outpath <- 'D:/Shire_project/04_result'
iter <- 20
kfold <- 5
#1. reading the data
load(paste(inpath, "/dat_hae_1111_rf.RData", sep=''))
load(paste(inpath, "/dat_nonhae_1111_rf.RData", sep=''))
hae_data <- dat_hae_1111_rf
nonhae_data <- dat_nonhae_1111_rf
names(hae_data) <- tolower(names(hae_data))
names(nonhae_data) <- tolower(names(nonhae_data))
#1. tidy the data
# NOTE(review): the two tolower() calls below repeat the two above; harmless
# (idempotent) but one pair can be removed.
names(hae_data) <- tolower(names(hae_data))
names(nonhae_data) <- tolower(names(nonhae_data))
#add the iteration number for non-hae data
# rep(1:20, each = 10, 1233): the unnamed 1233 is matched positionally to
# `times`, giving each of 1..20 repeated 10x, with that 200-long pattern
# repeated 1233 times (246,600 values) -- this must equal nrow(nonhae_data);
# TODO confirm.
iters <- rep(1:20, each=10, 1233)
nonhae_data <- cbind(nonhae_data, iters)
#create dummy variables for region
# contrasts = F requests full (one-hot) dummy coding for each factor; [, -1]
# then drops the model matrix's intercept column.
gen_reg_haecol <- data.frame(gender=as.factor(hae_data$gender), region=as.factor(hae_data$region))
gen_reg_hae <- model.matrix(~gender + region, gen_reg_haecol, contrasts.arg = lapply(gen_reg_haecol, contrasts, contrasts=F))[, -1]
gen_reg_hae <- data.frame(gen_reg_hae)
gen_reg_nonhaecol <- data.frame(gender=as.factor(nonhae_data$gender), region=as.factor(nonhae_data$region))
gen_reg_nonhae <- model.matrix(~gender + region, gen_reg_nonhaecol, contrasts.arg = lapply(gen_reg_nonhaecol, contrasts, contrasts=F))[, -1]
gen_reg_nonhae <- data.frame(gen_reg_nonhae)
#combine to have the total model data
# Drop identifiers and the raw gender/region columns, append the dummy
# columns, and standardise the outcome column name to "response".
hae_data2 <- hae_data %>%
select(-c(patient_id, region, gender)) %>%
bind_cols(gen_reg_hae) %>%
rename(response = hae)
nonhae_data2 <- nonhae_data %>%
select(-c(hae_patient, region, gender)) %>%
bind_cols(gen_reg_nonhae) %>%
rename(response = hae)
#save the model data for the following steps
write.csv(hae_data2, paste(inpath, '/hae_model_data.csv', sep=''), quote = T, row.names = F)
write.csv(nonhae_data2, paste(inpath, '/nonhae_model_data.csv', sep=''), quote = T, row.names = F)
#reading in the model data
hae_model_data <- read.csv(paste(inpath, '/hae_model_data.csv', sep=''), header = T)
nonhae_model_data <- read.csv(paste(inpath, '/nonhae_model_data.csv', sep=''), header = T)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.